| Instruction (string, lengths 362–7.83k) | output_code (string, lengths 1–945) |
|---|---|
Next line prediction: <|code_start|>
class FeatureRegistrationTransformerSpec(Specification):
@mock.patch('nimoy.ast_tools.features.FeatureBlockRuleEnforcer')
@mock.patch('nimoy.ast_tools.features.FeatureBlockTransformer')
def a_feature_was_added(self, feature_block_transformer, feature_block_rule_enforcer):
with setup:
<|code_end|>
. Use current file imports:
(import ast
from unittest import mock
from nimoy.ast_tools.ast_metadata import SpecMetadata
from nimoy.ast_tools.features import FeatureRegistrationTransformer
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification)
and context including class names, function names, or small code snippets from other files:
# Path: nimoy/ast_tools/features.py
# class FeatureRegistrationTransformer(ast.NodeTransformer):
# def __init__(self, runner_context: RunnerContext, spec_location, spec_metadata: SpecMetadata) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.spec_location = spec_location
# self.spec_metadata = spec_metadata
#
# def visit_FunctionDef(self, feature_node):
# if FeatureRegistrationTransformer._skip_feature(feature_node):
# return feature_node
#
# feature_name = feature_node.name
# if not feature_name.startswith('_'):
#
# feature_name_specified = hasattr(self.spec_location, 'feature_name')
#
# if not feature_name_specified or (
# feature_name_specified and self.spec_location.feature_name == feature_name):
# self.spec_metadata.add_feature(feature_name)
# FeatureBlockTransformer(self.runner_context, self.spec_metadata, feature_name).visit(feature_node)
# FeatureBlockRuleEnforcer(self.spec_metadata, feature_name, feature_node).enforce_tail_end_rules()
#
# feature_variables = self.spec_metadata.feature_variables.get(feature_name)
# if feature_variables:
# existing_arg_names = [existing_arg.arg for existing_arg in feature_node.args.args]
#
# for feature_variable in feature_variables:
# if feature_variable in existing_arg_names:
# continue
# feature_node.args.args.append(_ast.arg(arg=feature_variable))
# feature_node.args.defaults.append(ast.NameConstant(value=None))
#
# if self._feature_has_a_where_function(feature_name):
# self._remove_where_function_from_node(feature_name, feature_node)
# return feature_node
#
# @staticmethod
# def _remove_where_function_from_node(feature_name, feature_node):
# where_function = FeatureRegistrationTransformer._locate_where_function_within_feature(feature_name,
# feature_node)
# feature_node.body.remove(where_function)
#
# def _feature_has_a_where_function(self, feature_name):
# return self.spec_metadata.where_functions.get(feature_name)
#
# @staticmethod
# def _locate_where_function_within_feature(feature_name, feature_node):
# def _is_a_where_function(body_element):
# return hasattr(body_element, 'name') and body_element.name == feature_name + '_where'
#
# return next(body_element for body_element in feature_node.body if _is_a_where_function(body_element))
#
# @staticmethod
# def _skip_feature(feature_node):
# decorators = feature_node.decorator_list
# return any((hasattr(decorator, 'attr') and decorator.attr == 'skip') for decorator in decorators)
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | runner_context = RunnerContext() |
Predict the next line after this snippet: <|code_start|>
class FeatureRegistrationTransformerSpec(Specification):
@mock.patch('nimoy.ast_tools.features.FeatureBlockRuleEnforcer')
@mock.patch('nimoy.ast_tools.features.FeatureBlockTransformer')
def a_feature_was_added(self, feature_block_transformer, feature_block_rule_enforcer):
with setup:
runner_context = RunnerContext()
module_definition = """
class JSpec:
def test_jim(self):
pass
def _jim(self):
pass
"""
node = ast.parse(module_definition, mode='exec')
metadata = SpecMetadata('jim')
with when:
<|code_end|>
using the current file's imports:
import ast
from unittest import mock
from nimoy.ast_tools.ast_metadata import SpecMetadata
from nimoy.ast_tools.features import FeatureRegistrationTransformer
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification
and any relevant context from other files:
# Path: nimoy/ast_tools/features.py
# class FeatureRegistrationTransformer(ast.NodeTransformer):
# def __init__(self, runner_context: RunnerContext, spec_location, spec_metadata: SpecMetadata) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.spec_location = spec_location
# self.spec_metadata = spec_metadata
#
# def visit_FunctionDef(self, feature_node):
# if FeatureRegistrationTransformer._skip_feature(feature_node):
# return feature_node
#
# feature_name = feature_node.name
# if not feature_name.startswith('_'):
#
# feature_name_specified = hasattr(self.spec_location, 'feature_name')
#
# if not feature_name_specified or (
# feature_name_specified and self.spec_location.feature_name == feature_name):
# self.spec_metadata.add_feature(feature_name)
# FeatureBlockTransformer(self.runner_context, self.spec_metadata, feature_name).visit(feature_node)
# FeatureBlockRuleEnforcer(self.spec_metadata, feature_name, feature_node).enforce_tail_end_rules()
#
# feature_variables = self.spec_metadata.feature_variables.get(feature_name)
# if feature_variables:
# existing_arg_names = [existing_arg.arg for existing_arg in feature_node.args.args]
#
# for feature_variable in feature_variables:
# if feature_variable in existing_arg_names:
# continue
# feature_node.args.args.append(_ast.arg(arg=feature_variable))
# feature_node.args.defaults.append(ast.NameConstant(value=None))
#
# if self._feature_has_a_where_function(feature_name):
# self._remove_where_function_from_node(feature_name, feature_node)
# return feature_node
#
# @staticmethod
# def _remove_where_function_from_node(feature_name, feature_node):
# where_function = FeatureRegistrationTransformer._locate_where_function_within_feature(feature_name,
# feature_node)
# feature_node.body.remove(where_function)
#
# def _feature_has_a_where_function(self, feature_name):
# return self.spec_metadata.where_functions.get(feature_name)
#
# @staticmethod
# def _locate_where_function_within_feature(feature_name, feature_node):
# def _is_a_where_function(body_element):
# return hasattr(body_element, 'name') and body_element.name == feature_name + '_where'
#
# return next(body_element for body_element in feature_node.body if _is_a_where_function(body_element))
#
# @staticmethod
# def _skip_feature(feature_node):
# decorators = feature_node.decorator_list
# return any((hasattr(decorator, 'attr') and decorator.attr == 'skip') for decorator in decorators)
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | FeatureRegistrationTransformer(runner_context, Location('some_spec.py'), metadata).visit(node) |
Next line prediction: <|code_start|>
class SpecificationLoaderSpec(Specification):
def load(self):
with given:
ast_chain = mock.Mock()
metadata = SpecMetadata('Jimbob')
ast_chain.apply.return_value = [metadata]
with when:
<|code_end|>
. Use current file imports:
(from unittest import mock
from nimoy.ast_tools.ast_metadata import SpecMetadata
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.runner.spec_loader import SpecLoader
from nimoy.specification import Specification)
and context including class names, function names, or small code snippets from other files:
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/runner/spec_loader.py
# class SpecLoader:
# def __init__(self, runner_context: RunnerContext, ast_chain) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.ast_chain = ast_chain
#
# def load(self, spec_locations_and_contents):
# def specs():
# for spec_location, text in spec_locations_and_contents:
# node = ast.parse(text, mode='exec')
#
# metadata_of_specs_from_node = self.ast_chain.apply(self.runner_context, spec_location, node)
# ast.fix_missing_locations(node)
# compiled = compile(node, spec_location.spec_path, 'exec')
#
# spec_namespace = {}
# exec(compiled, spec_namespace)
#
# for spec_metadata in metadata_of_specs_from_node:
# spec_metadata.set_owning_module(spec_namespace[spec_metadata.name])
# yield spec_metadata
#
# return specs()
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | returned_spec_metadata = SpecLoader(RunnerContext(), ast_chain).load( |
Next line prediction: <|code_start|>
class SpecificationLoaderSpec(Specification):
def load(self):
with given:
ast_chain = mock.Mock()
metadata = SpecMetadata('Jimbob')
ast_chain.apply.return_value = [metadata]
with when:
returned_spec_metadata = SpecLoader(RunnerContext(), ast_chain).load(
<|code_end|>
. Use current file imports:
(from unittest import mock
from nimoy.ast_tools.ast_metadata import SpecMetadata
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.runner.spec_loader import SpecLoader
from nimoy.specification import Specification)
and context including class names, function names, or small code snippets from other files:
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/runner/spec_loader.py
# class SpecLoader:
# def __init__(self, runner_context: RunnerContext, ast_chain) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.ast_chain = ast_chain
#
# def load(self, spec_locations_and_contents):
# def specs():
# for spec_location, text in spec_locations_and_contents:
# node = ast.parse(text, mode='exec')
#
# metadata_of_specs_from_node = self.ast_chain.apply(self.runner_context, spec_location, node)
# ast.fix_missing_locations(node)
# compiled = compile(node, spec_location.spec_path, 'exec')
#
# spec_namespace = {}
# exec(compiled, spec_namespace)
#
# for spec_metadata in metadata_of_specs_from_node:
# spec_metadata.set_owning_module(spec_namespace[spec_metadata.name])
# yield spec_metadata
#
# return specs()
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | [(Location('/path/to/spec.py'), 'class Jimbob:\n pass')]) |
Given snippet: <|code_start|>
class SpecificationLoaderSpec(Specification):
def load(self):
with given:
ast_chain = mock.Mock()
metadata = SpecMetadata('Jimbob')
ast_chain.apply.return_value = [metadata]
with when:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from unittest import mock
from nimoy.ast_tools.ast_metadata import SpecMetadata
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.runner.spec_loader import SpecLoader
from nimoy.specification import Specification
and context:
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/runner/spec_loader.py
# class SpecLoader:
# def __init__(self, runner_context: RunnerContext, ast_chain) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.ast_chain = ast_chain
#
# def load(self, spec_locations_and_contents):
# def specs():
# for spec_location, text in spec_locations_and_contents:
# node = ast.parse(text, mode='exec')
#
# metadata_of_specs_from_node = self.ast_chain.apply(self.runner_context, spec_location, node)
# ast.fix_missing_locations(node)
# compiled = compile(node, spec_location.spec_path, 'exec')
#
# spec_namespace = {}
# exec(compiled, spec_namespace)
#
# for spec_metadata in metadata_of_specs_from_node:
# spec_metadata.set_owning_module(spec_namespace[spec_metadata.name])
# yield spec_metadata
#
# return specs()
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
which might include code, classes, or functions. Output only the next line. | returned_spec_metadata = SpecLoader(RunnerContext(), ast_chain).load( |
Using the snippet: <|code_start|>
class MockAssertionsSpec(Specification):
def skipped_spec_is_not_reported(self):
with given:
spec_contents = """import unittest
from nimoy.specification import Specification
class JimbobSpec(Specification):
def test(self):
with expect:
1 == 1
@unittest.skip
def test2(self):
with expect:
1 == 2
"""
with when:
<|code_end|>
, determine the next line of code. You have imports:
import re
from nimoy.specification import Specification
from specs.nimoy.runner_helper import run_spec_contents
and context (class names, function names, or code) available:
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
#
# Path: specs/nimoy/runner_helper.py
# def run_spec_contents(spec_contents):
# str_io = StringIO()
# execution_framework = UnitTestExecutionFramework(stream=str_io)
# return SpecRunner._run_on_contents(RunnerContext(), execution_framework,
# [(Location('/fake/path.py'), spec_contents)])
. Output only the next line. | result = run_spec_contents(spec_contents) |
Predict the next line after this snippet: <|code_start|>
class MockAssertionsSpec(Specification):
def mock_was_never_called(self):
with setup:
the_mock = mock.Mock()
with when:
<|code_end|>
using the current file's imports:
from nimoy.assertions.mocks import MockAssertions
from nimoy.specification import Specification
from unittest import mock
and any relevant context from other files:
# Path: nimoy/assertions/mocks.py
# class MockAssertions:
# def assert_mock(self, number_of_invocations, mock, method, *args):
# if not hasattr(mock, method) and number_of_invocations > 0:
# raise AssertionError(method + " was never invoked") from None
#
# mocked_method = getattr(mock, method)
#
# if (number_of_invocations == 0) and (mocked_method.call_count == 0):
# return
#
# if (number_of_invocations >= 0) and (mocked_method.call_count != number_of_invocations):
# raise AssertionError(
# method + " was to be invoked " + str(number_of_invocations) + " times but was invoked " + str(
# mocked_method.call_count)) from None
#
# for value, expected_value in zip(mocked_method.call_args[0], args):
# if expected_value not in ('__nimoy_argument_wildcard', value):
# raise AssertionError(
# method + " expected argument " + str(expected_value) + " but was invoked with " + str(
# value)) from None
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | MockAssertions().assert_mock(4, the_mock, 'non_existing') |
Continue the code snippet: <|code_start|>
def run_spec_contents(spec_contents):
str_io = StringIO()
execution_framework = UnitTestExecutionFramework(stream=str_io)
<|code_end|>
. Use current file imports:
from io import StringIO
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.runner.unittest_execution_framework import UnitTestExecutionFramework
from nimoy.spec_runner import SpecRunner
and context (classes, functions, or code) from other files:
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/runner/unittest_execution_framework.py
# class UnitTestExecutionFramework:
# def __init__(self, stream=None):
# self.suite = unittest.TestSuite()
# self.stream = stream
#
# def append_test(self, test):
# self.suite.addTest(test)
#
# def run(self):
# return unittest.TextTestRunner(stream=self.stream, verbosity=2).run(self.suite)
#
# Path: nimoy/spec_runner.py
# class SpecRunner:
# def run(self, execution_framework):
# parser = argparse.ArgumentParser(prog='nimoy', description='Run a suite of Nimoy specs.')
# parser.add_argument('--power-assertions', metavar='P', type=bool, nargs=1, default=False,
# help="Should Nimoy evaluate comparison expressions using power assertions (beta)")
# parser.add_argument('specs', metavar='S', type=str, nargs='*',
# help="""A path to a spec file to execute or a directory to scan for spec files.
# When naming a file it is possible to select which spec or feature to run. some_spec.py[::SpecName[::feature_name]]
# """)
#
# args = parser.parse_args()
#
# spec_locations = SpecRunner._find_specs(args)
# spec_locations_and_contents = SpecRunner._read_specs(spec_locations)
# return SpecRunner._run_on_contents(RunnerContext(use_power_assertions=args.power_assertions),
# execution_framework, spec_locations_and_contents)
#
# @staticmethod
# def _run_on_contents(runner_context: RunnerContext, execution_framework, spec_locations_and_contents):
# specs = SpecRunner._load_specs(runner_context, spec_locations_and_contents)
# return SpecRunner._execute_specs(execution_framework, specs)
#
# @staticmethod
# def _find_specs(args):
# return SpecFinder(os.getcwd()).find(args.specs)
#
# @staticmethod
# def _read_specs(spec_locations):
# return SpecReader(fs_resource_reader).read(spec_locations)
#
# @staticmethod
# def _load_specs(runner_context: RunnerContext, spec_locations_and_contents):
# return SpecLoader(runner_context, ast_chain).load(spec_locations_and_contents)
#
# @staticmethod
# def _execute_specs(execution_framework, specs):
# return SpecExecutor(execution_framework).execute(specs)
. Output only the next line. | return SpecRunner._run_on_contents(RunnerContext(), execution_framework, |
Predict the next line after this snippet: <|code_start|>
def run_spec_contents(spec_contents):
str_io = StringIO()
execution_framework = UnitTestExecutionFramework(stream=str_io)
return SpecRunner._run_on_contents(RunnerContext(), execution_framework,
<|code_end|>
using the current file's imports:
from io import StringIO
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.runner.unittest_execution_framework import UnitTestExecutionFramework
from nimoy.spec_runner import SpecRunner
and any relevant context from other files:
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/runner/unittest_execution_framework.py
# class UnitTestExecutionFramework:
# def __init__(self, stream=None):
# self.suite = unittest.TestSuite()
# self.stream = stream
#
# def append_test(self, test):
# self.suite.addTest(test)
#
# def run(self):
# return unittest.TextTestRunner(stream=self.stream, verbosity=2).run(self.suite)
#
# Path: nimoy/spec_runner.py
# class SpecRunner:
# def run(self, execution_framework):
# parser = argparse.ArgumentParser(prog='nimoy', description='Run a suite of Nimoy specs.')
# parser.add_argument('--power-assertions', metavar='P', type=bool, nargs=1, default=False,
# help="Should Nimoy evaluate comparison expressions using power assertions (beta)")
# parser.add_argument('specs', metavar='S', type=str, nargs='*',
# help="""A path to a spec file to execute or a directory to scan for spec files.
# When naming a file it is possible to select which spec or feature to run. some_spec.py[::SpecName[::feature_name]]
# """)
#
# args = parser.parse_args()
#
# spec_locations = SpecRunner._find_specs(args)
# spec_locations_and_contents = SpecRunner._read_specs(spec_locations)
# return SpecRunner._run_on_contents(RunnerContext(use_power_assertions=args.power_assertions),
# execution_framework, spec_locations_and_contents)
#
# @staticmethod
# def _run_on_contents(runner_context: RunnerContext, execution_framework, spec_locations_and_contents):
# specs = SpecRunner._load_specs(runner_context, spec_locations_and_contents)
# return SpecRunner._execute_specs(execution_framework, specs)
#
# @staticmethod
# def _find_specs(args):
# return SpecFinder(os.getcwd()).find(args.specs)
#
# @staticmethod
# def _read_specs(spec_locations):
# return SpecReader(fs_resource_reader).read(spec_locations)
#
# @staticmethod
# def _load_specs(runner_context: RunnerContext, spec_locations_and_contents):
# return SpecLoader(runner_context, ast_chain).load(spec_locations_and_contents)
#
# @staticmethod
# def _execute_specs(execution_framework, specs):
# return SpecExecutor(execution_framework).execute(specs)
. Output only the next line. | [(Location('/fake/path.py'), spec_contents)]) |
Given the code snippet: <|code_start|>
def run_spec_contents(spec_contents):
str_io = StringIO()
execution_framework = UnitTestExecutionFramework(stream=str_io)
<|code_end|>
, generate the next line using the imports in this file:
from io import StringIO
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.runner.unittest_execution_framework import UnitTestExecutionFramework
from nimoy.spec_runner import SpecRunner
and context (functions, classes, or occasionally code) from other files:
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/runner/unittest_execution_framework.py
# class UnitTestExecutionFramework:
# def __init__(self, stream=None):
# self.suite = unittest.TestSuite()
# self.stream = stream
#
# def append_test(self, test):
# self.suite.addTest(test)
#
# def run(self):
# return unittest.TextTestRunner(stream=self.stream, verbosity=2).run(self.suite)
#
# Path: nimoy/spec_runner.py
# class SpecRunner:
# def run(self, execution_framework):
# parser = argparse.ArgumentParser(prog='nimoy', description='Run a suite of Nimoy specs.')
# parser.add_argument('--power-assertions', metavar='P', type=bool, nargs=1, default=False,
# help="Should Nimoy evaluate comparison expressions using power assertions (beta)")
# parser.add_argument('specs', metavar='S', type=str, nargs='*',
# help="""A path to a spec file to execute or a directory to scan for spec files.
# When naming a file it is possible to select which spec or feature to run. some_spec.py[::SpecName[::feature_name]]
# """)
#
# args = parser.parse_args()
#
# spec_locations = SpecRunner._find_specs(args)
# spec_locations_and_contents = SpecRunner._read_specs(spec_locations)
# return SpecRunner._run_on_contents(RunnerContext(use_power_assertions=args.power_assertions),
# execution_framework, spec_locations_and_contents)
#
# @staticmethod
# def _run_on_contents(runner_context: RunnerContext, execution_framework, spec_locations_and_contents):
# specs = SpecRunner._load_specs(runner_context, spec_locations_and_contents)
# return SpecRunner._execute_specs(execution_framework, specs)
#
# @staticmethod
# def _find_specs(args):
# return SpecFinder(os.getcwd()).find(args.specs)
#
# @staticmethod
# def _read_specs(spec_locations):
# return SpecReader(fs_resource_reader).read(spec_locations)
#
# @staticmethod
# def _load_specs(runner_context: RunnerContext, spec_locations_and_contents):
# return SpecLoader(runner_context, ast_chain).load(spec_locations_and_contents)
#
# @staticmethod
# def _execute_specs(execution_framework, specs):
# return SpecExecutor(execution_framework).execute(specs)
. Output only the next line. | return SpecRunner._run_on_contents(RunnerContext(), execution_framework, |
Based on the snippet: <|code_start|>
class WhereBlocksSpec(Specification):
def single_variable(self):
with given:
spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
def test(self):
with given:
a = value_of_a
with expect:
(a % 2) == 0
with where:
value_of_a = [2, 4, 6, 8]
"""
with when:
<|code_end|>
, predict the immediate next line with the help of imports:
from nimoy.specification import Specification
from specs.nimoy.runner_helper import run_spec_contents
and context (classes, functions, sometimes code) from other files:
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
#
# Path: specs/nimoy/runner_helper.py
# def run_spec_contents(spec_contents):
# str_io = StringIO()
# execution_framework = UnitTestExecutionFramework(stream=str_io)
# return SpecRunner._run_on_contents(RunnerContext(), execution_framework,
# [(Location('/fake/path.py'), spec_contents)])
. Output only the next line. | result = run_spec_contents(spec_contents) |
Predict the next line for this snippet: <|code_start|>
class SpecificationReaderSpec(Specification):
def read(self):
with given:
<|code_end|>
with the help of current file imports:
from unittest import mock
from nimoy.runner.spec_finder import Location
from nimoy.runner.spec_reader import SpecReader
from nimoy.specification import Specification
and context from other files:
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/runner/spec_reader.py
# class SpecReader:
# def __init__(self, resource_reader) -> None:
# super().__init__()
# self.resource_reader = resource_reader
#
# def read(self, spec_locations):
# def spec_contents():
# for spec_file_location in spec_locations:
# text = self.resource_reader.read(spec_file_location.spec_path)
# yield (spec_file_location, text)
#
# return spec_contents()
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
, which may contain function names, class names, or code. Output only the next line. | location = Location('/path/to/spec.py') |
Next line prediction: <|code_start|>
class SpecificationReaderSpec(Specification):
def read(self):
with given:
location = Location('/path/to/spec.py')
reader_mock = mock.Mock()
reader_mock.read.return_value = 'class Jimbob:\n pass'
with when:
<|code_end|>
. Use current file imports:
(from unittest import mock
from nimoy.runner.spec_finder import Location
from nimoy.runner.spec_reader import SpecReader
from nimoy.specification import Specification)
and context including class names, function names, or small code snippets from other files:
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/runner/spec_reader.py
# class SpecReader:
# def __init__(self, resource_reader) -> None:
# super().__init__()
# self.resource_reader = resource_reader
#
# def read(self, spec_locations):
# def spec_contents():
# for spec_file_location in spec_locations:
# text = self.resource_reader.read(spec_file_location.spec_path)
# yield (spec_file_location, text)
#
# return spec_contents()
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | spec_contents = SpecReader(reader_mock).read([location]) |
Given the code snippet: <|code_start|>
class AstChainSpec(Specification):
@mock.patch('nimoy.ast_tools.ast_chain.SpecTransformer')
def apply_chain(self, spec_transformer_mock):
with given:
node = {}
with when:
<|code_end|>
, generate the next line using the imports in this file:
from unittest import mock
from nimoy.ast_tools import ast_chain
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification
and context (functions, classes, or occasionally code) from other files:
# Path: nimoy/ast_tools/ast_chain.py
# def apply(runner_context: RunnerContext, spec_location, node):
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | spec_metadata = ast_chain.apply(RunnerContext(), Location('some_spec.py'), node) |
Here is a snippet: <|code_start|>
class AstChainSpec(Specification):
@mock.patch('nimoy.ast_tools.ast_chain.SpecTransformer')
def apply_chain(self, spec_transformer_mock):
with given:
node = {}
with when:
<|code_end|>
. Write the next line using the current file imports:
from unittest import mock
from nimoy.ast_tools import ast_chain
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification
and context from other files:
# Path: nimoy/ast_tools/ast_chain.py
# def apply(runner_context: RunnerContext, spec_location, node):
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
, which may include functions, classes, or code. Output only the next line. | spec_metadata = ast_chain.apply(RunnerContext(), Location('some_spec.py'), node) |
Given the following code snippet before the placeholder: <|code_start|>
class AstChainSpec(Specification):
@mock.patch('nimoy.ast_tools.ast_chain.SpecTransformer')
def apply_chain(self, spec_transformer_mock):
with given:
node = {}
with when:
<|code_end|>
, predict the next line using imports from the current file:
from unittest import mock
from nimoy.ast_tools import ast_chain
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification
and context including class names, function names, and sometimes code from other files:
# Path: nimoy/ast_tools/ast_chain.py
# def apply(runner_context: RunnerContext, spec_location, node):
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | spec_metadata = ast_chain.apply(RunnerContext(), Location('some_spec.py'), node) |
Given the code snippet: <|code_start|> spec_locations[0].spec_path @ temp_spec.name
def explicit_spec_directory(self):
with when:
spec_locations = SpecFinder('/some/working/dir').find([os.path.dirname(temp_spec.name)])
with then:
len(spec_locations) == 1
spec_locations[0].spec_path @ temp_spec.name
def relative_spec_path(self):
with when:
spec_locations = SpecFinder('/some/working/dir').find(['jim_spec.py'])
with then:
len(spec_locations) == 1
spec_locations[0].spec_path @ '/some/working/dir/jim_spec.py'
def full_path(self):
with when:
spec_locations = SpecFinder('/some/working/dir').find(['jim_spec.py::SpecName::feature_name'])
with then:
len(spec_locations) == 1
spec_locations[0].spec_path @ '/some/working/dir/jim_spec.py'
spec_locations[0].spec_name == 'SpecName'
spec_locations[0].feature_name == 'feature_name'
class SpecificationLocationSpec(Specification):
def spec_path(self):
with when:
<|code_end|>
, generate the next line using the imports in this file:
import os
import tempfile
from nimoy.runner.spec_finder import Location
from nimoy.runner.spec_finder import SpecFinder
from nimoy.specification import Specification
and context (functions, classes, or occasionally code) from other files:
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/runner/spec_finder.py
# class SpecFinder:
# def __init__(self, working_directory) -> None:
# super().__init__()
# self.working_directory = working_directory
# self.spec_locations = []
#
# def find(self, suggested_locations):
# if not suggested_locations:
# suggested_locations.append(self.working_directory)
# self._find_specs_in_suggested_locations(suggested_locations)
#
# return self.spec_locations
#
# def _find_specs_in_suggested_locations(self, suggested_locations):
#
# for suggested_location in suggested_locations:
# normalized_suggested_location = self._normalize_suggested_location(suggested_location)
#
# if 'spec.py' in normalized_suggested_location:
# self.spec_locations.append(Location(normalized_suggested_location))
# else:
# self._find_specs_in_directory(normalized_suggested_location)
#
# def _normalize_suggested_location(self, suggested_location):
#
# if os.path.isabs(suggested_location):
# return suggested_location
# return os.path.join(self.working_directory, suggested_location)
#
# def _find_specs_in_directory(self, directory):
# for root, _, file_names in os.walk(directory):
# for filename in fnmatch.filter(file_names, '*spec.py'):
# self.spec_locations.append(Location(os.path.join(root, filename)))
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | location = Location('some_spec.py') |
Predict the next line for this snippet: <|code_start|>
temp_spec = tempfile.NamedTemporaryFile(suffix='_spec.py')
class SpecificationFinderSpec(Specification):
def implicit_location(self):
with when:
<|code_end|>
with the help of current file imports:
import os
import tempfile
from nimoy.runner.spec_finder import Location
from nimoy.runner.spec_finder import SpecFinder
from nimoy.specification import Specification
and context from other files:
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/runner/spec_finder.py
# class SpecFinder:
# def __init__(self, working_directory) -> None:
# super().__init__()
# self.working_directory = working_directory
# self.spec_locations = []
#
# def find(self, suggested_locations):
# if not suggested_locations:
# suggested_locations.append(self.working_directory)
# self._find_specs_in_suggested_locations(suggested_locations)
#
# return self.spec_locations
#
# def _find_specs_in_suggested_locations(self, suggested_locations):
#
# for suggested_location in suggested_locations:
# normalized_suggested_location = self._normalize_suggested_location(suggested_location)
#
# if 'spec.py' in normalized_suggested_location:
# self.spec_locations.append(Location(normalized_suggested_location))
# else:
# self._find_specs_in_directory(normalized_suggested_location)
#
# def _normalize_suggested_location(self, suggested_location):
#
# if os.path.isabs(suggested_location):
# return suggested_location
# return os.path.join(self.working_directory, suggested_location)
#
# def _find_specs_in_directory(self, directory):
# for root, _, file_names in os.walk(directory):
# for filename in fnmatch.filter(file_names, '*spec.py'):
# self.spec_locations.append(Location(os.path.join(root, filename)))
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
, which may contain function names, class names, or code. Output only the next line. | spec_locations = SpecFinder(os.path.dirname(temp_spec.name)).find([]) |
Given the code snippet: <|code_start|>
class SpecificationTransformerSpec(Specification):
def where_methods_are_extracted_from_features(self):
with given:
spec_definition = """from nimoy.specification import Specification
class JimbobSpec(Specification):
def my_feature(self):
with setup:
a = value_of_a
with expect:
a == 5
with where:
value_of_a = [5]
"""
node = ast.parse(spec_definition, mode='exec')
with when:
found_metadata = []
<|code_end|>
, generate the next line using the imports in this file:
import ast
from nimoy.ast_tools.specs import SpecTransformer
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification
and context (functions, classes, or occasionally code) from other files:
# Path: nimoy/ast_tools/specs.py
# class SpecTransformer(ast.NodeTransformer):
# def __init__(self, runner_context: RunnerContext, spec_location, spec_metadata: List) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.spec_location = spec_location
# self.spec_metadata = spec_metadata
#
# def visit_ClassDef(self, class_node):
#
# class_is_spec = class_node.name.endswith('Spec')
#
# if class_is_spec:
# has_spec_name = hasattr(self.spec_location, 'spec_name')
# if not has_spec_name or (has_spec_name and self.spec_location.spec_name == class_node.name):
#
# metadata = SpecMetadata(class_node.name)
# self._register_spec(metadata)
# FeatureRegistrationTransformer(self.runner_context, self.spec_location, metadata).visit(class_node)
#
# for feature_name in metadata.where_functions:
# feature = next(
# feature for feature in class_node.body if
# hasattr(feature, 'name') and feature_name == feature.name)
# index_of_feature = class_node.body.index(feature)
#
# index_to_insert_where = index_of_feature + 1
# where_function_to_insert = metadata.where_functions[feature_name]
#
# if (index_to_insert_where + 1) > len(class_node.body):
# class_node.body.append(where_function_to_insert)
# else:
# class_node.body.insert(index_to_insert_where, where_function_to_insert)
#
# return class_node
#
# def _register_spec(self, metadata):
# self.spec_metadata.append(metadata)
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | SpecTransformer(RunnerContext(), Location('some_spec.py'), found_metadata).visit(node) |
Using the snippet: <|code_start|>
class SpecificationTransformerSpec(Specification):
def where_methods_are_extracted_from_features(self):
with given:
spec_definition = """from nimoy.specification import Specification
class JimbobSpec(Specification):
def my_feature(self):
with setup:
a = value_of_a
with expect:
a == 5
with where:
value_of_a = [5]
"""
node = ast.parse(spec_definition, mode='exec')
with when:
found_metadata = []
<|code_end|>
, determine the next line of code. You have imports:
import ast
from nimoy.ast_tools.specs import SpecTransformer
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification
and context (class names, function names, or code) available:
# Path: nimoy/ast_tools/specs.py
# class SpecTransformer(ast.NodeTransformer):
# def __init__(self, runner_context: RunnerContext, spec_location, spec_metadata: List) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.spec_location = spec_location
# self.spec_metadata = spec_metadata
#
# def visit_ClassDef(self, class_node):
#
# class_is_spec = class_node.name.endswith('Spec')
#
# if class_is_spec:
# has_spec_name = hasattr(self.spec_location, 'spec_name')
# if not has_spec_name or (has_spec_name and self.spec_location.spec_name == class_node.name):
#
# metadata = SpecMetadata(class_node.name)
# self._register_spec(metadata)
# FeatureRegistrationTransformer(self.runner_context, self.spec_location, metadata).visit(class_node)
#
# for feature_name in metadata.where_functions:
# feature = next(
# feature for feature in class_node.body if
# hasattr(feature, 'name') and feature_name == feature.name)
# index_of_feature = class_node.body.index(feature)
#
# index_to_insert_where = index_of_feature + 1
# where_function_to_insert = metadata.where_functions[feature_name]
#
# if (index_to_insert_where + 1) > len(class_node.body):
# class_node.body.append(where_function_to_insert)
# else:
# class_node.body.insert(index_to_insert_where, where_function_to_insert)
#
# return class_node
#
# def _register_spec(self, metadata):
# self.spec_metadata.append(metadata)
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | SpecTransformer(RunnerContext(), Location('some_spec.py'), found_metadata).visit(node) |
Next line prediction: <|code_start|>
class SpecificationTransformerSpec(Specification):
def where_methods_are_extracted_from_features(self):
with given:
spec_definition = """from nimoy.specification import Specification
class JimbobSpec(Specification):
def my_feature(self):
with setup:
a = value_of_a
with expect:
a == 5
with where:
value_of_a = [5]
"""
node = ast.parse(spec_definition, mode='exec')
with when:
found_metadata = []
<|code_end|>
. Use current file imports:
(import ast
from nimoy.ast_tools.specs import SpecTransformer
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification)
and context including class names, function names, or small code snippets from other files:
# Path: nimoy/ast_tools/specs.py
# class SpecTransformer(ast.NodeTransformer):
# def __init__(self, runner_context: RunnerContext, spec_location, spec_metadata: List) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.spec_location = spec_location
# self.spec_metadata = spec_metadata
#
# def visit_ClassDef(self, class_node):
#
# class_is_spec = class_node.name.endswith('Spec')
#
# if class_is_spec:
# has_spec_name = hasattr(self.spec_location, 'spec_name')
# if not has_spec_name or (has_spec_name and self.spec_location.spec_name == class_node.name):
#
# metadata = SpecMetadata(class_node.name)
# self._register_spec(metadata)
# FeatureRegistrationTransformer(self.runner_context, self.spec_location, metadata).visit(class_node)
#
# for feature_name in metadata.where_functions:
# feature = next(
# feature for feature in class_node.body if
# hasattr(feature, 'name') and feature_name == feature.name)
# index_of_feature = class_node.body.index(feature)
#
# index_to_insert_where = index_of_feature + 1
# where_function_to_insert = metadata.where_functions[feature_name]
#
# if (index_to_insert_where + 1) > len(class_node.body):
# class_node.body.append(where_function_to_insert)
# else:
# class_node.body.insert(index_to_insert_where, where_function_to_insert)
#
# return class_node
#
# def _register_spec(self, metadata):
# self.spec_metadata.append(metadata)
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | SpecTransformer(RunnerContext(), Location('some_spec.py'), found_metadata).visit(node) |
Given the code snippet: <|code_start|>
class SpecTransformerSpec(Specification):
@mock.patch('nimoy.ast_tools.specs.FeatureRegistrationTransformer')
def find_specs_in_module(self, feature_registration_transformer):
with setup:
spec_definition = """from nimoy.specification import Specification
class JimbobSpec(Specification):
pass
class JonesSpec(Specification):
pass
class Bobson:
pass
"""
node = ast.parse(spec_definition, mode='exec')
found_metadata = []
with when:
<|code_end|>
, generate the next line using the imports in this file:
import ast
from unittest import mock
from nimoy.ast_tools.specs import SpecTransformer
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification
and context (functions, classes, or occasionally code) from other files:
# Path: nimoy/ast_tools/specs.py
# class SpecTransformer(ast.NodeTransformer):
# def __init__(self, runner_context: RunnerContext, spec_location, spec_metadata: List) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.spec_location = spec_location
# self.spec_metadata = spec_metadata
#
# def visit_ClassDef(self, class_node):
#
# class_is_spec = class_node.name.endswith('Spec')
#
# if class_is_spec:
# has_spec_name = hasattr(self.spec_location, 'spec_name')
# if not has_spec_name or (has_spec_name and self.spec_location.spec_name == class_node.name):
#
# metadata = SpecMetadata(class_node.name)
# self._register_spec(metadata)
# FeatureRegistrationTransformer(self.runner_context, self.spec_location, metadata).visit(class_node)
#
# for feature_name in metadata.where_functions:
# feature = next(
# feature for feature in class_node.body if
# hasattr(feature, 'name') and feature_name == feature.name)
# index_of_feature = class_node.body.index(feature)
#
# index_to_insert_where = index_of_feature + 1
# where_function_to_insert = metadata.where_functions[feature_name]
#
# if (index_to_insert_where + 1) > len(class_node.body):
# class_node.body.append(where_function_to_insert)
# else:
# class_node.body.insert(index_to_insert_where, where_function_to_insert)
#
# return class_node
#
# def _register_spec(self, metadata):
# self.spec_metadata.append(metadata)
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | SpecTransformer(RunnerContext(), Location('some_spec.py'), found_metadata).visit(node) |
Given the following code snippet before the placeholder: <|code_start|>
class SpecTransformerSpec(Specification):
@mock.patch('nimoy.ast_tools.specs.FeatureRegistrationTransformer')
def find_specs_in_module(self, feature_registration_transformer):
with setup:
spec_definition = """from nimoy.specification import Specification
class JimbobSpec(Specification):
pass
class JonesSpec(Specification):
pass
class Bobson:
pass
"""
node = ast.parse(spec_definition, mode='exec')
found_metadata = []
with when:
<|code_end|>
, predict the next line using imports from the current file:
import ast
from unittest import mock
from nimoy.ast_tools.specs import SpecTransformer
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification
and context including class names, function names, and sometimes code from other files:
# Path: nimoy/ast_tools/specs.py
# class SpecTransformer(ast.NodeTransformer):
# def __init__(self, runner_context: RunnerContext, spec_location, spec_metadata: List) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.spec_location = spec_location
# self.spec_metadata = spec_metadata
#
# def visit_ClassDef(self, class_node):
#
# class_is_spec = class_node.name.endswith('Spec')
#
# if class_is_spec:
# has_spec_name = hasattr(self.spec_location, 'spec_name')
# if not has_spec_name or (has_spec_name and self.spec_location.spec_name == class_node.name):
#
# metadata = SpecMetadata(class_node.name)
# self._register_spec(metadata)
# FeatureRegistrationTransformer(self.runner_context, self.spec_location, metadata).visit(class_node)
#
# for feature_name in metadata.where_functions:
# feature = next(
# feature for feature in class_node.body if
# hasattr(feature, 'name') and feature_name == feature.name)
# index_of_feature = class_node.body.index(feature)
#
# index_to_insert_where = index_of_feature + 1
# where_function_to_insert = metadata.where_functions[feature_name]
#
# if (index_to_insert_where + 1) > len(class_node.body):
# class_node.body.append(where_function_to_insert)
# else:
# class_node.body.insert(index_to_insert_where, where_function_to_insert)
#
# return class_node
#
# def _register_spec(self, metadata):
# self.spec_metadata.append(metadata)
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | SpecTransformer(RunnerContext(), Location('some_spec.py'), found_metadata).visit(node) |
Given the code snippet: <|code_start|>
class SpecTransformerSpec(Specification):
@mock.patch('nimoy.ast_tools.specs.FeatureRegistrationTransformer')
def find_specs_in_module(self, feature_registration_transformer):
with setup:
spec_definition = """from nimoy.specification import Specification
class JimbobSpec(Specification):
pass
class JonesSpec(Specification):
pass
class Bobson:
pass
"""
node = ast.parse(spec_definition, mode='exec')
found_metadata = []
with when:
<|code_end|>
, generate the next line using the imports in this file:
import ast
from unittest import mock
from nimoy.ast_tools.specs import SpecTransformer
from nimoy.runner.metadata import RunnerContext
from nimoy.runner.spec_finder import Location
from nimoy.specification import Specification
and context (functions, classes, or occasionally code) from other files:
# Path: nimoy/ast_tools/specs.py
# class SpecTransformer(ast.NodeTransformer):
# def __init__(self, runner_context: RunnerContext, spec_location, spec_metadata: List) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.spec_location = spec_location
# self.spec_metadata = spec_metadata
#
# def visit_ClassDef(self, class_node):
#
# class_is_spec = class_node.name.endswith('Spec')
#
# if class_is_spec:
# has_spec_name = hasattr(self.spec_location, 'spec_name')
# if not has_spec_name or (has_spec_name and self.spec_location.spec_name == class_node.name):
#
# metadata = SpecMetadata(class_node.name)
# self._register_spec(metadata)
# FeatureRegistrationTransformer(self.runner_context, self.spec_location, metadata).visit(class_node)
#
# for feature_name in metadata.where_functions:
# feature = next(
# feature for feature in class_node.body if
# hasattr(feature, 'name') and feature_name == feature.name)
# index_of_feature = class_node.body.index(feature)
#
# index_to_insert_where = index_of_feature + 1
# where_function_to_insert = metadata.where_functions[feature_name]
#
# if (index_to_insert_where + 1) > len(class_node.body):
# class_node.body.append(where_function_to_insert)
# else:
# class_node.body.insert(index_to_insert_where, where_function_to_insert)
#
# return class_node
#
# def _register_spec(self, metadata):
# self.spec_metadata.append(metadata)
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
#
# Path: nimoy/runner/spec_finder.py
# class Location:
# def __init__(self, suggested_location):
#
# # Format may be:
# # - some_spec.py
# # - some_spec.py::SpecName
# # - some_spec.py::feature_name
# # - some_spec.py::SpecName::feature_name
# split_suggested_location = suggested_location.split("::")
#
# # some_spec.py
# if len(split_suggested_location) == 1:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName or some_spec.py::feature_name
# if len(split_suggested_location) == 2:
# self.spec_path = split_suggested_location[0]
#
# # some_spec.py::SpecName
# if split_suggested_location[1][0].isupper():
# self.spec_name = split_suggested_location[1]
#
# # some_spec.py::feature_name
# else:
# self.feature_name = split_suggested_location[1]
#
# # some_spec.py::SpecName::feature_name
# if len(split_suggested_location) == 3:
# self.spec_path = split_suggested_location[0]
# self.spec_name = split_suggested_location[1]
# self.feature_name = split_suggested_location[2]
#
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
. Output only the next line. | SpecTransformer(RunnerContext(), Location('some_spec.py'), found_metadata).visit(node) |
Next line prediction: <|code_start|>
class FeatureBlock:
def __init__(self, block_type, thrown_exceptions) -> None:
super().__init__()
self.block_type = block_type
self.thrown_exceptions = thrown_exceptions
def __enter__(self):
pass
def __exit__(self, the_type, value, traceback):
<|code_end|>
. Use current file imports:
(from nimoy.ast_tools.feature_blocks import WHEN, THEN)
and context including class names, function names, or small code snippets from other files:
# Path: nimoy/ast_tools/feature_blocks.py
# WHEN = 'when'
#
# THEN = 'then'
. Output only the next line. | if self.block_type == WHEN and the_type: |
Predict the next line after this snippet: <|code_start|>
class FeatureBlock:
def __init__(self, block_type, thrown_exceptions) -> None:
super().__init__()
self.block_type = block_type
self.thrown_exceptions = thrown_exceptions
def __enter__(self):
pass
def __exit__(self, the_type, value, traceback):
if self.block_type == WHEN and the_type:
self.thrown_exceptions.append((the_type, value, traceback))
return True
<|code_end|>
using the current file's imports:
from nimoy.ast_tools.feature_blocks import WHEN, THEN
and any relevant context from other files:
# Path: nimoy/ast_tools/feature_blocks.py
# WHEN = 'when'
#
# THEN = 'then'
. Output only the next line. | if self.block_type == THEN and not the_type and self.thrown_exceptions: |
Given snippet: <|code_start|>
class MockAssertionsSpec(Specification):
def successful_assertion(self):
with given:
spec_contents = """from unittest import mock
from nimoy.specification import Specification
class JimbobSpec(Specification):
def test(self):
with setup:
the_mock = mock.Mock()
with when:
the_mock.some_method()
with then:
1 * the_mock.some_method()
"""
with when:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from nimoy.specification import Specification
from specs.nimoy.runner_helper import run_spec_contents
and context:
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
#
# Path: specs/nimoy/runner_helper.py
# def run_spec_contents(spec_contents):
# str_io = StringIO()
# execution_framework = UnitTestExecutionFramework(stream=str_io)
# return SpecRunner._run_on_contents(RunnerContext(), execution_framework,
# [(Location('/fake/path.py'), spec_contents)])
which might include code, classes, or functions. Output only the next line. | result = run_spec_contents(spec_contents) |
Here is a snippet: <|code_start|>
class VerboseDiffsSpec(Specification):
def string_diffs(self):
with given:
spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
def test(self):
with given:
a = 'The quick brown fox'
with expect:
a == 'The quick frown box'
"""
with when:
<|code_end|>
. Write the next line using the current file imports:
from nimoy.specification import Specification
from specs.nimoy.runner_helper import run_spec_contents
and context from other files:
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
#
# Path: specs/nimoy/runner_helper.py
# def run_spec_contents(spec_contents):
# str_io = StringIO()
# execution_framework = UnitTestExecutionFramework(stream=str_io)
# return SpecRunner._run_on_contents(RunnerContext(), execution_framework,
# [(Location('/fake/path.py'), spec_contents)])
, which may include functions, classes, or code. Output only the next line. | result = run_spec_contents(spec_contents) |
Given the following code snippet before the placeholder: <|code_start|>
class ExpectBlocksSpec(Specification):
def successful_given(self):
with given:
spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
def test(self):
with given:
a = 3
with expect:
a != 4
"""
with when:
<|code_end|>
, predict the next line using imports from the current file:
from nimoy.runner.exceptions import InvalidFeatureBlockException
from nimoy.specification import Specification
from specs.nimoy.runner_helper import run_spec_contents
and context including class names, function names, and sometimes code from other files:
# Path: nimoy/specification.py
# class Specification(TestCase, metaclass=DataDrivenSpecification):
#
# def __init__(self, methodName='runTest'):
# super().__init__(methodName)
# self.thrown_exceptions = []
#
# def _feature_block_context(self, block_name):
# return FeatureBlock(block_name, self.thrown_exceptions)
#
# def _compare(self, left, right, comparison_type_name):
# Compare().compare(left, right, comparison_type_name)
#
# def _power_assert(self, expression: Dict):
# PowerAssertions().assert_and_raise(expression)
#
# def _assert_mock(self, number_of_invocations, mock, method, *args):
# MockAssertions().assert_mock(number_of_invocations, mock, method, *args)
#
# def _exception_thrown(self, expected_exception_type):
# return ExceptionAssertions().assert_exception(self.thrown_exceptions, expected_exception_type)
#
# Path: specs/nimoy/runner_helper.py
# def run_spec_contents(spec_contents):
# str_io = StringIO()
# execution_framework = UnitTestExecutionFramework(stream=str_io)
# return SpecRunner._run_on_contents(RunnerContext(), execution_framework,
# [(Location('/fake/path.py'), spec_contents)])
. Output only the next line. | result = run_spec_contents(spec_contents) |
Using the snippet: <|code_start|>
class SpecTransformer(ast.NodeTransformer):
def __init__(self, runner_context: RunnerContext, spec_location, spec_metadata: List) -> None:
super().__init__()
self.runner_context = runner_context
self.spec_location = spec_location
self.spec_metadata = spec_metadata
def visit_ClassDef(self, class_node):
class_is_spec = class_node.name.endswith('Spec')
if class_is_spec:
has_spec_name = hasattr(self.spec_location, 'spec_name')
if not has_spec_name or (has_spec_name and self.spec_location.spec_name == class_node.name):
metadata = SpecMetadata(class_node.name)
self._register_spec(metadata)
<|code_end|>
, determine the next line of code. You have imports:
import ast
from typing import List
from nimoy.ast_tools.ast_metadata import SpecMetadata
from nimoy.ast_tools.features import FeatureRegistrationTransformer
from nimoy.runner.metadata import RunnerContext
and context (class names, function names, or code) available:
# Path: nimoy/ast_tools/features.py
# class FeatureRegistrationTransformer(ast.NodeTransformer):
# def __init__(self, runner_context: RunnerContext, spec_location, spec_metadata: SpecMetadata) -> None:
# super().__init__()
# self.runner_context = runner_context
# self.spec_location = spec_location
# self.spec_metadata = spec_metadata
#
# def visit_FunctionDef(self, feature_node):
# if FeatureRegistrationTransformer._skip_feature(feature_node):
# return feature_node
#
# feature_name = feature_node.name
# if not feature_name.startswith('_'):
#
# feature_name_specified = hasattr(self.spec_location, 'feature_name')
#
# if not feature_name_specified or (
# feature_name_specified and self.spec_location.feature_name == feature_name):
# self.spec_metadata.add_feature(feature_name)
# FeatureBlockTransformer(self.runner_context, self.spec_metadata, feature_name).visit(feature_node)
# FeatureBlockRuleEnforcer(self.spec_metadata, feature_name, feature_node).enforce_tail_end_rules()
#
# feature_variables = self.spec_metadata.feature_variables.get(feature_name)
# if feature_variables:
# existing_arg_names = [existing_arg.arg for existing_arg in feature_node.args.args]
#
# for feature_variable in feature_variables:
# if feature_variable in existing_arg_names:
# continue
# feature_node.args.args.append(_ast.arg(arg=feature_variable))
# feature_node.args.defaults.append(ast.NameConstant(value=None))
#
# if self._feature_has_a_where_function(feature_name):
# self._remove_where_function_from_node(feature_name, feature_node)
# return feature_node
#
# @staticmethod
# def _remove_where_function_from_node(feature_name, feature_node):
# where_function = FeatureRegistrationTransformer._locate_where_function_within_feature(feature_name,
# feature_node)
# feature_node.body.remove(where_function)
#
# def _feature_has_a_where_function(self, feature_name):
# return self.spec_metadata.where_functions.get(feature_name)
#
# @staticmethod
# def _locate_where_function_within_feature(feature_name, feature_node):
# def _is_a_where_function(body_element):
# return hasattr(body_element, 'name') and body_element.name == feature_name + '_where'
#
# return next(body_element for body_element in feature_node.body if _is_a_where_function(body_element))
#
# @staticmethod
# def _skip_feature(feature_node):
# decorators = feature_node.decorator_list
# return any((hasattr(decorator, 'attr') and decorator.attr == 'skip') for decorator in decorators)
#
# Path: nimoy/runner/metadata.py
# class RunnerContext:
#
# def __init__(self, use_power_assertions: bool = False):
# self.use_power_assertions = use_power_assertions
. Output only the next line. | FeatureRegistrationTransformer(self.runner_context, self.spec_location, metadata).visit(class_node) |
Continue the code snippet: <|code_start|> # XXX: must call loader for celery to register all the tasks
default_app.loader.import_default_modules()
return conf
def add_urlhelpers(event):
"""
Add helpers to the template engine.
"""
event['static_url'] = lambda x: static_path(x, event['request'])
event['route_url'] = lambda name, *args, **kwargs: route_path(
name, event['request'], *args, **kwargs)
event['has_permission'] = lambda perm: has_permission(
perm, event['request'].context, event['request'])
def includeme(config):
"""
Pyramid includeme file for the :class:`pyramid.config.Configurator`
"""
ldap = False
settings = config.registry.settings
if 'pyvac.celery.yaml' in settings:
configure(settings['pyvac.celery.yaml'])
if 'pyvac.use_ldap' in settings:
ldap = asbool(settings['pyvac.use_ldap'])
if ldap:
<|code_end|>
. Use current file imports:
import os.path
import yaml
from pyramid.interfaces import IBeforeRender
from pyramid.security import has_permission
from pyramid.url import static_path, route_path
from pyramid.exceptions import Forbidden
from pyramid_jinja2 import renderer_factory
from pyramid.settings import asbool
from yaml import CSafeLoader as YAMLLoader
from yaml import SafeLoader as YAMLLoader
from logging.config import dictConfig
from logutils.dictconfig import dictConfig
from pyvac.helpers.ldap import LdapCache
from pyvac.helpers.i18n import locale_negotiator
from pyvac.helpers.holiday import init_override
from celery import current_app as default_app
from celery.app import default_app
and context (classes, functions, or code) from other files:
# Path: pyvac/helpers/ldap.py
# class LdapCache(object):
# """ Ldap cache class singleton """
# _instance = None
#
# def __new__(cls, *args, **kwargs):
# if not cls._instance:
# raise RuntimeError('Ldap is not initialized')
#
# return cls._instance
#
# @classmethod
# def configure(cls, settings):
# cls._instance = cls.from_config(settings)
#
# @classmethod
# def from_config(cls, config, **kwargs):
# """
# Return a Ldap client object configured from the given configuration.
# """
# return LdapWrapper(config)
#
# Path: pyvac/helpers/i18n.py
# def locale_negotiator(request):
# """
# Locale negotiator for pyramid views.
#
# This version differs from Pyramid's :py:func:`default_locale_negotiator
# <pyramid.i18n.default_locale_negotiator>` in that it gets the locale from
# the url parameter or the cookie, and fallbacks to the user's lang.
# """
#
# login = authenticated_userid(request)
# if login:
# from pyvac.models import DBSession, User
# session = DBSession()
# user = User.by_login(session, login)
# if user.country == 'us':
# return 'en'
# if user.country == 'zh':
# return 'en'
# return user.country
#
# return None
#
# Path: pyvac/helpers/holiday.py
# def init_override(content):
# """Load a yaml file for holidays override.
#
# You can override holidays for a country and a year through
# usage of a configuration setting:
# pyvac.override_holidays_file = %(here)s/conf/holidays.yaml
#
# here is a sample:
# zh:
# 2016:
# '2016-01-01': 'New Years Day'
# '2016-02-07': 'Chinese New Years Eve'
# """
# if not content:
# return
# override.update(content)
. Output only the next line. | LdapCache.configure(settings['pyvac.ldap.yaml']) |
Predict the next line after this snippet: <|code_start|> LdapCache.configure(settings['pyvac.ldap.yaml'])
# initiatlize holiday override from yaml configuration
if 'pyvac.override_holidays_file' in settings:
filename = settings['pyvac.override_holidays_file']
content = None
if os.path.isfile(filename):
with open(filename) as fdesc:
content = yaml.load(fdesc, YAMLLoader)
init_override(content)
# call includeme for models configuration
config.include('pyvac.models')
# call includeme for request views configuration
config.include('pyvac.views.request')
# call includeme for account views configuration
config.include('pyvac.views.account')
# call includeme for credentials views configuration
config.include('pyvac.views.credentials')
# Jinja configuration
# We don't use jinja2 filename, .html instead
config.add_renderer('.html', renderer_factory)
# helpers
config.add_subscriber(add_urlhelpers, IBeforeRender)
# i18n
config.add_translation_dirs('locale/')
<|code_end|>
using the current file's imports:
import os.path
import yaml
from pyramid.interfaces import IBeforeRender
from pyramid.security import has_permission
from pyramid.url import static_path, route_path
from pyramid.exceptions import Forbidden
from pyramid_jinja2 import renderer_factory
from pyramid.settings import asbool
from yaml import CSafeLoader as YAMLLoader
from yaml import SafeLoader as YAMLLoader
from logging.config import dictConfig
from logutils.dictconfig import dictConfig
from pyvac.helpers.ldap import LdapCache
from pyvac.helpers.i18n import locale_negotiator
from pyvac.helpers.holiday import init_override
from celery import current_app as default_app
from celery.app import default_app
and any relevant context from other files:
# Path: pyvac/helpers/ldap.py
# class LdapCache(object):
# """ Ldap cache class singleton """
# _instance = None
#
# def __new__(cls, *args, **kwargs):
# if not cls._instance:
# raise RuntimeError('Ldap is not initialized')
#
# return cls._instance
#
# @classmethod
# def configure(cls, settings):
# cls._instance = cls.from_config(settings)
#
# @classmethod
# def from_config(cls, config, **kwargs):
# """
# Return a Ldap client object configured from the given configuration.
# """
# return LdapWrapper(config)
#
# Path: pyvac/helpers/i18n.py
# def locale_negotiator(request):
# """
# Locale negotiator for pyramid views.
#
# This version differs from Pyramid's :py:func:`default_locale_negotiator
# <pyramid.i18n.default_locale_negotiator>` in that it gets the locale from
# the url parameter or the cookie, and fallbacks to the user's lang.
# """
#
# login = authenticated_userid(request)
# if login:
# from pyvac.models import DBSession, User
# session = DBSession()
# user = User.by_login(session, login)
# if user.country == 'us':
# return 'en'
# if user.country == 'zh':
# return 'en'
# return user.country
#
# return None
#
# Path: pyvac/helpers/holiday.py
# def init_override(content):
# """Load a yaml file for holidays override.
#
# You can override holidays for a country and a year through
# usage of a configuration setting:
# pyvac.override_holidays_file = %(here)s/conf/holidays.yaml
#
# here is a sample:
# zh:
# 2016:
# '2016-01-01': 'New Years Day'
# '2016-02-07': 'Chinese New Years Eve'
# """
# if not content:
# return
# override.update(content)
. Output only the next line. | config.set_locale_negotiator(locale_negotiator) |
Given the following code snippet before the placeholder: <|code_start|> """
event['static_url'] = lambda x: static_path(x, event['request'])
event['route_url'] = lambda name, *args, **kwargs: route_path(
name, event['request'], *args, **kwargs)
event['has_permission'] = lambda perm: has_permission(
perm, event['request'].context, event['request'])
def includeme(config):
"""
Pyramid includeme file for the :class:`pyramid.config.Configurator`
"""
ldap = False
settings = config.registry.settings
if 'pyvac.celery.yaml' in settings:
configure(settings['pyvac.celery.yaml'])
if 'pyvac.use_ldap' in settings:
ldap = asbool(settings['pyvac.use_ldap'])
if ldap:
LdapCache.configure(settings['pyvac.ldap.yaml'])
# initiatlize holiday override from yaml configuration
if 'pyvac.override_holidays_file' in settings:
filename = settings['pyvac.override_holidays_file']
content = None
if os.path.isfile(filename):
with open(filename) as fdesc:
content = yaml.load(fdesc, YAMLLoader)
<|code_end|>
, predict the next line using imports from the current file:
import os.path
import yaml
from pyramid.interfaces import IBeforeRender
from pyramid.security import has_permission
from pyramid.url import static_path, route_path
from pyramid.exceptions import Forbidden
from pyramid_jinja2 import renderer_factory
from pyramid.settings import asbool
from yaml import CSafeLoader as YAMLLoader
from yaml import SafeLoader as YAMLLoader
from logging.config import dictConfig
from logutils.dictconfig import dictConfig
from pyvac.helpers.ldap import LdapCache
from pyvac.helpers.i18n import locale_negotiator
from pyvac.helpers.holiday import init_override
from celery import current_app as default_app
from celery.app import default_app
and context including class names, function names, and sometimes code from other files:
# Path: pyvac/helpers/ldap.py
# class LdapCache(object):
# """ Ldap cache class singleton """
# _instance = None
#
# def __new__(cls, *args, **kwargs):
# if not cls._instance:
# raise RuntimeError('Ldap is not initialized')
#
# return cls._instance
#
# @classmethod
# def configure(cls, settings):
# cls._instance = cls.from_config(settings)
#
# @classmethod
# def from_config(cls, config, **kwargs):
# """
# Return a Ldap client object configured from the given configuration.
# """
# return LdapWrapper(config)
#
# Path: pyvac/helpers/i18n.py
# def locale_negotiator(request):
# """
# Locale negotiator for pyramid views.
#
# This version differs from Pyramid's :py:func:`default_locale_negotiator
# <pyramid.i18n.default_locale_negotiator>` in that it gets the locale from
# the url parameter or the cookie, and fallbacks to the user's lang.
# """
#
# login = authenticated_userid(request)
# if login:
# from pyvac.models import DBSession, User
# session = DBSession()
# user = User.by_login(session, login)
# if user.country == 'us':
# return 'en'
# if user.country == 'zh':
# return 'en'
# return user.country
#
# return None
#
# Path: pyvac/helpers/holiday.py
# def init_override(content):
# """Load a yaml file for holidays override.
#
# You can override holidays for a country and a year through
# usage of a configuration setting:
# pyvac.override_holidays_file = %(here)s/conf/holidays.yaml
#
# here is a sample:
# zh:
# 2016:
# '2016-01-01': 'New Years Day'
# '2016-02-07': 'Chinese New Years Eve'
# """
# if not content:
# return
# override.update(content)
. Output only the next line. | init_override(content) |
Continue the code snippet: <|code_start|> if ';' in message:
return message.split(';', 1)[1]
return message
def hournow(data):
now = datetime.datetime.utcnow()
return now.hour
def datenow(data):
now = datetime.datetime.utcnow()
return schedule_date(now)
def schedule_date(dt):
return dt.strftime("%d/%m")
def is_manager(user):
groupname = 'manager'
for g in user.groups:
if groupname == g.name:
return True
return False
def extract_cn(user_dn):
""" Get cn from a user dn """
try:
<|code_end|>
. Use current file imports:
import json
import datetime
import datetime
from .ldap import dn
from datetime import timedelta
from pyramid.httpexceptions import HTTPNotFound
and context (classes, functions, or code) from other files:
# Path: pyvac/helpers/ldap.py
# class UnknownLdapUser(Exception):
# class LdapWrapper(object):
# class LdapCache(object):
# def __init__(self, filename):
# def _bind(self, dn, password):
# def _search(self, what, retrieve):
# def _search_admin(self, what, retrieve):
# def _search_team(self, what, retrieve):
# def _search_chapter(self, what, retrieve):
# def _search_by_item(self, item):
# def search_user_by_login(self, login):
# def search_user_by_dn(self, user_dn):
# def _extract_country(self, user_dn):
# def _extract_cn(self, user_dn):
# def _convert_date(self, date):
# def _cast_arrivaldate(self, date):
# def parse_ldap_entry(self, user_dn, entry):
# def authenticate(self, login, password):
# def add_user(self, user, password, unit=None, uid=None):
# def update_user(self, user, password=None, unit=None, arrival_date=None,
# uid=None, photo=None, mobile=None):
# def delete_user(self, user_dn):
# def update_team(self, team, members):
# def update_managers(self, old, new):
# def add_manager(self, user_dn):
# def remove_manager(self, user_dn):
# def update_admins(self, old, new):
# def add_admin(self, user_dn):
# def remove_admin(self, user_dn):
# def get_hr_by_country(self, country, full=False):
# def list_users(self):
# def list_ou(self):
# def list_teams(self):
# def list_chapters(self):
# def list_manager(self):
# def list_admin(self):
# def list_arrivals_country(self, country):
# def list_active_users(self):
# def get_users_units(self):
# def get_team_members(self, team):
# def create_team(self, team):
# def delete_team(self, team):
# def __new__(cls, *args, **kwargs):
# def configure(cls, settings):
# def from_config(cls, config, **kwargs):
# def hashPassword(password):
# def randomstring(length=8):
. Output only the next line. | for rdn in dn.str2dn(user_dn): |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
def mock_pool(amount, date_start, date_end):
mocked_pool = MagicMock()
type(mocked_pool).amount = PropertyMock(return_value=amount)
type(mocked_pool).date_start = PropertyMock(return_value=date_start) # noqa
type(mocked_pool).date_end = PropertyMock(return_value=date_end) # noqa
return mocked_pool
<|code_end|>
, determine the next line of code. You have imports:
from datetime import datetime
from freezegun import freeze_time
from pyvac.tests import case
from pyvac.tests.mocks.tasks import DummyTasks
from pyvac.tests.mocks.celery import subtask
from mock import patch, MagicMock, PropertyMock
from dateutil.relativedelta import relativedelta
from pyvac.models import Request
from pyvac.views.request import List
from pyvac.models import Request
from pyvac.views.request import List
from pyvac.models import Request
from pyvac.views.request import List
from pyvac.models import Request
from pyvac.views.request import List
from pyvac.models import Request
from pyvac.views.request import Accept
from pyvac.models import Request
from pyvac.views.request import Accept
from pyvac.models import Request
from pyvac.views.request import Refuse
from pyvac.models import Request
from pyvac.views.request import Refuse
from pyvac.models import Request
from pyvac.views.request import Refuse
from pyvac.models import Request
from pyvac.views.request import Cancel
from pyvac.models import Request
from pyvac.views.request import Cancel
from pyvac.models import Request
from pyvac.views.request import Cancel
from pyvac.models import Request
from pyvac.views.request import Cancel
from pyvac.views.request import Export
from pyvac.views.request import Exported
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request, User, CPVacation
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request, User
from pyvac.views.request import Send
from pyvac.models import Request, User
from pyvac.views.request import Send
from pyvac.models import Request, User
from pyvac.views.request import Send
from pyvac.models import Request, User
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request, User
from pyvac.views.request import Send
from pyvac.models import Request, User
from pyvac.views.request import Send
from pyvac.models import Request, User
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request, User
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
from pyvac.models import Request
from pyvac.views.request import Send
and context (class names, function names, or code) available:
# Path: pyvac/tests/case.py
# class ModelTestCase(TestCase):
# class DummyRoute(object):
# class DummyRequest(testing.DummyRequest):
# class UnauthenticatedViewTestCase(TestCase):
# class ViewTestCase(UnauthenticatedViewTestCase):
# class ViewAdminTestCase(ViewTestCase):
# def setUp(self):
# def tearDown(self):
# def auto_translate(string, country):
# def __init__(self, methodName='runTest'):
# def setUp(self):
# def tearDown(self):
# def create_request(self, params=None, environ=None, matchdict=None,
# headers=None, path='/', cookies=None, post=None, **kw):
# def assertIsRedirect(self, view):
# def setUp(self):
# def set_userid(self, userid='admin', permissive=False):
# def setUp(self):
. Output only the next line. | class RequestTestCase(case.ViewTestCase): |
Given snippet: <|code_start|>
def mock_pool(amount, date_start, date_end):
mocked_pool = MagicMock()
type(mocked_pool).amount = PropertyMock(return_value=amount)
type(mocked_pool).date_start = PropertyMock(return_value=date_start) # noqa
type(mocked_pool).date_end = PropertyMock(return_value=date_end) # noqa
return mocked_pool
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from datetime import datetime
from freezegun import freeze_time
from dateutil.relativedelta import relativedelta
from mock import patch, PropertyMock, MagicMock
from .case import ModelTestCase
from pyvac.models import Group
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User
from pyvac.models import User, Request
from pyvac.models import User, Request
from pyvac.models import User, Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import Request
from pyvac.models import User, VacationType
from pyvac.models import User, VacationType
from pyvac.models import User, VacationType
from pyvac.models import User, VacationType
from pyvac.models import VacationType
from pyvac.models import VacationType
from pyvac.models import VacationType
from pyvac.models import User
from pyvac.models import CPLUVacation, CompensatoireVacation, User
from pyvac.models import CPVacation, User
from pyvac.models import Sudoer
from pyvac.models import Sudoer
from pyvac.models import User
from pyvac.models import Sudoer
from pyvac.models import User
and context:
# Path: pyvac/tests/case.py
# class ModelTestCase(TestCase):
#
# def setUp(self):
# transaction.begin()
# self.session = DBSession()
#
# def tearDown(self):
# transaction.commit()
which might include code, classes, or functions. Output only the next line. | class GroupTestCase(ModelTestCase): |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
log = logging.getLogger(__name__)
class List(View):
"""
List holiday for given year
"""
def render(self):
year = int(self.request.params.get('year'))
<|code_end|>
with the help of current file imports:
import logging
from .base import View
from pyvac.helpers.holiday import get_holiday
and context from other files:
# Path: pyvac/views/base.py
# class View(ViewBase):
# """
# Base class of every views.
# """
#
# def update_response(self, response):
# # this is a view to render
# if isinstance(response, dict):
# global_ = {
# 'pyvac': {
# 'version': __version__,
# 'login': self.login,
# 'user': self.user,
# }
# }
#
# if self.user:
# # if logged, retrieve total requests count for header
# req_list = {'requests': []}
# requests = []
# if self.user.is_admin:
# country = self.user.country
# requests = Request.all_for_admin_per_country(self.session,
# country)
# elif self.user.is_super:
# requests = Request.by_manager(self.session, self.user)
#
# req_list['requests'] = requests
#
# # always add our requests
# for req in Request.by_user(self.session, self.user):
# if req not in req_list['requests']:
# req_list['requests'].append(req)
#
# # only count next requests
# today = datetime.now()
# if self.user.is_admin:
# # for admin, display request from 1st of month
# today = today.replace(day=1)
# requests_count = len([req for req in req_list['requests']
# if req.date_to >= today])
#
# global_['pyvac']['requests_count'] = requests_count
#
# # retrieve available users for sudo
# sudoers = Sudoer.alias(self.session, self.user)
# if sudoers:
# sudoers.append(self.user)
# global_['pyvac']['sudoers'] = sudoers
#
# response.update(global_)
#
# Path: pyvac/helpers/holiday.py
# def get_holiday(user, year=None, use_datetime=False):
# """ return holidays for user country
#
# format is unixtime for javascript
# """
# klass = conv_table[user.country]
#
# cal = klass()
# current_year = year or datetime.now().year
# next_year = current_year + 1
#
# # retrieve Dates from workalendar
# holiday_current_raw = [dt for dt, _ in cal.holidays(current_year)]
# holiday_next_raw = [dt for dt, _ in cal.holidays(next_year)]
#
# if user.country in override and current_year in override[user.country]:
# holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d')
# for dt in
# override[user.country][current_year]]
#
# if user.country in override and next_year in override[user.country]:
# holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d')
# for dt in
# override[user.country][next_year]]
#
# if not use_datetime:
# # must cast to javascript timestamp
# holiday_current = [utcify(dt) for dt in holiday_current_raw]
# holiday_next = [utcify(dt) for dt in holiday_next_raw]
# else:
# # must cast to datetime as workalendar returns only Date objects
# holiday_current = [datetime(dt.year, dt.month, dt.day)
# for dt in holiday_current_raw]
# holiday_next = [datetime(dt.year, dt.month, dt.day)
# for dt in holiday_next_raw]
#
# return holiday_current + holiday_next
, which may contain function names, class names, or code. Output only the next line. | holidays = get_holiday(self.user, year) |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/python
#-*- coding: utf-8 -*-
try:
default_app = Celery()
except ImportError: # pragma: no cover
try:
except ImportError: # pragma: no cover
class CommandMixin(object):
preload_options = []
def setup_app_from_commandline(self, argv):
if len(argv) < 2:
print('No configuration file specified.', file=sys.stderr)
sys.exit(1)
<|code_end|>
with the help of current file imports:
import sys
from celery import Celery
from celery.app import default_app
from celery.bin.worker import worker as BaseWorkerCommand # noqa
from celery.bin.celeryd import WorkerCommand as BaseWorkerCommand
from pyvac.config import configure
from celery.concurrency.processes.forking import freeze_support
and context from other files:
# Path: pyvac/config.py
# def configure(filename='conf/pyvac.yaml', init_celery=True, default_app=None):
# with open(filename) as fdesc:
# conf = yaml.load(fdesc, YAMLLoader)
# if conf.get('logging'):
# dictConfig(conf.get('logging'))
#
# if init_celery:
# if not default_app:
# try:
# from celery import current_app as default_app
# except ImportError: # pragma: no cover
# from celery.app import default_app
#
# default_app.config_from_object(conf.get('celeryconfig'))
# # XXX: must call loader for celery to register all the tasks
# default_app.loader.import_default_modules()
#
# return conf
, which may contain function names, class names, or code. Output only the next line. | configure(sys.argv[1], default_app=default_app) |
Given snippet: <|code_start|>from __future__ import print_function, division
__all__ = ['Extinction']
class Extinction(object):
def __init__(self):
self.wav = None
self.chi = None
@property
def wav(self):
return self._wav
@wav.setter
def wav(self, value):
if value is None:
self._wav = None
else:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
from astropy import units as u
from astropy.table import Table
from ..utils.validator import validate_array
and context:
# Path: sedfitter/utils/validator.py
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
which might include code, classes, or functions. Output only the next line. | self._wav = validate_array('wav', value, ndim=1, |
Given snippet: <|code_start|> sed : SED
The SED, scaled to the new distance
"""
sed = self.copy()
sed.distance = distance * u.cm
sed.flux = sed.flux * (self.distance.to(u.cm) / sed.distance) ** 2
sed.error = sed.error * (self.distance.to(u.cm) / sed.distance) ** 2
return sed
def scale_to_av(self, av, law):
sed = self.copy()
sed.flux = sed.flux * 10. ** (av * law(sed.wav))
sed.error = sed.error * 10. ** (av * law(sed.wav))
return sed
@property
def wav(self):
"""
The wavelengths at which the SED is defined
"""
if self._wav is None and self._nu is not None:
return self._nu.to(u.micron, equivalencies=u.spectral())
else:
return self._wav
@wav.setter
def wav(self, value):
if value is None:
self._wav = None
else:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import numpy as np
from astropy import log
from astropy.io import fits
from astropy.table import Table
from scipy.interpolate import interp1d
from astropy import units as u
from ..utils.validator import validate_array
from .helpers import parse_unit_safe, assert_allclose_quantity, convert_flux
from copy import deepcopy
and context:
# Path: sedfitter/utils/validator.py
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/sed/helpers.py
# def parse_unit_safe(unit_string):
# if unit_string in UNIT_MAPPING:
# return UNIT_MAPPING[unit_string]
# else:
# return u.Unit(unit_string, parse_strict=False)
#
# def assert_allclose_quantity(q1, q2):
# if q1 is None and q2 is None:
# return True
# if q1 is None or q2 is None:
# raise AssertionError()
# else:
# np.testing.assert_allclose(q1.value, q2.to(q1.unit).value)
#
# def convert_flux(nu, flux, target_unit, distance=None):
# """
# Convert flux to a target unit
# """
#
# curr_unit = flux.unit
#
# if curr_unit.is_equivalent(u.erg / u.s):
# flux = flux / distance ** 2
# elif curr_unit.is_equivalent(u.Jy):
# flux = flux * nu
# elif not curr_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):
# raise Exception("Don't know how to convert {0} to ergs/cm^2/s" % (flux.unit))
#
# # Convert to requested unit
#
# if target_unit.is_equivalent(u.erg / u.s):
# flux = flux * distance ** 2
# elif target_unit.is_equivalent(u.Jy):
# flux = flux / nu
# elif not target_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):
# raise Exception("Don't know how to convert %s to %s" % (curr_unit, target_unit))
#
# return flux.to(target_unit)
which might include code, classes, or functions. Output only the next line. | self._wav = validate_array('wav', value, domain='positive', ndim=1, |
Given snippet: <|code_start|> unit_freq: `~astropy.units.Unit`, optional
The units to convert the frequency to.
unit_flux: `~astropy.units.Unit`, optional
The units to convert the flux to.
order: str, optional
Whether to sort the SED by increasing wavelength (`wav`) or
frequency ('nu').
"""
# Instantiate SED class
sed = cls()
# Assume that the filename may be missing the .gz extension
if not os.path.exists(filename) and os.path.exists(filename + '.gz'):
filename += ".gz"
# Open FILE file
hdulist = fits.open(filename, memmap=False)
# Extract model name
sed.name = hdulist[0].header['MODEL']
# Check if distance is specified in header, otherwise assume 1kpc
if 'DISTANCE' in hdulist[0].header:
sed.distance = hdulist[0].header['DISTANCE'] * u.cm
else:
log.debug("No distance found in SED file, assuming 1kpc")
sed.distance = 1. * u.kpc
# Extract SED values
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import numpy as np
from astropy import log
from astropy.io import fits
from astropy.table import Table
from scipy.interpolate import interp1d
from astropy import units as u
from ..utils.validator import validate_array
from .helpers import parse_unit_safe, assert_allclose_quantity, convert_flux
from copy import deepcopy
and context:
# Path: sedfitter/utils/validator.py
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/sed/helpers.py
# def parse_unit_safe(unit_string):
# if unit_string in UNIT_MAPPING:
# return UNIT_MAPPING[unit_string]
# else:
# return u.Unit(unit_string, parse_strict=False)
#
# def assert_allclose_quantity(q1, q2):
# if q1 is None and q2 is None:
# return True
# if q1 is None or q2 is None:
# raise AssertionError()
# else:
# np.testing.assert_allclose(q1.value, q2.to(q1.unit).value)
#
# def convert_flux(nu, flux, target_unit, distance=None):
# """
# Convert flux to a target unit
# """
#
# curr_unit = flux.unit
#
# if curr_unit.is_equivalent(u.erg / u.s):
# flux = flux / distance ** 2
# elif curr_unit.is_equivalent(u.Jy):
# flux = flux * nu
# elif not curr_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):
# raise Exception("Don't know how to convert {0} to ergs/cm^2/s" % (flux.unit))
#
# # Convert to requested unit
#
# if target_unit.is_equivalent(u.erg / u.s):
# flux = flux * distance ** 2
# elif target_unit.is_equivalent(u.Jy):
# flux = flux / nu
# elif not target_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):
# raise Exception("Don't know how to convert %s to %s" % (curr_unit, target_unit))
#
# return flux.to(target_unit)
which might include code, classes, or functions. Output only the next line. | wav = hdulist[1].data.field('WAVELENGTH') * parse_unit_safe(hdulist[1].columns[0].unit) |
Next line prediction: <|code_start|>
__all__ = ['SED']
class SED(object):
def __init__(self):
# Metadata
self.name = None
self.distance = None
# Spectral info
self.wav = None
self.nu = None
# Apertures
self.apertures = None
# Fluxes
self.flux = None
self.error = None
def __eq__(self, other):
try:
assert self.name == other.name
<|code_end|>
. Use current file imports:
(import os
import numpy as np
from astropy import log
from astropy.io import fits
from astropy.table import Table
from scipy.interpolate import interp1d
from astropy import units as u
from ..utils.validator import validate_array
from .helpers import parse_unit_safe, assert_allclose_quantity, convert_flux
from copy import deepcopy)
and context including class names, function names, or small code snippets from other files:
# Path: sedfitter/utils/validator.py
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/sed/helpers.py
# def parse_unit_safe(unit_string):
# if unit_string in UNIT_MAPPING:
# return UNIT_MAPPING[unit_string]
# else:
# return u.Unit(unit_string, parse_strict=False)
#
# def assert_allclose_quantity(q1, q2):
# if q1 is None and q2 is None:
# return True
# if q1 is None or q2 is None:
# raise AssertionError()
# else:
# np.testing.assert_allclose(q1.value, q2.to(q1.unit).value)
#
# def convert_flux(nu, flux, target_unit, distance=None):
# """
# Convert flux to a target unit
# """
#
# curr_unit = flux.unit
#
# if curr_unit.is_equivalent(u.erg / u.s):
# flux = flux / distance ** 2
# elif curr_unit.is_equivalent(u.Jy):
# flux = flux * nu
# elif not curr_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):
# raise Exception("Don't know how to convert {0} to ergs/cm^2/s" % (flux.unit))
#
# # Convert to requested unit
#
# if target_unit.is_equivalent(u.erg / u.s):
# flux = flux * distance ** 2
# elif target_unit.is_equivalent(u.Jy):
# flux = flux / nu
# elif not target_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):
# raise Exception("Don't know how to convert %s to %s" % (curr_unit, target_unit))
#
# return flux.to(target_unit)
. Output only the next line. | assert_allclose_quantity(self.distance, other.distance) |
Continue the code snippet: <|code_start|> filename += ".gz"
# Open FILE file
hdulist = fits.open(filename, memmap=False)
# Extract model name
sed.name = hdulist[0].header['MODEL']
# Check if distance is specified in header, otherwise assume 1kpc
if 'DISTANCE' in hdulist[0].header:
sed.distance = hdulist[0].header['DISTANCE'] * u.cm
else:
log.debug("No distance found in SED file, assuming 1kpc")
sed.distance = 1. * u.kpc
# Extract SED values
wav = hdulist[1].data.field('WAVELENGTH') * parse_unit_safe(hdulist[1].columns[0].unit)
nu = hdulist[1].data.field('FREQUENCY') * parse_unit_safe(hdulist[1].columns[1].unit)
ap = hdulist[2].data.field('APERTURE') * parse_unit_safe(hdulist[2].columns[0].unit)
flux = hdulist[3].data.field('TOTAL_FLUX') * parse_unit_safe(hdulist[3].columns[0].unit)
error = hdulist[3].data.field('TOTAL_FLUX_ERR') * parse_unit_safe(hdulist[3].columns[1].unit)
# Set SED attributes
sed.apertures = ap
# Convert wavelength and frequencies to requested units
sed.wav = wav.to(unit_wav)
sed.nu = nu.to(unit_freq)
# Set fluxes
<|code_end|>
. Use current file imports:
import os
import numpy as np
from astropy import log
from astropy.io import fits
from astropy.table import Table
from scipy.interpolate import interp1d
from astropy import units as u
from ..utils.validator import validate_array
from .helpers import parse_unit_safe, assert_allclose_quantity, convert_flux
from copy import deepcopy
and context (classes, functions, or code) from other files:
# Path: sedfitter/utils/validator.py
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/sed/helpers.py
# def parse_unit_safe(unit_string):
# if unit_string in UNIT_MAPPING:
# return UNIT_MAPPING[unit_string]
# else:
# return u.Unit(unit_string, parse_strict=False)
#
# def assert_allclose_quantity(q1, q2):
# if q1 is None and q2 is None:
# return True
# if q1 is None or q2 is None:
# raise AssertionError()
# else:
# np.testing.assert_allclose(q1.value, q2.to(q1.unit).value)
#
# def convert_flux(nu, flux, target_unit, distance=None):
# """
# Convert flux to a target unit
# """
#
# curr_unit = flux.unit
#
# if curr_unit.is_equivalent(u.erg / u.s):
# flux = flux / distance ** 2
# elif curr_unit.is_equivalent(u.Jy):
# flux = flux * nu
# elif not curr_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):
# raise Exception("Don't know how to convert {0} to ergs/cm^2/s" % (flux.unit))
#
# # Convert to requested unit
#
# if target_unit.is_equivalent(u.erg / u.s):
# flux = flux * distance ** 2
# elif target_unit.is_equivalent(u.Jy):
# flux = flux / nu
# elif not target_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):
# raise Exception("Don't know how to convert %s to %s" % (curr_unit, target_unit))
#
# return flux.to(target_unit)
. Output only the next line. | sed.flux = convert_flux(nu, flux, unit_flux, distance=sed.distance) |
Given the code snippet: <|code_start|>from __future__ import print_function, division
__all__ = ['extract_parameters']
def extract_parameters(input=None, output_prefix=None, output_suffix=None,
parameters='all', select_format=("N", 1), header=True):
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
from .fit_info import FitInfoFile
from .models import load_parameter_table
and context (functions, classes, or occasionally code) from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
. Output only the next line. | fin = FitInfoFile(input, 'r') |
Continue the code snippet: <|code_start|>from __future__ import print_function, division
__all__ = ['extract_parameters']
def extract_parameters(input=None, output_prefix=None, output_suffix=None,
parameters='all', select_format=("N", 1), header=True):
fin = FitInfoFile(input, 'r')
# Read in table of parameters for model grid
<|code_end|>
. Use current file imports:
import numpy as np
from .fit_info import FitInfoFile
from .models import load_parameter_table
and context (classes, functions, or code) from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
. Output only the next line. | t = load_parameter_table(fin.meta.model_dir) |
Using the snippet: <|code_start|> if model_names is None:
raise ValueError("model_names is required when using initialize_arrays=True")
if flux is None:
self.flux = np.zeros((self.n_models, self.n_ap)) * initialize_units
else:
self.flux = flux
if error is None:
self.error = np.zeros((self.n_models, self.n_ap)) * initialize_units
else:
self.error = error
else:
self.flux = flux
self.error = error
@property
def central_wavelength(self):
"""
The central or characteristic wavelength of the filter
"""
return self._wavelength
@central_wavelength.setter
def central_wavelength(self, value):
if value is None:
self._wavelength = None
else:
<|code_end|>
, determine the next line of code. You have imports:
import numpy as np
from scipy.interpolate import interp1d
from astropy.logger import log
from astropy import units as u
from astropy.io import fits
from astropy.table import Table
from ..utils.validator import validate_scalar, validate_array
from ..utils.io import read_table
from ..utils.misc import order_to_match
and context (class names, function names, or code) available:
# Path: sedfitter/utils/validator.py
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
#
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/utils/io.py
# def read_table(file_name):
# try:
# return Table.read(file_name, format='fits', character_as_bytes=False)
# except TypeError: # older versions of Astropy
# return Table.read(file_name, format='fits')
. Output only the next line. | self._wavelength = validate_scalar('central_wavelength', value, domain='strictly-positive', physical_type='length') |
Using the snippet: <|code_start|>
self.flux = flux
self.error = error
@property
def central_wavelength(self):
"""
The central or characteristic wavelength of the filter
"""
return self._wavelength
@central_wavelength.setter
def central_wavelength(self, value):
if value is None:
self._wavelength = None
else:
self._wavelength = validate_scalar('central_wavelength', value, domain='strictly-positive', physical_type='length')
@property
def model_names(self):
"""
The names of the models
"""
return self._model_names
@model_names.setter
def model_names(self, value):
if value is None:
self._model_names = value
else:
<|code_end|>
, determine the next line of code. You have imports:
import numpy as np
from scipy.interpolate import interp1d
from astropy.logger import log
from astropy import units as u
from astropy.io import fits
from astropy.table import Table
from ..utils.validator import validate_scalar, validate_array
from ..utils.io import read_table
from ..utils.misc import order_to_match
and context (class names, function names, or code) available:
# Path: sedfitter/utils/validator.py
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
#
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/utils/io.py
# def read_table(file_name):
# try:
# return Table.read(file_name, format='fits', character_as_bytes=False)
# except TypeError: # older versions of Astropy
# return Table.read(file_name, format='fits')
. Output only the next line. | self._model_names = validate_array('model_names', value, ndim=1) |
Continue the code snippet: <|code_start|> self.model_names = self.model_names[order]
self.flux = self.flux[order, :]
self.error = self.error[order, :]
@classmethod
def read(cls, filename):
"""
Read convolved flux from a FITS file
Parameters
----------
filename : str
The name of the FITS file to read the convolved fluxes from
"""
conv = cls()
# Open the convolved flux FITS file
convolved = fits.open(filename)
keywords = convolved[0].header
# Try and read in the wavelength of the filter
if 'FILTWAV' in keywords:
conv.central_wavelength = keywords['FILTWAV'] * u.micron
else:
conv.central_wavelength = None
# Read in apertures, if present
try:
<|code_end|>
. Use current file imports:
import numpy as np
from scipy.interpolate import interp1d
from astropy.logger import log
from astropy import units as u
from astropy.io import fits
from astropy.table import Table
from ..utils.validator import validate_scalar, validate_array
from ..utils.io import read_table
from ..utils.misc import order_to_match
and context (classes, functions, or code) from other files:
# Path: sedfitter/utils/validator.py
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
#
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/utils/io.py
# def read_table(file_name):
# try:
# return Table.read(file_name, format='fits', character_as_bytes=False)
# except TypeError: # older versions of Astropy
# return Table.read(file_name, format='fits')
. Output only the next line. | ta = read_table(convolved['APERTURES']) |
Predict the next line after this snippet: <|code_start|>
ROOT = os.path.dirname(__file__)
@pytest.mark.parametrize('format', ('par', 'conf'))
def test_parfile(format):
<|code_end|>
using the current file's imports:
import os
import pytest
from ..parfile import read
and any relevant context from other files:
# Path: sedfitter/utils/parfile.py
# def read(filename, format):
# """
# Read a parameter file and return a dictionary
# """
#
# parameters = {}
#
# for line in open(filename):
# if '=' in line and line[0] != "#" and line.strip() != "":
# cols = line.split('=')
# if format == 'par':
# value, key = cols[0].strip(), cols[1].strip()
# elif format == 'conf':
# key, value = cols[0].strip(), cols[1].strip()
# else:
# raise Exception("Format should be par or conf")
# try:
# value = int(value)
# except:
# try:
# value = float(value)
# except:
# if value.lower() in ['y', 'yes']:
# value = True
# elif value.lower() in ['n', 'no']:
# value = False
# parameters[key] = value
#
# return parameters
. Output only the next line. | p = read(os.path.join(ROOT, 'data', 'test.' + format), format) |
Predict the next line after this snippet: <|code_start|> log_x : bool, optional
Whether to plot the x-axis values in log space
log_y : bool, optional
Whether to plot the y-axis values in log space
label_x : str, optional
The x-axis label (if not specified, the parameter name is used)
label_y : str, optional
The y-axis label (if not specified, the parameter name is used)
additional : Table
A dictionary specifying additional parameters not listed in the
parameter list for the models. Each item of the dictionary should
itself be a dictionary giving the values for each model (where the key
is the model name).
plot_name: bool, optional
Whether to show the source name on the plot(s).
format: str, optional
The file format to use for the plot, if output_dir is specified.
dpi : int, optional
The resolution of the figure to save
"""
npix = 1024
if output_dir is None:
raise ValueError("No output directory has been specified")
# Create output directory
io.create_dir(output_dir)
# Open input file
<|code_end|>
using the current file's imports:
from copy import deepcopy
from astropy.table import join
from astropy.convolution import Tophat2DKernel
from scipy.ndimage import convolve
from matplotlib.font_manager import FontProperties
from .fit_info import FitInfoFile
from .models import load_parameter_table
from .utils import io
from .utils.formatter import LogFormatterMathtextAuto
from .plot_helpers import tex_friendly
import numpy as np
import matplotlib.pyplot as plt
and any relevant context from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
#
# Path: sedfitter/utils/io.py
# def create_dir(dir_name):
# def delete_dir(dir_name):
# def delete_file(file_name):
# def read_table(file_name):
#
# Path: sedfitter/utils/formatter.py
# class LogFormatterMathtextAuto(LogFormatterMathtext):
#
# def __call__(self, x, pos=None):
# if x in [0.001, 0.01, 0.1]:
# return str(x)
# elif x in [1., 10., 100., 1000.]:
# return str(int(x))
# else:
# return LogFormatterMathtext.__call__(self, x, pos=pos)
#
# Path: sedfitter/plot_helpers.py
# def tex_friendly(string):
# if plt.rcParams['text.usetex']:
# return string.replace('_', '\_').replace('%', '\%')
# else:
# return string
. Output only the next line. | fin = FitInfoFile(input_fits, 'r') |
Continue the code snippet: <|code_start|> Whether to plot the y-axis values in log space
label_x : str, optional
The x-axis label (if not specified, the parameter name is used)
label_y : str, optional
The y-axis label (if not specified, the parameter name is used)
additional : Table
A dictionary specifying additional parameters not listed in the
parameter list for the models. Each item of the dictionary should
itself be a dictionary giving the values for each model (where the key
is the model name).
plot_name: bool, optional
Whether to show the source name on the plot(s).
format: str, optional
The file format to use for the plot, if output_dir is specified.
dpi : int, optional
The resolution of the figure to save
"""
npix = 1024
if output_dir is None:
raise ValueError("No output directory has been specified")
# Create output directory
io.create_dir(output_dir)
# Open input file
fin = FitInfoFile(input_fits, 'r')
# Read in table of parameters for model grid
<|code_end|>
. Use current file imports:
from copy import deepcopy
from astropy.table import join
from astropy.convolution import Tophat2DKernel
from scipy.ndimage import convolve
from matplotlib.font_manager import FontProperties
from .fit_info import FitInfoFile
from .models import load_parameter_table
from .utils import io
from .utils.formatter import LogFormatterMathtextAuto
from .plot_helpers import tex_friendly
import numpy as np
import matplotlib.pyplot as plt
and context (classes, functions, or code) from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
#
# Path: sedfitter/utils/io.py
# def create_dir(dir_name):
# def delete_dir(dir_name):
# def delete_file(file_name):
# def read_table(file_name):
#
# Path: sedfitter/utils/formatter.py
# class LogFormatterMathtextAuto(LogFormatterMathtext):
#
# def __call__(self, x, pos=None):
# if x in [0.001, 0.01, 0.1]:
# return str(x)
# elif x in [1., 10., 100., 1000.]:
# return str(int(x))
# else:
# return LogFormatterMathtext.__call__(self, x, pos=pos)
#
# Path: sedfitter/plot_helpers.py
# def tex_friendly(string):
# if plt.rcParams['text.usetex']:
# return string.replace('_', '\_').replace('%', '\%')
# else:
# return string
. Output only the next line. | t = load_parameter_table(fin.meta.model_dir) |
Continue the code snippet: <|code_start|> select_format : tuple, optional
Tuple specifying which fits should be plotted. See the documentation
for a description of the tuple syntax.
log_x : bool, optional
Whether to plot the x-axis values in log space
log_y : bool, optional
Whether to plot the y-axis values in log space
label_x : str, optional
The x-axis label (if not specified, the parameter name is used)
label_y : str, optional
The y-axis label (if not specified, the parameter name is used)
additional : Table
A dictionary specifying additional parameters not listed in the
parameter list for the models. Each item of the dictionary should
itself be a dictionary giving the values for each model (where the key
is the model name).
plot_name: bool, optional
Whether to show the source name on the plot(s).
format: str, optional
The file format to use for the plot, if output_dir is specified.
dpi : int, optional
The resolution of the figure to save
"""
npix = 1024
if output_dir is None:
raise ValueError("No output directory has been specified")
# Create output directory
<|code_end|>
. Use current file imports:
from copy import deepcopy
from astropy.table import join
from astropy.convolution import Tophat2DKernel
from scipy.ndimage import convolve
from matplotlib.font_manager import FontProperties
from .fit_info import FitInfoFile
from .models import load_parameter_table
from .utils import io
from .utils.formatter import LogFormatterMathtextAuto
from .plot_helpers import tex_friendly
import numpy as np
import matplotlib.pyplot as plt
and context (classes, functions, or code) from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
#
# Path: sedfitter/utils/io.py
# def create_dir(dir_name):
# def delete_dir(dir_name):
# def delete_file(file_name):
# def read_table(file_name):
#
# Path: sedfitter/utils/formatter.py
# class LogFormatterMathtextAuto(LogFormatterMathtext):
#
# def __call__(self, x, pos=None):
# if x in [0.001, 0.01, 0.1]:
# return str(x)
# elif x in [1., 10., 100., 1000.]:
# return str(int(x))
# else:
# return LogFormatterMathtext.__call__(self, x, pos=pos)
#
# Path: sedfitter/plot_helpers.py
# def tex_friendly(string):
# if plt.rcParams['text.usetex']:
# return string.replace('_', '\_').replace('%', '\%')
# else:
# return string
. Output only the next line. | io.create_dir(output_dir) |
Given snippet: <|code_start|> [ymin, ymax]])
elif log_y:
gray_all, ex, ey = np.histogram2d(tpos[parameter_x],
np.log10(tpos[parameter_y]), bins=npix,
range=[[xmin, xmax],
[np.log10(ymin), np.log10(ymax)]])
else:
gray_all, ex, ey = np.histogram2d(tpos[parameter_x],
tpos[parameter_y], bins=npix,
range=[[xmin, xmax],
[ymin, ymax]])
gray_all = convolve(gray_all, KERNEL)
gray_all = np.clip(gray_all, 0., 13.)
# Grayscale showing all models. Since pcolormesh is very slow for PDF, we
# create a 'ghost' axis which is already in log space.
ax_log = get_axes(fig, label='log', zorder=-100)
ax_log.axis('off')
ax_log.imshow(gray_all.transpose(), cmap='binary', vmin=0, vmax=40.,
extent=[ex[0], ex[-1], ey[0], ey[-1]], aspect='auto',
zorder=-100, origin='lower')
ax.patch.set_facecolor('none')
ax.set_xlabel(parameter_x if label_x is None else label_x)
ax.set_ylabel(parameter_y if label_y is None else label_y)
if log_x:
ax.set_xscale('log')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from copy import deepcopy
from astropy.table import join
from astropy.convolution import Tophat2DKernel
from scipy.ndimage import convolve
from matplotlib.font_manager import FontProperties
from .fit_info import FitInfoFile
from .models import load_parameter_table
from .utils import io
from .utils.formatter import LogFormatterMathtextAuto
from .plot_helpers import tex_friendly
import numpy as np
import matplotlib.pyplot as plt
and context:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
#
# Path: sedfitter/utils/io.py
# def create_dir(dir_name):
# def delete_dir(dir_name):
# def delete_file(file_name):
# def read_table(file_name):
#
# Path: sedfitter/utils/formatter.py
# class LogFormatterMathtextAuto(LogFormatterMathtext):
#
# def __call__(self, x, pos=None):
# if x in [0.001, 0.01, 0.1]:
# return str(x)
# elif x in [1., 10., 100., 1000.]:
# return str(int(x))
# else:
# return LogFormatterMathtext.__call__(self, x, pos=pos)
#
# Path: sedfitter/plot_helpers.py
# def tex_friendly(string):
# if plt.rcParams['text.usetex']:
# return string.replace('_', '\_').replace('%', '\%')
# else:
# return string
which might include code, classes, or functions. Output only the next line. | ax.xaxis.set_major_formatter(LogFormatterMathtextAuto()) |
Predict the next line after this snippet: <|code_start|> ax_log.set_xlim(xmin, xmax)
if log_y:
ax_log.set_ylim(np.log10(ymin), np.log10(ymax))
else:
ax_log.set_ylim(ymin, ymax)
ax.set_autoscale_on(False)
ax_log.set_autoscale_on(False)
pfits = None
source_label = None
for info in fin:
# Remove previous histogram
if pfits is not None:
pfits.remove()
if source_label is not None:
source_label.remove()
# Filter fits
info.keep(select_format)
# Get filtered and sorted table of parameters
tsorted = info.filter_table(t)
pfits = ax.scatter(tsorted[parameter_x], tsorted[parameter_y], c='black', s=10)
if plot_name:
<|code_end|>
using the current file's imports:
from copy import deepcopy
from astropy.table import join
from astropy.convolution import Tophat2DKernel
from scipy.ndimage import convolve
from matplotlib.font_manager import FontProperties
from .fit_info import FitInfoFile
from .models import load_parameter_table
from .utils import io
from .utils.formatter import LogFormatterMathtextAuto
from .plot_helpers import tex_friendly
import numpy as np
import matplotlib.pyplot as plt
and any relevant context from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
#
# Path: sedfitter/utils/io.py
# def create_dir(dir_name):
# def delete_dir(dir_name):
# def delete_file(file_name):
# def read_table(file_name):
#
# Path: sedfitter/utils/formatter.py
# class LogFormatterMathtextAuto(LogFormatterMathtext):
#
# def __call__(self, x, pos=None):
# if x in [0.001, 0.01, 0.1]:
# return str(x)
# elif x in [1., 10., 100., 1000.]:
# return str(int(x))
# else:
# return LogFormatterMathtext.__call__(self, x, pos=pos)
#
# Path: sedfitter/plot_helpers.py
# def tex_friendly(string):
# if plt.rcParams['text.usetex']:
# return string.replace('_', '\_').replace('%', '\%')
# else:
# return string
. Output only the next line. | source_label = ax.text(0.5, 0.95, tex_friendly(info.source.name), |
Given snippet: <|code_start|> for a description of the tuple syntax.
log_x : bool, optional
Whether to plot the x-axis values in log space
log_y : bool, optional
Whether to plot the y-axis values in log space
label : str, optional
The x-axis label (if not specified, the parameter name is used)
bins : int, optional
The number of bins for the histogram
hist_range : tuple, optional
The range of values to show
additional : dict, optional
A dictionary specifying additional parameters not listed in the
parameter list for the models. Each item of the dictionary should
itself be a dictionary giving the values for each model (where the key
is the model name).
plot_name: bool, optional
Whether to show the source name on the plot(s).
format: str, optional
The file format to use for the plot, if output_dir is specified.
dpi : int, optional
The resolution of the figure to save
"""
if output_dir is None:
raise ValueError("No output directory has been specified")
# Create output directory
io.create_dir(output_dir)
# Open input file
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from copy import deepcopy
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Polygon
from .fit_info import FitInfoFile
from .models import load_parameter_table
from .utils import io
from .utils.formatter import LogFormatterMathtextAuto
from .plot_helpers import tex_friendly
import numpy as np
import matplotlib.pyplot as plt
and context:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
#
# Path: sedfitter/utils/io.py
# def create_dir(dir_name):
# def delete_dir(dir_name):
# def delete_file(file_name):
# def read_table(file_name):
#
# Path: sedfitter/utils/formatter.py
# class LogFormatterMathtextAuto(LogFormatterMathtext):
#
# def __call__(self, x, pos=None):
# if x in [0.001, 0.01, 0.1]:
# return str(x)
# elif x in [1., 10., 100., 1000.]:
# return str(int(x))
# else:
# return LogFormatterMathtext.__call__(self, x, pos=pos)
#
# Path: sedfitter/plot_helpers.py
# def tex_friendly(string):
# if plt.rcParams['text.usetex']:
# return string.replace('_', '\_').replace('%', '\%')
# else:
# return string
which might include code, classes, or functions. Output only the next line. | fin = FitInfoFile(input_fits, 'r') |
Next line prediction: <|code_start|> log_y : bool, optional
Whether to plot the y-axis values in log space
label : str, optional
The x-axis label (if not specified, the parameter name is used)
bins : int, optional
The number of bins for the histogram
hist_range : tuple, optional
The range of values to show
additional : dict, optional
A dictionary specifying additional parameters not listed in the
parameter list for the models. Each item of the dictionary should
itself be a dictionary giving the values for each model (where the key
is the model name).
plot_name: bool, optional
Whether to show the source name on the plot(s).
format: str, optional
The file format to use for the plot, if output_dir is specified.
dpi : int, optional
The resolution of the figure to save
"""
if output_dir is None:
raise ValueError("No output directory has been specified")
# Create output directory
io.create_dir(output_dir)
# Open input file
fin = FitInfoFile(input_fits, 'r')
# Read in table of parameters for model grid
<|code_end|>
. Use current file imports:
(from copy import deepcopy
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Polygon
from .fit_info import FitInfoFile
from .models import load_parameter_table
from .utils import io
from .utils.formatter import LogFormatterMathtextAuto
from .plot_helpers import tex_friendly
import numpy as np
import matplotlib.pyplot as plt)
and context including class names, function names, or small code snippets from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
#
# Path: sedfitter/utils/io.py
# def create_dir(dir_name):
# def delete_dir(dir_name):
# def delete_file(file_name):
# def read_table(file_name):
#
# Path: sedfitter/utils/formatter.py
# class LogFormatterMathtextAuto(LogFormatterMathtext):
#
# def __call__(self, x, pos=None):
# if x in [0.001, 0.01, 0.1]:
# return str(x)
# elif x in [1., 10., 100., 1000.]:
# return str(int(x))
# else:
# return LogFormatterMathtext.__call__(self, x, pos=pos)
#
# Path: sedfitter/plot_helpers.py
# def tex_friendly(string):
# if plt.rcParams['text.usetex']:
# return string.replace('_', '\_').replace('%', '\%')
# else:
# return string
. Output only the next line. | t = load_parameter_table(fin.meta.model_dir) |
Given the code snippet: <|code_start|> If specified, plots are written to that directory
select_format : tuple, optional
Tuple specifying which fits should be plotted. See the documentation
for a description of the tuple syntax.
log_x : bool, optional
Whether to plot the x-axis values in log space
log_y : bool, optional
Whether to plot the y-axis values in log space
label : str, optional
The x-axis label (if not specified, the parameter name is used)
bins : int, optional
The number of bins for the histogram
hist_range : tuple, optional
The range of values to show
additional : dict, optional
A dictionary specifying additional parameters not listed in the
parameter list for the models. Each item of the dictionary should
itself be a dictionary giving the values for each model (where the key
is the model name).
plot_name: bool, optional
Whether to show the source name on the plot(s).
format: str, optional
The file format to use for the plot, if output_dir is specified.
dpi : int, optional
The resolution of the figure to save
"""
if output_dir is None:
raise ValueError("No output directory has been specified")
# Create output directory
<|code_end|>
, generate the next line using the imports in this file:
from copy import deepcopy
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Polygon
from .fit_info import FitInfoFile
from .models import load_parameter_table
from .utils import io
from .utils.formatter import LogFormatterMathtextAuto
from .plot_helpers import tex_friendly
import numpy as np
import matplotlib.pyplot as plt
and context (functions, classes, or occasionally code) from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
#
# Path: sedfitter/utils/io.py
# def create_dir(dir_name):
# def delete_dir(dir_name):
# def delete_file(file_name):
# def read_table(file_name):
#
# Path: sedfitter/utils/formatter.py
# class LogFormatterMathtextAuto(LogFormatterMathtext):
#
# def __call__(self, x, pos=None):
# if x in [0.001, 0.01, 0.1]:
# return str(x)
# elif x in [1., 10., 100., 1000.]:
# return str(int(x))
# else:
# return LogFormatterMathtext.__call__(self, x, pos=pos)
#
# Path: sedfitter/plot_helpers.py
# def tex_friendly(string):
# if plt.rcParams['text.usetex']:
# return string.replace('_', '\_').replace('%', '\%')
# else:
# return string
. Output only the next line. | io.create_dir(output_dir) |
Predict the next line for this snippet: <|code_start|> # Find range of values
if hist_range is None:
pmin, pmax = tpos[parameter].min(), tpos[parameter].max()
else:
pmin, pmax = hist_range
# Compute histogram
if log_x:
hist_all, edges = np.histogram(np.log10(tpos[parameter]), bins=bins, range=[np.log10(pmin), np.log10(pmax)])
center = (edges[1:] + edges[:-1]) / 2.
edges, center = 10. ** edges, 10. ** center
else:
hist_all, edges = np.histogram(t[parameter], bins=bins, range=[pmin, pmax])
center = (edges[1:] + edges[:-1]) / 2.
# Grayscale showing all models
p = []
for i in range(len(hist_all)):
p.append((edges[i], max(hist_all[i], 0.01)))
p.append((edges[i + 1], max(hist_all[i], 0.01)))
p.append((edges[-1], 0.01))
p.append((edges[0], 0.01))
p = Polygon(p, facecolor='0.8', edgecolor='none')
ax.add_patch(p)
ax.set_xlabel(parameter if label is None else label)
if log_x:
ax.set_xscale('log')
<|code_end|>
with the help of current file imports:
from copy import deepcopy
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Polygon
from .fit_info import FitInfoFile
from .models import load_parameter_table
from .utils import io
from .utils.formatter import LogFormatterMathtextAuto
from .plot_helpers import tex_friendly
import numpy as np
import matplotlib.pyplot as plt
and context from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
#
# Path: sedfitter/utils/io.py
# def create_dir(dir_name):
# def delete_dir(dir_name):
# def delete_file(file_name):
# def read_table(file_name):
#
# Path: sedfitter/utils/formatter.py
# class LogFormatterMathtextAuto(LogFormatterMathtext):
#
# def __call__(self, x, pos=None):
# if x in [0.001, 0.01, 0.1]:
# return str(x)
# elif x in [1., 10., 100., 1000.]:
# return str(int(x))
# else:
# return LogFormatterMathtext.__call__(self, x, pos=pos)
#
# Path: sedfitter/plot_helpers.py
# def tex_friendly(string):
# if plt.rcParams['text.usetex']:
# return string.replace('_', '\_').replace('%', '\%')
# else:
# return string
, which may contain function names, class names, or code. Output only the next line. | ax.xaxis.set_major_formatter(LogFormatterMathtextAuto()) |
Based on the snippet: <|code_start|>
# Remove previous histogram
if pfits is not None:
pfits.remove()
if source_label is not None:
source_label.remove()
# Filter fits
info.keep(select_format)
# Get filtered and sorted table of parameters
tsorted = info.filter_table(t, additional=additional)
# Compute histogram
if log_x:
hist, _ = np.histogram(np.log10(tsorted[parameter]), bins=bins, range=[np.log10(pmin), np.log10(pmax)])
else:
hist, _ = np.histogram(tsorted[parameter], bins=bins, range=[pmin, pmax])
# Histogram showing values for good-fitting models
p = []
for i in range(len(hist)):
p.append((edges[i], max(hist[i], 0.01)))
p.append((edges[i + 1], max(hist[i], 0.01)))
p.append((edges[-1], 0.01))
p.append((edges[0], 0.01))
pfits = Polygon(p, hatch='/', edgecolor='black', facecolor='none')
ax.add_patch(pfits)
if plot_name:
<|code_end|>
, predict the immediate next line with the help of imports:
from copy import deepcopy
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Polygon
from .fit_info import FitInfoFile
from .models import load_parameter_table
from .utils import io
from .utils.formatter import LogFormatterMathtextAuto
from .plot_helpers import tex_friendly
import numpy as np
import matplotlib.pyplot as plt
and context (classes, functions, sometimes code) from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
#
# Path: sedfitter/utils/io.py
# def create_dir(dir_name):
# def delete_dir(dir_name):
# def delete_file(file_name):
# def read_table(file_name):
#
# Path: sedfitter/utils/formatter.py
# class LogFormatterMathtextAuto(LogFormatterMathtext):
#
# def __call__(self, x, pos=None):
# if x in [0.001, 0.01, 0.1]:
# return str(x)
# elif x in [1., 10., 100., 1000.]:
# return str(int(x))
# else:
# return LogFormatterMathtext.__call__(self, x, pos=pos)
#
# Path: sedfitter/plot_helpers.py
# def tex_friendly(string):
# if plt.rcParams['text.usetex']:
# return string.replace('_', '\_').replace('%', '\%')
# else:
# return string
. Output only the next line. | source_label = ax.text(0.5, 0.95, tex_friendly(info.source.name), |
Given the following code snippet before the placeholder: <|code_start|>
def integrate_subset(x, y, xmin, xmax):
"""
Perform trapezium integration of a set of points (x,y) between bounds xmin
and xmax. The interpolation between the points is done in linear space, so
this is designed for functions that are piecewise linear in linear space.
"""
# Swap arrays if necessary
if x[-1] < x[0]:
x = x[::-1]
y = y[::-1]
# Swap limits if necessary
if xmin > xmax:
xmin, xmax = xmax, xmin
elif xmin == xmax:
return 0.
# Find the subset of points to use and the value of the function at the
# end-points of the integration
if xmin == x[0]:
i1 = 1
ymin = y[0]
else:
i1 = np.searchsorted(x, xmin)
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
from .interpolate import interp1d_fast
and context including class names, function names, and sometimes code from other files:
# Path: sedfitter/utils/interpolate.py
# @check_bounds
# def interp1d_fast(x, y, xval):
# """On-the-fly linear interpolator"""
# if len(x) != len(y):
# raise Exception("x and y should have the same length")
# ipos = np.searchsorted(x, xval)
# return (xval - x[ipos - 1]) \
# / (x[ipos] - x[ipos - 1]) \
# * (y[ipos] - y[ipos - 1]) \
# + y[ipos - 1]
. Output only the next line. | ymin = interp1d_fast(x[i1 - 1:i1 + 1], y[i1 - 1:i1 + 1], xmin) |
Next line prediction: <|code_start|>
def test_roundrip(tmpdir):
n_models = 30
n_ap = 3
n_wav = 10
<|code_end|>
. Use current file imports:
(import numpy as np
from astropy import units as u
from ..cube import SEDCube)
and context including class names, function names, or small code snippets from other files:
# Path: sedfitter/sed/cube.py
# class SEDCube(BaseCube):
# _physical_type = ('power', 'flux', 'spectral flux density')
#
# def get_sed(self, model_name):
#
# try:
# sed_index = np.nonzero(self.names == model_name)[0][0]
# except IndexError:
# raise ValueError("Model '{0}' not found in SED cube".format(model_name))
#
# from .sed import SED
# sed = SED()
# sed.name = model_name
# sed.distance = self.distance
# sed.wav = self.wav
# sed.nu = self.nu
# sed.apertures = self.apertures
# sed.flux = self.val[sed_index, :,:]
# sed.error = self.unc[sed_index, :,:]
# return sed
. Output only the next line. | s = SEDCube() |
Continue the code snippet: <|code_start|> shape=None if self.wav is None else (len(self.wav),),
physical_type='frequency')
@property
def apertures(self):
"""
The ap at which the SEDs are defined.
"""
return self._apertures
@apertures.setter
def apertures(self, value):
if value is None:
self._apertures = None
else:
self._apertures = validate_array('apertures', value, domain='positive',
ndim=1, physical_type='length')
@property
def distance(self):
"""
The distance at which the SEDs are defined.
"""
return self._distance
@distance.setter
def distance(self, value):
if value is None:
self._distance = None
else:
<|code_end|>
. Use current file imports:
import abc
import numpy as np
import six
from astropy import units as u
from astropy.io import fits
from astropy.table import Table
from ..utils.validator import validate_scalar, validate_array
from .helpers import parse_unit_safe, table_to_hdu, assert_allclose_quantity
from .sed import SED
and context (classes, functions, or code) from other files:
# Path: sedfitter/utils/validator.py
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
#
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/sed/helpers.py
# def parse_unit_safe(unit_string):
# if unit_string in UNIT_MAPPING:
# return UNIT_MAPPING[unit_string]
# else:
# return u.Unit(unit_string, parse_strict=False)
#
# def table_to_hdu(table):
# hdu = fits.BinTableHDU(np.array(table))
# for i in range(len(table.columns)):
# if table.columns[i].unit is not None:
# hdu.columns[i].unit = table.columns[i].unit.to_string(format='fits')
# return hdu
#
# def assert_allclose_quantity(q1, q2):
# if q1 is None and q2 is None:
# return True
# if q1 is None or q2 is None:
# raise AssertionError()
# else:
# np.testing.assert_allclose(q1.value, q2.to(q1.unit).value)
. Output only the next line. | self._distance = validate_scalar('distance', value, domain='positive', |
Based on the snippet: <|code_start|>
assert_allclose_quantity(self.wav, other.wav)
assert_allclose_quantity(self.nu, other.nu)
assert_allclose_quantity(self.apertures, other.apertures)
assert_allclose_quantity(self.val, other.val)
assert_allclose_quantity(self.unc, other.unc)
except AssertionError:
raise
return False
else:
return True
@property
def valid(self):
"""
Which models are valid
"""
if self.n_models is None or self._valid is not None:
return self._valid
else:
return np.ones(self.n_models)
@valid.setter
def valid(self, value):
if value is None:
self._valid = None
else:
<|code_end|>
, predict the immediate next line with the help of imports:
import abc
import numpy as np
import six
from astropy import units as u
from astropy.io import fits
from astropy.table import Table
from ..utils.validator import validate_scalar, validate_array
from .helpers import parse_unit_safe, table_to_hdu, assert_allclose_quantity
from .sed import SED
and context (classes, functions, sometimes code) from other files:
# Path: sedfitter/utils/validator.py
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
#
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/sed/helpers.py
# def parse_unit_safe(unit_string):
# if unit_string in UNIT_MAPPING:
# return UNIT_MAPPING[unit_string]
# else:
# return u.Unit(unit_string, parse_strict=False)
#
# def table_to_hdu(table):
# hdu = fits.BinTableHDU(np.array(table))
# for i in range(len(table.columns)):
# if table.columns[i].unit is not None:
# hdu.columns[i].unit = table.columns[i].unit.to_string(format='fits')
# return hdu
#
# def assert_allclose_quantity(q1, q2):
# if q1 is None and q2 is None:
# return True
# if q1 is None or q2 is None:
# raise AssertionError()
# else:
# np.testing.assert_allclose(q1.value, q2.to(q1.unit).value)
. Output only the next line. | self._valid = validate_array('valid', value, ndim=1, |
Using the snippet: <|code_start|> Read models from a FITS file.
Parameters
----------
filename: str
The name of the file to read the cube from.
order: str, optional
Whether to sort the SED by increasing wavelength (`wav`) or
frequency ('nu').
"""
# Create class instance
cube = cls()
# Open FILE file
hdulist = fits.open(filename, memmap=memmap)
# Extract distance
cube.distance = hdulist[0].header['DISTANCE'] * u.cm
# Get validity
cube.valid = hdulist[0].data.astype(bool)
# Extract model names
cube.names = hdulist['MODEL_NAMES'].data['MODEL_NAME'].astype(str)
# Extract wavelengths
hdu_spectral = hdulist['SPECTRAL_INFO']
cube.wav = u.Quantity(hdu_spectral.data['WAVELENGTH'],
<|code_end|>
, determine the next line of code. You have imports:
import abc
import numpy as np
import six
from astropy import units as u
from astropy.io import fits
from astropy.table import Table
from ..utils.validator import validate_scalar, validate_array
from .helpers import parse_unit_safe, table_to_hdu, assert_allclose_quantity
from .sed import SED
and context (class names, function names, or code) available:
# Path: sedfitter/utils/validator.py
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
#
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/sed/helpers.py
# def parse_unit_safe(unit_string):
# if unit_string in UNIT_MAPPING:
# return UNIT_MAPPING[unit_string]
# else:
# return u.Unit(unit_string, parse_strict=False)
#
# def table_to_hdu(table):
# hdu = fits.BinTableHDU(np.array(table))
# for i in range(len(table.columns)):
# if table.columns[i].unit is not None:
# hdu.columns[i].unit = table.columns[i].unit.to_string(format='fits')
# return hdu
#
# def assert_allclose_quantity(q1, q2):
# if q1 is None and q2 is None:
# return True
# if q1 is None or q2 is None:
# raise AssertionError()
# else:
# np.testing.assert_allclose(q1.value, q2.to(q1.unit).value)
. Output only the next line. | parse_unit_safe(hdu_spectral.columns[0].unit)) |
Next line prediction: <|code_start|> if self.distance is None:
raise ValueError("Value 'distance' is not set")
def write(self, filename, overwrite=False, meta={}):
"""
Write the models to a FITS file.
Parameters
----------
filename: str
The name of the file to write the cube to.
"""
self._check_all_set()
hdulist = fits.HDUList()
# Create empty first HDU and add distance
hdu0 = fits.PrimaryHDU(data=self.valid.astype(int))
hdu0.header['distance'] = (self.distance.to(u.cm).value, 'Distance assumed for the values, in cm')
hdu0.header['NWAV'] = (self.n_wav, "Number of wavelengths")
if self.apertures is not None:
hdu0.header['NAP'] = (self.n_ap, "Number of apertures")
for key in meta:
hdu0.header[key] = meta[key]
hdulist.append(hdu0)
# Create names table
t1 = Table()
t1['MODEL_NAME'] = np.array(self.names, 'S')
<|code_end|>
. Use current file imports:
(import abc
import numpy as np
import six
from astropy import units as u
from astropy.io import fits
from astropy.table import Table
from ..utils.validator import validate_scalar, validate_array
from .helpers import parse_unit_safe, table_to_hdu, assert_allclose_quantity
from .sed import SED)
and context including class names, function names, or small code snippets from other files:
# Path: sedfitter/utils/validator.py
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
#
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/sed/helpers.py
# def parse_unit_safe(unit_string):
# if unit_string in UNIT_MAPPING:
# return UNIT_MAPPING[unit_string]
# else:
# return u.Unit(unit_string, parse_strict=False)
#
# def table_to_hdu(table):
# hdu = fits.BinTableHDU(np.array(table))
# for i in range(len(table.columns)):
# if table.columns[i].unit is not None:
# hdu.columns[i].unit = table.columns[i].unit.to_string(format='fits')
# return hdu
#
# def assert_allclose_quantity(q1, q2):
# if q1 is None and q2 is None:
# return True
# if q1 is None or q2 is None:
# raise AssertionError()
# else:
# np.testing.assert_allclose(q1.value, q2.to(q1.unit).value)
. Output only the next line. | hdu1 = table_to_hdu(t1) |
Given the following code snippet before the placeholder: <|code_start|>
def __init__(self, valid=None, names=None, distance=None, wav=None,
nu=None, apertures=None, val=None, unc=None):
# Which models are valid
self.valid = valid
# The names of all the models
self.names = names
# The distance at which the fluxes are defined
self.distance = distance
# The wavelengths and ap
self.wav = wav
self.nu = nu
self.apertures = apertures
# The value and uncertainties
self.val = val
self.unc = unc
def __eq__(self, other):
try:
assert np.all(self.valid == other.valid)
assert np.all(self.names == other.names)
<|code_end|>
, predict the next line using imports from the current file:
import abc
import numpy as np
import six
from astropy import units as u
from astropy.io import fits
from astropy.table import Table
from ..utils.validator import validate_scalar, validate_array
from .helpers import parse_unit_safe, table_to_hdu, assert_allclose_quantity
from .sed import SED
and context including class names, function names, and sometimes code from other files:
# Path: sedfitter/utils/validator.py
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
#
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# Path: sedfitter/sed/helpers.py
# def parse_unit_safe(unit_string):
# if unit_string in UNIT_MAPPING:
# return UNIT_MAPPING[unit_string]
# else:
# return u.Unit(unit_string, parse_strict=False)
#
# def table_to_hdu(table):
# hdu = fits.BinTableHDU(np.array(table))
# for i in range(len(table.columns)):
# if table.columns[i].unit is not None:
# hdu.columns[i].unit = table.columns[i].unit.to_string(format='fits')
# return hdu
#
# def assert_allclose_quantity(q1, q2):
# if q1 is None and q2 is None:
# return True
# if q1 is None or q2 is None:
# raise AssertionError()
# else:
# np.testing.assert_allclose(q1.value, q2.to(q1.unit).value)
. Output only the next line. | assert_allclose_quantity(self.distance, other.distance) |
Given the code snippet: <|code_start|>
# Define new filter
f = Filter()
f.name = self.name
f.central_wavelength = self.central_wavelength
f.nu = nu_new
self_nu_hz = self.nu.to(u.Hz).value
nu_new_hz = f.nu.to(u.Hz).value
# Compute re-binned transmission
f.response = np.zeros(nu_new.shape)
for i in range(len(f.response)):
if i == 0:
nu1 = nu_new_hz[0]
else:
nu1 = 0.5 * (nu_new_hz[i - 1] + nu_new_hz[i])
if i == len(nu_new_hz) - 1:
nu2 = nu_new_hz[-1]
else:
nu2 = 0.5 * (nu_new_hz[i] + nu_new_hz[i + 1])
nu1 = min(max(nu1, self_nu_hz[0]), self_nu_hz[-1])
nu2 = min(max(nu2, self_nu_hz[0]), self_nu_hz[-1])
if nu2 != nu1:
<|code_end|>
, generate the next line using the imports in this file:
import os
import numpy as np
from astropy import units as u
from ..utils.integrate import integrate_subset, integrate
from ..utils.validator import validate_array, validate_scalar
and context (functions, classes, or occasionally code) from other files:
# Path: sedfitter/utils/integrate.py
# def integrate_subset(x, y, xmin, xmax):
# """
# Perform trapezium integration of a set of points (x,y) between bounds xmin
# and xmax. The interpolation between the points is done in linear space, so
# this is designed for functions that are piecewise linear in linear space.
# """
#
# # Swap arrays if necessary
# if x[-1] < x[0]:
# x = x[::-1]
# y = y[::-1]
#
# # Swap limits if necessary
# if xmin > xmax:
# xmin, xmax = xmax, xmin
# elif xmin == xmax:
# return 0.
#
# # Find the subset of points to use and the value of the function at the
# # end-points of the integration
#
# if xmin == x[0]:
# i1 = 1
# ymin = y[0]
# else:
# i1 = np.searchsorted(x, xmin)
# ymin = interp1d_fast(x[i1 - 1:i1 + 1], y[i1 - 1:i1 + 1], xmin)
#
# if xmax == x[-1]:
# i2 = -2
# ymax = y[-1]
# else:
# i2 = np.searchsorted(x, xmax)
# ymax = interp1d_fast(x[i2 - 1:i2 + 1], y[i2 - 1:i2 + 1], xmax)
#
# # Construct sub-arrays of the relevant data
# x = np.hstack([xmin, x[i1:i2], xmax])
# y = np.hstack([ymin, y[i1:i2], ymax])
#
# # Call function to integrate the whole subset
# return integrate(x, y)
#
# def integrate(x, y):
# """
# Perform trapezium integration of a set of points (x,y). The interpolation
# between the points is done in linear space, so this is designed for
# functions that are piecewise linear in linear space.
# """
#
# # Fix NaN values
# y[np.isnan(y)] = 0.
#
# # Find the integral of all the chunks
# integrals = 0.5 * (x[1:] - x[:-1]) * (y[1:] + y[:-1])
#
# # Sum them all up
# integral = np.sum(integrals)
#
# # Check if the integral is NaN or infinity
# if np.isnan(integral) or np.isinf(integral):
# raise Exception("Integral is NaN or Inf")
#
# return integral
#
# Path: sedfitter/utils/validator.py
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
. Output only the next line. | f.response[i] = integrate_subset(self_nu_hz, self.response, nu1, nu2) |
Continue the code snippet: <|code_start|> filename: str
The name of the file containing the filter
"""
self = cls()
# Read in central wavelength
self.central_wavelength = float(open(filename, 'r').readline().split('=')[1]) * u.micron
# Read in spectral response curve
self.wav = np.loadtxt(filename, usecols=[0], dtype=float) * u.micron
self.response = np.loadtxt(filename, usecols=[1], dtype=float)
# Compute frequency
self.nu = self.wav.to(u.Hz, equivalencies=u.spectral())
# Set name
if self.name is None:
self.name = os.path.basename(filename).split('.')[0]
return self
def normalize(self):
"""
Normalize so the integral over nu is 1
"""
if self.nu is None:
raise ValueError("nu has not been set")
if self.response is None:
raise ValueError("response has not been set")
<|code_end|>
. Use current file imports:
import os
import numpy as np
from astropy import units as u
from ..utils.integrate import integrate_subset, integrate
from ..utils.validator import validate_array, validate_scalar
and context (classes, functions, or code) from other files:
# Path: sedfitter/utils/integrate.py
# def integrate_subset(x, y, xmin, xmax):
# """
# Perform trapezium integration of a set of points (x,y) between bounds xmin
# and xmax. The interpolation between the points is done in linear space, so
# this is designed for functions that are piecewise linear in linear space.
# """
#
# # Swap arrays if necessary
# if x[-1] < x[0]:
# x = x[::-1]
# y = y[::-1]
#
# # Swap limits if necessary
# if xmin > xmax:
# xmin, xmax = xmax, xmin
# elif xmin == xmax:
# return 0.
#
# # Find the subset of points to use and the value of the function at the
# # end-points of the integration
#
# if xmin == x[0]:
# i1 = 1
# ymin = y[0]
# else:
# i1 = np.searchsorted(x, xmin)
# ymin = interp1d_fast(x[i1 - 1:i1 + 1], y[i1 - 1:i1 + 1], xmin)
#
# if xmax == x[-1]:
# i2 = -2
# ymax = y[-1]
# else:
# i2 = np.searchsorted(x, xmax)
# ymax = interp1d_fast(x[i2 - 1:i2 + 1], y[i2 - 1:i2 + 1], xmax)
#
# # Construct sub-arrays of the relevant data
# x = np.hstack([xmin, x[i1:i2], xmax])
# y = np.hstack([ymin, y[i1:i2], ymax])
#
# # Call function to integrate the whole subset
# return integrate(x, y)
#
# def integrate(x, y):
# """
# Perform trapezium integration of a set of points (x,y). The interpolation
# between the points is done in linear space, so this is designed for
# functions that are piecewise linear in linear space.
# """
#
# # Fix NaN values
# y[np.isnan(y)] = 0.
#
# # Find the integral of all the chunks
# integrals = 0.5 * (x[1:] - x[:-1]) * (y[1:] + y[:-1])
#
# # Sum them all up
# integral = np.sum(integrals)
#
# # Check if the integral is NaN or infinity
# if np.isnan(integral) or np.isinf(integral):
# raise Exception("Integral is NaN or Inf")
#
# return integral
#
# Path: sedfitter/utils/validator.py
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
. Output only the next line. | self.response = self.response / np.abs(integrate(self.nu.to(u.Hz).value, self.response)) |
Continue the code snippet: <|code_start|> return f
@property
def central_wavelength(self):
"""
The central or characteristic wavelength of the filter
"""
return self._wavelength
@central_wavelength.setter
def central_wavelength(self, value):
if value is None:
self._wavelength = None
else:
self._wavelength = validate_scalar('central_wavelength', value,
domain='strictly-positive',
physical_type='length')
@property
def nu(self):
"""
The frequencies at which the filter is defined
"""
return self._nu
@nu.setter
def nu(self, value):
if value is None:
self._nu = None
else:
<|code_end|>
. Use current file imports:
import os
import numpy as np
from astropy import units as u
from ..utils.integrate import integrate_subset, integrate
from ..utils.validator import validate_array, validate_scalar
and context (classes, functions, or code) from other files:
# Path: sedfitter/utils/integrate.py
# def integrate_subset(x, y, xmin, xmax):
# """
# Perform trapezium integration of a set of points (x,y) between bounds xmin
# and xmax. The interpolation between the points is done in linear space, so
# this is designed for functions that are piecewise linear in linear space.
# """
#
# # Swap arrays if necessary
# if x[-1] < x[0]:
# x = x[::-1]
# y = y[::-1]
#
# # Swap limits if necessary
# if xmin > xmax:
# xmin, xmax = xmax, xmin
# elif xmin == xmax:
# return 0.
#
# # Find the subset of points to use and the value of the function at the
# # end-points of the integration
#
# if xmin == x[0]:
# i1 = 1
# ymin = y[0]
# else:
# i1 = np.searchsorted(x, xmin)
# ymin = interp1d_fast(x[i1 - 1:i1 + 1], y[i1 - 1:i1 + 1], xmin)
#
# if xmax == x[-1]:
# i2 = -2
# ymax = y[-1]
# else:
# i2 = np.searchsorted(x, xmax)
# ymax = interp1d_fast(x[i2 - 1:i2 + 1], y[i2 - 1:i2 + 1], xmax)
#
# # Construct sub-arrays of the relevant data
# x = np.hstack([xmin, x[i1:i2], xmax])
# y = np.hstack([ymin, y[i1:i2], ymax])
#
# # Call function to integrate the whole subset
# return integrate(x, y)
#
# def integrate(x, y):
# """
# Perform trapezium integration of a set of points (x,y). The interpolation
# between the points is done in linear space, so this is designed for
# functions that are piecewise linear in linear space.
# """
#
# # Fix NaN values
# y[np.isnan(y)] = 0.
#
# # Find the integral of all the chunks
# integrals = 0.5 * (x[1:] - x[:-1]) * (y[1:] + y[:-1])
#
# # Sum them all up
# integral = np.sum(integrals)
#
# # Check if the integral is NaN or infinity
# if np.isnan(integral) or np.isinf(integral):
# raise Exception("Integral is NaN or Inf")
#
# return integral
#
# Path: sedfitter/utils/validator.py
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
. Output only the next line. | self._nu = validate_array('nu', value, domain='strictly-positive', ndim=1, |
Given snippet: <|code_start|> if i == 0:
nu1 = nu_new_hz[0]
else:
nu1 = 0.5 * (nu_new_hz[i - 1] + nu_new_hz[i])
if i == len(nu_new_hz) - 1:
nu2 = nu_new_hz[-1]
else:
nu2 = 0.5 * (nu_new_hz[i] + nu_new_hz[i + 1])
nu1 = min(max(nu1, self_nu_hz[0]), self_nu_hz[-1])
nu2 = min(max(nu2, self_nu_hz[0]), self_nu_hz[-1])
if nu2 != nu1:
f.response[i] = integrate_subset(self_nu_hz, self.response, nu1, nu2)
return f
@property
def central_wavelength(self):
"""
The central or characteristic wavelength of the filter
"""
return self._wavelength
@central_wavelength.setter
def central_wavelength(self, value):
if value is None:
self._wavelength = None
else:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import numpy as np
from astropy import units as u
from ..utils.integrate import integrate_subset, integrate
from ..utils.validator import validate_array, validate_scalar
and context:
# Path: sedfitter/utils/integrate.py
# def integrate_subset(x, y, xmin, xmax):
# """
# Perform trapezium integration of a set of points (x,y) between bounds xmin
# and xmax. The interpolation between the points is done in linear space, so
# this is designed for functions that are piecewise linear in linear space.
# """
#
# # Swap arrays if necessary
# if x[-1] < x[0]:
# x = x[::-1]
# y = y[::-1]
#
# # Swap limits if necessary
# if xmin > xmax:
# xmin, xmax = xmax, xmin
# elif xmin == xmax:
# return 0.
#
# # Find the subset of points to use and the value of the function at the
# # end-points of the integration
#
# if xmin == x[0]:
# i1 = 1
# ymin = y[0]
# else:
# i1 = np.searchsorted(x, xmin)
# ymin = interp1d_fast(x[i1 - 1:i1 + 1], y[i1 - 1:i1 + 1], xmin)
#
# if xmax == x[-1]:
# i2 = -2
# ymax = y[-1]
# else:
# i2 = np.searchsorted(x, xmax)
# ymax = interp1d_fast(x[i2 - 1:i2 + 1], y[i2 - 1:i2 + 1], xmax)
#
# # Construct sub-arrays of the relevant data
# x = np.hstack([xmin, x[i1:i2], xmax])
# y = np.hstack([ymin, y[i1:i2], ymax])
#
# # Call function to integrate the whole subset
# return integrate(x, y)
#
# def integrate(x, y):
# """
# Perform trapezium integration of a set of points (x,y). The interpolation
# between the points is done in linear space, so this is designed for
# functions that are piecewise linear in linear space.
# """
#
# # Fix NaN values
# y[np.isnan(y)] = 0.
#
# # Find the integral of all the chunks
# integrals = 0.5 * (x[1:] - x[:-1]) * (y[1:] + y[:-1])
#
# # Sum them all up
# integral = np.sum(integrals)
#
# # Check if the integral is NaN or infinity
# if np.isnan(integral) or np.isinf(integral):
# raise Exception("Integral is NaN or Inf")
#
# return integral
#
# Path: sedfitter/utils/validator.py
# def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# # First convert to a Numpy array:
# if type(value) in [list, tuple]:
# value = np.array(value)
#
# # Check the value is an array with the right number of dimensions
# if not isinstance(value, np.ndarray) or value.ndim != ndim:
# if ndim == 1:
# raise TypeError("{0} should be a 1-d sequence".format(name))
# else:
# raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
#
# # Check that the shape matches that expected
# if shape is not None and value.shape != shape:
# if ndim == 1:
# raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
# else:
# # On Windows, shapes can contain long integers, so we fix this to
# # have consistent error messages across platforms.
# expected_shape = tuple(int(x) for x in shape)
# actual_shape = tuple(int(x) for x in value.shape)
# raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, expected_shape, actual_shape))
#
# return value
#
# def validate_scalar(name, value, domain=None, physical_type=None):
#
# validate_physical_type(name, value, physical_type)
#
# if not physical_type:
# if np.isscalar(value) or not np.isreal(value):
# raise TypeError("{0} should be a scalar floating point value".format(name))
#
# if domain == 'positive':
# if value < 0.:
# raise ValueError("{0} should be positive".format(name))
# elif domain == 'strictly-positive':
# if value <= 0.:
# raise ValueError("{0} should be strictly positive".format(name))
# elif domain == 'negative':
# if value > 0.:
# raise ValueError("{0} should be negative".format(name))
# elif domain == 'strictly-negative':
# if value >= 0.:
# raise ValueError("{0} should be strictly negative".format(name))
# elif type(domain) in [tuple, list] and len(domain) == 2:
# if value < domain[0] or value > domain[-1]:
# raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
#
# return value
which might include code, classes, or functions. Output only the next line. | self._wavelength = validate_scalar('central_wavelength', value, |
Predict the next line for this snippet: <|code_start|>from __future__ import print_function, division
__all__ = ['write_parameters']
def write_parameters(input_fits, output_file, select_format=("N", 1), additional={}):
"""
    Write out an ASCII file with the parameters for each source.
Parameters
----------
input_fits : str or :class:`sedfitter.fit_info.FitInfo` or iterable
This should be either a file containing the fit information, a
:class:`sedfitter.fit_info.FitInfo` instance, or an iterable containing
:class:`sedfitter.fit_info.FitInfo` instances.
output_file : str, optional
The output ASCII file containing the parameters
select_format : tuple, optional
Tuple specifying which fits should be output. See the documentation
for a description of the tuple syntax.
additional : dict, optional
A dictionary giving additional parameters for each model. This should
be a dictionary where each key is a parameter, and each value is a
dictionary mapping the model names to the parameter values.
"""
# Open input and output file
<|code_end|>
with the help of current file imports:
import numpy as np
from .fit_info import FitInfoFile
from .models import load_parameter_table
and context from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
, which may contain function names, class names, or code. Output only the next line. | fin = FitInfoFile(input_fits, 'r') |
Based on the snippet: <|code_start|>
__all__ = ['write_parameters']
def write_parameters(input_fits, output_file, select_format=("N", 1), additional={}):
"""
    Write out an ASCII file with the parameters for each source.
Parameters
----------
input_fits : str or :class:`sedfitter.fit_info.FitInfo` or iterable
This should be either a file containing the fit information, a
:class:`sedfitter.fit_info.FitInfo` instance, or an iterable containing
:class:`sedfitter.fit_info.FitInfo` instances.
output_file : str, optional
The output ASCII file containing the parameters
select_format : tuple, optional
Tuple specifying which fits should be output. See the documentation
for a description of the tuple syntax.
additional : dict, optional
A dictionary giving additional parameters for each model. This should
be a dictionary where each key is a parameter, and each value is a
dictionary mapping the model names to the parameter values.
"""
# Open input and output file
fin = FitInfoFile(input_fits, 'r')
fout = open(output_file, 'w')
# Read in table of parameters for model grid
<|code_end|>
, predict the immediate next line with the help of imports:
import numpy as np
from .fit_info import FitInfoFile
from .models import load_parameter_table
and context (classes, functions, sometimes code) from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
. Output only the next line. | t = load_parameter_table(fin.meta.model_dir) |
Given the code snippet: <|code_start|>
__all__ = ['write_parameter_ranges']
NODATA = '-'.center(10)
def write_parameter_ranges(input_fits, output_file, select_format=("N", 1), additional={}):
"""
    Write out an ASCII file with ranges of parameters for each source.
Parameters
----------
input_fits : str or :class:`sedfitter.fit_info.FitInfo` or iterable
This should be either a file containing the fit information, a
:class:`sedfitter.fit_info.FitInfo` instance, or an iterable containing
:class:`sedfitter.fit_info.FitInfo` instances.
output_file : str, optional
The output ASCII file containing the parameter ranges
select_format : tuple, optional
Tuple specifying which fits should be output. See the documentation
for a description of the tuple syntax.
additional : dict, optional
A dictionary giving additional parameters for each model. This should
be a dictionary where each key is a parameter, and each value is a
dictionary mapping the model names to the parameter values.
"""
# Open input and output file
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
from .fit_info import FitInfoFile
from .models import load_parameter_table
and context (functions, classes, or occasionally code) from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
. Output only the next line. | fin = FitInfoFile(input_fits, 'r') |
Next line prediction: <|code_start|>
NODATA = '-'.center(10)
def write_parameter_ranges(input_fits, output_file, select_format=("N", 1), additional={}):
"""
    Write out an ASCII file with ranges of parameters for each source.
Parameters
----------
input_fits : str or :class:`sedfitter.fit_info.FitInfo` or iterable
This should be either a file containing the fit information, a
:class:`sedfitter.fit_info.FitInfo` instance, or an iterable containing
:class:`sedfitter.fit_info.FitInfo` instances.
output_file : str, optional
The output ASCII file containing the parameter ranges
select_format : tuple, optional
Tuple specifying which fits should be output. See the documentation
for a description of the tuple syntax.
additional : dict, optional
A dictionary giving additional parameters for each model. This should
be a dictionary where each key is a parameter, and each value is a
dictionary mapping the model names to the parameter values.
"""
# Open input and output file
fin = FitInfoFile(input_fits, 'r')
fout = open(output_file, 'w')
# Read in table of parameters for model grid
<|code_end|>
. Use current file imports:
(import numpy as np
from .fit_info import FitInfoFile
from .models import load_parameter_table)
and context including class names, function names, or small code snippets from other files:
# Path: sedfitter/fit_info.py
# class FitInfoFile(object):
#
# def __init__(self, fits, mode=None):
#
# if isinstance(fits, six.string_types):
#
# if mode not in 'wr':
# raise ValueError('mode should be r or w')
#
# self._handle = open(fits, mode + 'b')
# self._mode = mode
#
# if mode == 'r':
# self._first_meta = FitInfoMeta()
# self._first_meta.model_dir = pickle.load(self._handle)
# self._first_meta.filters = pickle.load(self._handle)
# self._first_meta.extinction_law = pickle.load(self._handle)
# else:
# self._first_meta = None
#
# self._fits = None
#
# elif isinstance(fits, FitInfo):
#
# self._fits = [fits]
#
# elif isinstance(fits, (list, tuple)):
#
# for info in self._fits[1:]:
# if info.meta != self._fits[0].meta:
# raise ValueError("The meta property of all FitInfo instances should match")
#
# self._fits = fits
#
# else:
#
# raise TypeError('fits should be a string, FitInfo instance, or iterable of FitInfo instances')
#
# @property
# def meta(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("meta property is only available in read mode")
# return self._first_meta
# else:
# return self._fits[0].meta
#
# def write(self, info):
#
# if self._mode != 'w':
# raise ValueError("File not open for writing")
#
# # We only write the metadata for the first source, and we then check
# # the metadata of other sources against the first one to make sure it
# # matches.
#
# if self._first_meta is None:
# pickle.dump(info.meta.model_dir, self._handle, 2)
# pickle.dump(info.meta.filters, self._handle, 2)
# pickle.dump(info.meta.extinction_law, self._handle, 2)
# self._first_meta = info.meta
# else:
# if not info.meta == self._first_meta:
# raise ValueError("meta does not match previously written value")
#
# pickle.dump(info, self._handle, 2)
#
# def close(self):
# if self._fits is None:
# self._handle.close()
#
# def __iter__(self):
# if self._fits is None:
# if self._mode != 'r':
# raise ValueError("File not open for reading")
# while True:
# try:
# info = pickle.load(self._handle)
# except EOFError:
# return
# else:
# info.meta = self._first_meta
# yield info
# else:
# for info in self._fits:
# yield info
#
# Path: sedfitter/models.py
# def load_parameter_table(model_dir):
#
# if os.path.exists(model_dir + '/parameters.fits'):
# t = read_table(model_dir + '/parameters.fits')
# elif os.path.exists(model_dir + '/parameters.fits.gz'):
# t = read_table(model_dir + '/parameters.fits.gz')
# else:
# raise Exception("Parameter file not found in %s" % model_dir)
#
# return t
. Output only the next line. | t = load_parameter_table(fin.meta.model_dir) |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
HOST_NAME = '127.0.0.1'
PROXY_PORT = 55444
PROXY_URL_FORMAT = 'http://%s:%s/proxy.%%s?url=%%s' % (HOST_NAME, PROXY_PORT)
session = requests.Session()
BASE_URL = None
def log(msg):
# pass
<|code_end|>
. Use current file imports:
(import ssl
import sys
import re
import socket
import os
import urllib
import urlparse
import threading
import requests
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
from resources.lib.modules import control)
and context including class names, function names, or small code snippets from other files:
# Path: resources/lib/modules/control.py
# SORT_METHOD_NONE = xbmcplugin.SORT_METHOD_NONE
# SORT_METHOD_UNSORTED = xbmcplugin.SORT_METHOD_UNSORTED
# SORT_METHOD_VIDEO_RATING = xbmcplugin.SORT_METHOD_VIDEO_RATING
# SORT_METHOD_TRACKNUM = xbmcplugin.SORT_METHOD_TRACKNUM
# SORT_METHOD_FILE = xbmcplugin.SORT_METHOD_FILE
# SORT_METHOD_TITLE = xbmcplugin.SORT_METHOD_TITLE
# SORT_METHOD_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_TITLE = xbmcplugin.SORT_METHOD_VIDEO_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_RUNTIME = xbmcplugin.SORT_METHOD_VIDEO_RUNTIME
# SORT_METHOD_FULLPATH = xbmcplugin.SORT_METHOD_FULLPATH
# SORT_METHOD_LABEL = xbmcplugin.SORT_METHOD_LABEL
# SORT_METHOD_LABEL_IGNORE_THE = xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE
# SORT_METHOD_LABEL_IGNORE_FOLDERS = xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS
# SORT_METHOD_CHANNEL = xbmcplugin.SORT_METHOD_CHANNEL
# SORT_METHOD_DATE = xbmcplugin.SORT_METHOD_DATE
# SORT_METHOD_DATEADDED = xbmcplugin.SORT_METHOD_DATEADDED
# SORT_METHOD_PLAYLIST_ORDER = xbmcplugin.SORT_METHOD_PLAYLIST_ORDER
# SORT_METHOD_EPISODE = xbmcplugin.SORT_METHOD_EPISODE
# SORT_METHOD_STUDIO = xbmcplugin.SORT_METHOD_STUDIO
# SORT_METHOD_STUDIO_IGNORE_THE = xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE
# SORT_METHOD_MPAA_RATING = xbmcplugin.SORT_METHOD_MPAA_RATING
# LOGDEBUG = 0
# LOGERROR = 4
# LOGFATAL = 6
# LOGINFO = 1
# LOGNONE = 7
# LOGNOTICE = 2
# LOGSEVERE = 5
# LOGWARNING = 3
# INFO_LABELS = [
# 'genre',
# 'country',
# 'year',
# 'episode',
# 'season',
# 'sortepisode',
# 'sortseason',
# 'episodeguide',
# 'showlink',
# 'top250',
# 'setid',
# 'tracknumber',
# 'rating',
# 'userrating',
# 'watched',
# 'playcount',
# 'overlay',
# 'cast',
# 'castandrole',
# 'director',
# 'mpaa',
# 'plot',
# 'plotoutline',
# 'title',
# 'originaltitle',
# 'sorttitle',
# 'duration',
# 'studio',
# 'tagline',
# 'writer',
# 'tvshowtitle',
# 'premiered',
# 'status',
# 'set',
# 'setoverview',
# 'tag',
# 'imdbnumber',
# 'code',
# 'aired',
# 'credits',
# 'lastplayed',
# 'album',
# 'artist',
# 'votes',
# 'path',
# 'trailer',
# 'dateadded',
# 'mediatype',
# 'dbid'
# ]
# def get_current_brasilia_utc_offset():
# def get_inputstream_addon():
# def is_inputstream_available():
# def is_live_available():
# def is_vod_available():
# def is_globosat_available():
# def is_globoplay_available():
# def is_globoplay_mais_canais_ao_vivo_available():
# def globoplay_ignore_channel_authorization():
# def is_oiplay_available():
# def is_tntplay_available():
# def is_nowonline_available():
# def is_telecine_available():
# def is_sbt_available():
# def is_pluto_available():
# def getKodiVersion():
# def addonIcon():
# def getBandwidthLimit():
# def addonThumb():
# def addonPoster():
# def addonBanner():
# def addonFanart():
# def addonNext():
# def artPath():
# def okDialog(heading, line1, line2=None, line3=None):
# def infoDialog(message, heading=addonInfo('name'), icon='', time=3000, sound=False):
# def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
# def selectDialog(list, heading=addonInfo('name')):
# def apiLanguage(ret_name=None):
# def version():
# def openSettings(query=None, id=addonInfo('id')):
# def refresh():
# def idle():
# def queueItem():
# def clear_credentials():
# def clear_globosat_credentials():
# def log(msg, level=LOGNOTICE):
# def get_coordinates(affiliate):
# def get_ip_coordinates():
# def get_affiliates_by_id(id):
# def filter_info_labels(info_labels):
# def to_timestamp(date):
# def run_plugin_url(params=None):
# def get_weekday_name(date):
. Output only the next line. | control.log(msg) |
Here is a snippet: <|code_start|>
def get(function, timeout_hour, *args, **kargs):
# try:
response = None
force_refresh = kargs['force_refresh'] if 'force_refresh' in kargs else False
kargs.pop('force_refresh', None)
lock_obj = kargs['lock_obj'] if 'lock_obj' in kargs else None
kargs.pop('lock_obj', None)
f = repr(function)
f = re.sub('.+\smethod\s|.+function\s|\sat\s.+|\sof\s.+', '', f)
a = hashlib.md5()
for i in args: a.update(str(i))
for key in kargs:
if key != 'table':
a.update('%s=%s' % (key, str(kargs[key])))
a = str(a.hexdigest())
# except:
# pass
try:
table = kargs['table']
kargs.pop('table')
except:
table = 'rel_list'
try:
<|code_end|>
. Write the next line using the current file imports:
import re
import hashlib
import time
import datetime
import traceback
import inspect
import cPickle as pickle
import pickle
from sqlite3 import dbapi2 as database
from pysqlite2 import dbapi2 as database
from resources.lib.modules import control
from collections import OrderedDict
and context from other files:
# Path: resources/lib/modules/control.py
# SORT_METHOD_NONE = xbmcplugin.SORT_METHOD_NONE
# SORT_METHOD_UNSORTED = xbmcplugin.SORT_METHOD_UNSORTED
# SORT_METHOD_VIDEO_RATING = xbmcplugin.SORT_METHOD_VIDEO_RATING
# SORT_METHOD_TRACKNUM = xbmcplugin.SORT_METHOD_TRACKNUM
# SORT_METHOD_FILE = xbmcplugin.SORT_METHOD_FILE
# SORT_METHOD_TITLE = xbmcplugin.SORT_METHOD_TITLE
# SORT_METHOD_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_TITLE = xbmcplugin.SORT_METHOD_VIDEO_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_RUNTIME = xbmcplugin.SORT_METHOD_VIDEO_RUNTIME
# SORT_METHOD_FULLPATH = xbmcplugin.SORT_METHOD_FULLPATH
# SORT_METHOD_LABEL = xbmcplugin.SORT_METHOD_LABEL
# SORT_METHOD_LABEL_IGNORE_THE = xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE
# SORT_METHOD_LABEL_IGNORE_FOLDERS = xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS
# SORT_METHOD_CHANNEL = xbmcplugin.SORT_METHOD_CHANNEL
# SORT_METHOD_DATE = xbmcplugin.SORT_METHOD_DATE
# SORT_METHOD_DATEADDED = xbmcplugin.SORT_METHOD_DATEADDED
# SORT_METHOD_PLAYLIST_ORDER = xbmcplugin.SORT_METHOD_PLAYLIST_ORDER
# SORT_METHOD_EPISODE = xbmcplugin.SORT_METHOD_EPISODE
# SORT_METHOD_STUDIO = xbmcplugin.SORT_METHOD_STUDIO
# SORT_METHOD_STUDIO_IGNORE_THE = xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE
# SORT_METHOD_MPAA_RATING = xbmcplugin.SORT_METHOD_MPAA_RATING
# LOGDEBUG = 0
# LOGERROR = 4
# LOGFATAL = 6
# LOGINFO = 1
# LOGNONE = 7
# LOGNOTICE = 2
# LOGSEVERE = 5
# LOGWARNING = 3
# INFO_LABELS = [
# 'genre',
# 'country',
# 'year',
# 'episode',
# 'season',
# 'sortepisode',
# 'sortseason',
# 'episodeguide',
# 'showlink',
# 'top250',
# 'setid',
# 'tracknumber',
# 'rating',
# 'userrating',
# 'watched',
# 'playcount',
# 'overlay',
# 'cast',
# 'castandrole',
# 'director',
# 'mpaa',
# 'plot',
# 'plotoutline',
# 'title',
# 'originaltitle',
# 'sorttitle',
# 'duration',
# 'studio',
# 'tagline',
# 'writer',
# 'tvshowtitle',
# 'premiered',
# 'status',
# 'set',
# 'setoverview',
# 'tag',
# 'imdbnumber',
# 'code',
# 'aired',
# 'credits',
# 'lastplayed',
# 'album',
# 'artist',
# 'votes',
# 'path',
# 'trailer',
# 'dateadded',
# 'mediatype',
# 'dbid'
# ]
# def get_current_brasilia_utc_offset():
# def get_inputstream_addon():
# def is_inputstream_available():
# def is_live_available():
# def is_vod_available():
# def is_globosat_available():
# def is_globoplay_available():
# def is_globoplay_mais_canais_ao_vivo_available():
# def globoplay_ignore_channel_authorization():
# def is_oiplay_available():
# def is_tntplay_available():
# def is_nowonline_available():
# def is_telecine_available():
# def is_sbt_available():
# def is_pluto_available():
# def getKodiVersion():
# def addonIcon():
# def getBandwidthLimit():
# def addonThumb():
# def addonPoster():
# def addonBanner():
# def addonFanart():
# def addonNext():
# def artPath():
# def okDialog(heading, line1, line2=None, line3=None):
# def infoDialog(message, heading=addonInfo('name'), icon='', time=3000, sound=False):
# def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
# def selectDialog(list, heading=addonInfo('name')):
# def apiLanguage(ret_name=None):
# def version():
# def openSettings(query=None, id=addonInfo('id')):
# def refresh():
# def idle():
# def queueItem():
# def clear_credentials():
# def clear_globosat_credentials():
# def log(msg, level=LOGNOTICE):
# def get_coordinates(affiliate):
# def get_ip_coordinates():
# def get_affiliates_by_id(id):
# def filter_info_labels(info_labels):
# def to_timestamp(date):
# def run_plugin_url(params=None):
# def get_weekday_name(date):
, which may include functions, classes, or code. Output only the next line. | control.makeFile(control.dataPath) |
Given snippet: <|code_start|>
class Schedule:
def __init__(self):
pass
def get_schedule(self):
# In Settings.xml - globo_affiliate
# "0" = All
# "1" = Rio de Janeiro
# "2" = Sao Paulo
# "3" = Brasilia
# "4" = Belo Horizonte
# "5" = Recife
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
import requests
from resources.lib.modules import control
and context:
# Path: resources/lib/modules/control.py
# SORT_METHOD_NONE = xbmcplugin.SORT_METHOD_NONE
# SORT_METHOD_UNSORTED = xbmcplugin.SORT_METHOD_UNSORTED
# SORT_METHOD_VIDEO_RATING = xbmcplugin.SORT_METHOD_VIDEO_RATING
# SORT_METHOD_TRACKNUM = xbmcplugin.SORT_METHOD_TRACKNUM
# SORT_METHOD_FILE = xbmcplugin.SORT_METHOD_FILE
# SORT_METHOD_TITLE = xbmcplugin.SORT_METHOD_TITLE
# SORT_METHOD_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_TITLE = xbmcplugin.SORT_METHOD_VIDEO_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_RUNTIME = xbmcplugin.SORT_METHOD_VIDEO_RUNTIME
# SORT_METHOD_FULLPATH = xbmcplugin.SORT_METHOD_FULLPATH
# SORT_METHOD_LABEL = xbmcplugin.SORT_METHOD_LABEL
# SORT_METHOD_LABEL_IGNORE_THE = xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE
# SORT_METHOD_LABEL_IGNORE_FOLDERS = xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS
# SORT_METHOD_CHANNEL = xbmcplugin.SORT_METHOD_CHANNEL
# SORT_METHOD_DATE = xbmcplugin.SORT_METHOD_DATE
# SORT_METHOD_DATEADDED = xbmcplugin.SORT_METHOD_DATEADDED
# SORT_METHOD_PLAYLIST_ORDER = xbmcplugin.SORT_METHOD_PLAYLIST_ORDER
# SORT_METHOD_EPISODE = xbmcplugin.SORT_METHOD_EPISODE
# SORT_METHOD_STUDIO = xbmcplugin.SORT_METHOD_STUDIO
# SORT_METHOD_STUDIO_IGNORE_THE = xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE
# SORT_METHOD_MPAA_RATING = xbmcplugin.SORT_METHOD_MPAA_RATING
# LOGDEBUG = 0
# LOGERROR = 4
# LOGFATAL = 6
# LOGINFO = 1
# LOGNONE = 7
# LOGNOTICE = 2
# LOGSEVERE = 5
# LOGWARNING = 3
# INFO_LABELS = [
# 'genre',
# 'country',
# 'year',
# 'episode',
# 'season',
# 'sortepisode',
# 'sortseason',
# 'episodeguide',
# 'showlink',
# 'top250',
# 'setid',
# 'tracknumber',
# 'rating',
# 'userrating',
# 'watched',
# 'playcount',
# 'overlay',
# 'cast',
# 'castandrole',
# 'director',
# 'mpaa',
# 'plot',
# 'plotoutline',
# 'title',
# 'originaltitle',
# 'sorttitle',
# 'duration',
# 'studio',
# 'tagline',
# 'writer',
# 'tvshowtitle',
# 'premiered',
# 'status',
# 'set',
# 'setoverview',
# 'tag',
# 'imdbnumber',
# 'code',
# 'aired',
# 'credits',
# 'lastplayed',
# 'album',
# 'artist',
# 'votes',
# 'path',
# 'trailer',
# 'dateadded',
# 'mediatype',
# 'dbid'
# ]
# def get_current_brasilia_utc_offset():
# def get_inputstream_addon():
# def is_inputstream_available():
# def is_live_available():
# def is_vod_available():
# def is_globosat_available():
# def is_globoplay_available():
# def is_globoplay_mais_canais_ao_vivo_available():
# def globoplay_ignore_channel_authorization():
# def is_oiplay_available():
# def is_tntplay_available():
# def is_nowonline_available():
# def is_telecine_available():
# def is_sbt_available():
# def is_pluto_available():
# def getKodiVersion():
# def addonIcon():
# def getBandwidthLimit():
# def addonThumb():
# def addonPoster():
# def addonBanner():
# def addonFanart():
# def addonNext():
# def artPath():
# def okDialog(heading, line1, line2=None, line3=None):
# def infoDialog(message, heading=addonInfo('name'), icon='', time=3000, sound=False):
# def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
# def selectDialog(list, heading=addonInfo('name')):
# def apiLanguage(ret_name=None):
# def version():
# def openSettings(query=None, id=addonInfo('id')):
# def refresh():
# def idle():
# def queueItem():
# def clear_credentials():
# def clear_globosat_credentials():
# def log(msg, level=LOGNOTICE):
# def get_coordinates(affiliate):
# def get_ip_coordinates():
# def get_affiliates_by_id(id):
# def filter_info_labels(info_labels):
# def to_timestamp(date):
# def run_plugin_url(params=None):
# def get_weekday_name(date):
which might include code, classes, or functions. Output only the next line. | if control.setting("globo_affiliate") == "2": |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
if __name__ == '__main__':
try:
control.log('%s %s | Starting...' % (control.addonInfo('name'), control.addonInfo('version')))
buggalo.EMAIL_CONFIG = {
"recipient": 'brplayissues@gmail.com',
"sender": "BRplay <brplayissues@gmail.com>",
"server": 'smtp.googlemail.com',
"method": 'ssl',
"user": 'brplayissues@gmail.com',
"pass": "yourpasswordforbuggalo_account"
}
argv = dict(urlparse.parse_qsl(sys.argv[2].replace('?', '')))
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import urlparse
import buggalo
from resources.lib import main
from resources.lib.modules import control
and context (classes, functions, sometimes code) from other files:
# Path: resources/lib/main.py
# def run(params):
#
# Path: resources/lib/modules/control.py
# SORT_METHOD_NONE = xbmcplugin.SORT_METHOD_NONE
# SORT_METHOD_UNSORTED = xbmcplugin.SORT_METHOD_UNSORTED
# SORT_METHOD_VIDEO_RATING = xbmcplugin.SORT_METHOD_VIDEO_RATING
# SORT_METHOD_TRACKNUM = xbmcplugin.SORT_METHOD_TRACKNUM
# SORT_METHOD_FILE = xbmcplugin.SORT_METHOD_FILE
# SORT_METHOD_TITLE = xbmcplugin.SORT_METHOD_TITLE
# SORT_METHOD_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_TITLE = xbmcplugin.SORT_METHOD_VIDEO_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_RUNTIME = xbmcplugin.SORT_METHOD_VIDEO_RUNTIME
# SORT_METHOD_FULLPATH = xbmcplugin.SORT_METHOD_FULLPATH
# SORT_METHOD_LABEL = xbmcplugin.SORT_METHOD_LABEL
# SORT_METHOD_LABEL_IGNORE_THE = xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE
# SORT_METHOD_LABEL_IGNORE_FOLDERS = xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS
# SORT_METHOD_CHANNEL = xbmcplugin.SORT_METHOD_CHANNEL
# SORT_METHOD_DATE = xbmcplugin.SORT_METHOD_DATE
# SORT_METHOD_DATEADDED = xbmcplugin.SORT_METHOD_DATEADDED
# SORT_METHOD_PLAYLIST_ORDER = xbmcplugin.SORT_METHOD_PLAYLIST_ORDER
# SORT_METHOD_EPISODE = xbmcplugin.SORT_METHOD_EPISODE
# SORT_METHOD_STUDIO = xbmcplugin.SORT_METHOD_STUDIO
# SORT_METHOD_STUDIO_IGNORE_THE = xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE
# SORT_METHOD_MPAA_RATING = xbmcplugin.SORT_METHOD_MPAA_RATING
# LOGDEBUG = 0
# LOGERROR = 4
# LOGFATAL = 6
# LOGINFO = 1
# LOGNONE = 7
# LOGNOTICE = 2
# LOGSEVERE = 5
# LOGWARNING = 3
# INFO_LABELS = [
# 'genre',
# 'country',
# 'year',
# 'episode',
# 'season',
# 'sortepisode',
# 'sortseason',
# 'episodeguide',
# 'showlink',
# 'top250',
# 'setid',
# 'tracknumber',
# 'rating',
# 'userrating',
# 'watched',
# 'playcount',
# 'overlay',
# 'cast',
# 'castandrole',
# 'director',
# 'mpaa',
# 'plot',
# 'plotoutline',
# 'title',
# 'originaltitle',
# 'sorttitle',
# 'duration',
# 'studio',
# 'tagline',
# 'writer',
# 'tvshowtitle',
# 'premiered',
# 'status',
# 'set',
# 'setoverview',
# 'tag',
# 'imdbnumber',
# 'code',
# 'aired',
# 'credits',
# 'lastplayed',
# 'album',
# 'artist',
# 'votes',
# 'path',
# 'trailer',
# 'dateadded',
# 'mediatype',
# 'dbid'
# ]
# def get_current_brasilia_utc_offset():
# def get_inputstream_addon():
# def is_inputstream_available():
# def is_live_available():
# def is_vod_available():
# def is_globosat_available():
# def is_globoplay_available():
# def is_globoplay_mais_canais_ao_vivo_available():
# def globoplay_ignore_channel_authorization():
# def is_oiplay_available():
# def is_tntplay_available():
# def is_nowonline_available():
# def is_telecine_available():
# def is_sbt_available():
# def is_pluto_available():
# def getKodiVersion():
# def addonIcon():
# def getBandwidthLimit():
# def addonThumb():
# def addonPoster():
# def addonBanner():
# def addonFanart():
# def addonNext():
# def artPath():
# def okDialog(heading, line1, line2=None, line3=None):
# def infoDialog(message, heading=addonInfo('name'), icon='', time=3000, sound=False):
# def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
# def selectDialog(list, heading=addonInfo('name')):
# def apiLanguage(ret_name=None):
# def version():
# def openSettings(query=None, id=addonInfo('id')):
# def refresh():
# def idle():
# def queueItem():
# def clear_credentials():
# def clear_globosat_credentials():
# def log(msg, level=LOGNOTICE):
# def get_coordinates(affiliate):
# def get_ip_coordinates():
# def get_affiliates_by_id(id):
# def filter_info_labels(info_labels):
# def to_timestamp(date):
# def run_plugin_url(params=None):
# def get_weekday_name(date):
. Output only the next line. | main.run(argv) |
Predict the next line for this snippet: <|code_start|>
try:
except:
PLATFORM = 'PCTV' # PCTV | IPHONEH | ANDROIDTABLETH | ANDROIDMOBILEH
<|code_end|>
with the help of current file imports:
import requests
import uuid
import cPickle as pickle
import pickle
from resources.lib.modules import control
and context from other files:
# Path: resources/lib/modules/control.py
# SORT_METHOD_NONE = xbmcplugin.SORT_METHOD_NONE
# SORT_METHOD_UNSORTED = xbmcplugin.SORT_METHOD_UNSORTED
# SORT_METHOD_VIDEO_RATING = xbmcplugin.SORT_METHOD_VIDEO_RATING
# SORT_METHOD_TRACKNUM = xbmcplugin.SORT_METHOD_TRACKNUM
# SORT_METHOD_FILE = xbmcplugin.SORT_METHOD_FILE
# SORT_METHOD_TITLE = xbmcplugin.SORT_METHOD_TITLE
# SORT_METHOD_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_TITLE = xbmcplugin.SORT_METHOD_VIDEO_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_RUNTIME = xbmcplugin.SORT_METHOD_VIDEO_RUNTIME
# SORT_METHOD_FULLPATH = xbmcplugin.SORT_METHOD_FULLPATH
# SORT_METHOD_LABEL = xbmcplugin.SORT_METHOD_LABEL
# SORT_METHOD_LABEL_IGNORE_THE = xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE
# SORT_METHOD_LABEL_IGNORE_FOLDERS = xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS
# SORT_METHOD_CHANNEL = xbmcplugin.SORT_METHOD_CHANNEL
# SORT_METHOD_DATE = xbmcplugin.SORT_METHOD_DATE
# SORT_METHOD_DATEADDED = xbmcplugin.SORT_METHOD_DATEADDED
# SORT_METHOD_PLAYLIST_ORDER = xbmcplugin.SORT_METHOD_PLAYLIST_ORDER
# SORT_METHOD_EPISODE = xbmcplugin.SORT_METHOD_EPISODE
# SORT_METHOD_STUDIO = xbmcplugin.SORT_METHOD_STUDIO
# SORT_METHOD_STUDIO_IGNORE_THE = xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE
# SORT_METHOD_MPAA_RATING = xbmcplugin.SORT_METHOD_MPAA_RATING
# LOGDEBUG = 0
# LOGERROR = 4
# LOGFATAL = 6
# LOGINFO = 1
# LOGNONE = 7
# LOGNOTICE = 2
# LOGSEVERE = 5
# LOGWARNING = 3
# INFO_LABELS = [
# 'genre',
# 'country',
# 'year',
# 'episode',
# 'season',
# 'sortepisode',
# 'sortseason',
# 'episodeguide',
# 'showlink',
# 'top250',
# 'setid',
# 'tracknumber',
# 'rating',
# 'userrating',
# 'watched',
# 'playcount',
# 'overlay',
# 'cast',
# 'castandrole',
# 'director',
# 'mpaa',
# 'plot',
# 'plotoutline',
# 'title',
# 'originaltitle',
# 'sorttitle',
# 'duration',
# 'studio',
# 'tagline',
# 'writer',
# 'tvshowtitle',
# 'premiered',
# 'status',
# 'set',
# 'setoverview',
# 'tag',
# 'imdbnumber',
# 'code',
# 'aired',
# 'credits',
# 'lastplayed',
# 'album',
# 'artist',
# 'votes',
# 'path',
# 'trailer',
# 'dateadded',
# 'mediatype',
# 'dbid'
# ]
# def get_current_brasilia_utc_offset():
# def get_inputstream_addon():
# def is_inputstream_available():
# def is_live_available():
# def is_vod_available():
# def is_globosat_available():
# def is_globoplay_available():
# def is_globoplay_mais_canais_ao_vivo_available():
# def globoplay_ignore_channel_authorization():
# def is_oiplay_available():
# def is_tntplay_available():
# def is_nowonline_available():
# def is_telecine_available():
# def is_sbt_available():
# def is_pluto_available():
# def getKodiVersion():
# def addonIcon():
# def getBandwidthLimit():
# def addonThumb():
# def addonPoster():
# def addonBanner():
# def addonFanart():
# def addonNext():
# def artPath():
# def okDialog(heading, line1, line2=None, line3=None):
# def infoDialog(message, heading=addonInfo('name'), icon='', time=3000, sound=False):
# def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
# def selectDialog(list, heading=addonInfo('name')):
# def apiLanguage(ret_name=None):
# def version():
# def openSettings(query=None, id=addonInfo('id')):
# def refresh():
# def idle():
# def queueItem():
# def clear_credentials():
# def clear_globosat_credentials():
# def log(msg, level=LOGNOTICE):
# def get_coordinates(affiliate):
# def get_ip_coordinates():
# def get_affiliates_by_id(id):
# def filter_info_labels(info_labels):
# def to_timestamp(date):
# def run_plugin_url(params=None):
# def get_weekday_name(date):
, which may contain function names, class names, or code. Output only the next line. | proxy = control.proxy_url |
Given the code snippet: <|code_start|>
result.update({
'studio': 'Now Online'
})
yield result
if has_more_pages:
yield {
'handler': __name__,
'method': 'search',
'term': term,
'page': page + 1,
'limit': limit,
'label': control.lang(34136).encode('utf-8'),
'art': {
'poster': control.addonNext(),
'fanart': FANART
},
'properties': {
'SpecialSort': 'bottom'
}
}
def request_logged_in(url, use_cache=True, validate=False, force_refresh=False, retry=1):
headers, cookies = get_request_data(validate)
control.log('GET %s' % url)
if use_cache:
<|code_end|>
, generate the next line using the imports in this file:
from auth import PLATFORM
from auth import get_request_data
from resources.lib.modules import cache
from resources.lib.modules import workers
import requests
import resources.lib.modules.control as control
import scraper_live
import player
import os
import urllib
and context (functions, classes, or occasionally code) from other files:
# Path: resources/lib/modules/cache.py
# def get(function, timeout_hour, *args, **kargs):
# def clear_item(function, *args, **kargs):
# def __get_from_cache(dbcur, table, f, a, timeout_hour):
# def __execute_origin(dbcur, dbcon, function, table, f, a, response, *args, **kargs):
# def delete_file():
# def clear(table=None):
#
# Path: resources/lib/modules/workers.py
# class Thread(threading.Thread):
# class Workers:
# def __init__(self, target, *args):
# def run(self):
# def get_result(self):
# def globaltrace(self, frame, event, arg):
# def localtrace(self, frame, event, arg):
# def kill(self):
# def __init__(self, concurrent=30, queue_size=None, target=None, expect_result=True):
# def _queue_worker():
# def put(self, item):
# def join(self):
# def terminate(self):
. Output only the next line. | response = cache.get(requests.get, 1, url, headers=headers, cookies=cookies, force_refresh=force_refresh, table="netnow") |
Predict the next line for this snippet: <|code_start|> 'category': category,
'subcategory': item.get('title', '').encode('utf-8'),
'label': item.get('title', '').encode('utf-8'),
'art': {
'thumb': LOGO,
'fanart': FANART
}
}
def get_content(category, subcategory):
response = _get_page(category)
item = next((item for item in response.get('response', {}).get('categories', []) if item.get('title', '') == subcategory), {})
if not item.get('contents', []):
if item.get('type') == 'continue_watching':
url = 'https://www.nowonline.com.br/AGL/1.0/R/ENG/{platform}/ALL/NET/USER/BOOKMARKS'.format(platform=PLATFORM)
response = request_logged_in(url, False)
contents = [result.get('content', {}) for result in response.get('resultObj', []) if result.get('content', {})]
else:
id = item.get('id', -1)
url = 'https://www.nowonline.com.br/avsclient/categories/{id}/contents?offset=1&channel={platform}&limit=30'.format(platform=PLATFORM, id=id)
response = request_logged_in(url)
contents = response.get('response', {}).get('contents', [])
else:
contents = item.get('contents', [])
threads = [{
<|code_end|>
with the help of current file imports:
from auth import PLATFORM
from auth import get_request_data
from resources.lib.modules import cache
from resources.lib.modules import workers
import requests
import resources.lib.modules.control as control
import scraper_live
import player
import os
import urllib
and context from other files:
# Path: resources/lib/modules/cache.py
# def get(function, timeout_hour, *args, **kargs):
# def clear_item(function, *args, **kargs):
# def __get_from_cache(dbcur, table, f, a, timeout_hour):
# def __execute_origin(dbcur, dbcon, function, table, f, a, response, *args, **kargs):
# def delete_file():
# def clear(table=None):
#
# Path: resources/lib/modules/workers.py
# class Thread(threading.Thread):
# class Workers:
# def __init__(self, target, *args):
# def run(self):
# def get_result(self):
# def globaltrace(self, frame, event, arg):
# def localtrace(self, frame, event, arg):
# def kill(self):
# def __init__(self, concurrent=30, queue_size=None, target=None, expect_result=True):
# def _queue_worker():
# def put(self, item):
# def join(self):
# def terminate(self):
, which may contain function names, class names, or code. Output only the next line. | 'thread': workers.Thread(_get_content, content.get('id', -1)), |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
class RouteError(Exception):
pass
MODULES_CACHE = {}
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
def handle_route(data):
handler = data.get('handler', None)
method = data.get('method', None)
if not handler or not method:
<|code_end|>
, predict the immediate next line with the help of imports:
import importlib
import sys
import urllib
import json
import operator
import traceback
import types
from allowkwargs import allow_kwargs
from resources.lib.modules import control
and context (classes, functions, sometimes code) from other files:
# Path: resources/lib/modules/control.py
# SORT_METHOD_NONE = xbmcplugin.SORT_METHOD_NONE
# SORT_METHOD_UNSORTED = xbmcplugin.SORT_METHOD_UNSORTED
# SORT_METHOD_VIDEO_RATING = xbmcplugin.SORT_METHOD_VIDEO_RATING
# SORT_METHOD_TRACKNUM = xbmcplugin.SORT_METHOD_TRACKNUM
# SORT_METHOD_FILE = xbmcplugin.SORT_METHOD_FILE
# SORT_METHOD_TITLE = xbmcplugin.SORT_METHOD_TITLE
# SORT_METHOD_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_TITLE = xbmcplugin.SORT_METHOD_VIDEO_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_RUNTIME = xbmcplugin.SORT_METHOD_VIDEO_RUNTIME
# SORT_METHOD_FULLPATH = xbmcplugin.SORT_METHOD_FULLPATH
# SORT_METHOD_LABEL = xbmcplugin.SORT_METHOD_LABEL
# SORT_METHOD_LABEL_IGNORE_THE = xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE
# SORT_METHOD_LABEL_IGNORE_FOLDERS = xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS
# SORT_METHOD_CHANNEL = xbmcplugin.SORT_METHOD_CHANNEL
# SORT_METHOD_DATE = xbmcplugin.SORT_METHOD_DATE
# SORT_METHOD_DATEADDED = xbmcplugin.SORT_METHOD_DATEADDED
# SORT_METHOD_PLAYLIST_ORDER = xbmcplugin.SORT_METHOD_PLAYLIST_ORDER
# SORT_METHOD_EPISODE = xbmcplugin.SORT_METHOD_EPISODE
# SORT_METHOD_STUDIO = xbmcplugin.SORT_METHOD_STUDIO
# SORT_METHOD_STUDIO_IGNORE_THE = xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE
# SORT_METHOD_MPAA_RATING = xbmcplugin.SORT_METHOD_MPAA_RATING
# LOGDEBUG = 0
# LOGERROR = 4
# LOGFATAL = 6
# LOGINFO = 1
# LOGNONE = 7
# LOGNOTICE = 2
# LOGSEVERE = 5
# LOGWARNING = 3
# INFO_LABELS = [
# 'genre',
# 'country',
# 'year',
# 'episode',
# 'season',
# 'sortepisode',
# 'sortseason',
# 'episodeguide',
# 'showlink',
# 'top250',
# 'setid',
# 'tracknumber',
# 'rating',
# 'userrating',
# 'watched',
# 'playcount',
# 'overlay',
# 'cast',
# 'castandrole',
# 'director',
# 'mpaa',
# 'plot',
# 'plotoutline',
# 'title',
# 'originaltitle',
# 'sorttitle',
# 'duration',
# 'studio',
# 'tagline',
# 'writer',
# 'tvshowtitle',
# 'premiered',
# 'status',
# 'set',
# 'setoverview',
# 'tag',
# 'imdbnumber',
# 'code',
# 'aired',
# 'credits',
# 'lastplayed',
# 'album',
# 'artist',
# 'votes',
# 'path',
# 'trailer',
# 'dateadded',
# 'mediatype',
# 'dbid'
# ]
# def get_current_brasilia_utc_offset():
# def get_inputstream_addon():
# def is_inputstream_available():
# def is_live_available():
# def is_vod_available():
# def is_globosat_available():
# def is_globoplay_available():
# def is_globoplay_mais_canais_ao_vivo_available():
# def globoplay_ignore_channel_authorization():
# def is_oiplay_available():
# def is_tntplay_available():
# def is_nowonline_available():
# def is_telecine_available():
# def is_sbt_available():
# def is_pluto_available():
# def getKodiVersion():
# def addonIcon():
# def getBandwidthLimit():
# def addonThumb():
# def addonPoster():
# def addonBanner():
# def addonFanart():
# def addonNext():
# def artPath():
# def okDialog(heading, line1, line2=None, line3=None):
# def infoDialog(message, heading=addonInfo('name'), icon='', time=3000, sound=False):
# def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
# def selectDialog(list, heading=addonInfo('name')):
# def apiLanguage(ret_name=None):
# def version():
# def openSettings(query=None, id=addonInfo('id')):
# def refresh():
# def idle():
# def queueItem():
# def clear_credentials():
# def clear_globosat_credentials():
# def log(msg, level=LOGNOTICE):
# def get_coordinates(affiliate):
# def get_ip_coordinates():
# def get_affiliates_by_id(id):
# def filter_info_labels(info_labels):
# def to_timestamp(date):
# def run_plugin_url(params=None):
# def get_weekday_name(date):
. Output only the next line. | control.log('No handler available for: %s' % data) |
Using the snippet: <|code_start|>
if id is None:
return
try:
url = self.geturl(id, encrypted=encrypted)
except Exception as ex:
control.log(traceback.format_exc(), control.LOGERROR)
control.okDialog(u'TNT Play', str(ex))
return
if encrypted and not control.is_inputstream_available():
control.okDialog(u'TNT Play', control.lang(34103).encode('utf-8'))
self.stop_content(id, encrypted=encrypted)
return
control.log("media url: %s" % url)
self.offset = float(meta['milliseconds_watched']) / 1000.0 if 'milliseconds_watched' in meta else 0
self.isLive = not encrypted
parsed_url = urlparse(url)
# if ".m3u8" in parsed_url.path:
# self.url, mime_type, stop_event, cookies = hlshelper.pick_bandwidth(url)
# else:
# self.url = url
# mime_type, stop_event, cookies = None, None, None
<|code_end|>
, determine the next line of code. You have imports:
import threading
import sys
import urllib
import resources.lib.modules.control as control
import requests
import xbmc
import traceback
from urlparse import urlparse
from resources.lib.hlsproxy.simpleproxy import MediaProxy
from auth import get_token, get_device_id, logout
and context (class names, function names, or code) available:
# Path: resources/lib/hlsproxy/simpleproxy.py
# class MediaProxy:
#
# @property
# def stop_event(self):
# return self.stopPlaying
#
# def __init__(self, proxy=None):
# log('MediaProxy Init | proxy: %s' % proxy)
# self.host_name = HOST_NAME
# self.port = PROXY_PORT
#
# self.stopPlaying = threading.Event()
# self.stopPlaying.clear()
#
# t = threading.Thread(target=self.__start, args=(self.stopPlaying, proxy,))
# t.daemon = True
# t.start()
#
# def __start(self, stop_event, proxy):
# log('MediaProxy Start')
# socket.setdefaulttimeout(10)
#
# RequestHandler.protocol_version = "HTTP/1.1"
# RequestHandler.proxy = proxy
#
# httpd = None
# try:
# ThreadedHTTPServer.daemon_threads = True
# httpd = ThreadedHTTPServer((self.host_name, self.port, ), RequestHandler, stop_event)
#
# log("Simple Proxy Started - %s:%s" % (self.host_name, self.port))
#
# while not stop_event.isSet():
# httpd.handle_request()
# finally:
# try:
# if httpd:
# httpd.server_close()
# finally:
# log("Simple Proxy Stopped %s:%s" % (self.host_name, self.port))
#
# def resolve(self, url):
#
# log('MediaProxy resolve: %s' % url)
#
# path = urlparse.urlparse(url).path
# extension = os.path.splitext(path)[1] or 'm3u8'
# extension = extension.replace('.', '')
#
# link = PROXY_URL_FORMAT % (extension, urllib.quote_plus(url))
#
# return link # make a url that caller then call load into player
. Output only the next line. | proxy_handler = MediaProxy(control.proxy_url) |
Continue the code snippet: <|code_start|>(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
g_stopEvent = None
g_downloader = None
VIDEO_MIME_TYPE = 'video/MP2T'
PLAYLIST_MIME_TYPE = 'application/vnd.apple.mpegurl'
HOST_NAME = '127.0.0.1'
PORT_NUMBER = 55444
def log(msg):
# pass
<|code_end|>
. Use current file imports:
import sys
import ssl
import re
import socket
import traceback
import urllib
import urlparse
import xbmc
import xbmcgui
import threading
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
from resources.lib.modules import control
and context (classes, functions, or code) from other files:
# Path: resources/lib/modules/control.py
# SORT_METHOD_NONE = xbmcplugin.SORT_METHOD_NONE
# SORT_METHOD_UNSORTED = xbmcplugin.SORT_METHOD_UNSORTED
# SORT_METHOD_VIDEO_RATING = xbmcplugin.SORT_METHOD_VIDEO_RATING
# SORT_METHOD_TRACKNUM = xbmcplugin.SORT_METHOD_TRACKNUM
# SORT_METHOD_FILE = xbmcplugin.SORT_METHOD_FILE
# SORT_METHOD_TITLE = xbmcplugin.SORT_METHOD_TITLE
# SORT_METHOD_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_TITLE = xbmcplugin.SORT_METHOD_VIDEO_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_RUNTIME = xbmcplugin.SORT_METHOD_VIDEO_RUNTIME
# SORT_METHOD_FULLPATH = xbmcplugin.SORT_METHOD_FULLPATH
# SORT_METHOD_LABEL = xbmcplugin.SORT_METHOD_LABEL
# SORT_METHOD_LABEL_IGNORE_THE = xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE
# SORT_METHOD_LABEL_IGNORE_FOLDERS = xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS
# SORT_METHOD_CHANNEL = xbmcplugin.SORT_METHOD_CHANNEL
# SORT_METHOD_DATE = xbmcplugin.SORT_METHOD_DATE
# SORT_METHOD_DATEADDED = xbmcplugin.SORT_METHOD_DATEADDED
# SORT_METHOD_PLAYLIST_ORDER = xbmcplugin.SORT_METHOD_PLAYLIST_ORDER
# SORT_METHOD_EPISODE = xbmcplugin.SORT_METHOD_EPISODE
# SORT_METHOD_STUDIO = xbmcplugin.SORT_METHOD_STUDIO
# SORT_METHOD_STUDIO_IGNORE_THE = xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE
# SORT_METHOD_MPAA_RATING = xbmcplugin.SORT_METHOD_MPAA_RATING
# LOGDEBUG = 0
# LOGERROR = 4
# LOGFATAL = 6
# LOGINFO = 1
# LOGNONE = 7
# LOGNOTICE = 2
# LOGSEVERE = 5
# LOGWARNING = 3
# INFO_LABELS = [
# 'genre',
# 'country',
# 'year',
# 'episode',
# 'season',
# 'sortepisode',
# 'sortseason',
# 'episodeguide',
# 'showlink',
# 'top250',
# 'setid',
# 'tracknumber',
# 'rating',
# 'userrating',
# 'watched',
# 'playcount',
# 'overlay',
# 'cast',
# 'castandrole',
# 'director',
# 'mpaa',
# 'plot',
# 'plotoutline',
# 'title',
# 'originaltitle',
# 'sorttitle',
# 'duration',
# 'studio',
# 'tagline',
# 'writer',
# 'tvshowtitle',
# 'premiered',
# 'status',
# 'set',
# 'setoverview',
# 'tag',
# 'imdbnumber',
# 'code',
# 'aired',
# 'credits',
# 'lastplayed',
# 'album',
# 'artist',
# 'votes',
# 'path',
# 'trailer',
# 'dateadded',
# 'mediatype',
# 'dbid'
# ]
# def get_current_brasilia_utc_offset():
# def get_inputstream_addon():
# def is_inputstream_available():
# def is_live_available():
# def is_vod_available():
# def is_globosat_available():
# def is_globoplay_available():
# def is_globoplay_mais_canais_ao_vivo_available():
# def globoplay_ignore_channel_authorization():
# def is_oiplay_available():
# def is_tntplay_available():
# def is_nowonline_available():
# def is_telecine_available():
# def is_sbt_available():
# def is_pluto_available():
# def getKodiVersion():
# def addonIcon():
# def getBandwidthLimit():
# def addonThumb():
# def addonPoster():
# def addonBanner():
# def addonFanart():
# def addonNext():
# def artPath():
# def okDialog(heading, line1, line2=None, line3=None):
# def infoDialog(message, heading=addonInfo('name'), icon='', time=3000, sound=False):
# def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
# def selectDialog(list, heading=addonInfo('name')):
# def apiLanguage(ret_name=None):
# def version():
# def openSettings(query=None, id=addonInfo('id')):
# def refresh():
# def idle():
# def queueItem():
# def clear_credentials():
# def clear_globosat_credentials():
# def log(msg, level=LOGNOTICE):
# def get_coordinates(affiliate):
# def get_ip_coordinates():
# def get_affiliates_by_id(id):
# def filter_info_labels(info_labels):
# def to_timestamp(date):
# def run_plugin_url(params=None):
# def get_weekday_name(date):
. Output only the next line. | control.log(msg) |
Continue the code snippet: <|code_start|> schedule = []
threads = [
workers.Thread(self._get_globosat_schedule, schedule),
workers.Thread(self._get_combate_schedule, schedule)
]
[i.start() for i in threads]
[i.join() for i in threads]
return schedule
def _get_globosat_schedule(self, schedule):
globosat_schedule_url = "http://api.simulcast.globosat.tv/globosatplay/?page=%s"
headers = {
"User-Agent": "GlobosatPlay/142 CFNetwork/811.4.18 Darwin/16.5.0",
"Authorization": "Token 59150c4cc6a00f467bf225cf3bf8f44617e27037",
"Accept-Encoding": "gzip, deflate"
}
page = 1
response = requests.get(globosat_schedule_url % page, headers=headers).json()
results = response['results']
while response['next'] != None:
page += 1
response = requests.get(globosat_schedule_url % page, headers=headers).json()
results += response['results']
for result in results:
<|code_end|>
. Use current file imports:
import datetime
import requests
from resources.lib.modules import util
from resources.lib.modules import workers
and context (classes, functions, or code) from other files:
# Path: resources/lib/modules/util.py
# class struct(object):
# def __init__(self, kdict=None):
# def __repr__(self):
# def __getattr__(self, name):
# def __len__(self):
# def get(self, key):
# def getMPAAFromCI(ci):
# def getBestBitrateUrl(plugin, streams):
# def merge_dicts(x, *argv):
# def slugify(string):
# def unescape(text):
# def fixup(m):
# def time_format(time_str=None, input_format=None):
# def get_signed_hashes(a):
# def J(a):
# def K(a):
# def L():
# def M(a):
# def N():
# def O(a):
# def P(a):
# def get_utc_delta():
# def strptime(date_string, format):
# def strptime_workaround(date_string, format='%Y-%m-%dT%H:%M:%S'):
# def get_total_seconds(timedelta):
# def get_total_seconds_float(timedelta):
# def get_total_hours(timedelta):
# def add_url_parameters(url, params):
# def is_number(s):
# G = 3600
# H = "=0xAC10FD"
#
# Path: resources/lib/modules/workers.py
# class Thread(threading.Thread):
# class Workers:
# def __init__(self, target, *args):
# def run(self):
# def get_result(self):
# def globaltrace(self, frame, event, arg):
# def localtrace(self, frame, event, arg):
# def kill(self):
# def __init__(self, concurrent=30, queue_size=None, target=None, expect_result=True):
# def _queue_worker():
# def put(self, item):
# def join(self):
# def terminate(self):
. Output only the next line. | beginsAt = util.strptime_workaround(result['day'], '%d/%m/%Y %H:%M') + util.get_utc_delta() |
Given the following code snippet before the placeholder: <|code_start|>
class Schedule:
def get_schedule(self):
schedule = []
threads = [
<|code_end|>
, predict the next line using imports from the current file:
import datetime
import requests
from resources.lib.modules import util
from resources.lib.modules import workers
and context including class names, function names, and sometimes code from other files:
# Path: resources/lib/modules/util.py
# class struct(object):
# def __init__(self, kdict=None):
# def __repr__(self):
# def __getattr__(self, name):
# def __len__(self):
# def get(self, key):
# def getMPAAFromCI(ci):
# def getBestBitrateUrl(plugin, streams):
# def merge_dicts(x, *argv):
# def slugify(string):
# def unescape(text):
# def fixup(m):
# def time_format(time_str=None, input_format=None):
# def get_signed_hashes(a):
# def J(a):
# def K(a):
# def L():
# def M(a):
# def N():
# def O(a):
# def P(a):
# def get_utc_delta():
# def strptime(date_string, format):
# def strptime_workaround(date_string, format='%Y-%m-%dT%H:%M:%S'):
# def get_total_seconds(timedelta):
# def get_total_seconds_float(timedelta):
# def get_total_hours(timedelta):
# def add_url_parameters(url, params):
# def is_number(s):
# G = 3600
# H = "=0xAC10FD"
#
# Path: resources/lib/modules/workers.py
# class Thread(threading.Thread):
# class Workers:
# def __init__(self, target, *args):
# def run(self):
# def get_result(self):
# def globaltrace(self, frame, event, arg):
# def localtrace(self, frame, event, arg):
# def kill(self):
# def __init__(self, concurrent=30, queue_size=None, target=None, expect_result=True):
# def _queue_worker():
# def put(self, item):
# def join(self):
# def terminate(self):
. Output only the next line. | workers.Thread(self._get_globosat_schedule, schedule), |
Given snippet: <|code_start|> control.log("Oi Play - play_stream: id=%s | meta=%s" % (id, meta))
if id is None: return
provider = meta.get('provider')
self.isLive = meta.get('livefeed', False)
data = self.individualize(self.isLive, id, provider)
if not data or 'individualization' not in data:
error_message = '%s: %s' % (data.get('reason'), data.get('detail')) if data and data.get('reason') else control.lang(34100).encode('utf-8')
control.infoDialog(error_message, icon='ERROR')
return
encrypted = 'drm' in data and 'licenseUrl' in data['drm']
if encrypted and not control.is_inputstream_available():
control.okDialog(u'Oi Play', control.lang(34103).encode('utf-8'))
return
url = data['individualization']['url']
# info = data.get('token', {}).get('cmsChannelItem') or data.get('token', {}).get('cmsContentItem')
control.log("live media url: %s" % url)
self.offset = float(meta['milliseconds_watched']) / 1000.0 if 'milliseconds_watched' in meta else 0
parsed_url = urlparse(url)
if ".m3u8" in parsed_url.path:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import requests
import sys
import threading
import resources.lib.modules.control as control
import xbmc
from urlparse import urlparse
from auth import gettoken
from auth import get_default_profile
from private_data import get_device_id
from resources.lib.modules import hlshelper
and context:
# Path: resources/lib/modules/hlshelper.py
# def get_max_bandwidth():
# def pick_bandwidth(url):
which might include code, classes, or functions. Output only the next line. | self.url, mime_type, stopEvent, cookies = hlshelper.pick_bandwidth(url) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: UTF-8 -*-
try:
except:
try:
except:
auth_lock = Lock()
SERVICES_LOCKS = {}
class Auth:
ENDPOINT_URL = 'https://login.globo.com/api/authentication/sdk'
PROVIDER_ID = None
def __init__(self, tenant='globoplay'):
try:
self.tenant = tenant
<|code_end|>
, predict the next line using imports from the current file:
import requests
import hashlib
import HTMLParser
import html.parser as HTMLParser
import cPickle as pickle
import pickle
from resources.lib.modules import control
from . import cache
from threading import Lock
and context including class names, function names, and sometimes code from other files:
# Path: resources/lib/modules/control.py
# SORT_METHOD_NONE = xbmcplugin.SORT_METHOD_NONE
# SORT_METHOD_UNSORTED = xbmcplugin.SORT_METHOD_UNSORTED
# SORT_METHOD_VIDEO_RATING = xbmcplugin.SORT_METHOD_VIDEO_RATING
# SORT_METHOD_TRACKNUM = xbmcplugin.SORT_METHOD_TRACKNUM
# SORT_METHOD_FILE = xbmcplugin.SORT_METHOD_FILE
# SORT_METHOD_TITLE = xbmcplugin.SORT_METHOD_TITLE
# SORT_METHOD_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_TITLE = xbmcplugin.SORT_METHOD_VIDEO_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE
# SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE
# SORT_METHOD_VIDEO_RUNTIME = xbmcplugin.SORT_METHOD_VIDEO_RUNTIME
# SORT_METHOD_FULLPATH = xbmcplugin.SORT_METHOD_FULLPATH
# SORT_METHOD_LABEL = xbmcplugin.SORT_METHOD_LABEL
# SORT_METHOD_LABEL_IGNORE_THE = xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE
# SORT_METHOD_LABEL_IGNORE_FOLDERS = xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS
# SORT_METHOD_CHANNEL = xbmcplugin.SORT_METHOD_CHANNEL
# SORT_METHOD_DATE = xbmcplugin.SORT_METHOD_DATE
# SORT_METHOD_DATEADDED = xbmcplugin.SORT_METHOD_DATEADDED
# SORT_METHOD_PLAYLIST_ORDER = xbmcplugin.SORT_METHOD_PLAYLIST_ORDER
# SORT_METHOD_EPISODE = xbmcplugin.SORT_METHOD_EPISODE
# SORT_METHOD_STUDIO = xbmcplugin.SORT_METHOD_STUDIO
# SORT_METHOD_STUDIO_IGNORE_THE = xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE
# SORT_METHOD_MPAA_RATING = xbmcplugin.SORT_METHOD_MPAA_RATING
# LOGDEBUG = 0
# LOGERROR = 4
# LOGFATAL = 6
# LOGINFO = 1
# LOGNONE = 7
# LOGNOTICE = 2
# LOGSEVERE = 5
# LOGWARNING = 3
# INFO_LABELS = [
# 'genre',
# 'country',
# 'year',
# 'episode',
# 'season',
# 'sortepisode',
# 'sortseason',
# 'episodeguide',
# 'showlink',
# 'top250',
# 'setid',
# 'tracknumber',
# 'rating',
# 'userrating',
# 'watched',
# 'playcount',
# 'overlay',
# 'cast',
# 'castandrole',
# 'director',
# 'mpaa',
# 'plot',
# 'plotoutline',
# 'title',
# 'originaltitle',
# 'sorttitle',
# 'duration',
# 'studio',
# 'tagline',
# 'writer',
# 'tvshowtitle',
# 'premiered',
# 'status',
# 'set',
# 'setoverview',
# 'tag',
# 'imdbnumber',
# 'code',
# 'aired',
# 'credits',
# 'lastplayed',
# 'album',
# 'artist',
# 'votes',
# 'path',
# 'trailer',
# 'dateadded',
# 'mediatype',
# 'dbid'
# ]
# def get_current_brasilia_utc_offset():
# def get_inputstream_addon():
# def is_inputstream_available():
# def is_live_available():
# def is_vod_available():
# def is_globosat_available():
# def is_globoplay_available():
# def is_globoplay_mais_canais_ao_vivo_available():
# def globoplay_ignore_channel_authorization():
# def is_oiplay_available():
# def is_tntplay_available():
# def is_nowonline_available():
# def is_telecine_available():
# def is_sbt_available():
# def is_pluto_available():
# def getKodiVersion():
# def addonIcon():
# def getBandwidthLimit():
# def addonThumb():
# def addonPoster():
# def addonBanner():
# def addonFanart():
# def addonNext():
# def artPath():
# def okDialog(heading, line1, line2=None, line3=None):
# def infoDialog(message, heading=addonInfo('name'), icon='', time=3000, sound=False):
# def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
# def selectDialog(list, heading=addonInfo('name')):
# def apiLanguage(ret_name=None):
# def version():
# def openSettings(query=None, id=addonInfo('id')):
# def refresh():
# def idle():
# def queueItem():
# def clear_credentials():
# def clear_globosat_credentials():
# def log(msg, level=LOGNOTICE):
# def get_coordinates(affiliate):
# def get_ip_coordinates():
# def get_affiliates_by_id(id):
# def filter_info_labels(info_labels):
# def to_timestamp(date):
# def run_plugin_url(params=None):
# def get_weekday_name(date):
#
# Path: resources/lib/modules/cache.py
# def get(function, timeout_hour, *args, **kargs):
# def clear_item(function, *args, **kargs):
# def __get_from_cache(dbcur, table, f, a, timeout_hour):
# def __execute_origin(dbcur, dbcon, function, table, f, a, response, *args, **kargs):
# def delete_file():
# def clear(table=None):
. Output only the next line. | credentials = control.setting(self.get_credentials_key()) |
Next line prediction: <|code_start|>
PLAYER_HANDLER = player.__name__
def get_live_channels():
url = 'https://apim.oi.net.br/app/oiplay/ummex/v1/lists/651acd5c-236d-47d1-9e57-584a233ab76a?limit=200&orderby=titleAsc&page=1&useragent=androidtv'
response = requests.get(url).json()
channels = []
<|code_end|>
. Use current file imports:
(import requests
import datetime
import player
from resources.lib.modules import workers
from resources.lib.modules import util
from resources.lib.modules import cache)
and context including class names, function names, or small code snippets from other files:
# Path: resources/lib/modules/workers.py
# class Thread(threading.Thread):
# class Workers:
# def __init__(self, target, *args):
# def run(self):
# def get_result(self):
# def globaltrace(self, frame, event, arg):
# def localtrace(self, frame, event, arg):
# def kill(self):
# def __init__(self, concurrent=30, queue_size=None, target=None, expect_result=True):
# def _queue_worker():
# def put(self, item):
# def join(self):
# def terminate(self):
#
# Path: resources/lib/modules/util.py
# class struct(object):
# def __init__(self, kdict=None):
# def __repr__(self):
# def __getattr__(self, name):
# def __len__(self):
# def get(self, key):
# def getMPAAFromCI(ci):
# def getBestBitrateUrl(plugin, streams):
# def merge_dicts(x, *argv):
# def slugify(string):
# def unescape(text):
# def fixup(m):
# def time_format(time_str=None, input_format=None):
# def get_signed_hashes(a):
# def J(a):
# def K(a):
# def L():
# def M(a):
# def N():
# def O(a):
# def P(a):
# def get_utc_delta():
# def strptime(date_string, format):
# def strptime_workaround(date_string, format='%Y-%m-%dT%H:%M:%S'):
# def get_total_seconds(timedelta):
# def get_total_seconds_float(timedelta):
# def get_total_hours(timedelta):
# def add_url_parameters(url, params):
# def is_number(s):
# G = 3600
# H = "=0xAC10FD"
#
# Path: resources/lib/modules/cache.py
# def get(function, timeout_hour, *args, **kargs):
# def clear_item(function, *args, **kargs):
# def __get_from_cache(dbcur, table, f, a, timeout_hour):
# def __execute_origin(dbcur, dbcon, function, table, f, a, response, *args, **kargs):
# def delete_file():
# def clear(table=None):
. Output only the next line. | threads = [workers.Thread(__merge_channel_data, channel['prgSvcId'], channels) for channel in response['items']] |
Given the following code snippet before the placeholder: <|code_start|>
def __merge_channel_data(channel, result):
data = get_channel_epg_now(channel)
result.append(data)
def get_channel_epg_now(channel):
url = 'https://apim.oi.net.br/app/oiplay/ummex/v1/epg/{channel}/beforenowandnext?beforeCount=0&nextCount=0&includeCurrentProgram=true'.format(channel=channel)
response = requests.get(url).json()
now = response['schedules'][0]
program = now['program']
title = program['seriesTitle']
series = u" (S" + str(program['seasonNumber']) + u':E' + str(program['episodeNumber']) + u")" if program['programType'] == 'Series' else u''
episode_title = program['title'] + series if 'title' in program and program['title'] != title else ''
studio = response['title']
thumb = None
fanart = None
if 'programImages' in program and len(program['programImages']) > 0:
thumb = next((image['url'] for image in program['programImages'] if image['type'] == 'Thumbnail'), None)
fanart = next((image['url'] for image in program['programImages'] if image['type'] == 'Backdrop'), thumb) or thumb
thumb = thumb or fanart
logo = response['positiveLogoUrl']
cast = [c['name'] for c in program['castMembers']]
<|code_end|>
, predict the next line using imports from the current file:
import requests
import datetime
import player
from resources.lib.modules import workers
from resources.lib.modules import util
from resources.lib.modules import cache
and context including class names, function names, and sometimes code from other files:
# Path: resources/lib/modules/workers.py
# class Thread(threading.Thread):
# class Workers:
# def __init__(self, target, *args):
# def run(self):
# def get_result(self):
# def globaltrace(self, frame, event, arg):
# def localtrace(self, frame, event, arg):
# def kill(self):
# def __init__(self, concurrent=30, queue_size=None, target=None, expect_result=True):
# def _queue_worker():
# def put(self, item):
# def join(self):
# def terminate(self):
#
# Path: resources/lib/modules/util.py
# class struct(object):
# def __init__(self, kdict=None):
# def __repr__(self):
# def __getattr__(self, name):
# def __len__(self):
# def get(self, key):
# def getMPAAFromCI(ci):
# def getBestBitrateUrl(plugin, streams):
# def merge_dicts(x, *argv):
# def slugify(string):
# def unescape(text):
# def fixup(m):
# def time_format(time_str=None, input_format=None):
# def get_signed_hashes(a):
# def J(a):
# def K(a):
# def L():
# def M(a):
# def N():
# def O(a):
# def P(a):
# def get_utc_delta():
# def strptime(date_string, format):
# def strptime_workaround(date_string, format='%Y-%m-%dT%H:%M:%S'):
# def get_total_seconds(timedelta):
# def get_total_seconds_float(timedelta):
# def get_total_hours(timedelta):
# def add_url_parameters(url, params):
# def is_number(s):
# G = 3600
# H = "=0xAC10FD"
#
# Path: resources/lib/modules/cache.py
# def get(function, timeout_hour, *args, **kargs):
# def clear_item(function, *args, **kargs):
# def __get_from_cache(dbcur, table, f, a, timeout_hour):
# def __execute_origin(dbcur, dbcon, function, table, f, a, response, *args, **kargs):
# def delete_file():
# def clear(table=None):
. Output only the next line. | date = util.strptime(now['startTimeUtc'], '%Y-%m-%dT%H:%M:%SZ') + util.get_utc_delta() |
Predict the next line for this snippet: <|code_start|> # 'title': episode_title,
'tvshowtitle': title,
'sorttitle': program_name,
'channel_id': response['prgSvcId'],
'dateadded': datetime.datetime.strftime(date, '%Y-%m-%d %H:%M:%S'),
'plot': description,
'tag': tags,
'duration': now['durationSeconds'],
'adult': program['isAdult'],
'cast': cast,
'director': program['directors'],
'genre': program['genres'],
'rating': program['rating'],
'year': program['releaseYear'],
'episode': program['episodeNumber'] if program['episodeNumber'] else None,
'season': program['seasonNumber'] if program['seasonNumber'] else None,
'art': {
'icon': logo,
'thumb': thumb,
'tvshow.poster': thumb,
'clearlogo': logo,
'fanart': fanart
}
}
def get_epg(start, end, channel_map):
start_time = datetime.datetime.strftime(start, '%Y-%m-%dT%H:%M:%SZ')
end_time = datetime.datetime.strftime(end, '%Y-%m-%dT%H:%M:%SZ')
url = 'https://apim.oi.net.br/app/oiplay/ummex/v1/epg?starttime={starttime}&endtime={endtime}&liveSubscriberGroup={channelmap}'.format(starttime=start_time, endtime=end_time, channelmap=channel_map)
<|code_end|>
with the help of current file imports:
import requests
import datetime
import player
from resources.lib.modules import workers
from resources.lib.modules import util
from resources.lib.modules import cache
and context from other files:
# Path: resources/lib/modules/workers.py
# class Thread(threading.Thread):
# class Workers:
# def __init__(self, target, *args):
# def run(self):
# def get_result(self):
# def globaltrace(self, frame, event, arg):
# def localtrace(self, frame, event, arg):
# def kill(self):
# def __init__(self, concurrent=30, queue_size=None, target=None, expect_result=True):
# def _queue_worker():
# def put(self, item):
# def join(self):
# def terminate(self):
#
# Path: resources/lib/modules/util.py
# class struct(object):
# def __init__(self, kdict=None):
# def __repr__(self):
# def __getattr__(self, name):
# def __len__(self):
# def get(self, key):
# def getMPAAFromCI(ci):
# def getBestBitrateUrl(plugin, streams):
# def merge_dicts(x, *argv):
# def slugify(string):
# def unescape(text):
# def fixup(m):
# def time_format(time_str=None, input_format=None):
# def get_signed_hashes(a):
# def J(a):
# def K(a):
# def L():
# def M(a):
# def N():
# def O(a):
# def P(a):
# def get_utc_delta():
# def strptime(date_string, format):
# def strptime_workaround(date_string, format='%Y-%m-%dT%H:%M:%S'):
# def get_total_seconds(timedelta):
# def get_total_seconds_float(timedelta):
# def get_total_hours(timedelta):
# def add_url_parameters(url, params):
# def is_number(s):
# G = 3600
# H = "=0xAC10FD"
#
# Path: resources/lib/modules/cache.py
# def get(function, timeout_hour, *args, **kargs):
# def clear_item(function, *args, **kargs):
# def __get_from_cache(dbcur, table, f, a, timeout_hour):
# def __execute_origin(dbcur, dbcon, function, table, f, a, response, *args, **kargs):
# def delete_file():
# def clear(table=None):
, which may contain function names, class names, or code. Output only the next line. | epg = cache.get(requests.get, 20, url, table='oiplay').json() |
Given snippet: <|code_start|> verbose_name='分类等级')
subset = models.ManyToManyField('self', blank=True, verbose_name='分类关系')
class Meta:
verbose_name = '视频分类'
verbose_name_plural = '视频分类管理'
def __str__(self):
base_name = self.name + str(' (level %d)' % (self.level))
if self.subset.first() and self.level == 2:
return '--'.join([self.subset.first().name, base_name])
else:
return base_name
def save(self, *args, **kwargs):
super(VideoCategory, self).save(*args, **kwargs)
def colored_level(self):
color_code = 'red' if self.level == 1 else 'green'
return format_html(
'<span style="color:{};">{}</span>',
color_code,
self.get_level_display()
)
colored_level.short_description = '分级'
# ---------------------------------------------------------------------
class MultipleUpload(models.Model):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
import os
import datetime
import six
import humanfriendly
from pathlib import Path
from django.db import models
from django.utils.html import format_html
from django.utils.encoding import uri_to_iri
from django.core.management import call_command
from django.utils.safestring import mark_safe
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, post_init, post_save
from django.dispatch import receiver
from django.core.urlresolvers import reverse
from django.core.files import File
from sortedm2m.fields import SortedManyToManyField
from uuslug import uuslug
from .my_storage import VodStorage
from admin_resumable.fields import (
ModelAdminResumableFileField, ModelAdminResumableImageField,
ModelAdminResumableMultiFileField, ModelAdminResumableRestoreFileField
)
from xpinyin import Pinyin # for pinyin search
from django.utils.encoding import smart_str
from django.utils.encoding import smart_unicode as smart_str
and context:
# Path: vodmanagement/my_storage.py
# class VodStorage(FileSystemStorage):
# """
# Returns same name for existing file and deletes existing file on save.
# """
#
# def _save(self, name, content):
# if self.exists(name):
# self.delete(name)
# return super(VodStorage, self)._save(name, content)
#
# def get_available_name(self, name, max_length=None):
# return name
#
# Path: admin_resumable/fields.py
# class ModelAdminResumableFileField(models.FileField):
# save_model = False
#
# def __init__(self, verbose_name=None, name=None, upload_to='',
# storage=None, **kwargs):
# self.orig_upload_to = upload_to
# super(ModelAdminResumableFileField, self).__init__(
# verbose_name, name, 'unused', **kwargs)
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableFileField, self).formfield(**kwargs)
#
# class ModelAdminResumableImageField(models.ImageField):
# # orig_upload_to = ''
# save_model = False
#
# def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs):
# self.orig_upload_to = ''
# super(ModelAdminResumableImageField, self).__init__(verbose_name, name, width_field, height_field, **kwargs)
#
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminFileResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableImageField, self).formfield(**kwargs)
#
# class ModelAdminResumableMultiFileField(models.FileField):
# save_model = True
#
# def __init__(self, verbose_name=None, name=None, upload_to='',
# storage=None, **kwargs):
# self.orig_upload_to = upload_to
# super(ModelAdminResumableMultiFileField, self).__init__(
# verbose_name, name, 'unused', **kwargs)
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminFileResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableMultiFileField, self).formfield(**kwargs)
#
# class ModelAdminResumableRestoreFileField(models.FileField):
#
# def __init__(self, verbose_name=None, name=None, upload_to='',
# storage=None, **kwargs):
# super(ModelAdminResumableRestoreFileField, self).__init__(
# verbose_name, name, 'unused', **kwargs)
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminRestoreFileResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableRestoreFileField, self).formfield(**kwargs)
which might include code, classes, or functions. Output only the next line. | files = ModelAdminResumableMultiFileField(null=True, blank=True, storage=VodStorage(), verbose_name='文件') |
Next line prediction: <|code_start|> verbose_name='分类等级')
subset = models.ManyToManyField('self', blank=True, verbose_name='分类关系')
class Meta:
verbose_name = '视频分类'
verbose_name_plural = '视频分类管理'
def __str__(self):
base_name = self.name + str(' (level %d)' % (self.level))
if self.subset.first() and self.level == 2:
return '--'.join([self.subset.first().name, base_name])
else:
return base_name
def save(self, *args, **kwargs):
super(VideoCategory, self).save(*args, **kwargs)
def colored_level(self):
color_code = 'red' if self.level == 1 else 'green'
return format_html(
'<span style="color:{};">{}</span>',
color_code,
self.get_level_display()
)
colored_level.short_description = '分级'
# ---------------------------------------------------------------------
class MultipleUpload(models.Model):
<|code_end|>
. Use current file imports:
(import logging
import os
import datetime
import six
import humanfriendly
from pathlib import Path
from django.db import models
from django.utils.html import format_html
from django.utils.encoding import uri_to_iri
from django.core.management import call_command
from django.utils.safestring import mark_safe
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, post_init, post_save
from django.dispatch import receiver
from django.core.urlresolvers import reverse
from django.core.files import File
from sortedm2m.fields import SortedManyToManyField
from uuslug import uuslug
from .my_storage import VodStorage
from admin_resumable.fields import (
ModelAdminResumableFileField, ModelAdminResumableImageField,
ModelAdminResumableMultiFileField, ModelAdminResumableRestoreFileField
)
from xpinyin import Pinyin # for pinyin search
from django.utils.encoding import smart_str
from django.utils.encoding import smart_unicode as smart_str)
and context including class names, function names, or small code snippets from other files:
# Path: vodmanagement/my_storage.py
# class VodStorage(FileSystemStorage):
# """
# Returns same name for existing file and deletes existing file on save.
# """
#
# def _save(self, name, content):
# if self.exists(name):
# self.delete(name)
# return super(VodStorage, self)._save(name, content)
#
# def get_available_name(self, name, max_length=None):
# return name
#
# Path: admin_resumable/fields.py
# class ModelAdminResumableFileField(models.FileField):
# save_model = False
#
# def __init__(self, verbose_name=None, name=None, upload_to='',
# storage=None, **kwargs):
# self.orig_upload_to = upload_to
# super(ModelAdminResumableFileField, self).__init__(
# verbose_name, name, 'unused', **kwargs)
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableFileField, self).formfield(**kwargs)
#
# class ModelAdminResumableImageField(models.ImageField):
# # orig_upload_to = ''
# save_model = False
#
# def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs):
# self.orig_upload_to = ''
# super(ModelAdminResumableImageField, self).__init__(verbose_name, name, width_field, height_field, **kwargs)
#
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminFileResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableImageField, self).formfield(**kwargs)
#
# class ModelAdminResumableMultiFileField(models.FileField):
# save_model = True
#
# def __init__(self, verbose_name=None, name=None, upload_to='',
# storage=None, **kwargs):
# self.orig_upload_to = upload_to
# super(ModelAdminResumableMultiFileField, self).__init__(
# verbose_name, name, 'unused', **kwargs)
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminFileResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableMultiFileField, self).formfield(**kwargs)
#
# class ModelAdminResumableRestoreFileField(models.FileField):
#
# def __init__(self, verbose_name=None, name=None, upload_to='',
# storage=None, **kwargs):
# super(ModelAdminResumableRestoreFileField, self).__init__(
# verbose_name, name, 'unused', **kwargs)
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminRestoreFileResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableRestoreFileField, self).formfield(**kwargs)
. Output only the next line. | files = ModelAdminResumableMultiFileField(null=True, blank=True, storage=VodStorage(), verbose_name='文件') |
Given the following code snippet before the placeholder: <|code_start|> '<span style="color:{};">{}</span>',
color_code,
self.get_level_display()
)
colored_level.short_description = '分级'
# ---------------------------------------------------------------------
class MultipleUpload(models.Model):
files = ModelAdminResumableMultiFileField(null=True, blank=True, storage=VodStorage(), verbose_name='文件')
save_path = models.CharField(max_length=128, blank=False, null=True, verbose_name='保存路径')
category = models.ForeignKey(VideoCategory, null=True, verbose_name='分类')
class Meta:
verbose_name = '批量上传'
verbose_name_plural = '批量上传管理'
# ---------------------------------------------------------------------
# TODO(hhy): Please Leave This Model Here. It Will Be Use In The Future.
# class VideoTag(models.Model):
# name = models.CharField(max_length=200, null=False, blank=False)
#
# def __str__(self):
# return self.name
class Restore(models.Model):
txt_file = models.FileField(blank=True, null=True, verbose_name='备份配置文件')
<|code_end|>
, predict the next line using imports from the current file:
import logging
import os
import datetime
import six
import humanfriendly
from pathlib import Path
from django.db import models
from django.utils.html import format_html
from django.utils.encoding import uri_to_iri
from django.core.management import call_command
from django.utils.safestring import mark_safe
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, post_init, post_save
from django.dispatch import receiver
from django.core.urlresolvers import reverse
from django.core.files import File
from sortedm2m.fields import SortedManyToManyField
from uuslug import uuslug
from .my_storage import VodStorage
from admin_resumable.fields import (
ModelAdminResumableFileField, ModelAdminResumableImageField,
ModelAdminResumableMultiFileField, ModelAdminResumableRestoreFileField
)
from xpinyin import Pinyin # for pinyin search
from django.utils.encoding import smart_str
from django.utils.encoding import smart_unicode as smart_str
and context including class names, function names, and sometimes code from other files:
# Path: vodmanagement/my_storage.py
# class VodStorage(FileSystemStorage):
# """
# Returns same name for existing file and deletes existing file on save.
# """
#
# def _save(self, name, content):
# if self.exists(name):
# self.delete(name)
# return super(VodStorage, self)._save(name, content)
#
# def get_available_name(self, name, max_length=None):
# return name
#
# Path: admin_resumable/fields.py
# class ModelAdminResumableFileField(models.FileField):
# save_model = False
#
# def __init__(self, verbose_name=None, name=None, upload_to='',
# storage=None, **kwargs):
# self.orig_upload_to = upload_to
# super(ModelAdminResumableFileField, self).__init__(
# verbose_name, name, 'unused', **kwargs)
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableFileField, self).formfield(**kwargs)
#
# class ModelAdminResumableImageField(models.ImageField):
# # orig_upload_to = ''
# save_model = False
#
# def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs):
# self.orig_upload_to = ''
# super(ModelAdminResumableImageField, self).__init__(verbose_name, name, width_field, height_field, **kwargs)
#
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminFileResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableImageField, self).formfield(**kwargs)
#
# class ModelAdminResumableMultiFileField(models.FileField):
# save_model = True
#
# def __init__(self, verbose_name=None, name=None, upload_to='',
# storage=None, **kwargs):
# self.orig_upload_to = upload_to
# super(ModelAdminResumableMultiFileField, self).__init__(
# verbose_name, name, 'unused', **kwargs)
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminFileResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableMultiFileField, self).formfield(**kwargs)
#
# class ModelAdminResumableRestoreFileField(models.FileField):
#
# def __init__(self, verbose_name=None, name=None, upload_to='',
# storage=None, **kwargs):
# super(ModelAdminResumableRestoreFileField, self).__init__(
# verbose_name, name, 'unused', **kwargs)
#
# def formfield(self, **kwargs):
# content_type_id = ContentType.objects.get_for_model(self.model).id
# defaults = {
# 'form_class': FormAdminResumableFileField,
# 'widget': AdminRestoreFileResumableWidget(attrs={
# 'content_type_id': content_type_id,
# 'field_name': self.name})
# }
# kwargs.update(defaults)
# return super(ModelAdminResumableRestoreFileField, self).formfield(**kwargs)
. Output only the next line. | zip_file = ModelAdminResumableRestoreFileField(null=True, blank=True, storage=VodStorage(), verbose_name='压缩包') |
Given the code snippet: <|code_start|>
cache_time = 10
urlpatterns = [
url(r'^$', cache_page(cache_time)(VodListAPIView.as_view()), name='list'),
<|code_end|>
, generate the next line using the imports in this file:
from django.conf.urls import url
from django.views.decorators.cache import cache_page
from .views import (
VodListAPIView, HomeListAPIView, HomeOverViewAPIView,CategoryListAPIView,
YearListAPIView, RegionListAPIView, VodDetailAPIView
)
and context (functions, classes, or occasionally code) from other files:
# Path: vodmanagement/api/views.py
# class VodListAPIView(ListAPIView):
# """
# VodListAPIView doc
# """
# serializer_class = VodListSerializer
# permission_classes = [AllowAny]
# pagination_class = VodPageNumberPagination # PageNumberPagination
#
# def get_queryset(self, *args, **kwargs):
# main_category = self.request.query_params.get('main_category')
# queryset_list = get_all_videos(main_category)
# category = checked_query_param(self.request.query_params.get('category'))
# year = checked_query_param(self.request.query_params.get('year'))
# region = checked_query_param(self.request.query_params.get('region'))
# queryset_list = get_filter_videos(queryset_list, category=category, year=year, region=region)
# search = self.request.GET.get("search")
# if search is not None and search != '':
# queryset_list = queryset_list.filter(
# Q(title__icontains=search) |
# Q(description__icontains=search)
# )
# return queryset_list
#
# class HomeListAPIView(APIView):
# """
# HomeListAPIView doc
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# preview_categories = {}
# videos = get_random_videos()
# preview_categories['count'] = len(videos)
# preview_categories['videos'] = VodHomeListSerializer(videos, many=True).data
# return Response(preview_categories)
#
# class HomeOverViewAPIView(APIView):
# """
# 展示在主页的每个一级分类下最新的N个视频,分类名称通过GET请求中的category参数确定,N通过GET请求中的length参数确定
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# category = self.request.query_params.get('category')
# length = self.request.query_params.get('length')
# try:
# if category is not None:
# if length is None: length = 4
# videos = get_all_videos(None).filter(
# Q(category__subset__name=category) |
# Q(category__name=category)
# )[:int(length)]
# if not videos:
# raise ValueError('视频列表为空,请检查分类名称')
# overview_videos = VodListSerializer(videos, many=True).data
# return Response(overview_videos)
# else:
# raise ValueError('缺少分类名称参数category')
# except Exception as e:
# return Response({
# 'error': str(e)
# })
#
# class CategoryListAPIView(APIView):
# """
# CategoryListAPIView doc
# """
# serializer_class = CategoryListSerializer
# permission_classes = [AllowAny]
# queryset = VideoCategory.objects.all()
#
# def get(self, request, format=None):
# return Response(gen_categories())
#
# class YearListAPIView(APIView):
# permission_classes = [AllowAny]
#
# def get(self, *args, **kwargs):
# main_category = self.request.query_params.get('category')
# if main_category is None:
# Response('Error, the year list request must contain 1 first level category parameter.')
# year_list = get_years(main_category)
# return Response(year_list)
#
# class RegionListAPIView(ListAPIView):
# permission_classes = [AllowAny]
# serializer_class = RegionListSerializer
#
# def get_queryset(self, *args, **kwargs):
# return VideoRegion.objects.all()
#
# class VodDetailAPIView(RetrieveAPIView):
# """
# VodDetailAPIView doc
#
# """
# lookup_field = 'id'
# serializer_class = VodDetailSerializer
# permission_classes = [HasPermission]
#
# def get_queryset(self, *args, **kwargs):
# query_set = Vod.objects.filter(active=1)
# return query_set
. Output only the next line. | url(r'^home/$', cache_page(cache_time)(HomeListAPIView.as_view()), name='home'), |
Here is a snippet: <|code_start|>
cache_time = 10
urlpatterns = [
url(r'^$', cache_page(cache_time)(VodListAPIView.as_view()), name='list'),
url(r'^home/$', cache_page(cache_time)(HomeListAPIView.as_view()), name='home'),
<|code_end|>
. Write the next line using the current file imports:
from django.conf.urls import url
from django.views.decorators.cache import cache_page
from .views import (
VodListAPIView, HomeListAPIView, HomeOverViewAPIView,CategoryListAPIView,
YearListAPIView, RegionListAPIView, VodDetailAPIView
)
and context from other files:
# Path: vodmanagement/api/views.py
# class VodListAPIView(ListAPIView):
# """
# VodListAPIView doc
# """
# serializer_class = VodListSerializer
# permission_classes = [AllowAny]
# pagination_class = VodPageNumberPagination # PageNumberPagination
#
# def get_queryset(self, *args, **kwargs):
# main_category = self.request.query_params.get('main_category')
# queryset_list = get_all_videos(main_category)
# category = checked_query_param(self.request.query_params.get('category'))
# year = checked_query_param(self.request.query_params.get('year'))
# region = checked_query_param(self.request.query_params.get('region'))
# queryset_list = get_filter_videos(queryset_list, category=category, year=year, region=region)
# search = self.request.GET.get("search")
# if search is not None and search != '':
# queryset_list = queryset_list.filter(
# Q(title__icontains=search) |
# Q(description__icontains=search)
# )
# return queryset_list
#
# class HomeListAPIView(APIView):
# """
# HomeListAPIView doc
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# preview_categories = {}
# videos = get_random_videos()
# preview_categories['count'] = len(videos)
# preview_categories['videos'] = VodHomeListSerializer(videos, many=True).data
# return Response(preview_categories)
#
# class HomeOverViewAPIView(APIView):
# """
# 展示在主页的每个一级分类下最新的N个视频,分类名称通过GET请求中的category参数确定,N通过GET请求中的length参数确定
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# category = self.request.query_params.get('category')
# length = self.request.query_params.get('length')
# try:
# if category is not None:
# if length is None: length = 4
# videos = get_all_videos(None).filter(
# Q(category__subset__name=category) |
# Q(category__name=category)
# )[:int(length)]
# if not videos:
# raise ValueError('视频列表为空,请检查分类名称')
# overview_videos = VodListSerializer(videos, many=True).data
# return Response(overview_videos)
# else:
# raise ValueError('缺少分类名称参数category')
# except Exception as e:
# return Response({
# 'error': str(e)
# })
#
# class CategoryListAPIView(APIView):
# """
# CategoryListAPIView doc
# """
# serializer_class = CategoryListSerializer
# permission_classes = [AllowAny]
# queryset = VideoCategory.objects.all()
#
# def get(self, request, format=None):
# return Response(gen_categories())
#
# class YearListAPIView(APIView):
# permission_classes = [AllowAny]
#
# def get(self, *args, **kwargs):
# main_category = self.request.query_params.get('category')
# if main_category is None:
# Response('Error, the year list request must contain 1 first level category parameter.')
# year_list = get_years(main_category)
# return Response(year_list)
#
# class RegionListAPIView(ListAPIView):
# permission_classes = [AllowAny]
# serializer_class = RegionListSerializer
#
# def get_queryset(self, *args, **kwargs):
# return VideoRegion.objects.all()
#
# class VodDetailAPIView(RetrieveAPIView):
# """
# VodDetailAPIView doc
#
# """
# lookup_field = 'id'
# serializer_class = VodDetailSerializer
# permission_classes = [HasPermission]
#
# def get_queryset(self, *args, **kwargs):
# query_set = Vod.objects.filter(active=1)
# return query_set
, which may include functions, classes, or code. Output only the next line. | url(r'^home/overview$', cache_page(cache_time)(HomeOverViewAPIView.as_view()), name='home_overview'), |
Predict the next line after this snippet: <|code_start|>
cache_time = 10
urlpatterns = [
url(r'^$', cache_page(cache_time)(VodListAPIView.as_view()), name='list'),
url(r'^home/$', cache_page(cache_time)(HomeListAPIView.as_view()), name='home'),
url(r'^home/overview$', cache_page(cache_time)(HomeOverViewAPIView.as_view()), name='home_overview'),
<|code_end|>
using the current file's imports:
from django.conf.urls import url
from django.views.decorators.cache import cache_page
from .views import (
VodListAPIView, HomeListAPIView, HomeOverViewAPIView,CategoryListAPIView,
YearListAPIView, RegionListAPIView, VodDetailAPIView
)
and any relevant context from other files:
# Path: vodmanagement/api/views.py
# class VodListAPIView(ListAPIView):
# """
# VodListAPIView doc
# """
# serializer_class = VodListSerializer
# permission_classes = [AllowAny]
# pagination_class = VodPageNumberPagination # PageNumberPagination
#
# def get_queryset(self, *args, **kwargs):
# main_category = self.request.query_params.get('main_category')
# queryset_list = get_all_videos(main_category)
# category = checked_query_param(self.request.query_params.get('category'))
# year = checked_query_param(self.request.query_params.get('year'))
# region = checked_query_param(self.request.query_params.get('region'))
# queryset_list = get_filter_videos(queryset_list, category=category, year=year, region=region)
# search = self.request.GET.get("search")
# if search is not None and search != '':
# queryset_list = queryset_list.filter(
# Q(title__icontains=search) |
# Q(description__icontains=search)
# )
# return queryset_list
#
# class HomeListAPIView(APIView):
# """
# HomeListAPIView doc
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# preview_categories = {}
# videos = get_random_videos()
# preview_categories['count'] = len(videos)
# preview_categories['videos'] = VodHomeListSerializer(videos, many=True).data
# return Response(preview_categories)
#
# class HomeOverViewAPIView(APIView):
# """
# 展示在主页的每个一级分类下最新的N个视频,分类名称通过GET请求中的category参数确定,N通过GET请求中的length参数确定
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# category = self.request.query_params.get('category')
# length = self.request.query_params.get('length')
# try:
# if category is not None:
# if length is None: length = 4
# videos = get_all_videos(None).filter(
# Q(category__subset__name=category) |
# Q(category__name=category)
# )[:int(length)]
# if not videos:
# raise ValueError('视频列表为空,请检查分类名称')
# overview_videos = VodListSerializer(videos, many=True).data
# return Response(overview_videos)
# else:
# raise ValueError('缺少分类名称参数category')
# except Exception as e:
# return Response({
# 'error': str(e)
# })
#
# class CategoryListAPIView(APIView):
# """
# CategoryListAPIView doc
# """
# serializer_class = CategoryListSerializer
# permission_classes = [AllowAny]
# queryset = VideoCategory.objects.all()
#
# def get(self, request, format=None):
# return Response(gen_categories())
#
# class YearListAPIView(APIView):
# permission_classes = [AllowAny]
#
# def get(self, *args, **kwargs):
# main_category = self.request.query_params.get('category')
# if main_category is None:
# Response('Error, the year list request must contain 1 first level category parameter.')
# year_list = get_years(main_category)
# return Response(year_list)
#
# class RegionListAPIView(ListAPIView):
# permission_classes = [AllowAny]
# serializer_class = RegionListSerializer
#
# def get_queryset(self, *args, **kwargs):
# return VideoRegion.objects.all()
#
# class VodDetailAPIView(RetrieveAPIView):
# """
# VodDetailAPIView doc
#
# """
# lookup_field = 'id'
# serializer_class = VodDetailSerializer
# permission_classes = [HasPermission]
#
# def get_queryset(self, *args, **kwargs):
# query_set = Vod.objects.filter(active=1)
# return query_set
. Output only the next line. | url(r'^category/$', cache_page(cache_time)(CategoryListAPIView.as_view()), name='category'), |
Given the code snippet: <|code_start|>
cache_time = 10
urlpatterns = [
url(r'^$', cache_page(cache_time)(VodListAPIView.as_view()), name='list'),
url(r'^home/$', cache_page(cache_time)(HomeListAPIView.as_view()), name='home'),
url(r'^home/overview$', cache_page(cache_time)(HomeOverViewAPIView.as_view()), name='home_overview'),
url(r'^category/$', cache_page(cache_time)(CategoryListAPIView.as_view()), name='category'),
<|code_end|>
, generate the next line using the imports in this file:
from django.conf.urls import url
from django.views.decorators.cache import cache_page
from .views import (
VodListAPIView, HomeListAPIView, HomeOverViewAPIView,CategoryListAPIView,
YearListAPIView, RegionListAPIView, VodDetailAPIView
)
and context (functions, classes, or occasionally code) from other files:
# Path: vodmanagement/api/views.py
# class VodListAPIView(ListAPIView):
# """
# VodListAPIView doc
# """
# serializer_class = VodListSerializer
# permission_classes = [AllowAny]
# pagination_class = VodPageNumberPagination # PageNumberPagination
#
# def get_queryset(self, *args, **kwargs):
# main_category = self.request.query_params.get('main_category')
# queryset_list = get_all_videos(main_category)
# category = checked_query_param(self.request.query_params.get('category'))
# year = checked_query_param(self.request.query_params.get('year'))
# region = checked_query_param(self.request.query_params.get('region'))
# queryset_list = get_filter_videos(queryset_list, category=category, year=year, region=region)
# search = self.request.GET.get("search")
# if search is not None and search != '':
# queryset_list = queryset_list.filter(
# Q(title__icontains=search) |
# Q(description__icontains=search)
# )
# return queryset_list
#
# class HomeListAPIView(APIView):
# """
# HomeListAPIView doc
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# preview_categories = {}
# videos = get_random_videos()
# preview_categories['count'] = len(videos)
# preview_categories['videos'] = VodHomeListSerializer(videos, many=True).data
# return Response(preview_categories)
#
# class HomeOverViewAPIView(APIView):
# """
# 展示在主页的每个一级分类下最新的N个视频,分类名称通过GET请求中的category参数确定,N通过GET请求中的length参数确定
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# category = self.request.query_params.get('category')
# length = self.request.query_params.get('length')
# try:
# if category is not None:
# if length is None: length = 4
# videos = get_all_videos(None).filter(
# Q(category__subset__name=category) |
# Q(category__name=category)
# )[:int(length)]
# if not videos:
# raise ValueError('视频列表为空,请检查分类名称')
# overview_videos = VodListSerializer(videos, many=True).data
# return Response(overview_videos)
# else:
# raise ValueError('缺少分类名称参数category')
# except Exception as e:
# return Response({
# 'error': str(e)
# })
#
# class CategoryListAPIView(APIView):
# """
# CategoryListAPIView doc
# """
# serializer_class = CategoryListSerializer
# permission_classes = [AllowAny]
# queryset = VideoCategory.objects.all()
#
# def get(self, request, format=None):
# return Response(gen_categories())
#
# class YearListAPIView(APIView):
# permission_classes = [AllowAny]
#
# def get(self, *args, **kwargs):
# main_category = self.request.query_params.get('category')
# if main_category is None:
# Response('Error, the year list request must contain 1 first level category parameter.')
# year_list = get_years(main_category)
# return Response(year_list)
#
# class RegionListAPIView(ListAPIView):
# permission_classes = [AllowAny]
# serializer_class = RegionListSerializer
#
# def get_queryset(self, *args, **kwargs):
# return VideoRegion.objects.all()
#
# class VodDetailAPIView(RetrieveAPIView):
# """
# VodDetailAPIView doc
#
# """
# lookup_field = 'id'
# serializer_class = VodDetailSerializer
# permission_classes = [HasPermission]
#
# def get_queryset(self, *args, **kwargs):
# query_set = Vod.objects.filter(active=1)
# return query_set
. Output only the next line. | url(r'^year/$', cache_page(cache_time)(YearListAPIView.as_view()), name='year'), |
Given the following code snippet before the placeholder: <|code_start|>
cache_time = 10
urlpatterns = [
url(r'^$', cache_page(cache_time)(VodListAPIView.as_view()), name='list'),
url(r'^home/$', cache_page(cache_time)(HomeListAPIView.as_view()), name='home'),
url(r'^home/overview$', cache_page(cache_time)(HomeOverViewAPIView.as_view()), name='home_overview'),
url(r'^category/$', cache_page(cache_time)(CategoryListAPIView.as_view()), name='category'),
url(r'^year/$', cache_page(cache_time)(YearListAPIView.as_view()), name='year'),
<|code_end|>
, predict the next line using imports from the current file:
from django.conf.urls import url
from django.views.decorators.cache import cache_page
from .views import (
VodListAPIView, HomeListAPIView, HomeOverViewAPIView,CategoryListAPIView,
YearListAPIView, RegionListAPIView, VodDetailAPIView
)
and context including class names, function names, and sometimes code from other files:
# Path: vodmanagement/api/views.py
# class VodListAPIView(ListAPIView):
# """
# VodListAPIView doc
# """
# serializer_class = VodListSerializer
# permission_classes = [AllowAny]
# pagination_class = VodPageNumberPagination # PageNumberPagination
#
# def get_queryset(self, *args, **kwargs):
# main_category = self.request.query_params.get('main_category')
# queryset_list = get_all_videos(main_category)
# category = checked_query_param(self.request.query_params.get('category'))
# year = checked_query_param(self.request.query_params.get('year'))
# region = checked_query_param(self.request.query_params.get('region'))
# queryset_list = get_filter_videos(queryset_list, category=category, year=year, region=region)
# search = self.request.GET.get("search")
# if search is not None and search != '':
# queryset_list = queryset_list.filter(
# Q(title__icontains=search) |
# Q(description__icontains=search)
# )
# return queryset_list
#
# class HomeListAPIView(APIView):
# """
# HomeListAPIView doc
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# preview_categories = {}
# videos = get_random_videos()
# preview_categories['count'] = len(videos)
# preview_categories['videos'] = VodHomeListSerializer(videos, many=True).data
# return Response(preview_categories)
#
# class HomeOverViewAPIView(APIView):
# """
# 展示在主页的每个一级分类下最新的N个视频,分类名称通过GET请求中的category参数确定,N通过GET请求中的length参数确定
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# category = self.request.query_params.get('category')
# length = self.request.query_params.get('length')
# try:
# if category is not None:
# if length is None: length = 4
# videos = get_all_videos(None).filter(
# Q(category__subset__name=category) |
# Q(category__name=category)
# )[:int(length)]
# if not videos:
# raise ValueError('视频列表为空,请检查分类名称')
# overview_videos = VodListSerializer(videos, many=True).data
# return Response(overview_videos)
# else:
# raise ValueError('缺少分类名称参数category')
# except Exception as e:
# return Response({
# 'error': str(e)
# })
#
# class CategoryListAPIView(APIView):
# """
# CategoryListAPIView doc
# """
# serializer_class = CategoryListSerializer
# permission_classes = [AllowAny]
# queryset = VideoCategory.objects.all()
#
# def get(self, request, format=None):
# return Response(gen_categories())
#
# class YearListAPIView(APIView):
# permission_classes = [AllowAny]
#
# def get(self, *args, **kwargs):
# main_category = self.request.query_params.get('category')
# if main_category is None:
# Response('Error, the year list request must contain 1 first level category parameter.')
# year_list = get_years(main_category)
# return Response(year_list)
#
# class RegionListAPIView(ListAPIView):
# permission_classes = [AllowAny]
# serializer_class = RegionListSerializer
#
# def get_queryset(self, *args, **kwargs):
# return VideoRegion.objects.all()
#
# class VodDetailAPIView(RetrieveAPIView):
# """
# VodDetailAPIView doc
#
# """
# lookup_field = 'id'
# serializer_class = VodDetailSerializer
# permission_classes = [HasPermission]
#
# def get_queryset(self, *args, **kwargs):
# query_set = Vod.objects.filter(active=1)
# return query_set
. Output only the next line. | url(r'^region/$', cache_page(cache_time)(RegionListAPIView.as_view()), name='region'), |
Given the following code snippet before the placeholder: <|code_start|>
cache_time = 10
urlpatterns = [
url(r'^$', cache_page(cache_time)(VodListAPIView.as_view()), name='list'),
url(r'^home/$', cache_page(cache_time)(HomeListAPIView.as_view()), name='home'),
url(r'^home/overview$', cache_page(cache_time)(HomeOverViewAPIView.as_view()), name='home_overview'),
url(r'^category/$', cache_page(cache_time)(CategoryListAPIView.as_view()), name='category'),
url(r'^year/$', cache_page(cache_time)(YearListAPIView.as_view()), name='year'),
url(r'^region/$', cache_page(cache_time)(RegionListAPIView.as_view()), name='region'),
<|code_end|>
, predict the next line using imports from the current file:
from django.conf.urls import url
from django.views.decorators.cache import cache_page
from .views import (
VodListAPIView, HomeListAPIView, HomeOverViewAPIView,CategoryListAPIView,
YearListAPIView, RegionListAPIView, VodDetailAPIView
)
and context including class names, function names, and sometimes code from other files:
# Path: vodmanagement/api/views.py
# class VodListAPIView(ListAPIView):
# """
# VodListAPIView doc
# """
# serializer_class = VodListSerializer
# permission_classes = [AllowAny]
# pagination_class = VodPageNumberPagination # PageNumberPagination
#
# def get_queryset(self, *args, **kwargs):
# main_category = self.request.query_params.get('main_category')
# queryset_list = get_all_videos(main_category)
# category = checked_query_param(self.request.query_params.get('category'))
# year = checked_query_param(self.request.query_params.get('year'))
# region = checked_query_param(self.request.query_params.get('region'))
# queryset_list = get_filter_videos(queryset_list, category=category, year=year, region=region)
# search = self.request.GET.get("search")
# if search is not None and search != '':
# queryset_list = queryset_list.filter(
# Q(title__icontains=search) |
# Q(description__icontains=search)
# )
# return queryset_list
#
# class HomeListAPIView(APIView):
# """
# HomeListAPIView doc
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# preview_categories = {}
# videos = get_random_videos()
# preview_categories['count'] = len(videos)
# preview_categories['videos'] = VodHomeListSerializer(videos, many=True).data
# return Response(preview_categories)
#
# class HomeOverViewAPIView(APIView):
# """
# 展示在主页的每个一级分类下最新的N个视频,分类名称通过GET请求中的category参数确定,N通过GET请求中的length参数确定
# """
# permission_classes = [AllowAny]
#
# def get(self, request, format=None):
# category = self.request.query_params.get('category')
# length = self.request.query_params.get('length')
# try:
# if category is not None:
# if length is None: length = 4
# videos = get_all_videos(None).filter(
# Q(category__subset__name=category) |
# Q(category__name=category)
# )[:int(length)]
# if not videos:
# raise ValueError('视频列表为空,请检查分类名称')
# overview_videos = VodListSerializer(videos, many=True).data
# return Response(overview_videos)
# else:
# raise ValueError('缺少分类名称参数category')
# except Exception as e:
# return Response({
# 'error': str(e)
# })
#
# class CategoryListAPIView(APIView):
# """
# CategoryListAPIView doc
# """
# serializer_class = CategoryListSerializer
# permission_classes = [AllowAny]
# queryset = VideoCategory.objects.all()
#
# def get(self, request, format=None):
# return Response(gen_categories())
#
# class YearListAPIView(APIView):
# permission_classes = [AllowAny]
#
# def get(self, *args, **kwargs):
# main_category = self.request.query_params.get('category')
# if main_category is None:
# Response('Error, the year list request must contain 1 first level category parameter.')
# year_list = get_years(main_category)
# return Response(year_list)
#
# class RegionListAPIView(ListAPIView):
# permission_classes = [AllowAny]
# serializer_class = RegionListSerializer
#
# def get_queryset(self, *args, **kwargs):
# return VideoRegion.objects.all()
#
# class VodDetailAPIView(RetrieveAPIView):
# """
# VodDetailAPIView doc
#
# """
# lookup_field = 'id'
# serializer_class = VodDetailSerializer
# permission_classes = [HasPermission]
#
# def get_queryset(self, *args, **kwargs):
# query_set = Vod.objects.filter(active=1)
# return query_set
. Output only the next line. | url(r'^(?P<id>[\w-]+)/$', cache_page(cache_time)(VodDetailAPIView.as_view()), name='detail'), |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.