Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Next line prediction: <|code_start|>"""SPADL schema for Wyscout data."""
class WyscoutCompetitionSchema(CompetitionSchema):
"""Definition of a dataframe containing a list of competitions and seasons."""
country_name: Series[str]
competition_gender: Series[str]
class WyscoutGameSchema(GameSchema):
"""Definition of a dataframe containing a list of games."""
<|code_end|>
. Use current file imports:
(import pandera as pa
from pandera.typing import DateTime, Object, Series
from socceraction.data.schema import (
CompetitionSchema,
EventSchema,
GameSchema,
PlayerSchema,
TeamSchema,
))
and context including class names, function names, or small code snippets from other files:
# Path: socceraction/data/schema.py
# class CompetitionSchema(pa.SchemaModel):
# """Definition of a dataframe containing a list of competitions and seasons."""
#
# season_id: Series[Object] = pa.Field()
# """The unique identifier for the season."""
# season_name: Series[str] = pa.Field()
# """The name of the season."""
# competition_id: Series[Object] = pa.Field()
# """The unique identifier for the competition."""
# competition_name: Series[str] = pa.Field()
# """The name of the competition."""
#
# class Config: # noqa: D106
# strict = True
# coerce = True
#
# class EventSchema(pa.SchemaModel):
# """Definition of a dataframe containing event stream data of a game."""
#
# game_id: Series[Object] = pa.Field()
# """The unique identifier for the game."""
# event_id: Series[Object] = pa.Field()
# """The unique identifier for the event."""
# period_id: Series[int] = pa.Field()
# """The unique identifier for the part of the game in which the event took place."""
# team_id: Series[Object] = pa.Field(nullable=True)
# """The unique identifier for the team this event relates to."""
# player_id: Series[Object] = pa.Field(nullable=True)
# """The unique identifier for the player this event relates to."""
# type_id: Series[int] = pa.Field()
# """The unique identifier for the type of this event."""
# type_name: Series[str] = pa.Field()
# """The name of the type of this event."""
#
# class Config: # noqa: D106
# strict = True
# coerce = True
#
# class GameSchema(pa.SchemaModel):
# """Definition of a dataframe containing a list of games."""
#
# game_id: Series[Object] = pa.Field()
# """The unique identifier for the game."""
# season_id: Series[Object] = pa.Field()
# """The unique identifier for the season."""
# competition_id: Series[Object] = pa.Field()
# """The unique identifier for the competition."""
# game_day: Series[pd.Int64Dtype] = pa.Field(nullable=True)
# """Number corresponding to the weeks or rounds into the competition this game is."""
# game_date: Series[DateTime] = pa.Field()
# """The date when the game was played."""
# home_team_id: Series[Object] = pa.Field()
# """The unique identifier for the home team in this game."""
# away_team_id: Series[Object] = pa.Field()
# """The unique identifier for the away team in this game."""
#
# class Config: # noqa: D106
# strict = True
# coerce = True
#
# class PlayerSchema(pa.SchemaModel):
# """Definition of a dataframe containing the list of players on the teamsheet of a game."""
#
# game_id: Series[Object] = pa.Field()
# """The unique identifier for the game."""
# team_id: Series[Object] = pa.Field()
# """The unique identifier for the player's team."""
# player_id: Series[Object] = pa.Field()
# """The unique identifier for the player."""
# player_name: Series[str] = pa.Field()
# """The name of the player."""
# is_starter: Series[bool] = pa.Field()
# """Whether the player is in the starting lineup."""
# minutes_played: Series[int] = pa.Field()
# """The number of minutes the player played in the game."""
# jersey_number: Series[int] = pa.Field()
# """The player's jersey number."""
#
# class Config: # noqa: D106
# strict = True
# coerce = True
#
# class TeamSchema(pa.SchemaModel):
# """Definition of a dataframe containing the list of teams of a game."""
#
# team_id: Series[Object] = pa.Field()
# """The unique identifier for the team."""
# team_name: Series[str] = pa.Field()
# """The name of the team."""
#
# class Config: # noqa: D106
# strict = True
# coerce = True
. Output only the next line. | class WyscoutPlayerSchema(PlayerSchema): |
Given the code snippet: <|code_start|>"""SPADL schema for Wyscout data."""
class WyscoutCompetitionSchema(CompetitionSchema):
"""Definition of a dataframe containing a list of competitions and seasons."""
country_name: Series[str]
competition_gender: Series[str]
class WyscoutGameSchema(GameSchema):
"""Definition of a dataframe containing a list of games."""
class WyscoutPlayerSchema(PlayerSchema):
"""Definition of a dataframe containing the list of teams of a game."""
firstname: Series[str]
lastname: Series[str]
nickname: Series[str] = pa.Field(nullable=True)
birth_date: Series[DateTime] = pa.Field(nullable=True)
<|code_end|>
, generate the next line using the imports in this file:
import pandera as pa
from pandera.typing import DateTime, Object, Series
from socceraction.data.schema import (
CompetitionSchema,
EventSchema,
GameSchema,
PlayerSchema,
TeamSchema,
)
and context (functions, classes, or occasionally code) from other files:
# Path: socceraction/data/schema.py
# class CompetitionSchema(pa.SchemaModel):
# """Definition of a dataframe containing a list of competitions and seasons."""
#
# season_id: Series[Object] = pa.Field()
# """The unique identifier for the season."""
# season_name: Series[str] = pa.Field()
# """The name of the season."""
# competition_id: Series[Object] = pa.Field()
# """The unique identifier for the competition."""
# competition_name: Series[str] = pa.Field()
# """The name of the competition."""
#
# class Config: # noqa: D106
# strict = True
# coerce = True
#
# class EventSchema(pa.SchemaModel):
# """Definition of a dataframe containing event stream data of a game."""
#
# game_id: Series[Object] = pa.Field()
# """The unique identifier for the game."""
# event_id: Series[Object] = pa.Field()
# """The unique identifier for the event."""
# period_id: Series[int] = pa.Field()
# """The unique identifier for the part of the game in which the event took place."""
# team_id: Series[Object] = pa.Field(nullable=True)
# """The unique identifier for the team this event relates to."""
# player_id: Series[Object] = pa.Field(nullable=True)
# """The unique identifier for the player this event relates to."""
# type_id: Series[int] = pa.Field()
# """The unique identifier for the type of this event."""
# type_name: Series[str] = pa.Field()
# """The name of the type of this event."""
#
# class Config: # noqa: D106
# strict = True
# coerce = True
#
# class GameSchema(pa.SchemaModel):
# """Definition of a dataframe containing a list of games."""
#
# game_id: Series[Object] = pa.Field()
# """The unique identifier for the game."""
# season_id: Series[Object] = pa.Field()
# """The unique identifier for the season."""
# competition_id: Series[Object] = pa.Field()
# """The unique identifier for the competition."""
# game_day: Series[pd.Int64Dtype] = pa.Field(nullable=True)
# """Number corresponding to the weeks or rounds into the competition this game is."""
# game_date: Series[DateTime] = pa.Field()
# """The date when the game was played."""
# home_team_id: Series[Object] = pa.Field()
# """The unique identifier for the home team in this game."""
# away_team_id: Series[Object] = pa.Field()
# """The unique identifier for the away team in this game."""
#
# class Config: # noqa: D106
# strict = True
# coerce = True
#
# class PlayerSchema(pa.SchemaModel):
# """Definition of a dataframe containing the list of players on the teamsheet of a game."""
#
# game_id: Series[Object] = pa.Field()
# """The unique identifier for the game."""
# team_id: Series[Object] = pa.Field()
# """The unique identifier for the player's team."""
# player_id: Series[Object] = pa.Field()
# """The unique identifier for the player."""
# player_name: Series[str] = pa.Field()
# """The name of the player."""
# is_starter: Series[bool] = pa.Field()
# """Whether the player is in the starting lineup."""
# minutes_played: Series[int] = pa.Field()
# """The number of minutes the player played in the game."""
# jersey_number: Series[int] = pa.Field()
# """The player's jersey number."""
#
# class Config: # noqa: D106
# strict = True
# coerce = True
#
# class TeamSchema(pa.SchemaModel):
# """Definition of a dataframe containing the list of teams of a game."""
#
# team_id: Series[Object] = pa.Field()
# """The unique identifier for the team."""
# team_name: Series[str] = pa.Field()
# """The name of the team."""
#
# class Config: # noqa: D106
# strict = True
# coerce = True
. Output only the next line. | class WyscoutTeamSchema(TeamSchema): |
Next line prediction: <|code_start|> # | op_1 |
# | op_2 |
return rule_2
if op_2_from_in_op_1 and op_1_to_in_op_2:
# overlap 1
# | op_1 |
# | op_2 |
return {
"property": rule_1["property"],
"operator": OPERATORS["IN_INTERVAL"],
"operand": [op_2_from, op_1_to],
}
if op_2_to_in_op_1 and op_1_from_in_op_2:
# overlap 2
# | op_1 |
# | op_2 |
return {
"property": rule_1["property"],
"operator": OPERATORS["IN_INTERVAL"],
"operand": [op_1_from, op_2_to],
}
# disjointed
# | op_1 |
# | op_2 |
raise CraftAiError(
"""Unable to reduce decision rules '{}' and '{}': """
"""the resulting rule is not fulfillable.""".format(
<|code_end|>
. Use current file imports:
(from functools import reduce as ft_reduce
from .errors import CraftAiError
from .formatters import format_decision_rules
from .operators import OPERATORS)
and context including class names, function names, or small code snippets from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
#
# Path: craft_ai/formatters.py
# def format_decision_rules(rules):
# return " and ".join([_format_decision_rule(rule) for rule in rules])
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
. Output only the next line. | format_decision_rules([rule_1]), format_decision_rules([rule_2]) |
Here is a snippet: <|code_start|>
def _is_is_reducer(rule_1, rule_2):
if rule_1["operand"] and (rule_1["operand"] != rule_2["operand"]):
raise CraftAiError(
"Operator '{}' can't have different value. Set to '{}' and receive '{}'".format(
<|code_end|>
. Write the next line using the current file imports:
from functools import reduce as ft_reduce
from .errors import CraftAiError
from .formatters import format_decision_rules
from .operators import OPERATORS
and context from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
#
# Path: craft_ai/formatters.py
# def format_decision_rules(rules):
# return " and ".join([_format_decision_rule(rule) for rule in rules])
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
, which may include functions, classes, or code. Output only the next line. | OPERATORS["IS"], rule_1["operand"], rule_2["operand"] |
Given the code snippet: <|code_start|>
expectations_files = os.listdir(EXPECTATIONS_DIR)
for expectations_file in expectations_files:
if os.path.splitext(expectations_file)[1] == ".json":
# Loading the expectations for this tree
with open(os.path.join(EXPECTATIONS_DIR, expectations_file)) as f:
expectations = json.load(f)
for expectation in expectations:
self.assertTrue(
"title" in expectation,
"Invalid expectation from '{}': missing \"title\".".format(
expectations_file
),
)
self.assertTrue(
"rules" in expectation and "expectation" in expectation,
'Invalid expectation from \'{}\': missing "rules" or "expectation".'.format(
expectations_file
),
)
for expectation in expectations:
with self.subTest():
self.check_expectation(
expectation["rules"], expectation["expectation"]
)
def check_expectation(self, rules, expectation):
if "error" in expectation:
<|code_end|>
, generate the next line using the imports in this file:
import unittest
import json
import os
from dateutil.parser import isoparse
from craft_ai import format_property, format_decision_rules, errors
and context (functions, classes, or occasionally code) from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# class CraftAiUnknownError(CraftAiError):
# class CraftAiNetworkError(CraftAiError):
# class CraftAiCredentialsError(CraftAiError):
# class CraftAiInternalError(CraftAiError):
# class CraftAiBadRequestError(CraftAiError):
# class CraftAiNotFoundError(CraftAiError):
# class CraftAiDecisionError(CraftAiError):
# class CraftAiNullDecisionError(CraftAiDecisionError):
# class CraftAiTimeError(CraftAiError):
# class CraftAiTokenError(CraftAiError):
# class CraftAiLongRequestTimeOutError(CraftAiError):
# def __init__(self, message=None, metadata=None):
# def __str__(self):
#
# Path: craft_ai/formatters.py
# def format_property(property_type, value=None):
# formatter = (
# PROPERTY_FORMATTER[property_type]
# if property_type in PROPERTY_FORMATTER
# else PROPERTY_FORMATTER[TYPE_ANY]
# )
#
# def extended_formatter(value):
# if value is None:
# return "null"
# if value == {}:
# return "N/A"
# return formatter(value)
#
# if value is not None:
# return extended_formatter(value)
#
# return extended_formatter
#
# def format_decision_rules(rules):
# return " and ".join([_format_decision_rule(rule) for rule in rules])
. Output only the next line. | self.assertRaises(errors.CraftAiError, format_decision_rules, rules) |
Given the code snippet: <|code_start|>
expectations_files = os.listdir(EXPECTATIONS_DIR)
for expectations_file in expectations_files:
if os.path.splitext(expectations_file)[1] == ".json":
# Loading the expectations for this tree
with open(os.path.join(EXPECTATIONS_DIR, expectations_file)) as f:
expectations = json.load(f)
for expectation in expectations:
self.assertTrue(
"title" in expectation,
"Invalid expectation from '{}': missing \"title\".".format(
expectations_file
),
)
self.assertTrue(
"rules" in expectation and "expectation" in expectation,
'Invalid expectation from \'{}\': missing "rules" or "expectation".'.format(
expectations_file
),
)
for expectation in expectations:
with self.subTest():
self.check_expectation(
expectation["rules"], expectation["expectation"]
)
def check_expectation(self, rules, expectation):
if "error" in expectation:
<|code_end|>
, generate the next line using the imports in this file:
import unittest
import json
import os
from dateutil.parser import isoparse
from craft_ai import format_property, format_decision_rules, errors
and context (functions, classes, or occasionally code) from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# class CraftAiUnknownError(CraftAiError):
# class CraftAiNetworkError(CraftAiError):
# class CraftAiCredentialsError(CraftAiError):
# class CraftAiInternalError(CraftAiError):
# class CraftAiBadRequestError(CraftAiError):
# class CraftAiNotFoundError(CraftAiError):
# class CraftAiDecisionError(CraftAiError):
# class CraftAiNullDecisionError(CraftAiDecisionError):
# class CraftAiTimeError(CraftAiError):
# class CraftAiTokenError(CraftAiError):
# class CraftAiLongRequestTimeOutError(CraftAiError):
# def __init__(self, message=None, metadata=None):
# def __str__(self):
#
# Path: craft_ai/formatters.py
# def format_property(property_type, value=None):
# formatter = (
# PROPERTY_FORMATTER[property_type]
# if property_type in PROPERTY_FORMATTER
# else PROPERTY_FORMATTER[TYPE_ANY]
# )
#
# def extended_formatter(value):
# if value is None:
# return "null"
# if value == {}:
# return "N/A"
# return formatter(value)
#
# if value is not None:
# return extended_formatter(value)
#
# return extended_formatter
#
# def format_decision_rules(rules):
# return " and ".join([_format_decision_rule(rule) for rule in rules])
. Output only the next line. | self.assertRaises(errors.CraftAiError, format_decision_rules, rules) |
Continue the code snippet: <|code_start|>
HERE = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.join(HERE, "data", "interpreter")
TREES_DIR = os.path.join(DATA_DIR, "decide", "trees")
PATH_DIR = os.path.join(DATA_DIR, "tree_computations_expectations", "get_paths")
NEIGHBOURS_DIR = os.path.join(
DATA_DIR, "tree_computations_expectations", "get_neighbours"
)
class TestTreeUtils(unittest.TestCase):
def test_extract_output_tree_invalid_tree_1(self):
path = os.path.join(DATA_DIR, "decide", "trees", "v1", "emptyArray.json")
tree = None
with open(path) as f:
tree = json.load(f)
<|code_end|>
. Use current file imports:
import unittest
import json
import os
from craft_ai import (
errors,
extract_decision_paths_from_tree,
extract_decision_path_neighbors,
extract_output_tree,
)
and context (classes, functions, or code) from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# class CraftAiUnknownError(CraftAiError):
# class CraftAiNetworkError(CraftAiError):
# class CraftAiCredentialsError(CraftAiError):
# class CraftAiInternalError(CraftAiError):
# class CraftAiBadRequestError(CraftAiError):
# class CraftAiNotFoundError(CraftAiError):
# class CraftAiDecisionError(CraftAiError):
# class CraftAiNullDecisionError(CraftAiDecisionError):
# class CraftAiTimeError(CraftAiError):
# class CraftAiTokenError(CraftAiError):
# class CraftAiLongRequestTimeOutError(CraftAiError):
# def __init__(self, message=None, metadata=None):
# def __str__(self):
#
# Path: craft_ai/tree_utils.py
# def extract_decision_paths_from_tree(tree):
# """
# Retrieve all the decision paths from a tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# Returns: e.g. ['0', '0-0', '0-1']
# """
# return _get_paths(extract_output_tree(tree))
#
# def extract_decision_path_neighbors(
# tree, decision_path, max_depth=None, include_self=False
# ):
# """
# Retrieve neighbor of a decision path in a tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# decision_path: string tree path eg. "0-2-1".
# max_depth (int, optional): positive int filter neighbours on their depth,
# default is None.
# include_self (bool, optional): include the given decision_path to the neighbours,
# default is False.
# """
# paths = _get_paths(extract_output_tree(tree))
# if decision_path not in paths:
# raise CraftAiError(
# """Invalid decision path given. """
# """{} not found in tree""".format(decision_path)
# )
#
# dp_depth = len(decision_path.split("-"))
# neighbours = _get_neighbours(paths, decision_path)
# if max_depth is None:
# max_depth = dp_depth
# if max_depth < 0:
# raise CraftAiError(
# """Invalid max depth given: {} should be None or a positive integer """.format(
# max_depth
# )
# )
# filtered = [n for n in neighbours if len(n.split("-")) <= max_depth]
# if include_self:
# filtered.append(decision_path)
# return filtered
#
# def extract_output_tree(tree, output_property=None):
# """
# Extract the output decision tree specific for a given output property from a full decision tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# output_property (optional): If provided, the output property for which the tree predicts
# values, otherwise the first defined tree is retrieved.
# """
# if not isinstance(tree, dict):
# raise CraftAiError(
# """Unable to retrieve the output tree, """
# """the given decision tree format is not a valid, expected a 'dict' got a {}.""".format(
# type(tree)
# )
# )
# if "trees" not in tree:
# raise CraftAiError(
# """Unable to retrieve the output tree, """
# """the given decision tree format is not a valid, no 'trees' property defined."""
# )
# trees = tree["trees"]
# if not output_property:
# output_property = list(trees)[0]
# if output_property not in trees:
# raise CraftAiError(
# """'{}' output tree can't be found in the given decision tree.""".format(
# output_property
# )
# )
# tree = tree["trees"][output_property]
# return trees[output_property]
. Output only the next line. | self.assertRaises(errors.CraftAiError, extract_output_tree, tree) |
Given snippet: <|code_start|> tree = json.load(f)
self.assertIsInstance(extract_output_tree(tree), dict)
def test_extract_output_tree_specific_output(self):
path = os.path.join(DATA_DIR, "decide", "trees", "v2", "oneColor.json")
tree = None
with open(path) as f:
tree = json.load(f)
self.assertIsInstance(extract_output_tree(tree, "value"), dict)
def test_extract_output_tree_bad_output(self):
path = os.path.join(DATA_DIR, "decide", "trees", "v2", "oneColor.json")
tree = None
with open(path) as f:
tree = json.load(f)
self.assertRaises(errors.CraftAiError, extract_output_tree, tree, "foo")
def test_extract_decision_paths_from_tree(self):
"""
Testing expectations
in data/interpreter/tree_computations_expectations/get_paths`/
Trees located in data/interpreter/decide/trees/
"""
for version in os.listdir(PATH_DIR):
for filename in os.listdir(os.path.join(PATH_DIR, version)):
# Loading the json tree
tree = None
with open(os.path.join(TREES_DIR, version, filename)) as f:
tree = json.load(f)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import json
import os
from craft_ai import (
errors,
extract_decision_paths_from_tree,
extract_decision_path_neighbors,
extract_output_tree,
)
and context:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# class CraftAiUnknownError(CraftAiError):
# class CraftAiNetworkError(CraftAiError):
# class CraftAiCredentialsError(CraftAiError):
# class CraftAiInternalError(CraftAiError):
# class CraftAiBadRequestError(CraftAiError):
# class CraftAiNotFoundError(CraftAiError):
# class CraftAiDecisionError(CraftAiError):
# class CraftAiNullDecisionError(CraftAiDecisionError):
# class CraftAiTimeError(CraftAiError):
# class CraftAiTokenError(CraftAiError):
# class CraftAiLongRequestTimeOutError(CraftAiError):
# def __init__(self, message=None, metadata=None):
# def __str__(self):
#
# Path: craft_ai/tree_utils.py
# def extract_decision_paths_from_tree(tree):
# """
# Retrieve all the decision paths from a tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# Returns: e.g. ['0', '0-0', '0-1']
# """
# return _get_paths(extract_output_tree(tree))
#
# def extract_decision_path_neighbors(
# tree, decision_path, max_depth=None, include_self=False
# ):
# """
# Retrieve neighbor of a decision path in a tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# decision_path: string tree path eg. "0-2-1".
# max_depth (int, optional): positive int filter neighbours on their depth,
# default is None.
# include_self (bool, optional): include the given decision_path to the neighbours,
# default is False.
# """
# paths = _get_paths(extract_output_tree(tree))
# if decision_path not in paths:
# raise CraftAiError(
# """Invalid decision path given. """
# """{} not found in tree""".format(decision_path)
# )
#
# dp_depth = len(decision_path.split("-"))
# neighbours = _get_neighbours(paths, decision_path)
# if max_depth is None:
# max_depth = dp_depth
# if max_depth < 0:
# raise CraftAiError(
# """Invalid max depth given: {} should be None or a positive integer """.format(
# max_depth
# )
# )
# filtered = [n for n in neighbours if len(n.split("-")) <= max_depth]
# if include_self:
# filtered.append(decision_path)
# return filtered
#
# def extract_output_tree(tree, output_property=None):
# """
# Extract the output decision tree specific for a given output property from a full decision tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# output_property (optional): If provided, the output property for which the tree predicts
# values, otherwise the first defined tree is retrieved.
# """
# if not isinstance(tree, dict):
# raise CraftAiError(
# """Unable to retrieve the output tree, """
# """the given decision tree format is not a valid, expected a 'dict' got a {}.""".format(
# type(tree)
# )
# )
# if "trees" not in tree:
# raise CraftAiError(
# """Unable to retrieve the output tree, """
# """the given decision tree format is not a valid, no 'trees' property defined."""
# )
# trees = tree["trees"]
# if not output_property:
# output_property = list(trees)[0]
# if output_property not in trees:
# raise CraftAiError(
# """'{}' output tree can't be found in the given decision tree.""".format(
# output_property
# )
# )
# tree = tree["trees"][output_property]
# return trees[output_property]
which might include code, classes, or functions. Output only the next line. | results = extract_decision_paths_from_tree(tree) |
Predict the next line for this snippet: <|code_start|> # Loading the json tree
tree = None
with open(os.path.join(TREES_DIR, version, filename)) as f:
tree = json.load(f)
results = extract_decision_paths_from_tree(tree)
# Loading the expectation for this tree
with open(os.path.join(PATH_DIR, version, filename)) as f:
expectation = json.load(f)
self.assertTrue(sorted(list(results)) == expectation)
def test_extract_decision_path_neighbors(self):
"""
Testing expectations
in data/interpreter/tree_computations_expectations/get_neighbours/
Trees located in data/interpreter/decide/trees/
"""
for version in os.listdir(NEIGHBOURS_DIR):
for filename in os.listdir(os.path.join(NEIGHBOURS_DIR, version)):
# Loading the json tree
with open(os.path.join(TREES_DIR, version, filename)) as f:
tree = json.load(f)
# Loading expectations for this tree
with open(os.path.join(NEIGHBOURS_DIR, version, filename)) as f:
expectations = json.load(f)
for expect in expectations:
<|code_end|>
with the help of current file imports:
import unittest
import json
import os
from craft_ai import (
errors,
extract_decision_paths_from_tree,
extract_decision_path_neighbors,
extract_output_tree,
)
and context from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# class CraftAiUnknownError(CraftAiError):
# class CraftAiNetworkError(CraftAiError):
# class CraftAiCredentialsError(CraftAiError):
# class CraftAiInternalError(CraftAiError):
# class CraftAiBadRequestError(CraftAiError):
# class CraftAiNotFoundError(CraftAiError):
# class CraftAiDecisionError(CraftAiError):
# class CraftAiNullDecisionError(CraftAiDecisionError):
# class CraftAiTimeError(CraftAiError):
# class CraftAiTokenError(CraftAiError):
# class CraftAiLongRequestTimeOutError(CraftAiError):
# def __init__(self, message=None, metadata=None):
# def __str__(self):
#
# Path: craft_ai/tree_utils.py
# def extract_decision_paths_from_tree(tree):
# """
# Retrieve all the decision paths from a tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# Returns: e.g. ['0', '0-0', '0-1']
# """
# return _get_paths(extract_output_tree(tree))
#
# def extract_decision_path_neighbors(
# tree, decision_path, max_depth=None, include_self=False
# ):
# """
# Retrieve neighbor of a decision path in a tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# decision_path: string tree path eg. "0-2-1".
# max_depth (int, optional): positive int filter neighbours on their depth,
# default is None.
# include_self (bool, optional): include the given decision_path to the neighbours,
# default is False.
# """
# paths = _get_paths(extract_output_tree(tree))
# if decision_path not in paths:
# raise CraftAiError(
# """Invalid decision path given. """
# """{} not found in tree""".format(decision_path)
# )
#
# dp_depth = len(decision_path.split("-"))
# neighbours = _get_neighbours(paths, decision_path)
# if max_depth is None:
# max_depth = dp_depth
# if max_depth < 0:
# raise CraftAiError(
# """Invalid max depth given: {} should be None or a positive integer """.format(
# max_depth
# )
# )
# filtered = [n for n in neighbours if len(n.split("-")) <= max_depth]
# if include_self:
# filtered.append(decision_path)
# return filtered
#
# def extract_output_tree(tree, output_property=None):
# """
# Extract the output decision tree specific for a given output property from a full decision tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# output_property (optional): If provided, the output property for which the tree predicts
# values, otherwise the first defined tree is retrieved.
# """
# if not isinstance(tree, dict):
# raise CraftAiError(
# """Unable to retrieve the output tree, """
# """the given decision tree format is not a valid, expected a 'dict' got a {}.""".format(
# type(tree)
# )
# )
# if "trees" not in tree:
# raise CraftAiError(
# """Unable to retrieve the output tree, """
# """the given decision tree format is not a valid, no 'trees' property defined."""
# )
# trees = tree["trees"]
# if not output_property:
# output_property = list(trees)[0]
# if output_property not in trees:
# raise CraftAiError(
# """'{}' output tree can't be found in the given decision tree.""".format(
# output_property
# )
# )
# tree = tree["trees"][output_property]
# return trees[output_property]
, which may contain function names, class names, or code. Output only the next line. | result = extract_decision_path_neighbors( |
Predict the next line after this snippet: <|code_start|>
HERE = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.join(HERE, "data", "interpreter")
TREES_DIR = os.path.join(DATA_DIR, "decide", "trees")
PATH_DIR = os.path.join(DATA_DIR, "tree_computations_expectations", "get_paths")
NEIGHBOURS_DIR = os.path.join(
DATA_DIR, "tree_computations_expectations", "get_neighbours"
)
class TestTreeUtils(unittest.TestCase):
def test_extract_output_tree_invalid_tree_1(self):
path = os.path.join(DATA_DIR, "decide", "trees", "v1", "emptyArray.json")
tree = None
with open(path) as f:
tree = json.load(f)
<|code_end|>
using the current file's imports:
import unittest
import json
import os
from craft_ai import (
errors,
extract_decision_paths_from_tree,
extract_decision_path_neighbors,
extract_output_tree,
)
and any relevant context from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# class CraftAiUnknownError(CraftAiError):
# class CraftAiNetworkError(CraftAiError):
# class CraftAiCredentialsError(CraftAiError):
# class CraftAiInternalError(CraftAiError):
# class CraftAiBadRequestError(CraftAiError):
# class CraftAiNotFoundError(CraftAiError):
# class CraftAiDecisionError(CraftAiError):
# class CraftAiNullDecisionError(CraftAiDecisionError):
# class CraftAiTimeError(CraftAiError):
# class CraftAiTokenError(CraftAiError):
# class CraftAiLongRequestTimeOutError(CraftAiError):
# def __init__(self, message=None, metadata=None):
# def __str__(self):
#
# Path: craft_ai/tree_utils.py
# def extract_decision_paths_from_tree(tree):
# """
# Retrieve all the decision paths from a tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# Returns: e.g. ['0', '0-0', '0-1']
# """
# return _get_paths(extract_output_tree(tree))
#
# def extract_decision_path_neighbors(
# tree, decision_path, max_depth=None, include_self=False
# ):
# """
# Retrieve neighbor of a decision path in a tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# decision_path: string tree path eg. "0-2-1".
# max_depth (int, optional): positive int filter neighbours on their depth,
# default is None.
# include_self (bool, optional): include the given decision_path to the neighbours,
# default is False.
# """
# paths = _get_paths(extract_output_tree(tree))
# if decision_path not in paths:
# raise CraftAiError(
# """Invalid decision path given. """
# """{} not found in tree""".format(decision_path)
# )
#
# dp_depth = len(decision_path.split("-"))
# neighbours = _get_neighbours(paths, decision_path)
# if max_depth is None:
# max_depth = dp_depth
# if max_depth < 0:
# raise CraftAiError(
# """Invalid max depth given: {} should be None or a positive integer """.format(
# max_depth
# )
# )
# filtered = [n for n in neighbours if len(n.split("-")) <= max_depth]
# if include_self:
# filtered.append(decision_path)
# return filtered
#
# def extract_output_tree(tree, output_property=None):
# """
# Extract the output decision tree specific for a given output property from a full decision tree.
#
# This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
#
# Parameters:
# tree: A tree.
# output_property (optional): If provided, the output property for which the tree predicts
# values, otherwise the first defined tree is retrieved.
# """
# if not isinstance(tree, dict):
# raise CraftAiError(
# """Unable to retrieve the output tree, """
# """the given decision tree format is not a valid, expected a 'dict' got a {}.""".format(
# type(tree)
# )
# )
# if "trees" not in tree:
# raise CraftAiError(
# """Unable to retrieve the output tree, """
# """the given decision tree format is not a valid, no 'trees' property defined."""
# )
# trees = tree["trees"]
# if not output_property:
# output_property = list(trees)[0]
# if output_property not in trees:
# raise CraftAiError(
# """'{}' output tree can't be found in the given decision tree.""".format(
# output_property
# )
# )
# tree = tree["trees"][output_property]
# return trees[output_property]
. Output only the next line. | self.assertRaises(errors.CraftAiError, extract_output_tree, tree) |
Predict the next line for this snippet: <|code_start|># To avoid conflicts between python's own 'time' and this 'time.py'
# cf. https://stackoverflow.com/a/28854227
from __future__ import absolute_import
_EPOCH = datetime(1970, 1, 1, tzinfo=pyutc)
class Time(object):
"""Handles time in a useful way for craft ai's client"""
def __init__(self, t=None, timezone=None):
def time_from_datetime_timestamp_and_timezone(timestamp, timezone):
# Handle when datetime already provides timezone :
# datetime(2012, 9, 12, 6, 0, 0, tzinfo=pytz.utc)
result = timestamp
if (result.tzinfo is None) and (not timezone):
# Handle this format :
# Time(datetime(2011, 1, 1, 0, 0), timezone=None)
<|code_end|>
with the help of current file imports:
import time
from datetime import datetime, tzinfo, timedelta
from pytz import utc as pyutc
from tzlocal import get_localzone
from dateutil.parser import isoparse
from craft_ai.errors import CraftAiTimeError
from craft_ai.timezones import is_timezone, timezone_offset_in_sec
and context from other files:
# Path: craft_ai/errors.py
# class CraftAiTimeError(CraftAiError):
# """An error occured during the creation of a craft_ai.Time instance."""
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
#
# def timezone_offset_in_sec(timezone):
# if isinstance(timezone, int):
# # If the offset belongs to [-15, 15] it is considered to represent hours.
# # This reproduces Moment's utcOffset behaviour.
# if timezone > -16 and timezone < 16:
# return timezone * 60 * 60
# return timezone * 60
# if timezone in TIMEZONES:
# timezone = TIMEZONES[timezone]
# if len(timezone) > 3:
# timezone = timezone.replace(":", "")
# offset = (int(timezone[-4:-2]) * 60 + int(timezone[-2:])) * 60
# else:
# offset = (int(timezone[-2:]) * 60) * 60
#
# if timezone[0] == "-":
# offset = -offset
#
# return offset
, which may contain function names, class names, or code. Output only the next line. | raise CraftAiTimeError("You must provide at least one timezone") |
Using the snippet: <|code_start|> # Handle format like : Time().timezone
_time = datetime.fromtimestamp(timestamp, get_localzone())
except (OverflowError, OSError) as e:
raise CraftAiTimeError(
"""Unable to instantiate Time from given timestamp. {}""".format(
e.__str__()
)
)
# If a timezone is specified we can try to use it
if timezone:
# Handle this type of datetime format : Time(1356998400, timezone="+0100")
_time = set_timezone(_time, timezone)
elif isinstance(timestamp, datetime):
_time = time_from_datetime_timestamp_and_timezone(timestamp, timezone)
elif isinstance(timestamp, str):
_time = time_from_string_timestamp_and_timezone(timestamp, timezone)
else:
raise CraftAiTimeError(
"""Unable to instantiate Time from given timestamp."""
""" It must be integer or string."""
)
return _time
def set_timezone(timestamp, timezone):
if isinstance(timezone, tzinfo):
# If it's already a timezone object, no more work is needed
_time = timestamp.astimezone(timezone)
<|code_end|>
, determine the next line of code. You have imports:
import time
from datetime import datetime, tzinfo, timedelta
from pytz import utc as pyutc
from tzlocal import get_localzone
from dateutil.parser import isoparse
from craft_ai.errors import CraftAiTimeError
from craft_ai.timezones import is_timezone, timezone_offset_in_sec
and context (class names, function names, or code) available:
# Path: craft_ai/errors.py
# class CraftAiTimeError(CraftAiError):
# """An error occured during the creation of a craft_ai.Time instance."""
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
#
# def timezone_offset_in_sec(timezone):
# if isinstance(timezone, int):
# # If the offset belongs to [-15, 15] it is considered to represent hours.
# # This reproduces Moment's utcOffset behaviour.
# if timezone > -16 and timezone < 16:
# return timezone * 60 * 60
# return timezone * 60
# if timezone in TIMEZONES:
# timezone = TIMEZONES[timezone]
# if len(timezone) > 3:
# timezone = timezone.replace(":", "")
# offset = (int(timezone[-4:-2]) * 60 + int(timezone[-2:])) * 60
# else:
# offset = (int(timezone[-2:]) * 60) * 60
#
# if timezone[0] == "-":
# offset = -offset
#
# return offset
. Output only the next line. | elif is_timezone(timezone): |
Using the snippet: <|code_start|> except (OverflowError, OSError) as e:
raise CraftAiTimeError(
"""Unable to instantiate Time from given timestamp. {}""".format(
e.__str__()
)
)
# If a timezone is specified we can try to use it
if timezone:
# Handle this type of datetime format : Time(1356998400, timezone="+0100")
_time = set_timezone(_time, timezone)
elif isinstance(timestamp, datetime):
_time = time_from_datetime_timestamp_and_timezone(timestamp, timezone)
elif isinstance(timestamp, str):
_time = time_from_string_timestamp_and_timezone(timestamp, timezone)
else:
raise CraftAiTimeError(
"""Unable to instantiate Time from given timestamp."""
""" It must be integer or string."""
)
return _time
def set_timezone(timestamp, timezone):
if isinstance(timezone, tzinfo):
# If it's already a timezone object, no more work is needed
_time = timestamp.astimezone(timezone)
elif is_timezone(timezone):
# If it's a string, we convert it to a usable timezone object
<|code_end|>
, determine the next line of code. You have imports:
import time
from datetime import datetime, tzinfo, timedelta
from pytz import utc as pyutc
from tzlocal import get_localzone
from dateutil.parser import isoparse
from craft_ai.errors import CraftAiTimeError
from craft_ai.timezones import is_timezone, timezone_offset_in_sec
and context (class names, function names, or code) available:
# Path: craft_ai/errors.py
# class CraftAiTimeError(CraftAiError):
# """An error occured during the creation of a craft_ai.Time instance."""
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
#
# def timezone_offset_in_sec(timezone):
# if isinstance(timezone, int):
# # If the offset belongs to [-15, 15] it is considered to represent hours.
# # This reproduces Moment's utcOffset behaviour.
# if timezone > -16 and timezone < 16:
# return timezone * 60 * 60
# return timezone * 60
# if timezone in TIMEZONES:
# timezone = TIMEZONES[timezone]
# if len(timezone) > 3:
# timezone = timezone.replace(":", "")
# offset = (int(timezone[-4:-2]) * 60 + int(timezone[-2:])) * 60
# else:
# offset = (int(timezone[-2:]) * 60) * 60
#
# if timezone[0] == "-":
# offset = -offset
#
# return offset
. Output only the next line. | offset = timezone_offset_in_sec(timezone) |
Predict the next line for this snippet: <|code_start|>
JWT_IO_EXAMPLE = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ" # noqa: E501
class TestJwtDecode(unittest.TestCase):
def test_decode_works(self):
<|code_end|>
with the help of current file imports:
import unittest
from craft_ai.jwt_decode import jwt_decode
from craft_ai.errors import CraftAiTokenError
and context from other files:
# Path: craft_ai/jwt_decode.py
# def jwt_decode(jwt):
# if isinstance(jwt, str):
# jwt = jwt.encode("utf-8")
#
# if not issubclass(type(jwt), bytes):
# raise CraftAiTokenError(
# "Invalid token type, the token must be a {0}".format(bytes)
# )
#
# try:
# signing_input, crypto_segment = jwt.strip().rsplit(b".", 1)
# header_segment, payload_segment = signing_input.split(b".", 1)
# except ValueError:
# raise CraftAiTokenError("Not enough segments")
#
# try:
# header_data = base64url_decode(header_segment)
# except (TypeError, binascii.Error):
# raise CraftAiTokenError("Invalid header padding")
#
# try:
# header = json.loads(header_data.decode("utf-8"))
# except ValueError as e:
# raise CraftAiTokenError("Invalid header string '%s'" % e)
#
# try:
# payload_data = base64url_decode(payload_segment)
# except (TypeError, binascii.Error):
# raise CraftAiTokenError("Unable to decode the payload segment of the token")
#
# try:
# payload = json.loads(payload_data.decode("utf-8"))
# except ValueError:
# # Unable to load the payload as a json
# payload = payload_data
#
# try:
# signature = base64url_decode(crypto_segment)
# except (TypeError, binascii.Error):
# raise CraftAiTokenError("Unable to decode the crypto segment of the token")
#
# return (payload, signing_input, header, signature)
#
# Path: craft_ai/errors.py
# class CraftAiTokenError(CraftAiError):
# """An invalid token error occured."""
, which may contain function names, class names, or code. Output only the next line. | (payload, _, header, _) = jwt_decode(JWT_IO_EXAMPLE) |
Predict the next line after this snippet: <|code_start|>
JWT_IO_EXAMPLE = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ" # noqa: E501
class TestJwtDecode(unittest.TestCase):
def test_decode_works(self):
(payload, _, header, _) = jwt_decode(JWT_IO_EXAMPLE)
self.assertEqual(header, {"alg": "HS256", "typ": "JWT"})
self.assertEqual(
payload, {"sub": "1234567890", "name": "John Doe", "admin": True}
)
def test_decode_works_with_spaces(self):
(payload, _, header, _) = jwt_decode(" " + JWT_IO_EXAMPLE + " ")
self.assertEqual(header, {"alg": "HS256", "typ": "JWT"})
self.assertEqual(
payload, {"sub": "1234567890", "name": "John Doe", "admin": True}
)
def test_decode_fails_properly(self):
<|code_end|>
using the current file's imports:
import unittest
from craft_ai.jwt_decode import jwt_decode
from craft_ai.errors import CraftAiTokenError
and any relevant context from other files:
# Path: craft_ai/jwt_decode.py
# def jwt_decode(jwt):
# if isinstance(jwt, str):
# jwt = jwt.encode("utf-8")
#
# if not issubclass(type(jwt), bytes):
# raise CraftAiTokenError(
# "Invalid token type, the token must be a {0}".format(bytes)
# )
#
# try:
# signing_input, crypto_segment = jwt.strip().rsplit(b".", 1)
# header_segment, payload_segment = signing_input.split(b".", 1)
# except ValueError:
# raise CraftAiTokenError("Not enough segments")
#
# try:
# header_data = base64url_decode(header_segment)
# except (TypeError, binascii.Error):
# raise CraftAiTokenError("Invalid header padding")
#
# try:
# header = json.loads(header_data.decode("utf-8"))
# except ValueError as e:
# raise CraftAiTokenError("Invalid header string '%s'" % e)
#
# try:
# payload_data = base64url_decode(payload_segment)
# except (TypeError, binascii.Error):
# raise CraftAiTokenError("Unable to decode the payload segment of the token")
#
# try:
# payload = json.loads(payload_data.decode("utf-8"))
# except ValueError:
# # Unable to load the payload as a json
# payload = payload_data
#
# try:
# signature = base64url_decode(crypto_segment)
# except (TypeError, binascii.Error):
# raise CraftAiTokenError("Unable to decode the crypto segment of the token")
#
# return (payload, signing_input, header, signature)
#
# Path: craft_ai/errors.py
# class CraftAiTokenError(CraftAiError):
# """An invalid token error occured."""
. Output only the next line. | self.assertRaises(CraftAiTokenError, jwt_decode, "not a jwt") |
Here is a snippet: <|code_start|> def test_reduce_decision_rules_tests_generator(self):
expectations_files = os.listdir(EXPECTATIONS_DIR)
for expectations_file in expectations_files:
if os.path.splitext(expectations_file)[1] == ".json":
with open(os.path.join(EXPECTATIONS_DIR, expectations_file)) as f:
expectations = json.load(f)
for expectation in expectations:
self.assertTrue(
"title" in expectation,
"Invalid expectation from '{}': missing \"title\".".format(
expectations_file
),
)
self.assertTrue(
"rules" in expectation and "expectation" in expectation,
'Invalid expectation from \'{}\': missing "rules" or "expectation".'.format(
expectations_file
),
)
for expectation in expectations:
with self.subTest():
self.check_expectation(
expectation["rules"], expectation["expectation"]
)
def check_expectation(self, rules, expectation):
if "error" in expectation:
<|code_end|>
. Write the next line using the current file imports:
import unittest
import json
import os
from craft_ai import reduce_decision_rules, errors
and context from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# class CraftAiUnknownError(CraftAiError):
# class CraftAiNetworkError(CraftAiError):
# class CraftAiCredentialsError(CraftAiError):
# class CraftAiInternalError(CraftAiError):
# class CraftAiBadRequestError(CraftAiError):
# class CraftAiNotFoundError(CraftAiError):
# class CraftAiDecisionError(CraftAiError):
# class CraftAiNullDecisionError(CraftAiDecisionError):
# class CraftAiTimeError(CraftAiError):
# class CraftAiTokenError(CraftAiError):
# class CraftAiLongRequestTimeOutError(CraftAiError):
# def __init__(self, message=None, metadata=None):
# def __str__(self):
#
# Path: craft_ai/reducer.py
# def reduce_decision_rules(rules):
# properties = _unique_seq([rule["property"] for rule in rules])
# return [
# ft_reduce(
# _decision_rules_reducer, [rule for rule in rules if rule["property"] == p]
# )
# for p in properties
# ]
, which may include functions, classes, or code. Output only the next line. | self.assertRaises(errors.CraftAiError, reduce_decision_rules, rules) |
Given snippet: <|code_start|> def test_reduce_decision_rules_tests_generator(self):
expectations_files = os.listdir(EXPECTATIONS_DIR)
for expectations_file in expectations_files:
if os.path.splitext(expectations_file)[1] == ".json":
with open(os.path.join(EXPECTATIONS_DIR, expectations_file)) as f:
expectations = json.load(f)
for expectation in expectations:
self.assertTrue(
"title" in expectation,
"Invalid expectation from '{}': missing \"title\".".format(
expectations_file
),
)
self.assertTrue(
"rules" in expectation and "expectation" in expectation,
'Invalid expectation from \'{}\': missing "rules" or "expectation".'.format(
expectations_file
),
)
for expectation in expectations:
with self.subTest():
self.check_expectation(
expectation["rules"], expectation["expectation"]
)
def check_expectation(self, rules, expectation):
if "error" in expectation:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import json
import os
from craft_ai import reduce_decision_rules, errors
and context:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# class CraftAiUnknownError(CraftAiError):
# class CraftAiNetworkError(CraftAiError):
# class CraftAiCredentialsError(CraftAiError):
# class CraftAiInternalError(CraftAiError):
# class CraftAiBadRequestError(CraftAiError):
# class CraftAiNotFoundError(CraftAiError):
# class CraftAiDecisionError(CraftAiError):
# class CraftAiNullDecisionError(CraftAiDecisionError):
# class CraftAiTimeError(CraftAiError):
# class CraftAiTokenError(CraftAiError):
# class CraftAiLongRequestTimeOutError(CraftAiError):
# def __init__(self, message=None, metadata=None):
# def __str__(self):
#
# Path: craft_ai/reducer.py
# def reduce_decision_rules(rules):
# properties = _unique_seq([rule["property"] for rule in rules])
# return [
# ft_reduce(
# _decision_rules_reducer, [rule for rule in rules if rule["property"] == p]
# )
# for p in properties
# ]
which might include code, classes, or functions. Output only the next line. | self.assertRaises(errors.CraftAiError, reduce_decision_rules, rules) |
Predict the next line after this snippet: <|code_start|> return "{} to {}".format(formatted_month_from, formatted_month_to)
def _gte_formatter(property_name, operand, operand_formatter):
if property_name:
return "'{}' >= {}".format(property_name, operand_formatter(operand))
return ">= {}".format(operand_formatter(operand))
def _lt_formatter(property_name, operand, operand_formatter):
if property_name:
return "'{}' < {}".format(property_name, operand_formatter(operand))
return "< {}".format(operand_formatter(operand))
FORMATTER_FROM_DECISION_RULE = {
OPERATORS["IS"]: {TYPE_ANY: _is_formatter},
OPERATORS["IN_INTERVAL"]: {
TYPE_ANY: _in_formatter,
TYPES["day_of_week"]: _in_day_of_week_formatter,
TYPES["day_of_month"]: _in_formatter,
TYPES["month_of_year"]: _in_month_of_year_formatter,
},
OPERATORS["GTE"]: {TYPE_ANY: _gte_formatter},
OPERATORS["LT"]: {TYPE_ANY: _lt_formatter},
}
def _format_decision_rule(rule):
if rule["operator"] not in FORMATTER_FROM_DECISION_RULE:
<|code_end|>
using the current file's imports:
import datetime
import math
from craft_ai.errors import CraftAiError
from craft_ai.operators import OPERATORS
from craft_ai.types import TYPES, TYPE_ANY
and any relevant context from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# TYPE_ANY = "any"
. Output only the next line. | raise CraftAiError( |
Continue the code snippet: <|code_start|>
if (month_to - month_from == 1) or (month_from == 12 and month_to == 1):
# One month in the interval
if property_name:
return "'{}' is {}".format(property_name, formatted_month_from)
return formatted_month_from
formatted_month_to = operand_formatter((12 + month_to - 1) % 12)
if property_name:
return "'{}' from {} to {}".format(
property_name, formatted_month_from, formatted_month_to
)
return "{} to {}".format(formatted_month_from, formatted_month_to)
def _gte_formatter(property_name, operand, operand_formatter):
if property_name:
return "'{}' >= {}".format(property_name, operand_formatter(operand))
return ">= {}".format(operand_formatter(operand))
def _lt_formatter(property_name, operand, operand_formatter):
if property_name:
return "'{}' < {}".format(property_name, operand_formatter(operand))
return "< {}".format(operand_formatter(operand))
FORMATTER_FROM_DECISION_RULE = {
<|code_end|>
. Use current file imports:
import datetime
import math
from craft_ai.errors import CraftAiError
from craft_ai.operators import OPERATORS
from craft_ai.types import TYPES, TYPE_ANY
and context (classes, functions, or code) from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# TYPE_ANY = "any"
. Output only the next line. | OPERATORS["IS"]: {TYPE_ANY: _is_formatter}, |
Next line prediction: <|code_start|> "Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
def _time_formatter(time):
if isinstance(time, datetime.datetime):
if time.second == 0:
return time.strftime("%H:%M")
return time.strftime("%H:%M:%S")
else:
hours = int(math.floor(time))
dec_minutes = (time - hours) * 60
minutes = int(math.floor(dec_minutes))
seconds = int(math.floor((dec_minutes - minutes) * 60))
if seconds > 0:
return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds)
return "{:02d}:{:02d}".format(hours, minutes)
PROPERTY_FORMATTER = {
TYPE_ANY: lambda value: value,
<|code_end|>
. Use current file imports:
(import datetime
import math
from craft_ai.errors import CraftAiError
from craft_ai.operators import OPERATORS
from craft_ai.types import TYPES, TYPE_ANY)
and context including class names, function names, or small code snippets from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# TYPE_ANY = "any"
. Output only the next line. | TYPES["continuous"]: lambda number: "{:.2f}".format(number).rstrip("0").rstrip("."), |
Given the code snippet: <|code_start|> "Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
def _time_formatter(time):
if isinstance(time, datetime.datetime):
if time.second == 0:
return time.strftime("%H:%M")
return time.strftime("%H:%M:%S")
else:
hours = int(math.floor(time))
dec_minutes = (time - hours) * 60
minutes = int(math.floor(dec_minutes))
seconds = int(math.floor((dec_minutes - minutes) * 60))
if seconds > 0:
return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds)
return "{:02d}:{:02d}".format(hours, minutes)
PROPERTY_FORMATTER = {
<|code_end|>
, generate the next line using the imports in this file:
import datetime
import math
from craft_ai.errors import CraftAiError
from craft_ai.operators import OPERATORS
from craft_ai.types import TYPES, TYPE_ANY
and context (functions, classes, or occasionally code) from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# TYPE_ANY = "any"
. Output only the next line. | TYPE_ANY: lambda value: value, |
Here is a snippet: <|code_start|>
# Initial code retrieved from PyJWT
# cf. https://github.com/jpadilla/pyjwt/blob/ceff941/jwt/utils.py#L33-L42
def base64url_decode(base64_input):
if isinstance(base64_input, str):
base64_input = base64_input.encode("ascii")
rem = len(base64_input) % 4
if rem > 0:
base64_input += b"=" * (4 - rem)
return base64.urlsafe_b64decode(base64_input)
# Code inspired by PyJWT
# cf. https://github.com/jpadilla/pyjwt/blob/ceff941/jwt/api_jws.py#L144-L181
def jwt_decode(jwt):
if isinstance(jwt, str):
jwt = jwt.encode("utf-8")
if not issubclass(type(jwt), bytes):
<|code_end|>
. Write the next line using the current file imports:
import base64
import binascii
import json
from craft_ai.errors import CraftAiTokenError
and context from other files:
# Path: craft_ai/errors.py
# class CraftAiTokenError(CraftAiError):
# """An invalid token error occured."""
, which may include functions, classes, or code. Output only the next line. | raise CraftAiTokenError( |
Next line prediction: <|code_start|> return leaf
# Finding the first element in this node's childrens matching the
# operator condition with given context
matching_child = InterpreterV1._find_matching_child(node, context)
if not matching_child:
prop = node.get("children")[0].get("decision_rule").get("property")
operand_list = [
child["decision_rule"]["operand"] for child in node["children"]
]
decision_rule = (
[node["decision_rule"]] if not node.get("decision_rule") is None else []
)
raise CraftAiNullDecisionError(
"""Unable to take decision: value '{}' for property '{}' doesn't"""
""" validate any of the decision rules.""".format(
context.get(prop), prop
),
{
"decision_rules": decision_rule,
"expected_values": operand_list,
"property": prop,
"value": context.get(prop),
},
)
# If a matching child is found, recurse
try:
result = InterpreterV1._decide_recursion(matching_child, context)
<|code_end|>
. Use current file imports:
(import numbers
from craft_ai.errors import CraftAiDecisionError, CraftAiNullDecisionError
from craft_ai.operators import (
OPERATORS_V1 as OPERATORS,
OPERATORS_FUNCTION_V1 as OPERATORS_FUNCTION,
)
from craft_ai.types import TYPES
from craft_ai.timezones import is_timezone)
and context including class names, function names, or small code snippets from other files:
# Path: craft_ai/errors.py
# class CraftAiDecisionError(CraftAiError):
# """An error occured during the decision phase."""
#
# class CraftAiNullDecisionError(CraftAiDecisionError):
# """An error occured during the decision phase."""
#
# Path: craft_ai/operators.py
# OPERATORS_V1 = {
# "IS": "is",
# "IN_INTERVAL": "[in[",
# "GTE": ">=",
# "LT": "<",
# }
#
# OPERATORS_FUNCTION_V1 = {
# OPERATORS_V1["IS"]: lambda context, value: context == value,
# OPERATORS_V1["GTE"]: lambda context, value: safe_op(context, value, GTE),
# OPERATORS_V1["LT"]: lambda context, value: safe_op(context, value, LT),
# OPERATORS_V1["IN_INTERVAL"]: lambda context, value: safe_op(context, value[0], GTE)
# and safe_op(context, value[1], LT)
# if safe_op(value[0], value[1], LT)
# else safe_op(context, value[0], GTE) or safe_op(context, value[1], LT),
# }
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
. Output only the next line. | except CraftAiDecisionError as err: |
Based on the snippet: <|code_start|> ),
TYPES["day_of_week"]: lambda value: (
isinstance(value, int) and value >= 0 and value <= 6
),
TYPES["day_of_month"]: lambda value: (
isinstance(value, int) and value >= 1 and value <= 31
),
TYPES["month_of_year"]: lambda value: (
isinstance(value, int) and value >= 1 and value <= 12
),
}
############################
# Interpreter for V1 Trees #
############################
class InterpreterV1(object):
@staticmethod
def decide(configuration, bare_tree, context):
InterpreterV1._check_context(configuration, context)
decision_result = {}
decision_result["output"] = {}
for output in configuration.get("output"):
root = bare_tree[output]
if not ("children" in root and len(root.get("children"))):
predicted_value = root.get("predicted_value")
if predicted_value is None:
<|code_end|>
, predict the immediate next line with the help of imports:
import numbers
from craft_ai.errors import CraftAiDecisionError, CraftAiNullDecisionError
from craft_ai.operators import (
OPERATORS_V1 as OPERATORS,
OPERATORS_FUNCTION_V1 as OPERATORS_FUNCTION,
)
from craft_ai.types import TYPES
from craft_ai.timezones import is_timezone
and context (classes, functions, sometimes code) from other files:
# Path: craft_ai/errors.py
# class CraftAiDecisionError(CraftAiError):
# """An error occured during the decision phase."""
#
# class CraftAiNullDecisionError(CraftAiDecisionError):
# """An error occured during the decision phase."""
#
# Path: craft_ai/operators.py
# OPERATORS_V1 = {
# "IS": "is",
# "IN_INTERVAL": "[in[",
# "GTE": ">=",
# "LT": "<",
# }
#
# OPERATORS_FUNCTION_V1 = {
# OPERATORS_V1["IS"]: lambda context, value: context == value,
# OPERATORS_V1["GTE"]: lambda context, value: safe_op(context, value, GTE),
# OPERATORS_V1["LT"]: lambda context, value: safe_op(context, value, LT),
# OPERATORS_V1["IN_INTERVAL"]: lambda context, value: safe_op(context, value[0], GTE)
# and safe_op(context, value[1], LT)
# if safe_op(value[0], value[1], LT)
# else safe_op(context, value[0], GTE) or safe_op(context, value[1], LT),
# }
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
. Output only the next line. | raise CraftAiNullDecisionError( |
Continue the code snippet: <|code_start|> final_result["standard_deviation"] = result.get("standard_deviation")
return final_result
@staticmethod
def _find_matching_child(node, context):
for child in node["children"]:
property_name = child["decision_rule"]["property"]
operand = child["decision_rule"]["operand"]
operator = child["decision_rule"]["operator"]
context_value = context.get(property_name)
# If there is no context value:
if context_value is None:
raise CraftAiDecisionError(
"""Unable to take decision, """
"""property '{}' is missing from the given context.""".format(
property_name
)
)
if not isinstance(operator, str) or operator not in OPERATORS.values():
raise CraftAiDecisionError(
"""Invalid decision tree format, {} is not a valid"""
""" decision operator.""".format(operator)
)
# To be compared, continuous parameters should not be strings
if TYPES["continuous"] in operator:
context_value = float(context_value)
operand = float(operand)
<|code_end|>
. Use current file imports:
import numbers
from craft_ai.errors import CraftAiDecisionError, CraftAiNullDecisionError
from craft_ai.operators import (
OPERATORS_V1 as OPERATORS,
OPERATORS_FUNCTION_V1 as OPERATORS_FUNCTION,
)
from craft_ai.types import TYPES
from craft_ai.timezones import is_timezone
and context (classes, functions, or code) from other files:
# Path: craft_ai/errors.py
# class CraftAiDecisionError(CraftAiError):
# """An error occured during the decision phase."""
#
# class CraftAiNullDecisionError(CraftAiDecisionError):
# """An error occured during the decision phase."""
#
# Path: craft_ai/operators.py
# OPERATORS_V1 = {
# "IS": "is",
# "IN_INTERVAL": "[in[",
# "GTE": ">=",
# "LT": "<",
# }
#
# OPERATORS_FUNCTION_V1 = {
# OPERATORS_V1["IS"]: lambda context, value: context == value,
# OPERATORS_V1["GTE"]: lambda context, value: safe_op(context, value, GTE),
# OPERATORS_V1["LT"]: lambda context, value: safe_op(context, value, LT),
# OPERATORS_V1["IN_INTERVAL"]: lambda context, value: safe_op(context, value[0], GTE)
# and safe_op(context, value[1], LT)
# if safe_op(value[0], value[1], LT)
# else safe_op(context, value[0], GTE) or safe_op(context, value[1], LT),
# }
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
. Output only the next line. | if OPERATORS_FUNCTION[operator](context_value, operand): |
Here is a snippet: <|code_start|>
_DECISION_VERSION = "1.1.0"
_VALUE_VALIDATORS = {
TYPES["continuous"]: lambda value: isinstance(value, numbers.Real),
TYPES["enum"]: lambda value: isinstance(value, str),
<|code_end|>
. Write the next line using the current file imports:
import numbers
from craft_ai.errors import CraftAiDecisionError, CraftAiNullDecisionError
from craft_ai.operators import (
OPERATORS_V1 as OPERATORS,
OPERATORS_FUNCTION_V1 as OPERATORS_FUNCTION,
)
from craft_ai.types import TYPES
from craft_ai.timezones import is_timezone
and context from other files:
# Path: craft_ai/errors.py
# class CraftAiDecisionError(CraftAiError):
# """An error occured during the decision phase."""
#
# class CraftAiNullDecisionError(CraftAiDecisionError):
# """An error occured during the decision phase."""
#
# Path: craft_ai/operators.py
# OPERATORS_V1 = {
# "IS": "is",
# "IN_INTERVAL": "[in[",
# "GTE": ">=",
# "LT": "<",
# }
#
# OPERATORS_FUNCTION_V1 = {
# OPERATORS_V1["IS"]: lambda context, value: context == value,
# OPERATORS_V1["GTE"]: lambda context, value: safe_op(context, value, GTE),
# OPERATORS_V1["LT"]: lambda context, value: safe_op(context, value, LT),
# OPERATORS_V1["IN_INTERVAL"]: lambda context, value: safe_op(context, value[0], GTE)
# and safe_op(context, value[1], LT)
# if safe_op(value[0], value[1], LT)
# else safe_op(context, value[0], GTE) or safe_op(context, value[1], LT),
# }
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
, which may include functions, classes, or code. Output only the next line. | TYPES["timezone"]: lambda value: is_timezone(value), |
Given the code snippet: <|code_start|> :return: error corresponding to the status code.
:rtype: Error.
"""
if status_code == 1007:
err = CraftAiBadRequestError(message)
elif status_code == 1003:
err = CraftAiNotFoundError(message)
elif status_code == 1002:
err = CraftAiInternalError(message)
else:
err = CraftAiUnknownError(message)
return err
def initialize_websocket(url, headers):
web_socket_event_loop = asyncio.get_event_loop()
try:
[protocol, uri] = url.split("://")
except ValueError:
raise CraftAiBadRequestError("Invalid url provided")
ws_protocol = ("wss://", "ws://")[protocol == "http"]
ws_url = ws_protocol + uri + "/api/ws"
try:
websocket = web_socket_event_loop.run_until_complete(
websockets.connect(
ws_url, subprotocols=["echo-protocol"], extra_headers=headers
)
)
except websockets.exceptions.InvalidStatusCode as e:
<|code_end|>
, generate the next line using the imports in this file:
import websockets
import asyncio
import json
from .errors import (
CraftAiCredentialsError,
CraftAiBadRequestError,
CraftAiNotFoundError,
CraftAiUnknownError,
CraftAiInternalError,
)
and context (functions, classes, or occasionally code) from other files:
# Path: craft_ai/errors.py
# class CraftAiCredentialsError(CraftAiError):
# """A credentials error occured."""
#
# class CraftAiBadRequestError(CraftAiError):
# """An unvalid request was send to craft ai's API."""
#
# class CraftAiNotFoundError(CraftAiError):
# """A Not Found Error (404) occured on craft ai's side."""
#
# class CraftAiUnknownError(CraftAiError):
# """An unknown error occured in the craft ai client."""
#
# class CraftAiInternalError(CraftAiError):
# """An Internal Server Error (500) ocurred on craft ai's side."""
. Output only the next line. | raise CraftAiCredentialsError(e) |
Predict the next line for this snippet: <|code_start|>
def get_error_from_websocket_status(status_code, message):
"""Give the error corresponding to the status code for websocket.
:param int status_code: status code of the response to
a request.
:param str message: error message given by the response.
:return: error corresponding to the status code.
:rtype: Error.
"""
if status_code == 1007:
<|code_end|>
with the help of current file imports:
import websockets
import asyncio
import json
from .errors import (
CraftAiCredentialsError,
CraftAiBadRequestError,
CraftAiNotFoundError,
CraftAiUnknownError,
CraftAiInternalError,
)
and context from other files:
# Path: craft_ai/errors.py
# class CraftAiCredentialsError(CraftAiError):
# """A credentials error occured."""
#
# class CraftAiBadRequestError(CraftAiError):
# """An unvalid request was send to craft ai's API."""
#
# class CraftAiNotFoundError(CraftAiError):
# """A Not Found Error (404) occured on craft ai's side."""
#
# class CraftAiUnknownError(CraftAiError):
# """An unknown error occured in the craft ai client."""
#
# class CraftAiInternalError(CraftAiError):
# """An Internal Server Error (500) ocurred on craft ai's side."""
, which may contain function names, class names, or code. Output only the next line. | err = CraftAiBadRequestError(message) |
Predict the next line after this snippet: <|code_start|>
def get_error_from_websocket_status(status_code, message):
"""Give the error corresponding to the status code for websocket.
:param int status_code: status code of the response to
a request.
:param str message: error message given by the response.
:return: error corresponding to the status code.
:rtype: Error.
"""
if status_code == 1007:
err = CraftAiBadRequestError(message)
elif status_code == 1003:
<|code_end|>
using the current file's imports:
import websockets
import asyncio
import json
from .errors import (
CraftAiCredentialsError,
CraftAiBadRequestError,
CraftAiNotFoundError,
CraftAiUnknownError,
CraftAiInternalError,
)
and any relevant context from other files:
# Path: craft_ai/errors.py
# class CraftAiCredentialsError(CraftAiError):
# """A credentials error occured."""
#
# class CraftAiBadRequestError(CraftAiError):
# """An unvalid request was send to craft ai's API."""
#
# class CraftAiNotFoundError(CraftAiError):
# """A Not Found Error (404) occured on craft ai's side."""
#
# class CraftAiUnknownError(CraftAiError):
# """An unknown error occured in the craft ai client."""
#
# class CraftAiInternalError(CraftAiError):
# """An Internal Server Error (500) ocurred on craft ai's side."""
. Output only the next line. | err = CraftAiNotFoundError(message) |
Given the following code snippet before the placeholder: <|code_start|>
def get_error_from_websocket_status(status_code, message):
"""Give the error corresponding to the status code for websocket.
:param int status_code: status code of the response to
a request.
:param str message: error message given by the response.
:return: error corresponding to the status code.
:rtype: Error.
"""
if status_code == 1007:
err = CraftAiBadRequestError(message)
elif status_code == 1003:
err = CraftAiNotFoundError(message)
elif status_code == 1002:
err = CraftAiInternalError(message)
else:
<|code_end|>
, predict the next line using imports from the current file:
import websockets
import asyncio
import json
from .errors import (
CraftAiCredentialsError,
CraftAiBadRequestError,
CraftAiNotFoundError,
CraftAiUnknownError,
CraftAiInternalError,
)
and context including class names, function names, and sometimes code from other files:
# Path: craft_ai/errors.py
# class CraftAiCredentialsError(CraftAiError):
# """A credentials error occured."""
#
# class CraftAiBadRequestError(CraftAiError):
# """An unvalid request was send to craft ai's API."""
#
# class CraftAiNotFoundError(CraftAiError):
# """A Not Found Error (404) occured on craft ai's side."""
#
# class CraftAiUnknownError(CraftAiError):
# """An unknown error occured in the craft ai client."""
#
# class CraftAiInternalError(CraftAiError):
# """An Internal Server Error (500) ocurred on craft ai's side."""
. Output only the next line. | err = CraftAiUnknownError(message) |
Given the following code snippet before the placeholder: <|code_start|>
def get_error_from_websocket_status(status_code, message):
"""Give the error corresponding to the status code for websocket.
:param int status_code: status code of the response to
a request.
:param str message: error message given by the response.
:return: error corresponding to the status code.
:rtype: Error.
"""
if status_code == 1007:
err = CraftAiBadRequestError(message)
elif status_code == 1003:
err = CraftAiNotFoundError(message)
elif status_code == 1002:
<|code_end|>
, predict the next line using imports from the current file:
import websockets
import asyncio
import json
from .errors import (
CraftAiCredentialsError,
CraftAiBadRequestError,
CraftAiNotFoundError,
CraftAiUnknownError,
CraftAiInternalError,
)
and context including class names, function names, and sometimes code from other files:
# Path: craft_ai/errors.py
# class CraftAiCredentialsError(CraftAiError):
# """A credentials error occured."""
#
# class CraftAiBadRequestError(CraftAiError):
# """An unvalid request was send to craft ai's API."""
#
# class CraftAiNotFoundError(CraftAiError):
# """A Not Found Error (404) occured on craft ai's side."""
#
# class CraftAiUnknownError(CraftAiError):
# """An unknown error occured in the craft ai client."""
#
# class CraftAiInternalError(CraftAiError):
# """An Internal Server Error (500) ocurred on craft ai's side."""
. Output only the next line. | err = CraftAiInternalError(message) |
Using the snippet: <|code_start|> if (
not isinstance(distribution, list)
and "standard_deviation" in distribution
):
leaf["standard_deviation"] = distribution.get("standard_deviation")
leaf["min"] = distribution.get("min")
leaf["max"] = distribution.get("max")
else:
leaf["distribution"] = distribution
return leaf
# Finding the first element in this node's childrens matching the
# operator condition with given context
matching_child_i, matching_child = InterpreterV2._find_matching_child(
node, context
)
# If there is no child corresponding matching the operators then we compute
# the probabilistic distribution from this node.
if not matching_child:
return InterpreterV2.compute_distribution(
node, output_values, output_type, path
)
# Add the matching child index to the path
path.append(str(matching_child_i))
# If a matching child is found, recurse
try:
result = InterpreterV2._decide_recursion(
matching_child, context, output_values, output_type, path
)
<|code_end|>
, determine the next line of code. You have imports:
import math
import numbers
from craft_ai.errors import CraftAiDecisionError, CraftAiNullDecisionError
from craft_ai.operators import OPERATORS, OPERATORS_FUNCTION
from craft_ai.types import TYPES
from craft_ai.timezones import is_timezone
and context (class names, function names, or code) available:
# Path: craft_ai/errors.py
# class CraftAiDecisionError(CraftAiError):
# """An error occured during the decision phase."""
#
# class CraftAiNullDecisionError(CraftAiDecisionError):
# """An error occured during the decision phase."""
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
#
# OPERATORS_FUNCTION = OPERATORS_FUNCTION_V2
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
. Output only the next line. | except CraftAiDecisionError as err: |
Given the code snippet: <|code_start|> ),
TYPES["month_of_year"]: lambda value: (
isinstance(value, int) and value >= 1 and value <= 12
),
}
############################
# Interpreter for V2 Trees #
############################
class InterpreterV2(object):
@staticmethod
def decide(configuration, bare_tree, context):
InterpreterV2._check_context(configuration, context)
decision_result = {}
decision_result["output"] = {}
for output in configuration.get("output"):
output_type = configuration["context"][output]["type"]
root = bare_tree[output]
if not ("children" in root and len(root.get("children"))):
# We check if a leaf has the key 'prediction' corresponging to a v2 tree
prediction = root.get("prediction")
if prediction is None:
prediction = root
predicted_value = prediction.get("value")
if predicted_value is None:
<|code_end|>
, generate the next line using the imports in this file:
import math
import numbers
from craft_ai.errors import CraftAiDecisionError, CraftAiNullDecisionError
from craft_ai.operators import OPERATORS, OPERATORS_FUNCTION
from craft_ai.types import TYPES
from craft_ai.timezones import is_timezone
and context (functions, classes, or occasionally code) from other files:
# Path: craft_ai/errors.py
# class CraftAiDecisionError(CraftAiError):
# """An error occured during the decision phase."""
#
# class CraftAiNullDecisionError(CraftAiDecisionError):
# """An error occured during the decision phase."""
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
#
# OPERATORS_FUNCTION = OPERATORS_FUNCTION_V2
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
. Output only the next line. | raise CraftAiNullDecisionError( |
Predict the next line after this snippet: <|code_start|> new_variance = None
new_mean = None
new_size = None
for mean, std, size in zip(values, stds, sizes):
variance = std * std
if new_mean is None:
new_variance = variance
new_mean = mean
new_size = size
continue
total_size = 1.0 * size + new_size
if total_size == 0:
continue
new_variance = (1.0 / (total_size - 1.0)) * (
(size - 1.0) * variance
+ (new_size - 1.0) * new_variance
+ (size * new_size / total_size) * (mean - new_mean) ** 2
)
new_mean = (1.0 / total_size) * (size * mean + new_size * new_mean)
new_size = total_size
return new_mean, new_size, math.sqrt(new_variance)
@staticmethod
def _find_matching_child(node, context):
for child_index, child in enumerate(node["children"]):
property_name = child["decision_rule"]["property"]
operand = child["decision_rule"]["operand"]
operator = child["decision_rule"]["operator"]
context_value = context.get(property_name)
<|code_end|>
using the current file's imports:
import math
import numbers
from craft_ai.errors import CraftAiDecisionError, CraftAiNullDecisionError
from craft_ai.operators import OPERATORS, OPERATORS_FUNCTION
from craft_ai.types import TYPES
from craft_ai.timezones import is_timezone
and any relevant context from other files:
# Path: craft_ai/errors.py
# class CraftAiDecisionError(CraftAiError):
# """An error occured during the decision phase."""
#
# class CraftAiNullDecisionError(CraftAiDecisionError):
# """An error occured during the decision phase."""
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
#
# OPERATORS_FUNCTION = OPERATORS_FUNCTION_V2
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
. Output only the next line. | if not isinstance(operator, str) or operator not in OPERATORS.values(): |
Based on the snippet: <|code_start|> new_variance = variance
new_mean = mean
new_size = size
continue
total_size = 1.0 * size + new_size
if total_size == 0:
continue
new_variance = (1.0 / (total_size - 1.0)) * (
(size - 1.0) * variance
+ (new_size - 1.0) * new_variance
+ (size * new_size / total_size) * (mean - new_mean) ** 2
)
new_mean = (1.0 / total_size) * (size * mean + new_size * new_mean)
new_size = total_size
return new_mean, new_size, math.sqrt(new_variance)
@staticmethod
def _find_matching_child(node, context):
for child_index, child in enumerate(node["children"]):
property_name = child["decision_rule"]["property"]
operand = child["decision_rule"]["operand"]
operator = child["decision_rule"]["operator"]
context_value = context.get(property_name)
if not isinstance(operator, str) or operator not in OPERATORS.values():
raise CraftAiDecisionError(
"""Invalid decision tree format, {} is not a valid"""
""" decision operator.""".format(operator)
)
<|code_end|>
, predict the immediate next line with the help of imports:
import math
import numbers
from craft_ai.errors import CraftAiDecisionError, CraftAiNullDecisionError
from craft_ai.operators import OPERATORS, OPERATORS_FUNCTION
from craft_ai.types import TYPES
from craft_ai.timezones import is_timezone
and context (classes, functions, sometimes code) from other files:
# Path: craft_ai/errors.py
# class CraftAiDecisionError(CraftAiError):
# """An error occured during the decision phase."""
#
# class CraftAiNullDecisionError(CraftAiDecisionError):
# """An error occured during the decision phase."""
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
#
# OPERATORS_FUNCTION = OPERATORS_FUNCTION_V2
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
. Output only the next line. | if OPERATORS_FUNCTION[operator](context_value, operand): |
Given the following code snippet before the placeholder: <|code_start|>
_DECISION_VERSION = "2.0.0"
_VALUE_VALIDATORS = {
TYPES["continuous"]: lambda value: isinstance(value, numbers.Real),
TYPES["enum"]: lambda value: isinstance(value, str),
TYPES["boolean"]: lambda value: isinstance(value, bool),
<|code_end|>
, predict the next line using imports from the current file:
import math
import numbers
from craft_ai.errors import CraftAiDecisionError, CraftAiNullDecisionError
from craft_ai.operators import OPERATORS, OPERATORS_FUNCTION
from craft_ai.types import TYPES
from craft_ai.timezones import is_timezone
and context including class names, function names, and sometimes code from other files:
# Path: craft_ai/errors.py
# class CraftAiDecisionError(CraftAiError):
# """An error occured during the decision phase."""
#
# class CraftAiNullDecisionError(CraftAiDecisionError):
# """An error occured during the decision phase."""
#
# Path: craft_ai/operators.py
# OPERATORS = OPERATORS_V2
#
# OPERATORS_FUNCTION = OPERATORS_FUNCTION_V2
#
# Path: craft_ai/types.py
# TYPES = {
# "continuous": "continuous",
# "enum": "enum",
# "boolean": "boolean",
# "timezone": "timezone",
# "time_of_day": "time_of_day",
# "day_of_week": "day_of_week",
# "day_of_month": "day_of_month",
# "month_of_year": "month_of_year",
# }
#
# Path: craft_ai/timezones.py
# def is_timezone(value):
# # Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# # cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
# if isinstance(value, int) and value <= 840 and value >= -720:
# return True
# if not isinstance(value, str):
# return False
# if value in TIMEZONES:
# return True
# result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
# return result_reg_exp
. Output only the next line. | TYPES["timezone"]: lambda value: is_timezone(value), |
Predict the next line for this snippet: <|code_start|> return path0[:-1] == path1[:-1] and path0 != path1
def _get_neighbours(paths, decision_path):
"""
Collect all neighbours paths of the given decision path
param: paths: paths aggregator
param: decision_path: decision path to get neighbours from
"""
split = decision_path.split("-")
neighbours = []
for step in range(1, len(split) + 1):
for path in paths:
if _is_neighbour(path, "-".join(split[:step])):
neighbours.append(path)
return neighbours
def extract_output_tree(tree, output_property=None):
"""
Extract the output decision tree specific for a given output property from a full decision tree.
This function accepts trees as retrieved from `craft_ai.Client.get_generator_decision_tree`.
Parameters:
tree: A tree.
output_property (optional): If provided, the output property for which the tree predicts
values, otherwise the first defined tree is retrieved.
"""
if not isinstance(tree, dict):
<|code_end|>
with the help of current file imports:
from copy import copy
from .errors import CraftAiError
and context from other files:
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
, which may contain function names, class names, or code. Output only the next line. | raise CraftAiError( |
Here is a snippet: <|code_start|>
DUMMY_COLUMN_NAME = "CraftGeneratedDummy"
SELECTED_NODE_REGEX = "^0(-\\d*)*$"
def format_input(val):
<|code_end|>
. Write the next line using the current file imports:
import json
import re
import string
import importlib
import pandas as pd
from random import choice
from semver import VersionInfo
from .constants import (
MISSING_VALUE,
OPTIONAL_VALUE,
)
from ..constants import REACT_CRAFT_AI_DECISION_TREE_VERSION
from ..errors import CraftAiError
and context from other files:
# Path: craft_ai/pandas/constants.py
# MISSING_VALUE = MissingValue()
#
# OPTIONAL_VALUE = OptionalValue()
#
# Path: craft_ai/constants.py
# REACT_CRAFT_AI_DECISION_TREE_VERSION = "0.0.23"
#
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
, which may include functions, classes, or code. Output only the next line. | if val == MISSING_VALUE: |
Here is a snippet: <|code_start|>
DUMMY_COLUMN_NAME = "CraftGeneratedDummy"
SELECTED_NODE_REGEX = "^0(-\\d*)*$"
def format_input(val):
if val == MISSING_VALUE:
return None
<|code_end|>
. Write the next line using the current file imports:
import json
import re
import string
import importlib
import pandas as pd
from random import choice
from semver import VersionInfo
from .constants import (
MISSING_VALUE,
OPTIONAL_VALUE,
)
from ..constants import REACT_CRAFT_AI_DECISION_TREE_VERSION
from ..errors import CraftAiError
and context from other files:
# Path: craft_ai/pandas/constants.py
# MISSING_VALUE = MissingValue()
#
# OPTIONAL_VALUE = OptionalValue()
#
# Path: craft_ai/constants.py
# REACT_CRAFT_AI_DECISION_TREE_VERSION = "0.0.23"
#
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
, which may include functions, classes, or code. Output only the next line. | if val == OPTIONAL_VALUE: |
Given the code snippet: <|code_start|> for folded_node in folded_nodes:
if not isinstance(folded_node, str) and not re.compile(
SELECTED_NODE_REGEX
).match(folded_node):
raise CraftAiError(
"""Invalid folded node format given, tt should be a"""
"""String following this regex: {}, found: {}""".format(
SELECTED_NODE_REGEX, folded_nodes
)
)
if edge_type not in ["constant", "absolute", "relative"]:
raise CraftAiError(
"""Invalid edge type given, its value should be a "constant", """
""""absolute" or "relative", found: {}""".format(edge_type)
)
if not isinstance(selected_node, str) and not re.compile(SELECTED_NODE_REGEX).match(
selected_node
):
raise CraftAiError(
"""Invalid selected node format given, tt should be a"""
"""String following this regex: {}, found: {}""".format(
SELECTED_NODE_REGEX, selected_node
)
)
return html_template.format(
height=height,
tree=json.dumps(tree_object),
<|code_end|>
, generate the next line using the imports in this file:
import json
import re
import string
import importlib
import pandas as pd
from random import choice
from semver import VersionInfo
from .constants import (
MISSING_VALUE,
OPTIONAL_VALUE,
)
from ..constants import REACT_CRAFT_AI_DECISION_TREE_VERSION
from ..errors import CraftAiError
and context (functions, classes, or occasionally code) from other files:
# Path: craft_ai/pandas/constants.py
# MISSING_VALUE = MissingValue()
#
# OPTIONAL_VALUE = OptionalValue()
#
# Path: craft_ai/constants.py
# REACT_CRAFT_AI_DECISION_TREE_VERSION = "0.0.23"
#
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
. Output only the next line. | version=REACT_CRAFT_AI_DECISION_TREE_VERSION, |
Predict the next line after this snippet: <|code_start|> </script>
<script src="https://unpkg.com/react-dom@16/umd/react-dom.development.js" crossorigin defer>
</script>
<script src="https://unpkg.com/react-craft-ai-decision-tree@0.0.26" crossorigin defer>
</script>
<style>
.jp-RenderedHTMLCommon table {{ table-layout: inherit; }}
.jp-RenderedHTMLCommon ul {{ padding-left: none; }}
</style>
</head>
<body>
<div id="{idDiv}">
</div>
<script async=false>
ReactDOM.render(
React.createElement(DecisionTree,
{{
style: {{ height: {height} }},
data: {tree},
selectedNode: "{selectedNode}",
foldedNodes: {foldedNodes},
edgeType: "{edgeType}"
}}
),document.getElementById("{idDiv}")
);
</script>
</body>
</html>"""
if height <= 0:
<|code_end|>
using the current file's imports:
import json
import re
import string
import importlib
import pandas as pd
from random import choice
from semver import VersionInfo
from .constants import (
MISSING_VALUE,
OPTIONAL_VALUE,
)
from ..constants import REACT_CRAFT_AI_DECISION_TREE_VERSION
from ..errors import CraftAiError
and any relevant context from other files:
# Path: craft_ai/pandas/constants.py
# MISSING_VALUE = MissingValue()
#
# OPTIONAL_VALUE = OptionalValue()
#
# Path: craft_ai/constants.py
# REACT_CRAFT_AI_DECISION_TREE_VERSION = "0.0.23"
#
# Path: craft_ai/errors.py
# class CraftAiError(Exception):
# """Base class for exceptions in the craft ai client."""
#
# def __init__(self, message=None, metadata=None):
# self.message = message
# self.metadata = metadata
# super(CraftAiError, self).__init__(message, metadata)
#
# def __str__(self):
# return repr(self.message)
. Output only the next line. | raise CraftAiError("A strictly positive height value must be given.") |
Here is a snippet: <|code_start|>try:
except ImportError:
class TestPostgresToS3Transfer(object):
def test_its_created_successfully(self):
<|code_end|>
. Write the next line using the current file imports:
import unittest.mock as mock
import mock
import subprocess
import dags.utils.helpers as helpers
from dags.operators.postgres_to_s3_transfer import PostgresToS3Transfer
and context from other files:
# Path: dags/operators/postgres_to_s3_transfer.py
# class PostgresToS3Transfer(BaseOperator):
# '''Dumps a Postgres database to a S3 key
#
# :param url: URL to download. (templated)
# :type url: str
# :param postgres_conn_id: Postgres Connection's ID.
# :type postgres_conn_id: str
# :param tables: List of tables to export (optional, default exports all
# tables).
# :type tables: list of str
# :param s3_conn_id: S3 Connection's ID. It needs a JSON in the `extra` field
# with `aws_access_key_id` and `aws_secret_access_key`
# :type s3_conn_id: str
# :param s3_url: S3 url (e.g. `s3://my_bucket/my_key.zip`) (templated)
# :type s3_url: str
# '''
# template_fields = ('s3_url',)
#
# @apply_defaults
# def __init__(self, postgres_conn_id, s3_conn_id, s3_url, tables=None, *args, **kwargs):
# super(PostgresToS3Transfer, self).__init__(*args, **kwargs)
# self.postgres_conn_id = postgres_conn_id
# self.tables = tables
# self.s3_conn_id = s3_conn_id
# self.s3_url = s3_url
#
# def execute(self, context):
# s3 = self._load_s3_connection(self.s3_conn_id)
# s3_bucket, s3_key = self._parse_s3_url(self.s3_url)
# command = [
# 'pg_dump',
# '-Fc',
# ]
#
# if self.tables:
# tables_params = ['--table={}'.format(table) for table in self.tables]
# command.extend(tables_params)
#
# logging.info('Dumping database "%s" into "%s"', self.postgres_conn_id, self.s3_url)
# logging.info('Command: %s <POSTGRES_URI>', ' '.join(command))
#
# command.append(helpers.get_postgres_uri(self.postgres_conn_id))
#
# with subprocess.Popen(command, stdout=subprocess.PIPE).stdout as dump_file:
# s3.Bucket(s3_bucket) \
# .upload_fileobj(dump_file, s3_key)
#
# @staticmethod
# def _parse_s3_url(s3_url):
# parsed_url = urlparse(s3_url)
# if not parsed_url.netloc:
# raise airflow.exceptions.AirflowException('Please provide a bucket_name')
# else:
# bucket_name = parsed_url.netloc
# key = parsed_url.path.strip('/')
# return (bucket_name, key)
#
# def _load_s3_connection(self, conn_id):
# '''
# Parses the S3 connection and returns a Boto3 resource.
#
# This should be implementing using the S3Hook, but it currently uses
# boto (not boto3) which doesn't allow streaming.
#
# :return: Boto3 resource
# :rtype: boto3.resources.factory.s3.ServiceResource
# '''
# conn = airflow.hooks.base_hook.BaseHook.get_connection(conn_id)
# extra_dejson = conn.extra_dejson
# key_id = extra_dejson['aws_access_key_id']
# access_key = extra_dejson['aws_secret_access_key']
#
# s3 = boto3.resource(
# 's3',
# aws_access_key_id=key_id,
# aws_secret_access_key=access_key
# )
#
# return s3
, which may include functions, classes, or code. Output only the next line. | operator = PostgresToS3Transfer( |
Here is a snippet: <|code_start|>try:
except ImportError:
class TestHerokuOperator(object):
def test_its_created_successfully(self):
<|code_end|>
. Write the next line using the current file imports:
import unittest.mock as mock
import mock
import pytest
import airflow.exceptions
import requests.exceptions
from dags.operators.heroku_operator import HerokuOperator
and context from other files:
# Path: dags/operators/heroku_operator.py
# class HerokuOperator(airflow.models.BaseOperator):
# '''Executes a command on a Heroku dyno.
#
# :param heroku_conn_id: Heroku's connection ID. It should contain your API
# key in the password field.
# :type heroku_conn_id: str
# :param app_name: Heroku's app name.
# :type app_name: str
# :param command: Command to run (templated)
# :type command: str
# :param size: Dyno's size (default: 'free')
# :type size: str
# :param timeout: Timeout on all HTTP requests (default: 60)
# :type timeout: int
# '''
# template_fields = ('command',)
#
# @apply_defaults
# def __init__(
# self,
# heroku_conn_id,
# app_name,
# command,
# size='free',
# timeout=60,
# *args,
# **kwargs
# ):
# super(HerokuOperator, self).__init__(*args, **kwargs)
# self.heroku_conn_id = heroku_conn_id
# self.app_name = app_name
# self.command = command
# self.size = size
# self.timeout = timeout
# self.dyno = None
#
# def execute(self, context):
# conn = airflow.hooks.base_hook.BaseHook.get_connection(self.heroku_conn_id)
# api_key = conn.password
# self.heroku_conn = heroku3.from_key(api_key)
#
# self.dyno = self.heroku_conn.run_command_on_app(
# self.app_name,
# self.command,
# size=self.size,
# attach=False,
# printout=False
# )
#
# try:
# status_code = None
# for line in self.heroku_conn.stream_app_log(
# self.app_name,
# dyno=self.dyno.name,
# lines=1,
# timeout=self.timeout
# ):
# logging.info(line)
# status_code = self._parse_status_code(line)
# if status_code is not None:
# break
# except requests.exceptions.ConnectionError:
# if self._get_dyno_status_code() is None:
# raise
#
# if status_code != 0:
# msg = 'Command "{command}" returned non-successful status code "{status}"'.format(
# command=self.command,
# status=status_code
# )
# raise airflow.exceptions.AirflowException(msg)
#
# return status_code
#
# def on_kill(self):
# if self.dyno:
# self.dyno.kill()
#
# def _get_dyno_status_code(self):
# logs = self.heroku_conn.get_app_log(
# self.app_name,
# dyno=self.dyno.name,
# timeout=self.timeout
# )
# return self._parse_status_code(logs)
#
# def _parse_status_code(self, line):
# status_code = None
# m = re.search(
# 'heroku\[.+\]: Process exited with status (-?\d+)',
# line
# )
#
# if m:
# status_code = int(m.groups()[0])
#
# return status_code
, which may include functions, classes, or code. Output only the next line. | operator = HerokuOperator( |
Given the code snippet: <|code_start|>
class TestPythonSensor(object):
@pytest.mark.parametrize('return_value, result', [
('a string', False),
(42, False),
(0, False),
(False, False),
(True, True),
])
def test_poke_returns_false_if_callable_doesnt_return_true(self, return_value, result):
callable = lambda: return_value # noqa: E731
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from dags.operators.python_sensor import PythonSensor
and context (functions, classes, or occasionally code) from other files:
# Path: dags/operators/python_sensor.py
# class PythonSensor(airflow.operators.sensors.BaseSensorOperator):
# '''
# Runs Python callable until it returns `True`.
#
# :param python_callable: Python callable to run. It should return `True`
# when successful, any other values (even truthy ones) won't work.
# :type python_callable: function
# '''
#
# @apply_defaults
# def __init__(self, python_callable, *args, **kwargs):
# super(PythonSensor, self).__init__(*args, **kwargs)
# self.python_callable = python_callable
#
# def poke(self, context):
# return self.python_callable() is True
. Output only the next line. | sensor = PythonSensor( |
Continue the code snippet: <|code_start|>try:
except ImportError:
class TestDockerCLIOperator(object):
def test_its_created_successfully(self):
<|code_end|>
. Use current file imports:
import unittest.mock as mock
import mock
import collections
import io
import re
import shlex
import subprocess
import pytest
import airflow.exceptions
from dags.operators.docker_cli_operator import DockerCLIOperator
and context (classes, functions, or code) from other files:
# Path: dags/operators/docker_cli_operator.py
# class DockerCLIOperator(airflow.models.BaseOperator):
# '''Executes a command on a Docker comtainer.
#
# This uses bash to execute Docker commands instead of using the Docker API
# to try to work around issue
# https://issues.apache.org/jira/browse/AIRFLOW-1131
#
# :param image: Docker image from which to create the container.
# :type image: str
# :param command: Command to run (templated)
# :type command: str
# :param environment: Environment variables to set in the container.
# :type environment: dict
# :param force_pull: Pull the docker image on every run (default: False).
# :type force_pull: bool
# '''
# template_fields = ('command',)
#
# @apply_defaults
# def __init__(
# self,
# image,
# command,
# environment=None,
# force_pull=False,
# api_version=None,
# *args,
# **kwargs
# ):
# super(DockerCLIOperator, self).__init__(*args, **kwargs)
# self.image = image
# self.command = command
# self.environment = copy.deepcopy(environment or {})
# self.force_pull = force_pull
# self.api_version = api_version
# self._process = None
#
# if self.api_version:
# self.environment['DOCKER_API_VERSION'] = self.api_version
#
# def execute(self, context):
# if self.force_pull:
# self._pull_image()
#
# docker_run_command = self._get_docker_run_command()
# return self._run_command(docker_run_command, self.environment)
#
# def on_kill(self):
# if self._process:
# logging.info('Sending SIGTERM signal to process group')
# os.killpg(os.getpgid(self._process.pid), signal.SIGTERM)
#
# def _pull_image(self):
# pull_command = 'docker pull {image}'.format(image=self.image)
# return self._run_command(pull_command, self.environment)
#
# def _get_docker_run_command(self):
# env_params = [
# '--env "{key}=${key}"'.format(key=key)
# for key, value in self.environment.items()
# if value is not None
# ]
# resource_limits_params = [
# '--memory=350m',
# '--cpu-period=100000',
# '--cpu-quota=50000',
# ]
#
# docker_command = [
# 'docker',
# 'run',
# '--rm',
# ] + resource_limits_params + [
# ] + env_params + [
# self.image,
# self.command,
# ]
#
# return ' '.join(docker_command)
#
# def _run_command(self, command, env=None):
# command = '/bin/bash -c "{command}"'.format(command=command)
# logging.info('Running command "{}"'.format(shlex.split(command)))
# self._process = subprocess.Popen(
# shlex.split(command),
# stdout=subprocess.PIPE,
# stderr=subprocess.STDOUT,
# env=_remove_nulls_and_encode_as_utf8_strings(env),
# preexec_fn=os.setsid
# )
# process = self._process
#
# line = ''
# for line in iter(process.stdout.readline, b''):
# line = line.decode('utf-8').strip()
# logging.info(line)
# process.wait()
# logging.info('Command exited with '
# 'return code {0}'.format(process.returncode))
#
# if process.returncode != 0:
# msg = 'Bash command "{command}" failed with exit code "{exitcode}"'.format(
# command=command,
# exitcode=process.returncode
# )
# raise airflow.exceptions.AirflowException(msg)
#
# return process.returncode
. Output only the next line. | operator = DockerCLIOperator( |
Next line prediction: <|code_start|> raw, options=cmarkgfmOptions.CMARK_OPT_UNSAFE
),
}
except ImportError:
warnings.warn(_EXTRA_WARNING)
variants = {}
# Make code fences with `python` as the language default to highlighting as
# Python 3.
_LANG_ALIASES = {
'python': 'python3',
}
def render(raw, variant="GFM", **kwargs):
if not variants:
warnings.warn(_EXTRA_WARNING)
return None
renderer = variants.get(variant)
if not renderer:
return None
rendered = renderer(raw)
if not rendered:
return None
highlighted = _highlight(rendered)
<|code_end|>
. Use current file imports:
(import re
import warnings
import pygments
import pygments.lexers
import pygments.formatters
import cmarkgfm
from html.parser import unescape
from .clean import clean
from cmarkgfm.cmark import Options as cmarkgfmOptions)
and context including class names, function names, or small code snippets from other files:
# Path: readme_renderer/clean.py
# def clean(html, tags=None, attributes=None, styles=None):
# if tags is None:
# tags = ALLOWED_TAGS
# if attributes is None:
# attributes = ALLOWED_ATTRIBUTES
# if styles is None:
# styles = ALLOWED_STYLES
#
# # Clean the output using Bleach
# cleaner = bleach.sanitizer.Cleaner(
# tags=tags,
# attributes=attributes,
# styles=styles,
# filters=[
# # Bleach Linkify makes it easy to modify links, however, we will
# # not be using it to create additional links.
# functools.partial(
# bleach.linkifier.LinkifyFilter,
# callbacks=[
# lambda attrs, new: attrs if not new else None,
# bleach.callbacks.nofollow,
# ],
# skip_tags=["pre"],
# parse_email=False,
# ),
# DisabledCheckboxInputsFilter,
# ],
# )
# try:
# cleaned = cleaner.clean(html)
# return cleaned
# except ValueError:
# return None
. Output only the next line. | cleaned = clean(highlighted) |
Predict the next line for this snippet: <|code_start|>
MD_FIXTURES = [
(fn, os.path.splitext(fn)[0] + ".html", variant)
for variant in variants
for fn in glob.iglob(
os.path.join(
os.path.dirname(__file__),
"fixtures",
"test_" + variant + "*.md"
)
)
]
@pytest.mark.parametrize(
("md_filename", "html_filename", "variant"),
MD_FIXTURES,
)
def test_md_fixtures(md_filename, html_filename, variant):
# Get our Markup
with io.open(md_filename, encoding='utf-8') as f:
md_markup = f.read()
# Get our expected
with io.open(html_filename, encoding="utf-8") as f:
expected = f.read()
<|code_end|>
with the help of current file imports:
import io
import glob
import os
import pytest
from readme_renderer.markdown import render, variants
and context from other files:
# Path: readme_renderer/markdown.py
# _EXTRA_WARNING = (
# "Markdown renderers are not available. "
# "Install 'readme_renderer[md]' to enable Markdown rendering."
# )
# _LANG_ALIASES = {
# 'python': 'python3',
# }
# def render(raw, variant="GFM", **kwargs):
# def _highlight(html):
# def replacer(match):
, which may contain function names, class names, or code. Output only the next line. | assert render(md_markup, variant=variant) == expected |
Using the snippet: <|code_start|> Checks if the long string fields are reST-compliant.
"""
# Warn that this command is deprecated
# Don't use self.warn() because it will cause the check to fail.
Command.warn(
self,
"This command has been deprecated. Use `twine check` instead: "
"https://packaging.python.org/guides/making-a-pypi-friendly-readme"
"#validating-restructuredtext-markup"
)
data = self.distribution.get_long_description()
content_type = getattr(
self.distribution.metadata, 'long_description_content_type', None)
if content_type:
content_type, _ = cgi.parse_header(content_type)
if content_type != 'text/x-rst':
self.warn(
"Not checking long description content type '%s', this "
"command only checks 'text/x-rst'." % content_type)
return
# None or empty string should both trigger this branch.
if not data or data == 'UNKNOWN':
self.warn(
"The project's long_description is either missing or empty.")
return
stream = _WarningStream()
<|code_end|>
, determine the next line of code. You have imports:
import cgi
import io
import re
import distutils.log
from distutils.command.check import check as _check
from distutils.core import Command
from ..rst import render
and context (class names, function names, or code) available:
# Path: readme_renderer/rst.py
# def render(raw, stream=None, **kwargs):
# if stream is None:
# # Use a io.StringIO as the warning stream to prevent warnings from
# # being printed to sys.stderr.
# stream = io.StringIO()
#
# settings = SETTINGS.copy()
# settings["warning_stream"] = stream
#
# writer = Writer()
# writer.translator_class = ReadMeHTMLTranslator
#
# try:
# parts = publish_parts(raw, writer=writer, settings_overrides=settings)
# except SystemMessage:
# rendered = None
# else:
# rendered = parts.get("docinfo", "") + parts.get("fragment", "")
#
# if rendered:
# return clean(rendered)
# else:
# return None
. Output only the next line. | markup = render(data, stream=stream) |
Given the code snippet: <|code_start|># Copyright 2015 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
if sys.version_info >= (3,):
else:
def html_escape(s):
return escape(s, quote=True).replace("'", ''')
def render(raw, **kwargs):
rendered = html_escape(raw).replace("\n", "<br>")
<|code_end|>
, generate the next line using the imports in this file:
import sys
from .clean import clean
from html import escape as html_escape
from cgi import escape
and context (functions, classes, or occasionally code) from other files:
# Path: readme_renderer/clean.py
# def clean(html, tags=None, attributes=None, styles=None):
# if tags is None:
# tags = ALLOWED_TAGS
# if attributes is None:
# attributes = ALLOWED_ATTRIBUTES
# if styles is None:
# styles = ALLOWED_STYLES
#
# # Clean the output using Bleach
# cleaner = bleach.sanitizer.Cleaner(
# tags=tags,
# attributes=attributes,
# styles=styles,
# filters=[
# # Bleach Linkify makes it easy to modify links, however, we will
# # not be using it to create additional links.
# functools.partial(
# bleach.linkifier.LinkifyFilter,
# callbacks=[
# lambda attrs, new: attrs if not new else None,
# bleach.callbacks.nofollow,
# ],
# skip_tags=["pre"],
# parse_email=False,
# ),
# DisabledCheckboxInputsFilter,
# ],
# )
# try:
# cleaned = cleaner.clean(html)
# return cleaned
# except ValueError:
# return None
. Output only the next line. | return clean(rendered, tags=["br"]) |
Given the code snippet: <|code_start|>
@pytest.mark.skipif(variants, reason="Extra is installed")
@pytest.mark.parametrize("variant", ('GFM', 'CommonMark'))
def test_no_extra(variant):
with pytest.warns(UserWarning) as warnings:
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from readme_renderer.markdown import render, variants
and context (functions, classes, or occasionally code) from other files:
# Path: readme_renderer/markdown.py
# _EXTRA_WARNING = (
# "Markdown renderers are not available. "
# "Install 'readme_renderer[md]' to enable Markdown rendering."
# )
# _LANG_ALIASES = {
# 'python': 'python3',
# }
# def render(raw, variant="GFM", **kwargs):
# def _highlight(html):
# def replacer(match):
. Output only the next line. | assert render('Hello', variant=variant) is None |
Given the code snippet: <|code_start|> # Use the short form of syntax highlighting so that the generated
# Pygments CSS can be used to style the output.
"syntax_highlight": "short",
# Maximum width (in characters) for one-column field names.
# 0 means "no limit"
"field_name_limit": 0,
}
def render(raw, stream=None, **kwargs):
if stream is None:
# Use a io.StringIO as the warning stream to prevent warnings from
# being printed to sys.stderr.
stream = io.StringIO()
settings = SETTINGS.copy()
settings["warning_stream"] = stream
writer = Writer()
writer.translator_class = ReadMeHTMLTranslator
try:
parts = publish_parts(raw, writer=writer, settings_overrides=settings)
except SystemMessage:
rendered = None
else:
rendered = parts.get("docinfo", "") + parts.get("fragment", "")
if rendered:
<|code_end|>
, generate the next line using the imports in this file:
import io
from docutils.core import publish_parts
from docutils.writers.html4css1 import HTMLTranslator, Writer
from docutils.utils import SystemMessage
from .clean import clean
and context (functions, classes, or occasionally code) from other files:
# Path: readme_renderer/clean.py
# def clean(html, tags=None, attributes=None, styles=None):
# if tags is None:
# tags = ALLOWED_TAGS
# if attributes is None:
# attributes = ALLOWED_ATTRIBUTES
# if styles is None:
# styles = ALLOWED_STYLES
#
# # Clean the output using Bleach
# cleaner = bleach.sanitizer.Cleaner(
# tags=tags,
# attributes=attributes,
# styles=styles,
# filters=[
# # Bleach Linkify makes it easy to modify links, however, we will
# # not be using it to create additional links.
# functools.partial(
# bleach.linkifier.LinkifyFilter,
# callbacks=[
# lambda attrs, new: attrs if not new else None,
# bleach.callbacks.nofollow,
# ],
# skip_tags=["pre"],
# parse_email=False,
# ),
# DisabledCheckboxInputsFilter,
# ],
# )
# try:
# cleaned = cleaner.clean(html)
# return cleaned
# except ValueError:
# return None
. Output only the next line. | return clean(rendered) |
Given the code snippet: <|code_start|>
@pytest.mark.parametrize(
("rst_filename", "html_filename"),
[
(fn, os.path.splitext(fn)[0] + ".html")
for fn in glob.glob(
os.path.join(os.path.dirname(__file__), "fixtures", "test_*.rst")
)
],
)
def test_rst_fixtures(rst_filename, html_filename):
# Get our Markup
with io.open(rst_filename, encoding='utf-8') as f:
rst_markup = f.read()
# Get our expected
with io.open(html_filename, encoding="utf-8") as f:
expected = f.read()
<|code_end|>
, generate the next line using the imports in this file:
import io
import glob
import os.path
import pytest
from readme_renderer.rst import render
and context (functions, classes, or occasionally code) from other files:
# Path: readme_renderer/rst.py
# def render(raw, stream=None, **kwargs):
# if stream is None:
# # Use a io.StringIO as the warning stream to prevent warnings from
# # being printed to sys.stderr.
# stream = io.StringIO()
#
# settings = SETTINGS.copy()
# settings["warning_stream"] = stream
#
# writer = Writer()
# writer.translator_class = ReadMeHTMLTranslator
#
# try:
# parts = publish_parts(raw, writer=writer, settings_overrides=settings)
# except SystemMessage:
# rendered = None
# else:
# rendered = parts.get("docinfo", "") + parts.get("fragment", "")
#
# if rendered:
# return clean(rendered)
# else:
# return None
. Output only the next line. | out = render(rst_markup) |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
class SystemCases(unittest.TestCase):
@unittest.skipIf(os.getuid()==0, "Test mustn't be run as superuser")
def test_is_running_no_access(self):
with self.assertRaises(OSError):
<|code_end|>
, generate the next line using the imports in this file:
import os
import shutil
import signal
import tempfile
import time
import unittest
from iktomi.utils.system import (
is_running, safe_makedirs, terminate, doublefork,
)
and context (functions, classes, or occasionally code) from other files:
# Path: iktomi/utils/system.py
# def is_running(pid):
# '''Returns True if process with PID `pid` is running. Current user must
# have permission to access process information.'''
# try:
# os.kill(pid, 0)
# except OSError as exc:
# if exc.errno == errno.ESRCH:
# return False
# raise
# return True
#
# def safe_makedirs(*files):
# for filename in files:
# dirname = os.path.dirname(filename)
# if not os.path.isdir(dirname):
# os.makedirs(dirname)
#
# def terminate(pid, sig, timeout):
# '''Terminates process with PID `pid` and returns True if process finished
# during `timeout`. Current user must have permission to access process
# information.'''
# os.kill(pid, sig)
# start = time.time()
# while True:
# try:
# # This is requireed if it's our child to avoid zombie. Also
# # is_running() returns True for zombie process.
# _, status = os.waitpid(pid, os.WNOHANG)
# except OSError as exc:
# if exc.errno != errno.ECHILD: # pragma: nocover
# raise
# else:
# if status:
# return True
# if not is_running(pid):
# return True
# if time.time()-start>=timeout:
# return False
# time.sleep(0.1)
#
# def doublefork(pidfile, logfile, cwd, umask): # pragma: nocover
# '''Daemonize current process.
# After first fork we return to the shell and removing our self from
# controling terminal via `setsid`.
# After second fork we are not session leader any more and cant get
# controlling terminal when opening files.'''
# try:
# if os.fork():
# os._exit(os.EX_OK)
# except OSError as e:
# sys.exit('fork #1 failed: ({}) {}'.format(e.errno, e.strerror))
# os.setsid()
# os.chdir(cwd)
# os.umask(umask)
# try:
# if os.fork():
# os._exit(os.EX_OK)
# except OSError as e:
# sys.exit('fork #2 failed: ({}) {}'.format(e.errno, e.strerror))
# if logfile is not None:
# si = open('/dev/null')
# if six.PY2:
# so = open(logfile, 'a+', 0)
# else:
# so = io.open(logfile, 'ab+', 0)
# so = io.TextIOWrapper(so, write_through=True, encoding="utf-8")
#
# os.dup2(si.fileno(), 0)
# os.dup2(so.fileno(), 1)
# os.dup2(so.fileno(), 2)
# sys.stdin = si
# sys.stdout = sys.stderr = so
# with open(pidfile, 'w') as f:
# f.write(str(os.getpid()))
. Output only the next line. | is_running(1) |
Predict the next line for this snippet: <|code_start|> self.assertFalse(is_running(pid))
self.assertLess(finished-started, 1)
else:
doublefork(pidfile, '/dev/null', '.', 0)
time.sleep(3)
os._exit(os.EX_OK)
def test_terminate_false(self):
pid = os.fork()
if pid:
started = time.time()
success = terminate(pid, 0, 0.5)
finished = time.time()
self.assertFalse(success)
self.assertTrue(is_running(pid))
self.assertLess(finished-started, 1)
# Cleaning up
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
else:
time.sleep(3)
os._exit(os.EX_OK)
def test_safe_makedirs(self):
tmp = tempfile.mkdtemp()
dir_path = os.path.join(tmp, 'aa', 'bb')
file_path = os.path.join(dir_path, 'file.ext')
<|code_end|>
with the help of current file imports:
import os
import shutil
import signal
import tempfile
import time
import unittest
from iktomi.utils.system import (
is_running, safe_makedirs, terminate, doublefork,
)
and context from other files:
# Path: iktomi/utils/system.py
# def is_running(pid):
# '''Returns True if process with PID `pid` is running. Current user must
# have permission to access process information.'''
# try:
# os.kill(pid, 0)
# except OSError as exc:
# if exc.errno == errno.ESRCH:
# return False
# raise
# return True
#
# def safe_makedirs(*files):
# for filename in files:
# dirname = os.path.dirname(filename)
# if not os.path.isdir(dirname):
# os.makedirs(dirname)
#
# def terminate(pid, sig, timeout):
# '''Terminates process with PID `pid` and returns True if process finished
# during `timeout`. Current user must have permission to access process
# information.'''
# os.kill(pid, sig)
# start = time.time()
# while True:
# try:
# # This is requireed if it's our child to avoid zombie. Also
# # is_running() returns True for zombie process.
# _, status = os.waitpid(pid, os.WNOHANG)
# except OSError as exc:
# if exc.errno != errno.ECHILD: # pragma: nocover
# raise
# else:
# if status:
# return True
# if not is_running(pid):
# return True
# if time.time()-start>=timeout:
# return False
# time.sleep(0.1)
#
# def doublefork(pidfile, logfile, cwd, umask): # pragma: nocover
# '''Daemonize current process.
# After first fork we return to the shell and removing our self from
# controling terminal via `setsid`.
# After second fork we are not session leader any more and cant get
# controlling terminal when opening files.'''
# try:
# if os.fork():
# os._exit(os.EX_OK)
# except OSError as e:
# sys.exit('fork #1 failed: ({}) {}'.format(e.errno, e.strerror))
# os.setsid()
# os.chdir(cwd)
# os.umask(umask)
# try:
# if os.fork():
# os._exit(os.EX_OK)
# except OSError as e:
# sys.exit('fork #2 failed: ({}) {}'.format(e.errno, e.strerror))
# if logfile is not None:
# si = open('/dev/null')
# if six.PY2:
# so = open(logfile, 'a+', 0)
# else:
# so = io.open(logfile, 'ab+', 0)
# so = io.TextIOWrapper(so, write_through=True, encoding="utf-8")
#
# os.dup2(si.fileno(), 0)
# os.dup2(so.fileno(), 1)
# os.dup2(so.fileno(), 2)
# sys.stdin = si
# sys.stdout = sys.stderr = so
# with open(pidfile, 'w') as f:
# f.write(str(os.getpid()))
, which may contain function names, class names, or code. Output only the next line. | safe_makedirs(file_path) |
Predict the next line for this snippet: <|code_start|> is_running(1)
def test_is_running_true(self):
pid = os.fork()
if pid:
self.assertTrue(is_running(pid))
# Cleaning up
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
else:
time.sleep(1)
os._exit(0)
def test_is_running_false(self):
pid = os.fork()
if pid:
os.waitpid(pid, 0)
self.assertFalse(is_running(pid))
else:
os._exit(0)
def test_terminate_child_true(self):
pid = os.fork()
if pid:
started = time.time()
<|code_end|>
with the help of current file imports:
import os
import shutil
import signal
import tempfile
import time
import unittest
from iktomi.utils.system import (
is_running, safe_makedirs, terminate, doublefork,
)
and context from other files:
# Path: iktomi/utils/system.py
# def is_running(pid):
# '''Returns True if process with PID `pid` is running. Current user must
# have permission to access process information.'''
# try:
# os.kill(pid, 0)
# except OSError as exc:
# if exc.errno == errno.ESRCH:
# return False
# raise
# return True
#
# def safe_makedirs(*files):
# for filename in files:
# dirname = os.path.dirname(filename)
# if not os.path.isdir(dirname):
# os.makedirs(dirname)
#
# def terminate(pid, sig, timeout):
# '''Terminates process with PID `pid` and returns True if process finished
# during `timeout`. Current user must have permission to access process
# information.'''
# os.kill(pid, sig)
# start = time.time()
# while True:
# try:
# # This is requireed if it's our child to avoid zombie. Also
# # is_running() returns True for zombie process.
# _, status = os.waitpid(pid, os.WNOHANG)
# except OSError as exc:
# if exc.errno != errno.ECHILD: # pragma: nocover
# raise
# else:
# if status:
# return True
# if not is_running(pid):
# return True
# if time.time()-start>=timeout:
# return False
# time.sleep(0.1)
#
# def doublefork(pidfile, logfile, cwd, umask): # pragma: nocover
# '''Daemonize current process.
# After first fork we return to the shell and removing our self from
# controling terminal via `setsid`.
# After second fork we are not session leader any more and cant get
# controlling terminal when opening files.'''
# try:
# if os.fork():
# os._exit(os.EX_OK)
# except OSError as e:
# sys.exit('fork #1 failed: ({}) {}'.format(e.errno, e.strerror))
# os.setsid()
# os.chdir(cwd)
# os.umask(umask)
# try:
# if os.fork():
# os._exit(os.EX_OK)
# except OSError as e:
# sys.exit('fork #2 failed: ({}) {}'.format(e.errno, e.strerror))
# if logfile is not None:
# si = open('/dev/null')
# if six.PY2:
# so = open(logfile, 'a+', 0)
# else:
# so = io.open(logfile, 'ab+', 0)
# so = io.TextIOWrapper(so, write_through=True, encoding="utf-8")
#
# os.dup2(si.fileno(), 0)
# os.dup2(so.fileno(), 1)
# os.dup2(so.fileno(), 2)
# sys.stdin = si
# sys.stdout = sys.stderr = so
# with open(pidfile, 'w') as f:
# f.write(str(os.getpid()))
, which may contain function names, class names, or code. Output only the next line. | success = terminate(pid, signal.SIGKILL, 0.5) |
Predict the next line after this snippet: <|code_start|> os._exit(0)
def test_terminate_child_true(self):
pid = os.fork()
if pid:
started = time.time()
success = terminate(pid, signal.SIGKILL, 0.5)
finished = time.time()
self.assertTrue(success)
self.assertFalse(is_running(pid))
self.assertLess(finished-started, 1)
else:
time.sleep(3)
os._exit(os.EX_OK)
def test_terminate_alien_true(self):
_, pidfile = tempfile.mkstemp()
child_pid = os.fork()
if child_pid:
time.sleep(0.1)
with open(pidfile) as fp:
pid = int(fp.read())
self.assertTrue(is_running(pid))
started = time.time()
success = terminate(pid, signal.SIGKILL, 0.5)
finished = time.time()
self.assertTrue(success)
self.assertFalse(is_running(pid))
self.assertLess(finished-started, 1)
else:
<|code_end|>
using the current file's imports:
import os
import shutil
import signal
import tempfile
import time
import unittest
from iktomi.utils.system import (
is_running, safe_makedirs, terminate, doublefork,
)
and any relevant context from other files:
# Path: iktomi/utils/system.py
# def is_running(pid):
# '''Returns True if process with PID `pid` is running. Current user must
# have permission to access process information.'''
# try:
# os.kill(pid, 0)
# except OSError as exc:
# if exc.errno == errno.ESRCH:
# return False
# raise
# return True
#
# def safe_makedirs(*files):
# for filename in files:
# dirname = os.path.dirname(filename)
# if not os.path.isdir(dirname):
# os.makedirs(dirname)
#
# def terminate(pid, sig, timeout):
# '''Terminates process with PID `pid` and returns True if process finished
# during `timeout`. Current user must have permission to access process
# information.'''
# os.kill(pid, sig)
# start = time.time()
# while True:
# try:
# # This is requireed if it's our child to avoid zombie. Also
# # is_running() returns True for zombie process.
# _, status = os.waitpid(pid, os.WNOHANG)
# except OSError as exc:
# if exc.errno != errno.ECHILD: # pragma: nocover
# raise
# else:
# if status:
# return True
# if not is_running(pid):
# return True
# if time.time()-start>=timeout:
# return False
# time.sleep(0.1)
#
# def doublefork(pidfile, logfile, cwd, umask): # pragma: nocover
# '''Daemonize current process.
# After first fork we return to the shell and removing our self from
# controling terminal via `setsid`.
# After second fork we are not session leader any more and cant get
# controlling terminal when opening files.'''
# try:
# if os.fork():
# os._exit(os.EX_OK)
# except OSError as e:
# sys.exit('fork #1 failed: ({}) {}'.format(e.errno, e.strerror))
# os.setsid()
# os.chdir(cwd)
# os.umask(umask)
# try:
# if os.fork():
# os._exit(os.EX_OK)
# except OSError as e:
# sys.exit('fork #2 failed: ({}) {}'.format(e.errno, e.strerror))
# if logfile is not None:
# si = open('/dev/null')
# if six.PY2:
# so = open(logfile, 'a+', 0)
# else:
# so = io.open(logfile, 'ab+', 0)
# so = io.TextIOWrapper(so, write_through=True, encoding="utf-8")
#
# os.dup2(si.fileno(), 0)
# os.dup2(so.fileno(), 1)
# os.dup2(so.fileno(), 2)
# sys.stdin = si
# sys.stdout = sys.stderr = so
# with open(pidfile, 'w') as f:
# f.write(str(os.getpid()))
. Output only the next line. | doublefork(pidfile, '/dev/null', '.', 0) |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
__all__ = ['URL']
if six.PY2:
else:# pragma: no cover; we check coverage only in python2 part
def construct_url(path, query, host, port, scheme, fragment=None):
<|code_end|>
, predict the immediate next line with the help of imports:
import six
from urlparse import urlparse, parse_qs, unquote
from urllib.parse import urlparse, parse_qs, unquote
from webob.multidict import MultiDict
from .url_templates import urlquote
from iktomi.utils.url import uri_to_iri_parts
and context (classes, functions, sometimes code) from other files:
# Path: iktomi/web/url_templates.py
# def urlquote(value):
# if isinstance(value, six.integer_types):
# value = six.text_type(value)
# return quote(value.encode('utf-8'))
. Output only the next line. | query = ('?' + '&'.join('{}={}'.format(urlquote(k), urlquote(v)) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
def check_terms(conv, value):
if not value:
raise convs.ValidationError('Please, accept the terms os service')
return value
class FileForm(Form):
fields = [
Field('accept',
label='I accept the terms of service',
conv=convs.Bool(check_terms),
<|code_end|>
, predict the next line using imports from the current file:
from iktomi.forms import Form, convs, widgets
from iktomi.forms.fields import Field
from iktomi.unstable.forms.files import FileFieldSet, FileFieldSetConv
and context including class names, function names, and sometimes code from other files:
# Path: iktomi/forms/convs.py
# class ValidationError(Exception):
# class Converter(object):
# class validator(object):
# class length(object):
# class CharBased(Converter):
# class Char(CharBased):
# class Int(Converter):
# class Bool(Converter):
# class EnumChoice(Converter):
# class BaseDatetime(CharBased):
# class Datetime(BaseDatetime):
# class Date(BaseDatetime):
# class Time(BaseDatetime):
# class DisplayOnly(Converter):
# class SplitDateTime(Converter):
# class Html(Char):
# class Nothing: pass
# class List(Converter):
# class ListOf(Converter):
# class FieldBlockConv(Converter):
# class SimpleFile(Converter):
# class Email(Char):
# class ModelDictConv(Converter):
# class OptionLabel(six.text_type):
# class ModelChoice(EnumChoice):
# def __init__(self, message=None, by_field=None, format_args=None):
# def translate(self, env, message):
# def fill_errors(self, field):
# def __repr__(self):
# def __init__(self, *args, **kwargs):
# def env(self):
# def _is_empty(self, value):
# def accept(self, value, silent=False):
# def to_python(self, value):
# def from_python(self, value):
# def __call__(self, *args, **kwargs):
# def assert_(self, expression, msg):
# def _existing_value(self):
# def __repr__(self):
# def __init__(self, message):
# def __call__(self, func):
# def wrapper(conv, value):
# def __init__(self, min_length, max_length):
# def __call__(self, conv, value):
# def limit(min_length, max_length):
# def between(min_value, max_value):
# def wrapper(conv, value):
# def num_limit(min_value, max_value):
# def clean_value(self, value):
# def max_length(self):
# def to_python(self, value):
# def from_python(self, value):
# def to_python(self, value):
# def from_python(self, value):
# def to_python(self, value):
# def from_python(self, value):
# def from_python(self, value):
# def to_python(self, value):
# def options(self):
# def __init__(self, *args, **kwargs):
# def from_python(self, value):
# def to_python(self, value):
# def convert_datetime(self, value):
# def convert_datetime(self, value):
# def from_python(self, value):
# def convert_datetime(self, value):
# def from_python(self, value):
# def to_python(self, value):
# def from_python(self, value):
# def to_python(self, value):
# def _load_arg(cls, opt):
# def __init__(self, *args, **kwargs):
# def clean_value(self, value):
# def cleaner(self):
# def from_python(self, value):
# def to_python(self, value):
# def __init__(self, *args, **kwargs):
# def to_python(self, value):
# def from_python(self, value):
# def _existing_value(self):
# def _is_empty(self, file):
# def to_python(self, file):
# def from_python(self, value):
# def from_python(self, value):
# def to_python(self, value):
# def _existing_value(self):
# def __init__(self, *args, **kwargs):
# def query(self):
# def from_python(self, value):
# def to_python(self, value):
# def get_object_label(self, obj):
# def options(self):
# PROPERTIES = ['allowed_elements', 'allowed_attributes', 'allowed_protocols',
# 'allowed_classes', 'dom_callbacks', 'drop_empty_tags',
# 'wrap_inline_tags', 'split_paragraphs_by_br']
# LIST_PROPERTIES = ['allowed_elements', 'allowed_attributes',
# 'allowed_protocols', 'dom_callbacks',
# 'drop_empty_tags', 'tags_to_wrap']
#
# Path: iktomi/forms/widgets.py
# class Widget(object):
# class TextInput(Widget):
# class Textarea(Widget):
# class HiddenInput(Widget):
# class PasswordInput(Widget):
# class Select(Widget):
# class CheckBoxSelect(Select):
# class CheckBox(Widget):
# class CharDisplay(Widget):
# class AggregateWidget(Widget):
# class FieldListWidget(AggregateWidget):
# class FieldSetWidget(AggregateWidget):
# class FieldBlockWidget(FieldSetWidget):
# class FileInput(Widget):
# def __init__(self, field=None, **kwargs):
# def multiple(self):
# def input_name(self):
# def id(self):
# def env(self):
# def prepare_data(self):
# def get_raw_value(self):
# def render(self):
# def __call__(self, **kwargs):
# def get_options(self, value):
# def prepare_data(self):
# def prepare_data(self):
# def get_raw_value(self):
# def render_template_field(self):
. Output only the next line. | widget=widgets.CheckBox()), |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
class HomeView(AdminIndexView):
@expose("/")
def index(self):
return self.render('admin/home.html')
class PostView(ModelView):
column_default_sort = ('date', True)
column_searchable_list = ('name', 'title', 'content')
column_filters = ('type', 'status')
column_list = ('title', 'status', 'modified', 'date', 'type')
can_export = True
form_choices = {
'status': [
<|code_end|>
, generate the next line using the imports in this file:
from flask import current_app
from flask_admin.contrib.fileadmin import FileAdmin
from flask_admin import AdminIndexView, expose, Admin
from flask_admin.contrib.sqla import ModelView
from wtforms.validators import Required
from atomicpress import models
from atomicpress.app import db
and context (functions, classes, or occasionally code) from other files:
# Path: atomicpress/models.py
# class Blog(db.Model):
# class Author(db.Model):
# class PostType():
# class PostStatus():
# class Post(db.Model):
# class Tag(db.Model):
# class Category(db.Model):
# def __unicode__(self):
# def __unicode__(self):
# def __unicode__(self):
# def __unicode__(self):
# def __unicode__(self):
# POST = "post"
# PAGE = "page"
# ATTACHMENT = "attachment"
# REVISION = "revision"
# NAVIGATION_MENU = "nav_menu_item"
# PUBLISH = "publish"
# DRAFT = "draft"
# FUTURE = "future"
# PENDING = "pending"
# PRIVATE = "private"
# TRASH = "trash"
#
# Path: atomicpress/app.py
# DEFAULT_EXTENSIONS = (
# "atomicpress.ext.importer",
# "atomicpress.ext.exporter",
# "atomicpress.ext.ftp",
# "atomicpress.ext.s3",
# "atomicpress.ext.prefill",
# )
# DEFAULT_MARKDOWN_EXTENSIONS = [
# "markdown.extensions.tables",
# "markdown.extensions.nl2br",
# "markdown.extensions.fenced_code",
# "markdown.extensions.headerid"
# ]
# def setup(init_run=False):
# def activate_extensions():
# def activate_theme(theme):
# def run():
. Output only the next line. | (models.PostStatus.PUBLISH, 'Publish'), |
Next line prediction: <|code_start|> form_widget_args = {
'title': {
'class': 'input-xxlarge'
},
'excerpt': {
'class': 'input-xxlarge'
},
'content': {
'rows': 10,
'class': 'input-xxlarge'
}
}
form_args = {
'guid': {
'validators': [Required()]
},
'title': {
'validators': [Required()]
},
'name': {
'validators': [Required()]
}
}
def create_admin():
app = current_app._get_current_object()
admin = Admin(app, "AtomicPress", index_view=HomeView(name='Home'))
<|code_end|>
. Use current file imports:
(from flask import current_app
from flask_admin.contrib.fileadmin import FileAdmin
from flask_admin import AdminIndexView, expose, Admin
from flask_admin.contrib.sqla import ModelView
from wtforms.validators import Required
from atomicpress import models
from atomicpress.app import db)
and context including class names, function names, or small code snippets from other files:
# Path: atomicpress/models.py
# class Blog(db.Model):
# class Author(db.Model):
# class PostType():
# class PostStatus():
# class Post(db.Model):
# class Tag(db.Model):
# class Category(db.Model):
# def __unicode__(self):
# def __unicode__(self):
# def __unicode__(self):
# def __unicode__(self):
# def __unicode__(self):
# POST = "post"
# PAGE = "page"
# ATTACHMENT = "attachment"
# REVISION = "revision"
# NAVIGATION_MENU = "nav_menu_item"
# PUBLISH = "publish"
# DRAFT = "draft"
# FUTURE = "future"
# PENDING = "pending"
# PRIVATE = "private"
# TRASH = "trash"
#
# Path: atomicpress/app.py
# DEFAULT_EXTENSIONS = (
# "atomicpress.ext.importer",
# "atomicpress.ext.exporter",
# "atomicpress.ext.ftp",
# "atomicpress.ext.s3",
# "atomicpress.ext.prefill",
# )
# DEFAULT_MARKDOWN_EXTENSIONS = [
# "markdown.extensions.tables",
# "markdown.extensions.nl2br",
# "markdown.extensions.fenced_code",
# "markdown.extensions.headerid"
# ]
# def setup(init_run=False):
# def activate_extensions():
# def activate_theme(theme):
# def run():
. Output only the next line. | admin.add_view(ModelView(models.Blog, db.session, category="Blog")) |
Predict the next line after this snippet: <|code_start|>
logger = app.logger
@manager.command
def create_db():
db.create_all()
logger.info("Database was created")
@manager.command
def drop_db(remove=False, force=False):
if not force:
if not prompt_bool("Are you sure?"):
return
db.drop_all()
logger.info("Database was dropped")
if remove:
call(["rm", app.config["DB_PATH"]])
logger.info("Database file was removed")
@manager.command
def runserver(admin=False, toolbar=False, debug=False):
if toolbar:
bar = DebugToolbarExtension(app)
if admin:
<|code_end|>
using the current file's imports:
from subprocess import call
from flask_debugtoolbar import DebugToolbarExtension
from flask_script import prompt_bool
from atomicpress.admin import create_admin
from atomicpress.app import manager, db, app
and any relevant context from other files:
# Path: atomicpress/admin.py
# def create_admin():
# app = current_app._get_current_object()
# admin = Admin(app, "AtomicPress", index_view=HomeView(name='Home'))
#
# admin.add_view(ModelView(models.Blog, db.session, category="Blog"))
# admin.add_view(ModelView(models.Author, db.session, category="Blog"))
#
# admin.add_view(PostView(models.Post, db.session, category="Post"))
# admin.add_view(ModelView(models.Tag, db.session, category="Post"))
# admin.add_view(ModelView(models.Category, db.session, category="Post"))
#
# admin.add_view(FileAdmin(app.config["UPLOADS_PATH"],
# app.config["UPLOADS_URL"],
# name='Upload files'))
#
# Path: atomicpress/app.py
# DEFAULT_EXTENSIONS = (
# "atomicpress.ext.importer",
# "atomicpress.ext.exporter",
# "atomicpress.ext.ftp",
# "atomicpress.ext.s3",
# "atomicpress.ext.prefill",
# )
# DEFAULT_MARKDOWN_EXTENSIONS = [
# "markdown.extensions.tables",
# "markdown.extensions.nl2br",
# "markdown.extensions.fenced_code",
# "markdown.extensions.headerid"
# ]
# def setup(init_run=False):
# def activate_extensions():
# def activate_theme(theme):
# def run():
. Output only the next line. | create_admin() |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
logger = app.logger
@manager.command
def create_db():
<|code_end|>
, generate the next line using the imports in this file:
from subprocess import call
from flask_debugtoolbar import DebugToolbarExtension
from flask_script import prompt_bool
from atomicpress.admin import create_admin
from atomicpress.app import manager, db, app
and context (functions, classes, or occasionally code) from other files:
# Path: atomicpress/admin.py
# def create_admin():
# app = current_app._get_current_object()
# admin = Admin(app, "AtomicPress", index_view=HomeView(name='Home'))
#
# admin.add_view(ModelView(models.Blog, db.session, category="Blog"))
# admin.add_view(ModelView(models.Author, db.session, category="Blog"))
#
# admin.add_view(PostView(models.Post, db.session, category="Post"))
# admin.add_view(ModelView(models.Tag, db.session, category="Post"))
# admin.add_view(ModelView(models.Category, db.session, category="Post"))
#
# admin.add_view(FileAdmin(app.config["UPLOADS_PATH"],
# app.config["UPLOADS_URL"],
# name='Upload files'))
#
# Path: atomicpress/app.py
# DEFAULT_EXTENSIONS = (
# "atomicpress.ext.importer",
# "atomicpress.ext.exporter",
# "atomicpress.ext.ftp",
# "atomicpress.ext.s3",
# "atomicpress.ext.prefill",
# )
# DEFAULT_MARKDOWN_EXTENSIONS = [
# "markdown.extensions.tables",
# "markdown.extensions.nl2br",
# "markdown.extensions.fenced_code",
# "markdown.extensions.headerid"
# ]
# def setup(init_run=False):
# def activate_extensions():
# def activate_theme(theme):
# def run():
. Output only the next line. | db.create_all() |
Using the snippet: <|code_start|>
@app.route("/uploads/<filename>")
def uploaded_file(filename):
return send_from_directory(app.config["UPLOADS_PATH"], filename)
@app.route("/feed/atom/")
def feed_latest_posts():
feed_url = request.url
url_root = request.url_root.strip("/")
if "SITE_URL" in app.config:
url_root = app.config["SITE_URL"]
feed_url = "%s%s" % (url_root, request.path)
feed = AtomFeed("Recent posts", feed_url=feed_url, url=url_root)
<|code_end|>
, determine the next line of code. You have imports:
import markdown
from atomicpress.app import app
from atomicpress.models import Post, PostStatus, PostType
from flask import send_from_directory
from sqlalchemy import desc
from werkzeug.contrib.atom import AtomFeed
from flask import request
and context (class names, function names, or code) available:
# Path: atomicpress/app.py
# DEFAULT_EXTENSIONS = (
# "atomicpress.ext.importer",
# "atomicpress.ext.exporter",
# "atomicpress.ext.ftp",
# "atomicpress.ext.s3",
# "atomicpress.ext.prefill",
# )
# DEFAULT_MARKDOWN_EXTENSIONS = [
# "markdown.extensions.tables",
# "markdown.extensions.nl2br",
# "markdown.extensions.fenced_code",
# "markdown.extensions.headerid"
# ]
# def setup(init_run=False):
# def activate_extensions():
# def activate_theme(theme):
# def run():
#
# Path: atomicpress/models.py
# class Post(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# parent_id = db.Column(db.Integer, db.ForeignKey('post.id'))
# parent = relationship("Post", remote_side=[id])
#
# author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
# author = relationship("Author")
# status = db.Column(db.String(20), default=PostStatus.DRAFT)
# type = db.Column(db.String(20), default=PostType.POST)
# guid = db.Column(db.String(255))
# name = db.Column(db.String(200))
# title = db.Column(db.Text())
# excerpt = db.Column(db.Text())
# content = db.Column(db.Text())
# modified = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# template = db.Column(db.String(255))
#
# order = db.Column(db.Integer, default=0)
# markdown = db.Column(db.Boolean())
# mime_type = db.Column(db.String(100))
# tags = relationship("Tag",
# secondary=tag_association_table,
# backref="post")
#
# categories = relationship("Category",
# secondary=category_association_table,
# backref="post")
#
# def __unicode__(self):
# return self.title
#
# class PostStatus():
# PUBLISH = "publish"
# DRAFT = "draft"
# FUTURE = "future"
# PENDING = "pending"
# PRIVATE = "private"
# TRASH = "trash"
#
# class PostType():
# POST = "post"
# PAGE = "page"
# ATTACHMENT = "attachment"
# REVISION = "revision"
# NAVIGATION_MENU = "nav_menu_item"
. Output only the next line. | posts = Post.query.order_by(desc(Post.date)).\ |
Using the snippet: <|code_start|>
@app.route("/uploads/<filename>")
def uploaded_file(filename):
return send_from_directory(app.config["UPLOADS_PATH"], filename)
@app.route("/feed/atom/")
def feed_latest_posts():
feed_url = request.url
url_root = request.url_root.strip("/")
if "SITE_URL" in app.config:
url_root = app.config["SITE_URL"]
feed_url = "%s%s" % (url_root, request.path)
feed = AtomFeed("Recent posts", feed_url=feed_url, url=url_root)
posts = Post.query.order_by(desc(Post.date)).\
<|code_end|>
, determine the next line of code. You have imports:
import markdown
from atomicpress.app import app
from atomicpress.models import Post, PostStatus, PostType
from flask import send_from_directory
from sqlalchemy import desc
from werkzeug.contrib.atom import AtomFeed
from flask import request
and context (class names, function names, or code) available:
# Path: atomicpress/app.py
# DEFAULT_EXTENSIONS = (
# "atomicpress.ext.importer",
# "atomicpress.ext.exporter",
# "atomicpress.ext.ftp",
# "atomicpress.ext.s3",
# "atomicpress.ext.prefill",
# )
# DEFAULT_MARKDOWN_EXTENSIONS = [
# "markdown.extensions.tables",
# "markdown.extensions.nl2br",
# "markdown.extensions.fenced_code",
# "markdown.extensions.headerid"
# ]
# def setup(init_run=False):
# def activate_extensions():
# def activate_theme(theme):
# def run():
#
# Path: atomicpress/models.py
# class Post(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# parent_id = db.Column(db.Integer, db.ForeignKey('post.id'))
# parent = relationship("Post", remote_side=[id])
#
# author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
# author = relationship("Author")
# status = db.Column(db.String(20), default=PostStatus.DRAFT)
# type = db.Column(db.String(20), default=PostType.POST)
# guid = db.Column(db.String(255))
# name = db.Column(db.String(200))
# title = db.Column(db.Text())
# excerpt = db.Column(db.Text())
# content = db.Column(db.Text())
# modified = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# template = db.Column(db.String(255))
#
# order = db.Column(db.Integer, default=0)
# markdown = db.Column(db.Boolean())
# mime_type = db.Column(db.String(100))
# tags = relationship("Tag",
# secondary=tag_association_table,
# backref="post")
#
# categories = relationship("Category",
# secondary=category_association_table,
# backref="post")
#
# def __unicode__(self):
# return self.title
#
# class PostStatus():
# PUBLISH = "publish"
# DRAFT = "draft"
# FUTURE = "future"
# PENDING = "pending"
# PRIVATE = "private"
# TRASH = "trash"
#
# class PostType():
# POST = "post"
# PAGE = "page"
# ATTACHMENT = "attachment"
# REVISION = "revision"
# NAVIGATION_MENU = "nav_menu_item"
. Output only the next line. | filter(Post.status == PostStatus.PUBLISH).\ |
Continue the code snippet: <|code_start|>
@app.route("/uploads/<filename>")
def uploaded_file(filename):
return send_from_directory(app.config["UPLOADS_PATH"], filename)
@app.route("/feed/atom/")
def feed_latest_posts():
feed_url = request.url
url_root = request.url_root.strip("/")
if "SITE_URL" in app.config:
url_root = app.config["SITE_URL"]
feed_url = "%s%s" % (url_root, request.path)
feed = AtomFeed("Recent posts", feed_url=feed_url, url=url_root)
posts = Post.query.order_by(desc(Post.date)).\
filter(Post.status == PostStatus.PUBLISH).\
<|code_end|>
. Use current file imports:
import markdown
from atomicpress.app import app
from atomicpress.models import Post, PostStatus, PostType
from flask import send_from_directory
from sqlalchemy import desc
from werkzeug.contrib.atom import AtomFeed
from flask import request
and context (classes, functions, or code) from other files:
# Path: atomicpress/app.py
# DEFAULT_EXTENSIONS = (
# "atomicpress.ext.importer",
# "atomicpress.ext.exporter",
# "atomicpress.ext.ftp",
# "atomicpress.ext.s3",
# "atomicpress.ext.prefill",
# )
# DEFAULT_MARKDOWN_EXTENSIONS = [
# "markdown.extensions.tables",
# "markdown.extensions.nl2br",
# "markdown.extensions.fenced_code",
# "markdown.extensions.headerid"
# ]
# def setup(init_run=False):
# def activate_extensions():
# def activate_theme(theme):
# def run():
#
# Path: atomicpress/models.py
# class Post(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# parent_id = db.Column(db.Integer, db.ForeignKey('post.id'))
# parent = relationship("Post", remote_side=[id])
#
# author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
# author = relationship("Author")
# status = db.Column(db.String(20), default=PostStatus.DRAFT)
# type = db.Column(db.String(20), default=PostType.POST)
# guid = db.Column(db.String(255))
# name = db.Column(db.String(200))
# title = db.Column(db.Text())
# excerpt = db.Column(db.Text())
# content = db.Column(db.Text())
# modified = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# template = db.Column(db.String(255))
#
# order = db.Column(db.Integer, default=0)
# markdown = db.Column(db.Boolean())
# mime_type = db.Column(db.String(100))
# tags = relationship("Tag",
# secondary=tag_association_table,
# backref="post")
#
# categories = relationship("Category",
# secondary=category_association_table,
# backref="post")
#
# def __unicode__(self):
# return self.title
#
# class PostStatus():
# PUBLISH = "publish"
# DRAFT = "draft"
# FUTURE = "future"
# PENDING = "pending"
# PRIVATE = "private"
# TRASH = "trash"
#
# class PostType():
# POST = "post"
# PAGE = "page"
# ATTACHMENT = "attachment"
# REVISION = "revision"
# NAVIGATION_MENU = "nav_menu_item"
. Output only the next line. | filter(Post.type == PostType.POST) |
Here is a snippet: <|code_start|>
def gen_post_status():
"""
Show only published posts outside debug.
"""
if not app.config["DEBUG"]:
<|code_end|>
. Write the next line using the current file imports:
from sqlalchemy import or_, and_
from atomicpress.app import app
from atomicpress.models import Post, PostStatus
and context from other files:
# Path: atomicpress/app.py
# DEFAULT_EXTENSIONS = (
# "atomicpress.ext.importer",
# "atomicpress.ext.exporter",
# "atomicpress.ext.ftp",
# "atomicpress.ext.s3",
# "atomicpress.ext.prefill",
# )
# DEFAULT_MARKDOWN_EXTENSIONS = [
# "markdown.extensions.tables",
# "markdown.extensions.nl2br",
# "markdown.extensions.fenced_code",
# "markdown.extensions.headerid"
# ]
# def setup(init_run=False):
# def activate_extensions():
# def activate_theme(theme):
# def run():
#
# Path: atomicpress/models.py
# class Post(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# parent_id = db.Column(db.Integer, db.ForeignKey('post.id'))
# parent = relationship("Post", remote_side=[id])
#
# author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
# author = relationship("Author")
# status = db.Column(db.String(20), default=PostStatus.DRAFT)
# type = db.Column(db.String(20), default=PostType.POST)
# guid = db.Column(db.String(255))
# name = db.Column(db.String(200))
# title = db.Column(db.Text())
# excerpt = db.Column(db.Text())
# content = db.Column(db.Text())
# modified = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# template = db.Column(db.String(255))
#
# order = db.Column(db.Integer, default=0)
# markdown = db.Column(db.Boolean())
# mime_type = db.Column(db.String(100))
# tags = relationship("Tag",
# secondary=tag_association_table,
# backref="post")
#
# categories = relationship("Category",
# secondary=category_association_table,
# backref="post")
#
# def __unicode__(self):
# return self.title
#
# class PostStatus():
# PUBLISH = "publish"
# DRAFT = "draft"
# FUTURE = "future"
# PENDING = "pending"
# PRIVATE = "private"
# TRASH = "trash"
, which may include functions, classes, or code. Output only the next line. | post_status = and_(Post.status == PostStatus.PUBLISH) |
Continue the code snippet: <|code_start|>
def gen_post_status():
"""
Show only published posts outside debug.
"""
if not app.config["DEBUG"]:
<|code_end|>
. Use current file imports:
from sqlalchemy import or_, and_
from atomicpress.app import app
from atomicpress.models import Post, PostStatus
and context (classes, functions, or code) from other files:
# Path: atomicpress/app.py
# DEFAULT_EXTENSIONS = (
# "atomicpress.ext.importer",
# "atomicpress.ext.exporter",
# "atomicpress.ext.ftp",
# "atomicpress.ext.s3",
# "atomicpress.ext.prefill",
# )
# DEFAULT_MARKDOWN_EXTENSIONS = [
# "markdown.extensions.tables",
# "markdown.extensions.nl2br",
# "markdown.extensions.fenced_code",
# "markdown.extensions.headerid"
# ]
# def setup(init_run=False):
# def activate_extensions():
# def activate_theme(theme):
# def run():
#
# Path: atomicpress/models.py
# class Post(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# parent_id = db.Column(db.Integer, db.ForeignKey('post.id'))
# parent = relationship("Post", remote_side=[id])
#
# author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
# author = relationship("Author")
# status = db.Column(db.String(20), default=PostStatus.DRAFT)
# type = db.Column(db.String(20), default=PostType.POST)
# guid = db.Column(db.String(255))
# name = db.Column(db.String(200))
# title = db.Column(db.Text())
# excerpt = db.Column(db.Text())
# content = db.Column(db.Text())
# modified = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# template = db.Column(db.String(255))
#
# order = db.Column(db.Integer, default=0)
# markdown = db.Column(db.Boolean())
# mime_type = db.Column(db.String(100))
# tags = relationship("Tag",
# secondary=tag_association_table,
# backref="post")
#
# categories = relationship("Category",
# secondary=category_association_table,
# backref="post")
#
# def __unicode__(self):
# return self.title
#
# class PostStatus():
# PUBLISH = "publish"
# DRAFT = "draft"
# FUTURE = "future"
# PENDING = "pending"
# PRIVATE = "private"
# TRASH = "trash"
. Output only the next line. | post_status = and_(Post.status == PostStatus.PUBLISH) |
Next line prediction: <|code_start|>
__default = {
"host": "",
"username": None,
"password": None,
}
def set_default(host=None, username=None, password=None,
**kwargs):
__default["host"] = host
__default["username"] = username
__default["password"] = password
def sync_folder(local_path, destination_path, **kwargs):
client = _create_client()
client.sync_dir(local_path, destination_path, **kwargs)
def _create_client():
<|code_end|>
. Use current file imports:
(from .ftpsync import FtpSync)
and context including class names, function names, or small code snippets from other files:
# Path: atomicpress/utils/ftpsync/ftpsync.py
# class FtpSync():
# conn = None
# host = None
# username = None
# password = None
#
# def __init__(self, host=None, username=None, password=None):
# self.host = host
# self.username = username
# self.password = password
#
# def _login(self):
# self.conn = ftplib.FTP(self.host)
# self.conn.login(self.username, self.password)
#
# def _close(self):
# self.conn.close()
#
# def remove_dir(self, destination_path):
# """
# Remove folder. Based on https://gist.github.com/artlogic/2632647.
# """
#
# wd = self.conn.pwd()
#
# try:
# names = self.conn.nlst(destination_path)
# except ftplib.all_errors as e:
# # some FTP servers complain when you try and list non-existent paths
# logger.debug('FtpRmTree: Could not remove {0}: {1}'.format(
# destination_path, e))
# return
#
# for name in names:
# if os.path.split(name)[1] in ('.', '..'):
# continue
#
# try:
# self.conn.cwd(name) # if we can cwd to it, it's a folder
# self.conn.cwd(wd) # don't try a nuke a folder we're in
# self.remove_dir(name)
# except ftplib.all_errors:
# self.conn.delete(name)
#
# try:
# self.conn.rmd(destination_path)
# except ftplib.all_errors as e:
# logger.debug('remove_dir: Could not remove {0}: {1}'.format(
# destination_path, e))
#
# def sync_dir(self, local_path, destination_path, override=True):
# self._login()
#
# local_dir = Path(local_path)
# target_dir = Path(destination_path)
#
# if override:
# if self._file_exist(target_dir):
# self.remove_dir(target_dir)
#
# self._upload_dir(local_dir, target_dir)
# self._close()
#
# def _file_exist(self, destination_path):
# destination_name = destination_path.name
# parent_dir = destination_path.ancestor(1)
#
# return destination_path in self.conn.nlst(parent_dir)
#
# def _upload_dir(self, local_path, destination_path):
# logger.info("- Uploading dir %s" % (local_path.name,))
#
# local_files = os.listdir(local_path)
#
# if not self._file_exist(destination_path):
# self.conn.mkd(destination_path)
#
# self.conn.cwd(destination_path)
#
# # Upload files
# for file_name in local_files:
# file_path = local_path.child(file_name)
#
# if not os.path.isdir(file_path):
# self._upload_file(file_path, destination_path.child(file_name))
#
# # Upload dirs
# for dir_name in local_files:
# dir_path = local_path.child(dir_name)
#
# if os.path.isdir(dir_path):
# self._upload_dir(dir_path, destination_path.child(dir_name))
#
# def _upload_file(self, local_path, destination_path):
# logger.info("- Uploading file %s" % (local_path.name,))
#
# try:
# if local_path.ext in (".txt", ".htm", ".html"):
# self.conn.storlines("STOR " + local_path.name,
# open(local_path))
# else:
# self.conn.storbinary("STOR " + local_path.name,
# open(local_path, "rb"), 1024)
#
# except Exception as e:
# logger.warn("%s - %s" % (e, str(e)))
. Output only the next line. | client = FtpSync(**__default) |
Given snippet: <|code_start|> from_ = 0
if size is not None:
top_buckets = top_buckets[0:size]
else:
size = total
first_page = self.new_first_page()
buckets = [ReportBucket(
search=first_page,
id=bucket['key'],
project=self['project'],
type=self['type'],
threshold=self['threshold'],
total=bucket['doc_count'],
first_seen=bucket['first_seen']['value_as_string'],
last_seen=bucket['last_seen']['value_as_string'],
)
for bucket in top_buckets]
assert self['threshold'] is not None
r = BucketPage(
buckets=buckets,
total=total,
search=self,
from_=from_,
size=size
)
return r
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
from copy import copy
from partycrasher.threshold import Threshold
from partycrasher.bucket import Bucket
from partycrasher.api.search import Page, Search
from partycrasher.api.report_bucket import ReportBucket
from partycrasher.pc_encoder import pretty
and context:
# Path: partycrasher/threshold.py
# class Threshold(object):
# """
# A wrapper for a bucket threshold value. Ensures proper serialization
# between ElasticSearch and the JSON API eloggingndpoints.
# """
# __slots__ = ('_value',)
#
# def __init__(self, value):
# if isinstance(value, Threshold):
# assert isinstance(value._value, Decimal)
# # Clone the other Threshold.
# self._value = value._value
# return
# elif isinstance(value, Decimal):
# self._value = value
# return
# elif isinstance(value, string_types):
# value = value.replace('_', '.')
# elif isinstance(value, float):
# pass
# else:
# raise TypeError("Expected type %s but got %s" % (text_type, repr(value)))
#
# self._value = Decimal(value).quantize(Decimal('0.1'))
#
# def __str__(self):
# result = str(self._value)
# assert '_' not in result
# # Ensure that rounded values are always displayed with at least one
# # decimal, for aesthetics.
# if '.' not in result:
# return result + '.0'
# return result
#
# def __repr__(self):
# return "Threshold('" + str(self) + "')"
#
# def to_float(self):
# return float(self)
#
# def __float__(self):
# """
# Convert the threshold to a floating point number, for comparisons.
# Note that this should NOT be converted back to a threshold, as there
# may be a loss of data by doing the round trip.
# """
# return float(self._value)
#
# def __getattr__(self, attr):
# # Delegate everything (i.e, comparisons) to the actual Threshold
# # value.
# return getattr(self._value, attr)
#
# def __hash__(self):
# return self._value.__hash__()+1
#
# def __eq__(self, otter):
# if not isinstance(otter, Threshold):
# return False
# return self._value == otter._value
#
# def to_elasticsearch(self):
# """
# Converts the threshold to a string, suitable for serialization as an
# ElasticSearch field name. Note that full stops ('.') are verbotten in
# ElasticSearch field names.
# """ #elif isinstance(o, Bucket):
# #o.check()
# #return o.as_dict()
# #elif isinstance(o, TopMatch):
# #return o.as_dict()
#
# str_value = str(self)
# assert isinstance(self._value, Decimal)
# assert str_value.count('.') == 1, 'Invalid decimal number'
# return str_value.replace('.', '_')
#
# def __lt__(self, other):
# return float(self._value) < float(other._value)
#
# def __deepcopy__(self, memo):
# return Threshold(copy(self._value))
#
# def jsonify(self):
# return text_type(self._value)
#
# Path: partycrasher/bucket.py
# class Bucket(PCDict):
# """
# namedtuple('Bucket', 'id threshold total top_reports first_seen')
# Data class for buckets. Contains two identifiers:
# - id: The bucket's ID;
# - total: how many reports are currently in the bucket.
# """
# __slots__ = tuple()
#
# canonical_fields = {
# 'id': key_type,
# 'threshold': mustbe_threshold,
# 'first_seen': mustbe_date,
# 'last_seen': mustbe_date,
# 'total': mustbe_int,
# }
#
# #def __init__(self, *args, **kwargs):
# #super(Bucket, self).__init__(*args, **kwargs)
# #assert 'id' in self
# #assert 'threshold' in self
#
# @classmethod
# def new(cls, threshold, **kwargs):
# kwargs['threshold'] = threshold
# assert 'id' not in kwargs
# kwargs['id'] = random_bucketid()
# return Bucket(**kwargs)
#
# def jsonify(self):
# return self.as_dict()
#
# Path: partycrasher/api/report_bucket.py
# class ReportBucket(Bucket):
# __slots__ = ('reports',)
#
# def __init__(self, search, from_=None, size=None, **kwargs):
# super(ReportBucket, self).__init__(kwargs)
# self.reports = ReportBucketSearch(search=search,
# bucket_id=self['id'],
# threshold=self['threshold'],
# from_=from_,
# size=size
# )
#
# def restify(self):
# d = copy(self._d)
# d['reports'] = self.reports
# return d
which might include code, classes, or functions. Output only the next line. | class ReportThreshold(Threshold): |
Using the snippet: <|code_start|> #if from_ is not None:
#assert from_ >= 0
#actual_size = actual_size + from_
#if size is not None:
#assert size >= 0
#(query["aggs"]["top_buckets_filtered"]["aggs"]
#["top_buckets"]["terms"]["size"]) = actual_size
#debug(pretty(query))
response = self.context.search(body=query)
#debug(pretty(response))
# Oh, ElasticSearch! You and your verbose responses!
top_buckets = (response['aggregations']
['top_buckets_filtered']
['top_buckets']
['buckets'])
total = len(top_buckets)
if from_ is not None:
top_buckets = top_buckets[from_:]
else:
from_ = 0
if size is not None:
top_buckets = top_buckets[0:size]
else:
size = total
first_page = self.new_first_page()
<|code_end|>
, determine the next line of code. You have imports:
import logging
from copy import copy
from partycrasher.threshold import Threshold
from partycrasher.bucket import Bucket
from partycrasher.api.search import Page, Search
from partycrasher.api.report_bucket import ReportBucket
from partycrasher.pc_encoder import pretty
and context (class names, function names, or code) available:
# Path: partycrasher/threshold.py
# class Threshold(object):
# """
# A wrapper for a bucket threshold value. Ensures proper serialization
# between ElasticSearch and the JSON API eloggingndpoints.
# """
# __slots__ = ('_value',)
#
# def __init__(self, value):
# if isinstance(value, Threshold):
# assert isinstance(value._value, Decimal)
# # Clone the other Threshold.
# self._value = value._value
# return
# elif isinstance(value, Decimal):
# self._value = value
# return
# elif isinstance(value, string_types):
# value = value.replace('_', '.')
# elif isinstance(value, float):
# pass
# else:
# raise TypeError("Expected type %s but got %s" % (text_type, repr(value)))
#
# self._value = Decimal(value).quantize(Decimal('0.1'))
#
# def __str__(self):
# result = str(self._value)
# assert '_' not in result
# # Ensure that rounded values are always displayed with at least one
# # decimal, for aesthetics.
# if '.' not in result:
# return result + '.0'
# return result
#
# def __repr__(self):
# return "Threshold('" + str(self) + "')"
#
# def to_float(self):
# return float(self)
#
# def __float__(self):
# """
# Convert the threshold to a floating point number, for comparisons.
# Note that this should NOT be converted back to a threshold, as there
# may be a loss of data by doing the round trip.
# """
# return float(self._value)
#
# def __getattr__(self, attr):
# # Delegate everything (i.e, comparisons) to the actual Threshold
# # value.
# return getattr(self._value, attr)
#
# def __hash__(self):
# return self._value.__hash__()+1
#
# def __eq__(self, otter):
# if not isinstance(otter, Threshold):
# return False
# return self._value == otter._value
#
# def to_elasticsearch(self):
# """
# Converts the threshold to a string, suitable for serialization as an
# ElasticSearch field name. Note that full stops ('.') are verbotten in
# ElasticSearch field names.
# """ #elif isinstance(o, Bucket):
# #o.check()
# #return o.as_dict()
# #elif isinstance(o, TopMatch):
# #return o.as_dict()
#
# str_value = str(self)
# assert isinstance(self._value, Decimal)
# assert str_value.count('.') == 1, 'Invalid decimal number'
# return str_value.replace('.', '_')
#
# def __lt__(self, other):
# return float(self._value) < float(other._value)
#
# def __deepcopy__(self, memo):
# return Threshold(copy(self._value))
#
# def jsonify(self):
# return text_type(self._value)
#
# Path: partycrasher/bucket.py
# class Bucket(PCDict):
# """
# namedtuple('Bucket', 'id threshold total top_reports first_seen')
# Data class for buckets. Contains two identifiers:
# - id: The bucket's ID;
# - total: how many reports are currently in the bucket.
# """
# __slots__ = tuple()
#
# canonical_fields = {
# 'id': key_type,
# 'threshold': mustbe_threshold,
# 'first_seen': mustbe_date,
# 'last_seen': mustbe_date,
# 'total': mustbe_int,
# }
#
# #def __init__(self, *args, **kwargs):
# #super(Bucket, self).__init__(*args, **kwargs)
# #assert 'id' in self
# #assert 'threshold' in self
#
# @classmethod
# def new(cls, threshold, **kwargs):
# kwargs['threshold'] = threshold
# assert 'id' not in kwargs
# kwargs['id'] = random_bucketid()
# return Bucket(**kwargs)
#
# def jsonify(self):
# return self.as_dict()
#
# Path: partycrasher/api/report_bucket.py
# class ReportBucket(Bucket):
# __slots__ = ('reports',)
#
# def __init__(self, search, from_=None, size=None, **kwargs):
# super(ReportBucket, self).__init__(kwargs)
# self.reports = ReportBucketSearch(search=search,
# bucket_id=self['id'],
# threshold=self['threshold'],
# from_=from_,
# size=size
# )
#
# def restify(self):
# d = copy(self._d)
# d['reports'] = self.reports
# return d
. Output only the next line. | buckets = [ReportBucket( |
Here is a snippet: <|code_start|>class RestClient:
def __init__(self, root_url="http://localhost:5000/"):
self.origin = root_url.rstrip('/')
@property
def root_url(self):
"""
The root URL of the REST service.
"""
return self.origin + '/'
def path_to(self, *args):
"""
Create a URL path, relative to the current origin.
"""
return '/'.join((self.origin,) + args)
def get_a_bunch_of_crashes(self, date_range_start, limit):
bunch = []
step = 100
for from_ in range(0, limit, step):
query = {
'from': from_,
'since': date_range_start,
'size': step,
}
response = requests.get(self.path_to('*', 'search'), params=query)
response.raise_for_status()
for crash in response.json():
<|code_end|>
. Write the next line using the current file imports:
import requests
from partycrasher.crash import Crash
and context from other files:
# Path: partycrasher/crash.py
# class Crash(PCDict):
#
# __slots__ = tuple()
#
# synonyms = {
# 'crash_id': 'database_id', # Mozilla
# 'os_ver' : 'os_version', # Mozilla
# 'cpu_arch' : 'cpu', # Mozilla
# 'frames' : 'stacktrace', # Mozilla
# }
#
# canonical_fields = {
# 'date': mustbe_date,
# 'stacktrace': PCType(
# checker=Stacktrace,
# converter=Stacktrace,
# ),
# 'database_id': mustbe_string,
# 'project': mustbe_project,
# 'type': mustbe_crash_type,
# 'buckets': mustbe_buckets,
# }
#
# def get_bucket_id(self, threshold):
# key = Threshold(threshold).to_elasticsearch()
# try:
# buckets = self['buckets']
# except KeyError:
# raise Exception('No assigned buckets for: {!r}'.format(self))
# try:
# return buckets[key]
# except KeyError:
# raise Exception('Buckets threshold {} not assigned for: '
# '{!r}'.format(key, self))
#
# @classmethod
# def load_from_file(cls, path):
# from partycrasher import launchpad_crash
# crash_classes = [ launchpad_crash.LaunchpadCrash ]
#
# crash = None
# for crash_class in crash_classes:
# try:
# crash = crash_class.load_from_file(path)
# except NotImplementedError:
# raise
# else:
# break
# if crash is None:
# raise NotImplementedError("I don't know how to load this!")
# return crash
#
# @staticmethod
# def make_id(project, database_id):
# raise NotImplementedError("make_id removed")
#
# @property
# def id(self):
# return self['database_id']
# #elif isinstance(o, Buckets):
# #return o.json_serializable()
#
# def jsonify(self):
# return self.as_dict()
#
# @classmethod
# def fromjson(cls, s):
# d = json.loads(s)
# assert isinstance(d, dict)
# c = cls(d)
# return c
, which may include functions, classes, or code. Output only the next line. | crash = Crash(crash) |
Predict the next line after this snippet: <|code_start|>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class Strategy(object):
def __init__(self, config, index):
self.config = config
self.index = index
def query(self, crash, explain):
return self.searcher.query(crash, explain)
def matching_buckets(self, *args, **kwargs):
raise NotImplementedError()
class MLT(Strategy):
def __init__(self, *args, **kwargs):
super(MLT, self).__init__(*args, **kwargs)
<|code_end|>
using the current file's imports:
from partycrasher.more_like_this import MoreLikeThis
from partycrasher.bucket import Buckets
and any relevant context from other files:
# Path: partycrasher/more_like_this.py
# class MoreLikeThis(MoreLikeThisSearcher):
# """ Class to setup MLT search config. """
# def __init__(self, index, config):
# always_remove_fields = [r'^database_id',
# r'^buckets',
# r'force_bucket',
# r'stacktrace\.depth',
# r'^date',
# r'logdf']
# filterer = CrashFilter(config.remove_fields+always_remove_fields,
# config.keep_fields)
# rescore_filterer = CrashFilter(config.rescore_remove_fields+always_remove_fields,
# config.rescore_keep_fields)
# super(MoreLikeThis,self).__init__(
# index=index,
# max_query_terms=config.max_query_terms,
# terminate_after=config.terminate_after,
# min_score=config.min_score,
# filterer=filterer,
# rescore_filterer=rescore_filterer,
# rescore_window_size=config.rescore_window_size,
# rescore_weight=config.rescore_weight,
# search_weight=config.search_weight
# )
#
# Path: partycrasher/bucket.py
# class Buckets(object):
# __slots__ = ('_od',)
#
# """Proxy for OrderedDict"""
# def __init__(self, _initial_d=None, **kwargs):
# self._od = dict()
# for k, v in kwargs.items():
# self._od[k] = v
# if _initial_d is not None:
# for k, v in _initial_d.items():
# self._od[k] = v
# #self._od = OrderedDict(sorted(self._od))
#
# def __getattr__(self, a):
# return getattr(self._od, a)
#
# def __setitem__(self, k, v):
# if k == 'top_match':
# if not (isinstance(v, TopMatch) or v is None):
# v = TopMatch(v)
# else:
# if not isinstance(k, Threshold):
# k = Threshold(k)
# if not (isinstance(v, Bucket) or v is None):
# v = Bucket(v)
# if v is not None:
# assert v['threshold'] == k
# return self._od.__setitem__(k, v)
#
# def __getitem__(self, k):
# return self._od.__getitem__(k)
#
# def __delitem__(self, k):
# return self._od.__delitem__(k)
#
# def __eq__(self, other):
# if isinstance(other, Buckets):
# return self._od.__eq__(other._od)
# else:
# return self._od.__eq__(other)
#
# def __copy__(self):
# new = Buckets()
# new._od = self._od.__copy__()
# return new
#
# def keys(self):
# return self._od.keys()
#
# def iterkeys(self):
# return self._od.iterkeys()
#
# def __iter__(self):
# return self._od.__iter__()
#
# def __deepcopy__(self, memo):
# for k, v in self._od.items():
# assert not isinstance(k, Decimal)
# assert not isinstance(v, Decimal)
# new = self.__class__()
# new._od = deepcopy(self._od, memo)
# for k, v in new._od.items():
# assert not isinstance(k, Decimal)
# assert not isinstance(v, Decimal)
# return new
#
# def jsonify(self):
# d = OrderedDict()
# for k, v in self._od.items():
# k = text_type(k)
# d[k] = v
# return d
#
# def create(self):
# for k, v in self._od.items():
# if isinstance(k, Threshold):
# if v is None:
# self[k] = Bucket.new(k)
. Output only the next line. | self.searcher = MoreLikeThis(config=self.config, index=self.index) |
Predict the next line after this snippet: <|code_start|># This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class Strategy(object):
def __init__(self, config, index):
self.config = config
self.index = index
def query(self, crash, explain):
return self.searcher.query(crash, explain)
def matching_buckets(self, *args, **kwargs):
raise NotImplementedError()
class MLT(Strategy):
def __init__(self, *args, **kwargs):
super(MLT, self).__init__(*args, **kwargs)
self.searcher = MoreLikeThis(config=self.config, index=self.index)
self.max_top_match_score = 0
self.total_top_match_scores = 0
self.total_matches = 0
def matching_buckets(self, thresholds, search_result):
<|code_end|>
using the current file's imports:
from partycrasher.more_like_this import MoreLikeThis
from partycrasher.bucket import Buckets
and any relevant context from other files:
# Path: partycrasher/more_like_this.py
# class MoreLikeThis(MoreLikeThisSearcher):
# """ Class to setup MLT search config. """
# def __init__(self, index, config):
# always_remove_fields = [r'^database_id',
# r'^buckets',
# r'force_bucket',
# r'stacktrace\.depth',
# r'^date',
# r'logdf']
# filterer = CrashFilter(config.remove_fields+always_remove_fields,
# config.keep_fields)
# rescore_filterer = CrashFilter(config.rescore_remove_fields+always_remove_fields,
# config.rescore_keep_fields)
# super(MoreLikeThis,self).__init__(
# index=index,
# max_query_terms=config.max_query_terms,
# terminate_after=config.terminate_after,
# min_score=config.min_score,
# filterer=filterer,
# rescore_filterer=rescore_filterer,
# rescore_window_size=config.rescore_window_size,
# rescore_weight=config.rescore_weight,
# search_weight=config.search_weight
# )
#
# Path: partycrasher/bucket.py
# class Buckets(object):
# __slots__ = ('_od',)
#
# """Proxy for OrderedDict"""
# def __init__(self, _initial_d=None, **kwargs):
# self._od = dict()
# for k, v in kwargs.items():
# self._od[k] = v
# if _initial_d is not None:
# for k, v in _initial_d.items():
# self._od[k] = v
# #self._od = OrderedDict(sorted(self._od))
#
# def __getattr__(self, a):
# return getattr(self._od, a)
#
# def __setitem__(self, k, v):
# if k == 'top_match':
# if not (isinstance(v, TopMatch) or v is None):
# v = TopMatch(v)
# else:
# if not isinstance(k, Threshold):
# k = Threshold(k)
# if not (isinstance(v, Bucket) or v is None):
# v = Bucket(v)
# if v is not None:
# assert v['threshold'] == k
# return self._od.__setitem__(k, v)
#
# def __getitem__(self, k):
# return self._od.__getitem__(k)
#
# def __delitem__(self, k):
# return self._od.__delitem__(k)
#
# def __eq__(self, other):
# if isinstance(other, Buckets):
# return self._od.__eq__(other._od)
# else:
# return self._od.__eq__(other)
#
# def __copy__(self):
# new = Buckets()
# new._od = self._od.__copy__()
# return new
#
# def keys(self):
# return self._od.keys()
#
# def iterkeys(self):
# return self._od.iterkeys()
#
# def __iter__(self):
# return self._od.__iter__()
#
# def __deepcopy__(self, memo):
# for k, v in self._od.items():
# assert not isinstance(k, Decimal)
# assert not isinstance(v, Decimal)
# new = self.__class__()
# new._od = deepcopy(self._od, memo)
# for k, v in new._od.items():
# assert not isinstance(k, Decimal)
# assert not isinstance(v, Decimal)
# return new
#
# def jsonify(self):
# d = OrderedDict()
# for k, v in self._od.items():
# k = text_type(k)
# d[k] = v
# return d
#
# def create(self):
# for k, v in self._od.items():
# if isinstance(k, Threshold):
# if v is None:
# self[k] = Bucket.new(k)
. Output only the next line. | matching_buckets = Buckets() |
Predict the next line after this snippet: <|code_start|># This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
if PY3:
elif PY2:
class Thresholds(Mapping):
"""
Represents the thresholds available under a certain search.
"""
def __init__(self, search):
self.search = search
# Lazy-load projects
self._d = self.get_thresholds()
def get_thresholds(self):
thresholds = {
<|code_end|>
using the current file's imports:
from copy import deepcopy
from six import text_type, string_types
from six import PY2, PY3
from collections.abc import Mapping
from collections import Mapping
from partycrasher.threshold import Threshold
from partycrasher.api.report_threshold import ReportThreshold
and any relevant context from other files:
# Path: partycrasher/threshold.py
# class Threshold(object):
# """
# A wrapper for a bucket threshold value. Ensures proper serialization
# between ElasticSearch and the JSON API eloggingndpoints.
# """
# __slots__ = ('_value',)
#
# def __init__(self, value):
# if isinstance(value, Threshold):
# assert isinstance(value._value, Decimal)
# # Clone the other Threshold.
# self._value = value._value
# return
# elif isinstance(value, Decimal):
# self._value = value
# return
# elif isinstance(value, string_types):
# value = value.replace('_', '.')
# elif isinstance(value, float):
# pass
# else:
# raise TypeError("Expected type %s but got %s" % (text_type, repr(value)))
#
# self._value = Decimal(value).quantize(Decimal('0.1'))
#
# def __str__(self):
# result = str(self._value)
# assert '_' not in result
# # Ensure that rounded values are always displayed with at least one
# # decimal, for aesthetics.
# if '.' not in result:
# return result + '.0'
# return result
#
# def __repr__(self):
# return "Threshold('" + str(self) + "')"
#
# def to_float(self):
# return float(self)
#
# def __float__(self):
# """
# Convert the threshold to a floating point number, for comparisons.
# Note that this should NOT be converted back to a threshold, as there
# may be a loss of data by doing the round trip.
# """
# return float(self._value)
#
# def __getattr__(self, attr):
# # Delegate everything (i.e, comparisons) to the actual Threshold
# # value.
# return getattr(self._value, attr)
#
# def __hash__(self):
# return self._value.__hash__()+1
#
# def __eq__(self, otter):
# if not isinstance(otter, Threshold):
# return False
# return self._value == otter._value
#
# def to_elasticsearch(self):
# """
# Converts the threshold to a string, suitable for serialization as an
# ElasticSearch field name. Note that full stops ('.') are verbotten in
# ElasticSearch field names.
# """ #elif isinstance(o, Bucket):
# #o.check()
# #return o.as_dict()
# #elif isinstance(o, TopMatch):
# #return o.as_dict()
#
# str_value = str(self)
# assert isinstance(self._value, Decimal)
# assert str_value.count('.') == 1, 'Invalid decimal number'
# return str_value.replace('.', '_')
#
# def __lt__(self, other):
# return float(self._value) < float(other._value)
#
# def __deepcopy__(self, memo):
# return Threshold(copy(self._value))
#
# def jsonify(self):
# return text_type(self._value)
#
# Path: partycrasher/api/report_threshold.py
# class ReportThreshold(Threshold):
# def __init__(self, search, result, from_=None, size=None):
# super(ReportThreshold, self).__init__(result)
# search = BucketSearch(search=search, threshold=Threshold(self))
# self.buckets = BucketSearch(
# search=search,
# from_=from_,
# size=size
# )
#
# def restify(self):
# return self.buckets
. Output only the next line. | t: ReportThreshold(self.search, t) for t in self.search.thresholds |
Based on the snippet: <|code_start|>
class StringifiedList(list):
def __init__(self, value=[], **kwargs):
if isinstance(value, list):
if len(value) == 0:
return
else:
self.extend(value)
else:
raise AttributeError
def extend(self, arg):
return super(StringifiedList, self).extend(map(stringify_value, arg))
def append(self, *args):
return self.extend(args)
def __setitem__(self, index, value):
return super(StringifiedList, self).__setitem__(index, stringify_value(value))
def __setslice__(self, i, j, seq):
return super(StringifiedList, self).__setitem__(i, j, map(stringify_value, seq))
def __eq__(self, other):
return (super(StringifiedList, self).__eq__(other)
and self.__class__ == other.__class__)
class Stacktrace(StringifiedList):
<|code_end|>
, predict the immediate next line with the help of imports:
from partycrasher.crash import Stackframe
and context (classes, functions, sometimes code) from other files:
# Path: partycrasher/crash.py
# class Stackframe(PCDict):
# """
# Represents a Stackframe in a crash object. Proxy object for a dictionary.
# """
# __slots__ = tuple()
#
# synonyms = {}
#
# canonical_fields = {
# 'depth': mustbe_int,
# 'address': maybe_string,
# 'function': maybe_string,
# 'args': maybe_string,
# 'file': maybe_string,
# 'dylib': maybe_string,
# }
#
# def jsonify(self):
# assert self["function"] != "None"
# return self.as_dict()
. Output only the next line. | stackframe_class = Stackframe |
Predict the next line for this snippet: <|code_start|># Copyright (C) 2015, 2016 Joshua Charles Campbell
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class Project(object):
"""
Metadata about a project.
"""
__slots__ = ('name',)
def __init__(self, project):
if isinstance(project, Project):
self.name = project.name
else:
try:
self.name = key_type(project)
except BadKeyNameError as e:
<|code_end|>
with the help of current file imports:
from six import string_types
from partycrasher.pc_exceptions import BadProjectNameError, BadKeyNameError
from partycrasher.pc_type import (
PCType,
PCMaybeType,
PCMultiType,
key_type
)
import re
and context from other files:
# Path: partycrasher/pc_exceptions.py
# class BadProjectNameError(PartyCrasherError):
# """Invalid project name in crash data. Project names must be alphanumeric (including underscores)."""
# http_code = 400
#
# def __init__(self, project_name, **kwargs):
# message = "Bad project name: %s" % project_name
# super(BadProjectNameError, self).__init__(message)
# self.project_name = project_name
#
# class BadKeyNameError(PartyCrasherError):
# """Invalid key-value pair in crash data. Keys must be alphanumeric (including underscores)."""
# http_code = 400
#
# def __init__(self, key_name, **kwargs):
# message = "Bad key name: %s" % key_name
# super(BadKeyNameError, self).__init__(message)
# self.key_name = key_name
, which may contain function names, class names, or code. Output only the next line. | raise BadProjectNameError(repr(project)) |
Predict the next line for this snippet: <|code_start|>
# Copyright (C) 2015, 2016 Joshua Charles Campbell
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class Project(object):
"""
Metadata about a project.
"""
__slots__ = ('name',)
def __init__(self, project):
if isinstance(project, Project):
self.name = project.name
else:
try:
self.name = key_type(project)
<|code_end|>
with the help of current file imports:
from six import string_types
from partycrasher.pc_exceptions import BadProjectNameError, BadKeyNameError
from partycrasher.pc_type import (
PCType,
PCMaybeType,
PCMultiType,
key_type
)
import re
and context from other files:
# Path: partycrasher/pc_exceptions.py
# class BadProjectNameError(PartyCrasherError):
# """Invalid project name in crash data. Project names must be alphanumeric (including underscores)."""
# http_code = 400
#
# def __init__(self, project_name, **kwargs):
# message = "Bad project name: %s" % project_name
# super(BadProjectNameError, self).__init__(message)
# self.project_name = project_name
#
# class BadKeyNameError(PartyCrasherError):
# """Invalid key-value pair in crash data. Keys must be alphanumeric (including underscores)."""
# http_code = 400
#
# def __init__(self, key_name, **kwargs):
# message = "Bad key name: %s" % key_name
# super(BadKeyNameError, self).__init__(message)
# self.key_name = key_name
, which may contain function names, class names, or code. Output only the next line. | except BadKeyNameError as e: |
Continue the code snippet: <|code_start|> "Stacktrace",
"Stacktrace.gz",
"Stacktrace.txt.1",
]
trace = None
first_crash = None
for fname in try_files:
stack_path = os.path.join(path, fname)
if os.path.isfile(stack_path):
try:
trace = LaunchpadStack.load_from_file(stack_path)
except Exception as e:
if first_crash is None:
first_crash = e
continue
if trace is None:
ERROR(stack_path + " did not contain a stack trace!")
if first_crash is not None:
raise first_crash
else:
break
if trace is None:
raise IOError("No stacktrace file found in %s" % (path))
crash['stacktrace'] = trace
crash['project'] = 'Ubuntu'
#print json.dumps(crash, indent=4, default=repr)
else:
raise NotImplementedError("Not a directory, I don't know how to load this.")
return crash
<|code_end|>
. Use current file imports:
from partycrasher.crash import Crash, Stacktrace, Stackframe
from logging import critical, error, warning, info, debug
from partycrasher.stringified import fix_key_for_es
import os, re, io, gzip, json, sys
import dateparser
import unicodedata
import datetime
import logging
import unittest
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
and context (classes, functions, or code) from other files:
# Path: partycrasher/crash.py
# class Crash(PCDict):
#
# __slots__ = tuple()
#
# synonyms = {
# 'crash_id': 'database_id', # Mozilla
# 'os_ver' : 'os_version', # Mozilla
# 'cpu_arch' : 'cpu', # Mozilla
# 'frames' : 'stacktrace', # Mozilla
# }
#
# canonical_fields = {
# 'date': mustbe_date,
# 'stacktrace': PCType(
# checker=Stacktrace,
# converter=Stacktrace,
# ),
# 'database_id': mustbe_string,
# 'project': mustbe_project,
# 'type': mustbe_crash_type,
# 'buckets': mustbe_buckets,
# }
#
# def get_bucket_id(self, threshold):
# key = Threshold(threshold).to_elasticsearch()
# try:
# buckets = self['buckets']
# except KeyError:
# raise Exception('No assigned buckets for: {!r}'.format(self))
# try:
# return buckets[key]
# except KeyError:
# raise Exception('Buckets threshold {} not assigned for: '
# '{!r}'.format(key, self))
#
# @classmethod
# def load_from_file(cls, path):
# from partycrasher import launchpad_crash
# crash_classes = [ launchpad_crash.LaunchpadCrash ]
#
# crash = None
# for crash_class in crash_classes:
# try:
# crash = crash_class.load_from_file(path)
# except NotImplementedError:
# raise
# else:
# break
# if crash is None:
# raise NotImplementedError("I don't know how to load this!")
# return crash
#
# @staticmethod
# def make_id(project, database_id):
# raise NotImplementedError("make_id removed")
#
# @property
# def id(self):
# return self['database_id']
# #elif isinstance(o, Buckets):
# #return o.json_serializable()
#
# def jsonify(self):
# return self.as_dict()
#
# @classmethod
# def fromjson(cls, s):
# d = json.loads(s)
# assert isinstance(d, dict)
# c = cls(d)
# return c
#
# class Stacktrace(PCList):
# __slots__ = tuple()
# member_type = Stackframe
# member_converter = Stackframe
#
# def jsonify(self):
# return self._l
#
# class Stackframe(PCDict):
# """
# Represents a Stackframe in a crash object. Proxy object for a dictionary.
# """
# __slots__ = tuple()
#
# synonyms = {}
#
# canonical_fields = {
# 'depth': mustbe_int,
# 'address': maybe_string,
# 'function': maybe_string,
# 'args': maybe_string,
# 'file': maybe_string,
# 'dylib': maybe_string,
# }
#
# def jsonify(self):
# assert self["function"] != "None"
# return self.as_dict()
#
# Path: partycrasher/stringified.py
# def fix_key_for_es(key):
# if isinstance(key, bytes):
# key = key.decode(encoding='utf-8', errors='replace')
# key = key.replace('.', '_')
# key = key.replace(':', '_')
# key = key.replace(' ', '_')
# return key
. Output only the next line. | class LaunchpadCrash(Crash): |
Predict the next line for this snippet: <|code_start|> except:
ERROR(line)
raise
if frame.get('function') == '??':
frame['function'] = None
leftover_extras = []
if 'file' in frame:
match = fl.match(frame['file'])
if match is not None:
frame['file'] = match.group(1)
frame['fileline'] = match.group(2)
#print(frame['file'] + " : " + frame['fileline'], file=sys.stderr)
elif extras is not None:
for extra in extras:
extra_matched = False
if not extra_matched:
match = afl.match(extra)
if match is not None:
frame['file'] = match.group(1)
frame['fileline'] = match.group(2)
extra_matched = True
if not extra_matched:
leftover_extras.append(extra)
if len(leftover_extras) > 0:
frame['extra'] = leftover_extras
if matched:
return frame
else:
raise RuntimeError("Couldn't recognize stack frame format: %s" % (line.encode('unicode_escape')))
<|code_end|>
with the help of current file imports:
from partycrasher.crash import Crash, Stacktrace, Stackframe
from logging import critical, error, warning, info, debug
from partycrasher.stringified import fix_key_for_es
import os, re, io, gzip, json, sys
import dateparser
import unicodedata
import datetime
import logging
import unittest
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
and context from other files:
# Path: partycrasher/crash.py
# class Crash(PCDict):
#
# __slots__ = tuple()
#
# synonyms = {
# 'crash_id': 'database_id', # Mozilla
# 'os_ver' : 'os_version', # Mozilla
# 'cpu_arch' : 'cpu', # Mozilla
# 'frames' : 'stacktrace', # Mozilla
# }
#
# canonical_fields = {
# 'date': mustbe_date,
# 'stacktrace': PCType(
# checker=Stacktrace,
# converter=Stacktrace,
# ),
# 'database_id': mustbe_string,
# 'project': mustbe_project,
# 'type': mustbe_crash_type,
# 'buckets': mustbe_buckets,
# }
#
# def get_bucket_id(self, threshold):
# key = Threshold(threshold).to_elasticsearch()
# try:
# buckets = self['buckets']
# except KeyError:
# raise Exception('No assigned buckets for: {!r}'.format(self))
# try:
# return buckets[key]
# except KeyError:
# raise Exception('Buckets threshold {} not assigned for: '
# '{!r}'.format(key, self))
#
# @classmethod
# def load_from_file(cls, path):
# from partycrasher import launchpad_crash
# crash_classes = [ launchpad_crash.LaunchpadCrash ]
#
# crash = None
# for crash_class in crash_classes:
# try:
# crash = crash_class.load_from_file(path)
# except NotImplementedError:
# raise
# else:
# break
# if crash is None:
# raise NotImplementedError("I don't know how to load this!")
# return crash
#
# @staticmethod
# def make_id(project, database_id):
# raise NotImplementedError("make_id removed")
#
# @property
# def id(self):
# return self['database_id']
# #elif isinstance(o, Buckets):
# #return o.json_serializable()
#
# def jsonify(self):
# return self.as_dict()
#
# @classmethod
# def fromjson(cls, s):
# d = json.loads(s)
# assert isinstance(d, dict)
# c = cls(d)
# return c
#
# class Stacktrace(PCList):
# __slots__ = tuple()
# member_type = Stackframe
# member_converter = Stackframe
#
# def jsonify(self):
# return self._l
#
# class Stackframe(PCDict):
# """
# Represents a Stackframe in a crash object. Proxy object for a dictionary.
# """
# __slots__ = tuple()
#
# synonyms = {}
#
# canonical_fields = {
# 'depth': mustbe_int,
# 'address': maybe_string,
# 'function': maybe_string,
# 'args': maybe_string,
# 'file': maybe_string,
# 'dylib': maybe_string,
# }
#
# def jsonify(self):
# assert self["function"] != "None"
# return self.as_dict()
#
# Path: partycrasher/stringified.py
# def fix_key_for_es(key):
# if isinstance(key, bytes):
# key = key.decode(encoding='utf-8', errors='replace')
# key = key.replace('.', '_')
# key = key.replace(':', '_')
# key = key.replace(' ', '_')
# return key
, which may contain function names, class names, or code. Output only the next line. | class LaunchpadStack(Stacktrace): |
Using the snippet: <|code_start|> ch = u'?'
#raise ValueError("Bad encoding %s in: %s" % (ch.encode('unicode_escape'), line.encode('utf-8')))
elif ch == u'\ufffd':
ch = u'?'
line += ch
return line
#number address in function (args) at file from lib
naifafl = re.compile(r'^#([\dx]+)\s+(\S+)\s+in\s+(.+?)\s+\(([^\)]*)\)\s+at\s+(\S+)\sfrom\s+(\S+)\s*$')
#number address in function (args) from lib
naifal = re.compile(r'^#([\dx]+)\s+(\S+)\s+in\s+(.+?)\s+\(([^\)]*)\)\s+from\s+(.+?)\s*$')
#number address function (args) from lib (missing in)
nafal = re.compile(r'^#([\dx]+)\s+(\S+)\s+(.+?)\s+\(([^\)]*)\)\s+from\s+(.+?)\s*$')
#number address in function (args) at file
naifaf = re.compile(r'^#([\dx]+)\s+(\S+)\s+in\s+(.+?)\s+\((.*?)\)\s+at\s+(.+?)\s*$')
#number function (args) at file
nfaf = re.compile(r'^#([\dx]+)\s+(.+?)\s+\((.*?)\)\s+at\s+(\S+)\s*$')
#number address in function (args
naifa = re.compile(r'^#([\dx]+)\s+(\S+)\s+in\s+(.+?)\s*\((.*?)\)?\s*$')
#number address in function
naif = re.compile(r'^#([\dx]+)\s+(\S+)\s+in\s+(.+?)\s*$')
#number function (args
nfa = re.compile(r'^#([\dx]+)\s+(.+?)\s+\((.*?)\)?\s*$')
#number <function>
nf = re.compile(r'^#(\d+)\s+(<.*?>)\s*$')
#file: line
fl = re.compile(r'^([^:]+):(\d+)\s*$')
# at file: line
afl = re.compile(r'^\s*at\s+([^\s:]+):(\d+)\s*$')
<|code_end|>
, determine the next line of code. You have imports:
from partycrasher.crash import Crash, Stacktrace, Stackframe
from logging import critical, error, warning, info, debug
from partycrasher.stringified import fix_key_for_es
import os, re, io, gzip, json, sys
import dateparser
import unicodedata
import datetime
import logging
import unittest
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
and context (class names, function names, or code) available:
# Path: partycrasher/crash.py
# class Crash(PCDict):
#
# __slots__ = tuple()
#
# synonyms = {
# 'crash_id': 'database_id', # Mozilla
# 'os_ver' : 'os_version', # Mozilla
# 'cpu_arch' : 'cpu', # Mozilla
# 'frames' : 'stacktrace', # Mozilla
# }
#
# canonical_fields = {
# 'date': mustbe_date,
# 'stacktrace': PCType(
# checker=Stacktrace,
# converter=Stacktrace,
# ),
# 'database_id': mustbe_string,
# 'project': mustbe_project,
# 'type': mustbe_crash_type,
# 'buckets': mustbe_buckets,
# }
#
# def get_bucket_id(self, threshold):
# key = Threshold(threshold).to_elasticsearch()
# try:
# buckets = self['buckets']
# except KeyError:
# raise Exception('No assigned buckets for: {!r}'.format(self))
# try:
# return buckets[key]
# except KeyError:
# raise Exception('Buckets threshold {} not assigned for: '
# '{!r}'.format(key, self))
#
# @classmethod
# def load_from_file(cls, path):
# from partycrasher import launchpad_crash
# crash_classes = [ launchpad_crash.LaunchpadCrash ]
#
# crash = None
# for crash_class in crash_classes:
# try:
# crash = crash_class.load_from_file(path)
# except NotImplementedError:
# raise
# else:
# break
# if crash is None:
# raise NotImplementedError("I don't know how to load this!")
# return crash
#
# @staticmethod
# def make_id(project, database_id):
# raise NotImplementedError("make_id removed")
#
# @property
# def id(self):
# return self['database_id']
# #elif isinstance(o, Buckets):
# #return o.json_serializable()
#
# def jsonify(self):
# return self.as_dict()
#
# @classmethod
# def fromjson(cls, s):
# d = json.loads(s)
# assert isinstance(d, dict)
# c = cls(d)
# return c
#
# class Stacktrace(PCList):
# __slots__ = tuple()
# member_type = Stackframe
# member_converter = Stackframe
#
# def jsonify(self):
# return self._l
#
# class Stackframe(PCDict):
# """
# Represents a Stackframe in a crash object. Proxy object for a dictionary.
# """
# __slots__ = tuple()
#
# synonyms = {}
#
# canonical_fields = {
# 'depth': mustbe_int,
# 'address': maybe_string,
# 'function': maybe_string,
# 'args': maybe_string,
# 'file': maybe_string,
# 'dylib': maybe_string,
# }
#
# def jsonify(self):
# assert self["function"] != "None"
# return self.as_dict()
#
# Path: partycrasher/stringified.py
# def fix_key_for_es(key):
# if isinstance(key, bytes):
# key = key.decode(encoding='utf-8', errors='replace')
# key = key.replace('.', '_')
# key = key.replace(':', '_')
# key = key.replace(' ', '_')
# return key
. Output only the next line. | class LaunchpadFrame(Stackframe): |
Using the snippet: <|code_start|> "CompizPlugins",
"version.xserver-xorg-video-nouveau",
"version.xserver-xorg-video-ati",
"DistUpgraded",
"version.libdrm2",
"version.xserver-xorg-video-intel",
"LiveMediaBuild",
"Candidate",
"GraphicsCard",
"version.compiz",
"dmi.board.asset.tag",
"Subsystem",
"CompositorRunning",
"ProcCmdLine",
"CheckboxSystem",
"CheckboxSubmission",
"version.libgl1-mesa-dri-experimental",
"version.libgl1-mesa-dri",
"version.xserver-xorg-input-evdev",
"version.xserver-xorg",
"version.xserver-xorg-core",
"Lsusb",
"version.ia32-libs",
"CurrentDesktop",
"DkmsStatus",
"RelatedPackageVersions",
"Binary package hint",
"extra"
]
<|code_end|>
, determine the next line of code. You have imports:
from partycrasher.crash import Crash, Stacktrace, Stackframe
from logging import critical, error, warning, info, debug
from partycrasher.stringified import fix_key_for_es
import os, re, io, gzip, json, sys
import dateparser
import unicodedata
import datetime
import logging
import unittest
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
import tempfile
import os
import shutil
import datetime
and context (class names, function names, or code) available:
# Path: partycrasher/crash.py
# class Crash(PCDict):
#
# __slots__ = tuple()
#
# synonyms = {
# 'crash_id': 'database_id', # Mozilla
# 'os_ver' : 'os_version', # Mozilla
# 'cpu_arch' : 'cpu', # Mozilla
# 'frames' : 'stacktrace', # Mozilla
# }
#
# canonical_fields = {
# 'date': mustbe_date,
# 'stacktrace': PCType(
# checker=Stacktrace,
# converter=Stacktrace,
# ),
# 'database_id': mustbe_string,
# 'project': mustbe_project,
# 'type': mustbe_crash_type,
# 'buckets': mustbe_buckets,
# }
#
# def get_bucket_id(self, threshold):
# key = Threshold(threshold).to_elasticsearch()
# try:
# buckets = self['buckets']
# except KeyError:
# raise Exception('No assigned buckets for: {!r}'.format(self))
# try:
# return buckets[key]
# except KeyError:
# raise Exception('Buckets threshold {} not assigned for: '
# '{!r}'.format(key, self))
#
# @classmethod
# def load_from_file(cls, path):
# from partycrasher import launchpad_crash
# crash_classes = [ launchpad_crash.LaunchpadCrash ]
#
# crash = None
# for crash_class in crash_classes:
# try:
# crash = crash_class.load_from_file(path)
# except NotImplementedError:
# raise
# else:
# break
# if crash is None:
# raise NotImplementedError("I don't know how to load this!")
# return crash
#
# @staticmethod
# def make_id(project, database_id):
# raise NotImplementedError("make_id removed")
#
# @property
# def id(self):
# return self['database_id']
# #elif isinstance(o, Buckets):
# #return o.json_serializable()
#
# def jsonify(self):
# return self.as_dict()
#
# @classmethod
# def fromjson(cls, s):
# d = json.loads(s)
# assert isinstance(d, dict)
# c = cls(d)
# return c
#
# class Stacktrace(PCList):
# __slots__ = tuple()
# member_type = Stackframe
# member_converter = Stackframe
#
# def jsonify(self):
# return self._l
#
# class Stackframe(PCDict):
# """
# Represents a Stackframe in a crash object. Proxy object for a dictionary.
# """
# __slots__ = tuple()
#
# synonyms = {}
#
# canonical_fields = {
# 'depth': mustbe_int,
# 'address': maybe_string,
# 'function': maybe_string,
# 'args': maybe_string,
# 'file': maybe_string,
# 'dylib': maybe_string,
# }
#
# def jsonify(self):
# assert self["function"] != "None"
# return self.as_dict()
#
# Path: partycrasher/stringified.py
# def fix_key_for_es(key):
# if isinstance(key, bytes):
# key = key.decode(encoding='utf-8', errors='replace')
# key = key.replace('.', '_')
# key = key.replace(':', '_')
# key = key.replace(' ', '_')
# return key
. Output only the next line. | save_fields = [fix_key_for_es(k) for k in save_fields] |
Here is a snippet: <|code_start|> }
}
return properties
def termvectors(self, **kwargs):
assert 'index' not in kwargs
return self.esstore.es.termvectors(
index=self.index_base,
**kwargs
)
# SMURT Proxy to the ES API
def search(self, body, **kwargs):
assert 'index' not in kwargs
if isinstance(body, string_types):
pass
else:
body=elastify(body)
tries = 0
while True:
tries += 1
try:
return self.esstore.es.search(
index=self.index_base,
body=body,
**kwargs)
except ElasticsearchException as e:
if (tries <= 1):
self.esstore.yellow()
else:
<|code_end|>
. Write the next line using the current file imports:
from six import string_types
from elasticsearch import TransportError
from elasticsearch import ElasticsearchException
from partycrasher.threshold import Threshold
from partycrasher.more_like_this import MoreLikeThis
from partycrasher.es.elastify import elastify
from partycrasher.pc_exceptions import ESError
import logging
and context from other files:
# Path: partycrasher/threshold.py
# class Threshold(object):
# """
# A wrapper for a bucket threshold value. Ensures proper serialization
# between ElasticSearch and the JSON API endpoints.
# """
# __slots__ = ('_value',)
#
# def __init__(self, value):
# if isinstance(value, Threshold):
# assert isinstance(value._value, Decimal)
# # Clone the other Threshold.
# self._value = value._value
# return
# elif isinstance(value, Decimal):
# self._value = value
# return
# elif isinstance(value, string_types):
# value = value.replace('_', '.')
# elif isinstance(value, float):
# pass
# else:
# raise TypeError("Expected type %s but got %s" % (text_type, repr(value)))
#
# self._value = Decimal(value).quantize(Decimal('0.1'))
#
# def __str__(self):
# result = str(self._value)
# assert '_' not in result
# # Ensure that rounded values are always displayed with at least one
# # decimal, for aesthetics.
# if '.' not in result:
# return result + '.0'
# return result
#
# def __repr__(self):
# return "Threshold('" + str(self) + "')"
#
# def to_float(self):
# return float(self)
#
# def __float__(self):
# """
# Convert the threshold to a floating point number, for comparisons.
# Note that this should NOT be converted back to a threshold, as there
# may be a loss of data by doing the round trip.
# """
# return float(self._value)
#
# def __getattr__(self, attr):
# # Delegate everything (i.e, comparisons) to the actual Threshold
# # value.
# return getattr(self._value, attr)
#
# def __hash__(self):
# return self._value.__hash__()+1
#
# def __eq__(self, otter):
# if not isinstance(otter, Threshold):
# return False
# return self._value == otter._value
#
# def to_elasticsearch(self):
# """
# Converts the threshold to a string, suitable for serialization as an
# ElasticSearch field name. Note that full stops ('.') are verbotten in
# ElasticSearch field names.
# """ #elif isinstance(o, Bucket):
# #o.check()
# #return o.as_dict()
# #elif isinstance(o, TopMatch):
# #return o.as_dict()
#
# str_value = str(self)
# assert isinstance(self._value, Decimal)
# assert str_value.count('.') == 1, 'Invalid decimal number'
# return str_value.replace('.', '_')
#
# def __lt__(self, other):
# return float(self._value) < float(other._value)
#
# def __deepcopy__(self, memo):
# return Threshold(copy(self._value))
#
# def jsonify(self):
# return text_type(self._value)
#
# Path: partycrasher/more_like_this.py
# class MoreLikeThis(MoreLikeThisSearcher):
# """ Class to setup MLT search config. """
# def __init__(self, index, config):
# always_remove_fields = [r'^database_id',
# r'^buckets',
# r'force_bucket',
# r'stacktrace\.depth',
# r'^date',
# r'logdf']
# filterer = CrashFilter(config.remove_fields+always_remove_fields,
# config.keep_fields)
# rescore_filterer = CrashFilter(config.rescore_remove_fields+always_remove_fields,
# config.rescore_keep_fields)
# super(MoreLikeThis,self).__init__(
# index=index,
# max_query_terms=config.max_query_terms,
# terminate_after=config.terminate_after,
# min_score=config.min_score,
# filterer=filterer,
# rescore_filterer=rescore_filterer,
# rescore_window_size=config.rescore_window_size,
# rescore_weight=config.rescore_weight,
# search_weight=config.search_weight
# )
#
# Path: partycrasher/pc_exceptions.py
# class ESError(PartyCrasherError):
# """
# ElasticSearch returned an unexpected/unhandled error.
# """
# def __init__(self, ex, **kwargs):
# (t, v, tb) = sys.exc_info()
# message = ('ElasticSearch Exception: '
# '%s.' % str(ex))
# super(PartyCrasherError, self).__init__(message, **kwargs)
# self.original_traceback = tb
# self.original_type = repr(t)
# self.original_value = repr(v)
# if isinstance(ex, TransportError):
# self.es_status_code = ex.status_code
# self.es_error = ex.error
# self.es_info = ex.info
# self.es_description = str(ex)
, which may include functions, classes, or code. Output only the next line. | raise ESError(e) |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright (C) 2017 Joshua Charles Campbell
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class ReportType(CrashType):
"""
API object representing a particular type inside a search context.
"""
def __init__(self, search, report_type):
super(ReportType, self).__init__(report_type)
self.original_search = search
search=Search(search=search, type=report_type)
self.reports = search
<|code_end|>
, generate the next line using the imports in this file:
from six import text_type, string_types
from copy import copy
from partycrasher.crash_type import CrashType
from partycrasher.api.thresholds import Thresholds
from partycrasher.api.search import Search
from partycrasher.api.projects import Projects
from partycrasher.api.cache import cached_threshold
and context (functions, classes, or occasionally code) from other files:
# Path: partycrasher/api/thresholds.py
# class Thresholds(Mapping):
# """
# Represents the thresholds available under a certain search.
# """
# def __init__(self, search):
# self.search = search
# # Lazy-load projects
# self._d = self.get_thresholds()
#
# def get_thresholds(self):
# thresholds = {
# t: ReportThreshold(self.search, t) for t in self.search.thresholds
# }
# return thresholds
#
# def __getitem__(self, key):
# return self._d.__getitem__(key)
#
# def __iter__(self):
# return self._d.__iter__()
#
# def __len__(self):
# return self._d.__len__()
#
# def restify(self):
# d = {}
# for k, v in self._d.items():
# d[str(k)] = v
# return d
#
# Path: partycrasher/api/cache.py
# def cached_threshold(search):
# global cached_thresholds
# if search in cached_thresholds:
# #DEBUG("HIT")
# return cached_thresholds[search]
# else:
# #DEBUG("MISS " + repr(search._d))
# if search['threshold'] is None:
# buckets = Thresholds(search)
# else:
# buckets = ReportThreshold(search, search['threshold'])
# cached_thresholds[search] = buckets
# return buckets
. Output only the next line. | self.buckets = cached_threshold(search) |
Next line prediction: <|code_start|># This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
logger = logging.getLogger(__name__)
ERROR = logger.error
WARN = logger.warn
INFO = logger.info
DEBUG = logger.debug
#cached_thresholds = weakref.WeakValueDictionary()
cached_thresholds = {}
def cached_threshold(search):
global cached_thresholds
if search in cached_thresholds:
#DEBUG("HIT")
return cached_thresholds[search]
else:
#DEBUG("MISS " + repr(search._d))
if search['threshold'] is None:
buckets = Thresholds(search)
else:
<|code_end|>
. Use current file imports:
(import logging
import weakref
from partycrasher.api.report_threshold import ReportThreshold
from partycrasher.api.thresholds import Thresholds)
and context including class names, function names, or small code snippets from other files:
# Path: partycrasher/api/report_threshold.py
# class ReportThreshold(Threshold):
# def __init__(self, search, result, from_=None, size=None):
# super(ReportThreshold, self).__init__(result)
# search = BucketSearch(search=search, threshold=Threshold(self))
# self.buckets = BucketSearch(
# search=search,
# from_=from_,
# size=size
# )
#
# def restify(self):
# return self.buckets
#
# Path: partycrasher/api/thresholds.py
# class Thresholds(Mapping):
# """
# Represents the thresholds available under a certain search.
# """
# def __init__(self, search):
# self.search = search
# # Lazy-load projects
# self._d = self.get_thresholds()
#
# def get_thresholds(self):
# thresholds = {
# t: ReportThreshold(self.search, t) for t in self.search.thresholds
# }
# return thresholds
#
# def __getitem__(self, key):
# return self._d.__getitem__(key)
#
# def __iter__(self):
# return self._d.__iter__()
#
# def __len__(self):
# return self._d.__len__()
#
# def restify(self):
# d = {}
# for k, v in self._d.items():
# d[str(k)] = v
# return d
. Output only the next line. | buckets = ReportThreshold(search, search['threshold']) |
Next line prediction: <|code_start|># of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
logger = logging.getLogger(__name__)
ERROR = logger.error
WARN = logger.warn
INFO = logger.info
DEBUG = logger.debug
#cached_thresholds = weakref.WeakValueDictionary()
cached_thresholds = {}
def cached_threshold(search):
global cached_thresholds
if search in cached_thresholds:
#DEBUG("HIT")
return cached_thresholds[search]
else:
#DEBUG("MISS " + repr(search._d))
if search['threshold'] is None:
<|code_end|>
. Use current file imports:
(import logging
import weakref
from partycrasher.api.report_threshold import ReportThreshold
from partycrasher.api.thresholds import Thresholds)
and context including class names, function names, or small code snippets from other files:
# Path: partycrasher/api/report_threshold.py
# class ReportThreshold(Threshold):
# def __init__(self, search, result, from_=None, size=None):
# super(ReportThreshold, self).__init__(result)
# search = BucketSearch(search=search, threshold=Threshold(self))
# self.buckets = BucketSearch(
# search=search,
# from_=from_,
# size=size
# )
#
# def restify(self):
# return self.buckets
#
# Path: partycrasher/api/thresholds.py
# class Thresholds(Mapping):
# """
# Represents the thresholds available under a certain search.
# """
# def __init__(self, search):
# self.search = search
# # Lazy-load projects
# self._d = self.get_thresholds()
#
# def get_thresholds(self):
# thresholds = {
# t: ReportThreshold(self.search, t) for t in self.search.thresholds
# }
# return thresholds
#
# def __getitem__(self, key):
# return self._d.__getitem__(key)
#
# def __iter__(self):
# return self._d.__iter__()
#
# def __len__(self):
# return self._d.__len__()
#
# def restify(self):
# d = {}
# for k, v in self._d.items():
# d[str(k)] = v
# return d
. Output only the next line. | buckets = Thresholds(search) |
Here is a snippet: <|code_start|>
# Copyright (C) 2016 Eddie Antonio Santos <easantos@ualberta.ca>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utilities used in rest_service; these are kept here to unclutter the API file.
"""
logger = logging.getLogger(__name__)
ERROR = logger.error
WARN = logger.warn
INFO = logger.info
DEBUG = logger.debug
<|code_end|>
. Write the next line using the current file imports:
from six import string_types, text_type
from flask import json, jsonify, request, redirect, make_response, url_for
from partycrasher.pc_encoder import pretty
from partycrasher.pc_exceptions import PartyCrasherError
from partycrasher.pc_type import maybe_date, maybe_int
from partycrasher.project import multi_project
from partycrasher.crash_type import multi_crash_type
import weakref
import re
import distutils
import sys
import traceback
import logging
and context from other files:
# Path: partycrasher/pc_exceptions.py
# class PartyCrasherError(Exception):
# """An error occured within PartyCrasher."""
# http_code = 500
#
# def __init__(self, message=None, **kwargs):
# self.__dict__.update(kwargs)
# if message is None:
# super(PartyCrasherError, self).__init__()
# else:
# super(PartyCrasherError, self).__init__(message)
#
# def get_extra(self):
# extra = dict(self.__dict__)
# #del extra['args']
# extra['description'] = self.__class__.__doc__
# return extra
#
# Path: partycrasher/project.py
# class Project(object):
# def __init__(self, project):
# def __str__(self):
# def __copy__(self):
# def __hash__(self):
# def __eq__(self, other):
# def __repr__(self):
# def jsonify(self):
, which may include functions, classes, or code. Output only the next line. | class BadRequest(PartyCrasherError): |
Given the following code snippet before the placeholder: <|code_start|> value_a=d[k],
value_b=v)
else:
d[k] = v
return d
def merge(d, dest, src):
if dest in d:
if src in d:
raise KeyConflictError(key=dest,
value_a=d[dest],
value_b=d[src])
else:
pass
else:
if src in d:
d[dest]=d[src]
del d[src]
else:
pass
def make_search(args, **kwargs):
s = kwargs
merge(args, 'from', 'from_')
maybe_set(s, 'from', maybe_int(args.get('from', None)))
maybe_set(s, 'size', maybe_int(args.get('size', None)))
maybe_set(s, 'query_string', args.get('q', None))
maybe_set(s, 'since', maybe_date(args.get('since', None)))
maybe_set(s, 'until', maybe_date(args.get('until', None)))
merge(args, 'project', 'projects')
<|code_end|>
, predict the next line using imports from the current file:
from six import string_types, text_type
from flask import json, jsonify, request, redirect, make_response, url_for
from partycrasher.pc_encoder import pretty
from partycrasher.pc_exceptions import PartyCrasherError
from partycrasher.pc_type import maybe_date, maybe_int
from partycrasher.project import multi_project
from partycrasher.crash_type import multi_crash_type
import weakref
import re
import distutils
import sys
import traceback
import logging
and context including class names, function names, and sometimes code from other files:
# Path: partycrasher/pc_exceptions.py
# class PartyCrasherError(Exception):
# """An error occured within PartyCrasher."""
# http_code = 500
#
# def __init__(self, message=None, **kwargs):
# self.__dict__.update(kwargs)
# if message is None:
# super(PartyCrasherError, self).__init__()
# else:
# super(PartyCrasherError, self).__init__(message)
#
# def get_extra(self):
# extra = dict(self.__dict__)
# #del extra['args']
# extra['description'] = self.__class__.__doc__
# return extra
#
# Path: partycrasher/project.py
# class Project(object):
# def __init__(self, project):
# def __str__(self):
# def __copy__(self):
# def __hash__(self):
# def __eq__(self, other):
# def __repr__(self):
# def jsonify(self):
. Output only the next line. | maybe_set(s, 'project', multi_project(args.get('project', None))) |
Continue the code snippet: <|code_start|> client)
)
p = Pool(8)
results = reduce(operator.add, p.map(get_similarity, tocompute))
for crash, other, similarity in results:
if crash not in similaritys:
similaritys[crash] = {}
similaritys[crash][other] = similarity
#if crash == other:
#print("%s self: %f" % (crash, similarity))
# ES computes asymmetric similaritys so lets average them
for ci in similaritys:
for cj in similaritys[ci]:
ij = similaritys[ci][cj]
ji = similaritys[cj][ci]
if (ij != ji):
avg = (ij + ji)/2
similaritys[ci][cj] = avg
similaritys[cj][ci] = avg
return similaritys
def compute_metrics(date_range_start, rest_service_url):
client = RestClient(rest_service_url)
crashes = client.get_a_bunch_of_crashes(date_range_start, 500)
similaritys = get_similaritys(crashes, client)
#print(pretty(similaritys))
#print(pretty(crashes))
for i in sorted(crashes[0]['buckets']):
try:
<|code_end|>
. Use current file imports:
import json
import operator
from multiprocessing import Pool
from partycrasher.crash import Crash, pretty
from partycrasher.rest_client import RestClient
from partycrasher.threshold import Threshold
and context (classes, functions, or code) from other files:
# Path: partycrasher/crash.py
# class Stackframe(PCDict):
# class Stacktrace(PCList):
# class Crash(PCDict):
# class TestCrash(unittest.TestCase):
# def jsonify(self):
# def jsonify(self):
# def get_bucket_id(self, threshold):
# def load_from_file(cls, path):
# def make_id(project, database_id):
# def id(self):
# def jsonify(self):
# def fromjson(cls, s):
# def test_serdes(self):
# def test_desser(self):
#
# Path: partycrasher/threshold.py
# class Threshold(object):
# """
# A wrapper for a bucket threshold value. Ensures proper serialization
# between ElasticSearch and the JSON API eloggingndpoints.
# """
# __slots__ = ('_value',)
#
# def __init__(self, value):
# if isinstance(value, Threshold):
# assert isinstance(value._value, Decimal)
# # Clone the other Threshold.
# self._value = value._value
# return
# elif isinstance(value, Decimal):
# self._value = value
# return
# elif isinstance(value, string_types):
# value = value.replace('_', '.')
# elif isinstance(value, float):
# pass
# else:
# raise TypeError("Expected type %s but got %s" % (text_type, repr(value)))
#
# self._value = Decimal(value).quantize(Decimal('0.1'))
#
# def __str__(self):
# result = str(self._value)
# assert '_' not in result
# # Ensure that rounded values are always displayed with at least one
# # decimal, for aesthetics.
# if '.' not in result:
# return result + '.0'
# return result
#
# def __repr__(self):
# return "Threshold('" + str(self) + "')"
#
# def to_float(self):
# return float(self)
#
# def __float__(self):
# """
# Convert the threshold to a floating point number, for comparisons.
# Note that this should NOT be converted back to a threshold, as there
# may be a loss of data by doing the round trip.
# """
# return float(self._value)
#
# def __getattr__(self, attr):
# # Delegate everything (i.e, comparisons) to the actual Threshold
# # value.
# return getattr(self._value, attr)
#
# def __hash__(self):
# return self._value.__hash__()+1
#
# def __eq__(self, otter):
# if not isinstance(otter, Threshold):
# return False
# return self._value == otter._value
#
# def to_elasticsearch(self):
# """
# Converts the threshold to a string, suitable for serialization as an
# ElasticSearch field name. Note that full stops ('.') are verbotten in
# ElasticSearch field names.
# """ #elif isinstance(o, Bucket):
# #o.check()
# #return o.as_dict()
# #elif isinstance(o, TopMatch):
# #return o.as_dict()
#
# str_value = str(self)
# assert isinstance(self._value, Decimal)
# assert str_value.count('.') == 1, 'Invalid decimal number'
# return str_value.replace('.', '_')
#
# def __lt__(self, other):
# return float(self._value) < float(other._value)
#
# def __deepcopy__(self, memo):
# return Threshold(copy(self._value))
#
# def jsonify(self):
# return text_type(self._value)
. Output only the next line. | i = Threshold(i) |
Given snippet: <|code_start|>#!/usr/bin/env python
"""
6809 unittests
~~~~~~~~~~~~~~
Register changed Ops: TFR, EXG
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014-2015 by the MC6809 team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
log = logging.getLogger("MC6809")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
import sys
import unittest
from MC6809.tests.test_base import BaseCPUTestCase
and context:
# Path: MC6809/tests/test_base.py
# class BaseCPUTestCase(BaseTestCase):
# UNITTEST_CFG_DICT = {
# "verbosity": None,
# "display_cycle": False,
# "trace": None,
# "bus_socket_host": None,
# "bus_socket_port": None,
# "ram": None,
# "rom": None,
# "max_ops": None,
# "use_bus": False,
# }
#
# def setUp(self):
# cfg = TestCfg(self.UNITTEST_CFG_DICT)
# memory = Memory(cfg)
# self.cpu = CPU(memory, cfg)
#
# def cpu_test_run(self, start, end, mem):
# for cell in mem:
# self.assertLess(-1, cell, f"${cell:x} < 0")
# self.assertGreater(0x100, cell, f"${cell:x} > 0xff")
# log.debug("memory load at $%x: %s", start,
# ", ".join("$%x" % i for i in mem)
# )
# self.cpu.memory.load(start, mem)
# if end is None:
# end = start + len(mem)
# self.cpu.test_run(start, end)
# cpu_test_run.__test__ = False # Exclude from nose
#
# def cpu_test_run2(self, start, count, mem):
# for cell in mem:
# self.assertLess(-1, cell, f"${cell:x} < 0")
# self.assertGreater(0x100, cell, f"${cell:x} > 0xff")
# self.cpu.memory.load(start, mem)
# self.cpu.test_run2(start, count)
# cpu_test_run2.__test__ = False # Exclude from nose
#
# def assertMemory(self, start, mem):
# for index, should_byte in enumerate(mem):
# address = start + index
# is_byte = self.cpu.memory.read_byte(address)
#
# msg = f"${is_byte:02x} is not ${should_byte:02x} at address ${address:04x} (index: {index:d})"
# self.assertEqual(is_byte, should_byte, msg)
which might include code, classes, or functions. Output only the next line. | class Test6809_TFR(BaseCPUTestCase): |
Based on the snippet: <|code_start|>#!/usr/bin/env python
"""
6809 unittests
~~~~~~~~~~~~~~
:created: 2013-2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2013-2015 by the MC6809 team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
log = logging.getLogger("MC6809")
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
import sys
import unittest
from MC6809.tests.test_base import BaseCPUTestCase
and context (classes, functions, sometimes code) from other files:
# Path: MC6809/tests/test_base.py
# class BaseCPUTestCase(BaseTestCase):
# UNITTEST_CFG_DICT = {
# "verbosity": None,
# "display_cycle": False,
# "trace": None,
# "bus_socket_host": None,
# "bus_socket_port": None,
# "ram": None,
# "rom": None,
# "max_ops": None,
# "use_bus": False,
# }
#
# def setUp(self):
# cfg = TestCfg(self.UNITTEST_CFG_DICT)
# memory = Memory(cfg)
# self.cpu = CPU(memory, cfg)
#
# def cpu_test_run(self, start, end, mem):
# for cell in mem:
# self.assertLess(-1, cell, f"${cell:x} < 0")
# self.assertGreater(0x100, cell, f"${cell:x} > 0xff")
# log.debug("memory load at $%x: %s", start,
# ", ".join("$%x" % i for i in mem)
# )
# self.cpu.memory.load(start, mem)
# if end is None:
# end = start + len(mem)
# self.cpu.test_run(start, end)
# cpu_test_run.__test__ = False # Exclude from nose
#
# def cpu_test_run2(self, start, count, mem):
# for cell in mem:
# self.assertLess(-1, cell, f"${cell:x} < 0")
# self.assertGreater(0x100, cell, f"${cell:x} > 0xff")
# self.cpu.memory.load(start, mem)
# self.cpu.test_run2(start, count)
# cpu_test_run2.__test__ = False # Exclude from nose
#
# def assertMemory(self, start, mem):
# for index, should_byte in enumerate(mem):
# address = start + index
# is_byte = self.cpu.memory.read_byte(address)
#
# msg = f"${is_byte:02x} is not ${should_byte:02x} at address ${address:04x} (index: {index:d})"
# self.assertEqual(is_byte, should_byte, msg)
. Output only the next line. | class Test6809_Arithmetic(BaseCPUTestCase): |
Based on the snippet: <|code_start|>#!/usr/bin/env python
"""
6809 unittests
~~~~~~~~~~~~~~
Test store and load ops
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014-2015 by the MC6809 team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
log = logging.getLogger("MC6809")
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
import sys
import unittest
from MC6809.tests.test_base import BaseStackTestCase
and context (classes, functions, sometimes code) from other files:
# Path: MC6809/tests/test_base.py
# class BaseStackTestCase(BaseCPUTestCase):
# INITIAL_SYSTEM_STACK_ADDR = 0x1000
# INITIAL_USER_STACK_ADDR = 0x2000
#
# def setUp(self):
# super().setUp()
# self.cpu.system_stack_pointer.set(self.INITIAL_SYSTEM_STACK_ADDR)
# self.cpu.user_stack_pointer.set(self.INITIAL_USER_STACK_ADDR)
. Output only the next line. | class Test6809_Store(BaseStackTestCase): |
Predict the next line after this snippet: <|code_start|>#!/usr/bin/env python
"""
6809 unittests
~~~~~~~~~~~~~~
Test store and load ops
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014-2015 by the MC6809 team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
log = logging.getLogger("MC6809")
<|code_end|>
using the current file's imports:
import logging
import sys
import unittest
from MC6809.tests.test_base import BaseCPUTestCase
and any relevant context from other files:
# Path: MC6809/tests/test_base.py
# class BaseCPUTestCase(BaseTestCase):
# UNITTEST_CFG_DICT = {
# "verbosity": None,
# "display_cycle": False,
# "trace": None,
# "bus_socket_host": None,
# "bus_socket_port": None,
# "ram": None,
# "rom": None,
# "max_ops": None,
# "use_bus": False,
# }
#
# def setUp(self):
# cfg = TestCfg(self.UNITTEST_CFG_DICT)
# memory = Memory(cfg)
# self.cpu = CPU(memory, cfg)
#
# def cpu_test_run(self, start, end, mem):
# for cell in mem:
# self.assertLess(-1, cell, f"${cell:x} < 0")
# self.assertGreater(0x100, cell, f"${cell:x} > 0xff")
# log.debug("memory load at $%x: %s", start,
# ", ".join("$%x" % i for i in mem)
# )
# self.cpu.memory.load(start, mem)
# if end is None:
# end = start + len(mem)
# self.cpu.test_run(start, end)
# cpu_test_run.__test__ = False # Exclude from nose
#
# def cpu_test_run2(self, start, count, mem):
# for cell in mem:
# self.assertLess(-1, cell, f"${cell:x} < 0")
# self.assertGreater(0x100, cell, f"${cell:x} > 0xff")
# self.cpu.memory.load(start, mem)
# self.cpu.test_run2(start, count)
# cpu_test_run2.__test__ = False # Exclude from nose
#
# def assertMemory(self, start, mem):
# for index, should_byte in enumerate(mem):
# address = start + index
# is_byte = self.cpu.memory.read_byte(address)
#
# msg = f"${is_byte:02x} is not ${should_byte:02x} at address ${address:04x} (index: {index:d})"
# self.assertEqual(is_byte, should_byte, msg)
. Output only the next line. | class Test6809_AddressModes_LowLevel(BaseCPUTestCase): |
Based on the snippet: <|code_start|> print(f"Import error: {err}")
print()
print("Please install 'click' !")
print("more info: http://click.pocoo.org")
sys.exit(-1)
@click.group()
@click.version_option(MC6809.__version__)
def cli():
"""
MC6809 is a Open source (GPL v3 or later) emulator
for the legendary 6809 CPU, used in 30 years old homecomputer
Dragon 32 and Tandy TRS-80 Color Computer (CoCo)...
Created by Jens Diemer
Homepage: https://github.com/6809/MC6809
"""
pass
DEFAULT_LOOPS = 5
DEFAULT_MULTIPLY = 15
@cli.command(help="Run a MC6809 emulation benchmark")
@click.option("--loops", default=DEFAULT_LOOPS,
help=f"How many benchmark loops should be run? (default: {DEFAULT_LOOPS:d})")
@click.option("--multiply", default=DEFAULT_MULTIPLY,
help=f"Test data multiplier (default: {DEFAULT_MULTIPLY:d})")
def benchmark(loops, multiply):
<|code_end|>
, predict the immediate next line with the help of imports:
import cProfile
import pstats
import sys
import MC6809
import click
from MC6809.core.bechmark import run_benchmark
and context (classes, functions, sometimes code) from other files:
# Path: MC6809/core/bechmark.py
# def run_benchmark(loops, multiply):
# total_duration = 0
# total_cycles = 0
# bench_class = Test6809_Program2()
#
# # --------------------------------------------------------------------------
#
# duration, cycles = bench_class.crc16_benchmark(loops, multiply)
# total_duration += duration
# total_cycles += cycles
#
# # --------------------------------------------------------------------------
#
# duration, cycles = bench_class.crc32_benchmark(loops, multiply)
# total_duration += duration
# total_cycles += cycles
#
# # --------------------------------------------------------------------------
# print("-" * 79)
# print(
# f"\nTotal of {loops:d} benchmak loops run in {total_duration:.2f} sec"
# f" {locale_format_number(total_cycles)} CPU cycles."
# )
# print("\tavg.: %s CPU cycles/sec" % locale_format_number(total_cycles / total_duration))
. Output only the next line. | run_benchmark(loops, multiply) |
Here is a snippet: <|code_start|>#!/usr/bin/env python
"""
6809 unittests
~~~~~~~~~~~~~~
Test CPU with some small Assembler programs
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014-2015 by the MC6809 team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
log = logging.getLogger("MC6809")
<|code_end|>
. Write the next line using the current file imports:
import binascii
import logging
import sys
import unittest
from decimal import Decimal
from MC6809.tests.test_base import BaseStackTestCase
and context from other files:
# Path: MC6809/tests/test_base.py
# class BaseStackTestCase(BaseCPUTestCase):
# INITIAL_SYSTEM_STACK_ADDR = 0x1000
# INITIAL_USER_STACK_ADDR = 0x2000
#
# def setUp(self):
# super().setUp()
# self.cpu.system_stack_pointer.set(self.INITIAL_SYSTEM_STACK_ADDR)
# self.cpu.user_stack_pointer.set(self.INITIAL_USER_STACK_ADDR)
, which may include functions, classes, or code. Output only the next line. | class Test6809_Program(BaseStackTestCase): |
Continue the code snippet: <|code_start|>"""
MC6809 - 6809 CPU emulator in Python
=======================================
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014 by the MC6809 team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
<|code_end|>
. Use current file imports:
from MC6809.core.configs import BaseConfig
and context (classes, functions, or code) from other files:
# Path: MC6809/core/configs.py
# class BaseConfig:
# # # http address/port number for the CPU control server
# # CPU_CONTROL_ADDR = "127.0.0.1"
# # CPU_CONTROL_PORT = 6809
#
# # How many ops should be execute before make a control server update cycle?
# BURST_COUNT = 10000
#
# DEFAULT_ROMS = {}
#
# def __init__(self, cfg_dict):
# self.cfg_dict = cfg_dict
# self.cfg_dict["cfg_module"] = self.__module__ # FIXME: !
#
# log.debug("cfg_dict: %s", repr(cfg_dict))
#
# # # socket address for internal bus I/O:
# # if cfg_dict["bus_socket_host"] and cfg_dict["bus_socket_port"]:
# # self.bus = True
# # self.bus_socket_host = cfg_dict["bus_socket_host"]
# # self.bus_socket_port = cfg_dict["bus_socket_port"]
# # else:
# # self.bus = None # Will be set in cpu6809.start_CPU()
#
# assert not hasattr(
# cfg_dict, "ram"), f"cfg_dict.ram is deprecated! Remove it from: {self.cfg_dict.__class__.__name__}"
#
# # if cfg_dict["rom"]:
# # raw_rom_cfg = cfg_dict["rom"]
# # raise NotImplementedError("TODO: create rom cfg!")
# # else:
# self.rom_cfg = self.DEFAULT_ROMS
#
# if cfg_dict["trace"]:
# self.trace = True
# else:
# self.trace = False
#
# self.verbosity = cfg_dict["verbosity"]
#
# self.mem_info = DummyMemInfo()
# self.memory_byte_middlewares = {}
# self.memory_word_middlewares = {}
#
# def _get_initial_Memory(self, size):
# return [0x00] * size
#
# def get_initial_RAM(self):
# return self._get_initial_Memory(self.RAM_SIZE)
#
# def get_initial_ROM(self):
# return self._get_initial_Memory(self.ROM_SIZE)
#
# # def get_initial_ROM(self):
# # start=cfg.ROM_START, size=cfg.ROM_SIZE
# # self.start = start
# # self.end = start + size
# # self._mem = [0x00] * size
#
# def print_debug_info(self):
# print(f"Config: '{self.__class__.__name__}'")
#
# for name, value in inspect.getmembers(self): # , inspect.isdatadescriptor):
# if name.startswith("_"):
# continue
# # print name, type(value)
# if not isinstance(value, (int, str, list, tuple, dict)):
# continue
# if isinstance(value, int):
# print(f"{name:>20} = {value:<6} in hex: {hex(value):>7}")
# else:
# print(f"{name:>20} = {value}")
. Output only the next line. | class TestCfg(BaseConfig): |
Given snippet: <|code_start|>#!/usr/bin/env python
"""
MC6809 - 6809 CPU emulator in Python
=======================================
6809 is Big-Endian
Links:
http://dragondata.worldofdragon.org/Publications/inside-dragon.htm
http://www.burgins.com/m6809.html
http://koti.mbnet.fi/~atjs/mc6809/
:copyleft: 2013-2015 by the MC6809 team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
Based on:
* ApplyPy by James Tauber (MIT license)
* XRoar emulator by Ciaran Anscomb (GPL license)
more info, see README
"""
class InterruptMixin:
# ---- Not Implemented, yet. ----
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from MC6809.components.cpu_utils.instruction_caller import opcode
and context:
# Path: MC6809/components/cpu_utils/instruction_caller.py
# def opcode(*opcodes):
# """A decorator for opcodes"""
# def decorator(func):
# setattr(func, "_is_opcode", True)
# setattr(func, "_opcodes", opcodes)
# return func
# return decorator
which might include code, classes, or functions. Output only the next line. | @opcode( # AND condition code register, then wait for interrupt |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
"""
6809 unittests
~~~~~~~~~~~~~~
:created: 2013 by Jens Diemer - www.jensdiemer.de
:copyleft: 2013-2014 by the MC6809 team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
<|code_end|>
. Use current file imports:
import unittest
from MC6809.tests.test_base import BaseCPUTestCase
and context (classes, functions, or code) from other files:
# Path: MC6809/tests/test_base.py
# class BaseCPUTestCase(BaseTestCase):
# UNITTEST_CFG_DICT = {
# "verbosity": None,
# "display_cycle": False,
# "trace": None,
# "bus_socket_host": None,
# "bus_socket_port": None,
# "ram": None,
# "rom": None,
# "max_ops": None,
# "use_bus": False,
# }
#
# def setUp(self):
# cfg = TestCfg(self.UNITTEST_CFG_DICT)
# memory = Memory(cfg)
# self.cpu = CPU(memory, cfg)
#
# def cpu_test_run(self, start, end, mem):
# for cell in mem:
# self.assertLess(-1, cell, f"${cell:x} < 0")
# self.assertGreater(0x100, cell, f"${cell:x} > 0xff")
# log.debug("memory load at $%x: %s", start,
# ", ".join("$%x" % i for i in mem)
# )
# self.cpu.memory.load(start, mem)
# if end is None:
# end = start + len(mem)
# self.cpu.test_run(start, end)
# cpu_test_run.__test__ = False # Exclude from nose
#
# def cpu_test_run2(self, start, count, mem):
# for cell in mem:
# self.assertLess(-1, cell, f"${cell:x} < 0")
# self.assertGreater(0x100, cell, f"${cell:x} > 0xff")
# self.cpu.memory.load(start, mem)
# self.cpu.test_run2(start, count)
# cpu_test_run2.__test__ = False # Exclude from nose
#
# def assertMemory(self, start, mem):
# for index, should_byte in enumerate(mem):
# address = start + index
# is_byte = self.cpu.memory.read_byte(address)
#
# msg = f"${is_byte:02x} is not ${should_byte:02x} at address ${address:04x} (index: {index:d})"
# self.assertEqual(is_byte, should_byte, msg)
. Output only the next line. | class CC_AccumulatorTestCase(BaseCPUTestCase): |
Given the following code snippet before the placeholder: <|code_start|>
addr = stack_pointer.value
# log.info(
# log.error(
# "%x|\tpush word $%x to %s stack at $%x\t|%s",
# self.last_op_address, word, stack_pointer.name, addr,
# self.cfg.mem_info.get_shortest(self.last_op_address)
# )
self.memory.write_word(addr, word)
# hi, lo = divmod(word, 0x100)
# self.push_byte(hi)
# self.push_byte(lo)
def pull_word(self, stack_pointer):
addr = stack_pointer.value
word = self.memory.read_word(addr)
# log.info(
# log.error(
# "%x|\tpull word $%x from %s stack at $%x\t|%s",
# self.last_op_address, word, stack_pointer.name, addr,
# self.cfg.mem_info.get_shortest(self.last_op_address)
# )
# FIXME: self.system_stack_pointer += 2
stack_pointer.increment(2)
return word
####
<|code_end|>
, predict the next line using imports from the current file:
from MC6809.components.cpu_utils.instruction_caller import opcode
from MC6809.components.MC6809data.MC6809_op_data import REG_A, REG_B, REG_CC, REG_DP, REG_PC, REG_U, REG_X, REG_Y
and context including class names, function names, and sometimes code from other files:
# Path: MC6809/components/cpu_utils/instruction_caller.py
# def opcode(*opcodes):
# """A decorator for opcodes"""
# def decorator(func):
# setattr(func, "_is_opcode", True)
# setattr(func, "_opcodes", opcodes)
# return func
# return decorator
#
# Path: MC6809/components/MC6809data/MC6809_op_data.py
# REG_A = "A"
#
# REG_B = "B"
#
# REG_CC = "CC"
#
# REG_DP = "DP"
#
# REG_PC = "PC"
#
# REG_U = "U"
#
# REG_X = "X"
#
# REG_Y = "Y"
. Output only the next line. | @opcode( # Push A, B, CC, DP, D, X, Y, U, or PC onto stack |
Next line prediction: <|code_start|> assert register in (self.system_stack_pointer, self.user_stack_pointer)
def push(register_str, stack_pointer):
register_obj = self.register_str2object[register_str]
data = register_obj.value
# log.debug("\tpush %s with data $%x", register_obj.name, data)
if register_obj.WIDTH == 8:
self.push_byte(register, data)
else:
assert register_obj.WIDTH == 16
self.push_word(register, data)
# log.debug("$%x PSH%s post byte: $%x", self.program_counter, register.name, m)
# m = postbyte
if m & 0x80:
push(REG_PC, register) # 16 bit program counter register
if m & 0x40:
push(REG_U, register) # 16 bit user-stack pointer
if m & 0x20:
push(REG_Y, register) # 16 bit index register
if m & 0x10:
push(REG_X, register) # 16 bit index register
if m & 0x08:
push(REG_DP, register) # 8 bit direct page register
if m & 0x04:
push(REG_B, register) # 8 bit accumulator
if m & 0x02:
<|code_end|>
. Use current file imports:
(from MC6809.components.cpu_utils.instruction_caller import opcode
from MC6809.components.MC6809data.MC6809_op_data import REG_A, REG_B, REG_CC, REG_DP, REG_PC, REG_U, REG_X, REG_Y)
and context including class names, function names, or small code snippets from other files:
# Path: MC6809/components/cpu_utils/instruction_caller.py
# def opcode(*opcodes):
# """A decorator for opcodes"""
# def decorator(func):
# setattr(func, "_is_opcode", True)
# setattr(func, "_opcodes", opcodes)
# return func
# return decorator
#
# Path: MC6809/components/MC6809data/MC6809_op_data.py
# REG_A = "A"
#
# REG_B = "B"
#
# REG_CC = "CC"
#
# REG_DP = "DP"
#
# REG_PC = "PC"
#
# REG_U = "U"
#
# REG_X = "X"
#
# REG_Y = "Y"
. Output only the next line. | push(REG_A, register) # 8 bit accumulator |
Here is a snippet: <|code_start|> CC bits "HNZVC": -----
"""
assert register in (self.system_stack_pointer, self.user_stack_pointer)
def push(register_str, stack_pointer):
register_obj = self.register_str2object[register_str]
data = register_obj.value
# log.debug("\tpush %s with data $%x", register_obj.name, data)
if register_obj.WIDTH == 8:
self.push_byte(register, data)
else:
assert register_obj.WIDTH == 16
self.push_word(register, data)
# log.debug("$%x PSH%s post byte: $%x", self.program_counter, register.name, m)
# m = postbyte
if m & 0x80:
push(REG_PC, register) # 16 bit program counter register
if m & 0x40:
push(REG_U, register) # 16 bit user-stack pointer
if m & 0x20:
push(REG_Y, register) # 16 bit index register
if m & 0x10:
push(REG_X, register) # 16 bit index register
if m & 0x08:
push(REG_DP, register) # 8 bit direct page register
if m & 0x04:
<|code_end|>
. Write the next line using the current file imports:
from MC6809.components.cpu_utils.instruction_caller import opcode
from MC6809.components.MC6809data.MC6809_op_data import REG_A, REG_B, REG_CC, REG_DP, REG_PC, REG_U, REG_X, REG_Y
and context from other files:
# Path: MC6809/components/cpu_utils/instruction_caller.py
# def opcode(*opcodes):
# """A decorator for opcodes"""
# def decorator(func):
# setattr(func, "_is_opcode", True)
# setattr(func, "_opcodes", opcodes)
# return func
# return decorator
#
# Path: MC6809/components/MC6809data/MC6809_op_data.py
# REG_A = "A"
#
# REG_B = "B"
#
# REG_CC = "CC"
#
# REG_DP = "DP"
#
# REG_PC = "PC"
#
# REG_U = "U"
#
# REG_X = "X"
#
# REG_Y = "Y"
, which may include functions, classes, or code. Output only the next line. | push(REG_B, register) # 8 bit accumulator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.