Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given the code snippet: <|code_start|> @property
def decimal_separator(self):
return self._decimal_separator
@decimal_separator.setter
def decimal_separator(self, new_decimal_separator):
assert self.format in (FORMAT_DELIMITED, FORMAT_FIXED)
assert new_decimal_separator in _VALID_DECIMAL_SEPARATORS
self._decimal_separator = new_decimal_separator
@property
def thousands_separator(self):
return self._thousands_separator
@thousands_separator.setter
def thousands_separator(self, new_thousands_separator):
assert self.format in (FORMAT_DELIMITED, FORMAT_FIXED)
assert new_thousands_separator in _VALID_THOUSANDS_SEPARATORS
self._thousands_separator = new_thousands_separator
def set_property(self, name, value, location=None):
r"""
Set data format property ``name`` to ``value`` possibly translating ``value`` from
a human readable representation to an internal one.
:param str name: any of the ``KEY_*`` constants
:param value: the value to set the property to as it would show up in a CID. \
In some cases, the value will be translated to an internal representation. \
<|code_end|>
, generate the next line using the imports in this file:
import codecs
import string
import token
import tokenize
from cutplace import _compat, _tools, errors, ranges
from cutplace._tools import generated_tokens
and context (functions, classes, or occasionally code) from other files:
# Path: cutplace/_compat.py
# def text_repr(text):
# def token_io_readline(text):
# def csv_reader(source_text_stream, dialect=csv.excel, **keywords):
# def csv_writer(target_text_stream, dialect=csv.excel, **keywords):
#
# Path: cutplace/_tools.py
# LOG_LEVEL_NAME_TO_LEVEL_MAP = {
# "debug": logging.DEBUG,
# "info": logging.INFO,
# "warning": logging.WARNING,
# "error": logging.ERROR,
# "critical": logging.CRITICAL,
# }
# def mkdirs(folder):
# def validated_python_name(name, value):
# def generated_tokens(text):
# def human_readable_list(items, final_separator="or"):
# def tokenize_without_space(text):
# def token_text(toky):
# def is_newline_token(some_token):
# def is_eof_token(some_token):
# def is_comma_token(some_token):
# def with_suffix(path, suffix=""):
# def length_of_int(int_value):
#
# Path: cutplace/errors.py
# NAME_TO_ASCII_CODE_MAP = {"cr": 13, "ff": 12, "lf": 10, "tab": 9, "vt": 11}
# class Location(object):
# class CutplaceError(Exception):
# class DataError(CutplaceError):
# class InterfaceError(CutplaceError):
# class RangeValueError(DataError):
# class DataFormatError(DataError):
# class FieldValueError(DataError):
# class CheckError(DataError):
# def __init__(self, file_path, has_column=False, has_cell=False, has_sheet=False):
# def __copy__(self):
# def advance_column(self, amount=1):
# def advance_cell(self, amount=1):
# def set_cell(self, new_cell):
# def advance_line(self, amount=1):
# def advance_sheet(self):
# def cell(self):
# def column(self):
# def line(self):
# def _get_sheet(self):
# def _set_sheet(self, new_sheet):
# def __str__(self):
# def __repr__(self):
# def __lt__(self, other):
# def __eq__(self, other):
# def create_caller_location(modules_to_ignore=None, has_column=False, has_cell=False, has_sheet=False):
# def __init__(self, message, location=None, see_also_message=None, see_also_location=None, cause=None):
# def location(self):
# def message(self):
# def see_also_message(self):
# def see_also_location(self):
# def cause(self):
# def prepend_message(self, prefix, new_location):
# def __str__(self):
#
# Path: cutplace/ranges.py
# ELLIPSIS = "\u2026"
# MAX_INTEGER = 2 ** 31 - 1
# MIN_INTEGER = -(2 ** 31)
# DEFAULT_INTEGER_RANGE_TEXT = "%d...%d" % (MIN_INTEGER, MAX_INTEGER)
# MAX_DECIMAL_TEXT = "9999999999999999999.999999999999"
# MIN_DECIMAL_TEXT = "-" + MAX_DECIMAL_TEXT
# DEFAULT_DECIMAL_RANGE_TEXT = "%s...%s" % (MIN_DECIMAL_TEXT, MAX_DECIMAL_TEXT)
# DEFAULT_PRECISION = len(MAX_DECIMAL_TEXT.split(".")[1])
# DEFAULT_SCALE = len(MAX_DECIMAL_TEXT) - 1
# def code_for_number_token(name, value, location):
# def code_for_symbolic_token(name, value, location):
# def code_for_string_token(name, value, location):
# def create_range_from_length(length_range):
# def _decimal_as_text(decimal_value, precision=DEFAULT_PRECISION):
# def __init__(self, description, default=None):
# def description(self):
# def items(self):
# def lower_limit(self):
# def upper_limit(self):
# def _repr_item(self, item):
# def __repr__(self):
# def __str__(self):
# def _items_overlap(self, some, other):
# def _item_contains(self, item, value):
# def validate(self, name, value, location=None):
# def __init__(self, description, default=None, location=None):
# def precision(self):
# def scale(self):
# def __repr__(self):
# def __str__(self):
# def _repr_item(self, item):
# def validate(self, name, value, location=None):
# class Range(object):
# class DecimalRange(Range):
#
# Path: cutplace/_tools.py
# def generated_tokens(text):
# toky = list(tokenize.generate_tokens(_compat.token_io_readline(text)))
# if len(toky) >= 2 and is_newline_token(toky[-2]) and is_eof_token(toky[-1]):
# # HACK: Remove newline that generated_tokens() adds starting with Python 3.x but not before.
# del toky[-2]
# for result in toky:
# yield result
. Output only the next line. | For example ``set_property(KEY_LINE_DELIMITER, 'lf')`` results in \ |
Continue the code snippet: <|code_start|> @property
def decimal_separator(self):
return self._decimal_separator
@decimal_separator.setter
def decimal_separator(self, new_decimal_separator):
assert self.format in (FORMAT_DELIMITED, FORMAT_FIXED)
assert new_decimal_separator in _VALID_DECIMAL_SEPARATORS
self._decimal_separator = new_decimal_separator
@property
def thousands_separator(self):
return self._thousands_separator
@thousands_separator.setter
def thousands_separator(self, new_thousands_separator):
assert self.format in (FORMAT_DELIMITED, FORMAT_FIXED)
assert new_thousands_separator in _VALID_THOUSANDS_SEPARATORS
self._thousands_separator = new_thousands_separator
def set_property(self, name, value, location=None):
r"""
Set data format property ``name`` to ``value`` possibly translating ``value`` from
a human readable representation to an internal one.
:param str name: any of the ``KEY_*`` constants
:param value: the value to set the property to as it would show up in a CID. \
In some cases, the value will be translated to an internal representation. \
<|code_end|>
. Use current file imports:
import codecs
import string
import token
import tokenize
from cutplace import _compat, _tools, errors, ranges
from cutplace._tools import generated_tokens
and context (classes, functions, or code) from other files:
# Path: cutplace/_compat.py
# def text_repr(text):
# def token_io_readline(text):
# def csv_reader(source_text_stream, dialect=csv.excel, **keywords):
# def csv_writer(target_text_stream, dialect=csv.excel, **keywords):
#
# Path: cutplace/_tools.py
# LOG_LEVEL_NAME_TO_LEVEL_MAP = {
# "debug": logging.DEBUG,
# "info": logging.INFO,
# "warning": logging.WARNING,
# "error": logging.ERROR,
# "critical": logging.CRITICAL,
# }
# def mkdirs(folder):
# def validated_python_name(name, value):
# def generated_tokens(text):
# def human_readable_list(items, final_separator="or"):
# def tokenize_without_space(text):
# def token_text(toky):
# def is_newline_token(some_token):
# def is_eof_token(some_token):
# def is_comma_token(some_token):
# def with_suffix(path, suffix=""):
# def length_of_int(int_value):
#
# Path: cutplace/errors.py
# NAME_TO_ASCII_CODE_MAP = {"cr": 13, "ff": 12, "lf": 10, "tab": 9, "vt": 11}
# class Location(object):
# class CutplaceError(Exception):
# class DataError(CutplaceError):
# class InterfaceError(CutplaceError):
# class RangeValueError(DataError):
# class DataFormatError(DataError):
# class FieldValueError(DataError):
# class CheckError(DataError):
# def __init__(self, file_path, has_column=False, has_cell=False, has_sheet=False):
# def __copy__(self):
# def advance_column(self, amount=1):
# def advance_cell(self, amount=1):
# def set_cell(self, new_cell):
# def advance_line(self, amount=1):
# def advance_sheet(self):
# def cell(self):
# def column(self):
# def line(self):
# def _get_sheet(self):
# def _set_sheet(self, new_sheet):
# def __str__(self):
# def __repr__(self):
# def __lt__(self, other):
# def __eq__(self, other):
# def create_caller_location(modules_to_ignore=None, has_column=False, has_cell=False, has_sheet=False):
# def __init__(self, message, location=None, see_also_message=None, see_also_location=None, cause=None):
# def location(self):
# def message(self):
# def see_also_message(self):
# def see_also_location(self):
# def cause(self):
# def prepend_message(self, prefix, new_location):
# def __str__(self):
#
# Path: cutplace/ranges.py
# ELLIPSIS = "\u2026"
# MAX_INTEGER = 2 ** 31 - 1
# MIN_INTEGER = -(2 ** 31)
# DEFAULT_INTEGER_RANGE_TEXT = "%d...%d" % (MIN_INTEGER, MAX_INTEGER)
# MAX_DECIMAL_TEXT = "9999999999999999999.999999999999"
# MIN_DECIMAL_TEXT = "-" + MAX_DECIMAL_TEXT
# DEFAULT_DECIMAL_RANGE_TEXT = "%s...%s" % (MIN_DECIMAL_TEXT, MAX_DECIMAL_TEXT)
# DEFAULT_PRECISION = len(MAX_DECIMAL_TEXT.split(".")[1])
# DEFAULT_SCALE = len(MAX_DECIMAL_TEXT) - 1
# def code_for_number_token(name, value, location):
# def code_for_symbolic_token(name, value, location):
# def code_for_string_token(name, value, location):
# def create_range_from_length(length_range):
# def _decimal_as_text(decimal_value, precision=DEFAULT_PRECISION):
# def __init__(self, description, default=None):
# def description(self):
# def items(self):
# def lower_limit(self):
# def upper_limit(self):
# def _repr_item(self, item):
# def __repr__(self):
# def __str__(self):
# def _items_overlap(self, some, other):
# def _item_contains(self, item, value):
# def validate(self, name, value, location=None):
# def __init__(self, description, default=None, location=None):
# def precision(self):
# def scale(self):
# def __repr__(self):
# def __str__(self):
# def _repr_item(self, item):
# def validate(self, name, value, location=None):
# class Range(object):
# class DecimalRange(Range):
#
# Path: cutplace/_tools.py
# def generated_tokens(text):
# toky = list(tokenize.generate_tokens(_compat.token_io_readline(text)))
# if len(toky) >= 2 and is_newline_token(toky[-2]) and is_eof_token(toky[-1]):
# # HACK: Remove newline that generated_tokens() adds starting with Python 3.x but not before.
# del toky[-2]
# for result in toky:
# yield result
. Output only the next line. | For example ``set_property(KEY_LINE_DELIMITER, 'lf')`` results in \ |
Here is a snippet: <|code_start|> return self._decimal_separator
@decimal_separator.setter
def decimal_separator(self, new_decimal_separator):
assert self.format in (FORMAT_DELIMITED, FORMAT_FIXED)
assert new_decimal_separator in _VALID_DECIMAL_SEPARATORS
self._decimal_separator = new_decimal_separator
@property
def thousands_separator(self):
return self._thousands_separator
@thousands_separator.setter
def thousands_separator(self, new_thousands_separator):
assert self.format in (FORMAT_DELIMITED, FORMAT_FIXED)
assert new_thousands_separator in _VALID_THOUSANDS_SEPARATORS
self._thousands_separator = new_thousands_separator
def set_property(self, name, value, location=None):
r"""
Set data format property ``name`` to ``value`` possibly translating ``value`` from
a human readable representation to an internal one.
:param str name: any of the ``KEY_*`` constants
:param value: the value to set the property to as it would show up in a CID. \
In some cases, the value will be translated to an internal representation. \
For example ``set_property(KEY_LINE_DELIMITER, 'lf')`` results in \
:py:attr:`cutplace.data.line_delimiter` being ``'\n'``.
<|code_end|>
. Write the next line using the current file imports:
import codecs
import string
import token
import tokenize
from cutplace import _compat, _tools, errors, ranges
from cutplace._tools import generated_tokens
and context from other files:
# Path: cutplace/_compat.py
# def text_repr(text):
# def token_io_readline(text):
# def csv_reader(source_text_stream, dialect=csv.excel, **keywords):
# def csv_writer(target_text_stream, dialect=csv.excel, **keywords):
#
# Path: cutplace/_tools.py
# LOG_LEVEL_NAME_TO_LEVEL_MAP = {
# "debug": logging.DEBUG,
# "info": logging.INFO,
# "warning": logging.WARNING,
# "error": logging.ERROR,
# "critical": logging.CRITICAL,
# }
# def mkdirs(folder):
# def validated_python_name(name, value):
# def generated_tokens(text):
# def human_readable_list(items, final_separator="or"):
# def tokenize_without_space(text):
# def token_text(toky):
# def is_newline_token(some_token):
# def is_eof_token(some_token):
# def is_comma_token(some_token):
# def with_suffix(path, suffix=""):
# def length_of_int(int_value):
#
# Path: cutplace/errors.py
# NAME_TO_ASCII_CODE_MAP = {"cr": 13, "ff": 12, "lf": 10, "tab": 9, "vt": 11}
# class Location(object):
# class CutplaceError(Exception):
# class DataError(CutplaceError):
# class InterfaceError(CutplaceError):
# class RangeValueError(DataError):
# class DataFormatError(DataError):
# class FieldValueError(DataError):
# class CheckError(DataError):
# def __init__(self, file_path, has_column=False, has_cell=False, has_sheet=False):
# def __copy__(self):
# def advance_column(self, amount=1):
# def advance_cell(self, amount=1):
# def set_cell(self, new_cell):
# def advance_line(self, amount=1):
# def advance_sheet(self):
# def cell(self):
# def column(self):
# def line(self):
# def _get_sheet(self):
# def _set_sheet(self, new_sheet):
# def __str__(self):
# def __repr__(self):
# def __lt__(self, other):
# def __eq__(self, other):
# def create_caller_location(modules_to_ignore=None, has_column=False, has_cell=False, has_sheet=False):
# def __init__(self, message, location=None, see_also_message=None, see_also_location=None, cause=None):
# def location(self):
# def message(self):
# def see_also_message(self):
# def see_also_location(self):
# def cause(self):
# def prepend_message(self, prefix, new_location):
# def __str__(self):
#
# Path: cutplace/ranges.py
# ELLIPSIS = "\u2026"
# MAX_INTEGER = 2 ** 31 - 1
# MIN_INTEGER = -(2 ** 31)
# DEFAULT_INTEGER_RANGE_TEXT = "%d...%d" % (MIN_INTEGER, MAX_INTEGER)
# MAX_DECIMAL_TEXT = "9999999999999999999.999999999999"
# MIN_DECIMAL_TEXT = "-" + MAX_DECIMAL_TEXT
# DEFAULT_DECIMAL_RANGE_TEXT = "%s...%s" % (MIN_DECIMAL_TEXT, MAX_DECIMAL_TEXT)
# DEFAULT_PRECISION = len(MAX_DECIMAL_TEXT.split(".")[1])
# DEFAULT_SCALE = len(MAX_DECIMAL_TEXT) - 1
# def code_for_number_token(name, value, location):
# def code_for_symbolic_token(name, value, location):
# def code_for_string_token(name, value, location):
# def create_range_from_length(length_range):
# def _decimal_as_text(decimal_value, precision=DEFAULT_PRECISION):
# def __init__(self, description, default=None):
# def description(self):
# def items(self):
# def lower_limit(self):
# def upper_limit(self):
# def _repr_item(self, item):
# def __repr__(self):
# def __str__(self):
# def _items_overlap(self, some, other):
# def _item_contains(self, item, value):
# def validate(self, name, value, location=None):
# def __init__(self, description, default=None, location=None):
# def precision(self):
# def scale(self):
# def __repr__(self):
# def __str__(self):
# def _repr_item(self, item):
# def validate(self, name, value, location=None):
# class Range(object):
# class DecimalRange(Range):
#
# Path: cutplace/_tools.py
# def generated_tokens(text):
# toky = list(tokenize.generate_tokens(_compat.token_io_readline(text)))
# if len(toky) >= 2 and is_newline_token(toky[-2]) and is_eof_token(toky[-1]):
# # HACK: Remove newline that generated_tokens() adds starting with Python 3.x but not before.
# del toky[-2]
# for result in toky:
# yield result
, which may include functions, classes, or code. Output only the next line. | :type location: str or None |
Given snippet: <|code_start|> @property
def decimal_separator(self):
return self._decimal_separator
@decimal_separator.setter
def decimal_separator(self, new_decimal_separator):
assert self.format in (FORMAT_DELIMITED, FORMAT_FIXED)
assert new_decimal_separator in _VALID_DECIMAL_SEPARATORS
self._decimal_separator = new_decimal_separator
@property
def thousands_separator(self):
return self._thousands_separator
@thousands_separator.setter
def thousands_separator(self, new_thousands_separator):
assert self.format in (FORMAT_DELIMITED, FORMAT_FIXED)
assert new_thousands_separator in _VALID_THOUSANDS_SEPARATORS
self._thousands_separator = new_thousands_separator
def set_property(self, name, value, location=None):
r"""
Set data format property ``name`` to ``value`` possibly translating ``value`` from
a human readable representation to an internal one.
:param str name: any of the ``KEY_*`` constants
:param value: the value to set the property to as it would show up in a CID. \
In some cases, the value will be translated to an internal representation. \
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import codecs
import string
import token
import tokenize
from cutplace import _compat, _tools, errors, ranges
from cutplace._tools import generated_tokens
and context:
# Path: cutplace/_compat.py
# def text_repr(text):
# def token_io_readline(text):
# def csv_reader(source_text_stream, dialect=csv.excel, **keywords):
# def csv_writer(target_text_stream, dialect=csv.excel, **keywords):
#
# Path: cutplace/_tools.py
# LOG_LEVEL_NAME_TO_LEVEL_MAP = {
# "debug": logging.DEBUG,
# "info": logging.INFO,
# "warning": logging.WARNING,
# "error": logging.ERROR,
# "critical": logging.CRITICAL,
# }
# def mkdirs(folder):
# def validated_python_name(name, value):
# def generated_tokens(text):
# def human_readable_list(items, final_separator="or"):
# def tokenize_without_space(text):
# def token_text(toky):
# def is_newline_token(some_token):
# def is_eof_token(some_token):
# def is_comma_token(some_token):
# def with_suffix(path, suffix=""):
# def length_of_int(int_value):
#
# Path: cutplace/errors.py
# NAME_TO_ASCII_CODE_MAP = {"cr": 13, "ff": 12, "lf": 10, "tab": 9, "vt": 11}
# class Location(object):
# class CutplaceError(Exception):
# class DataError(CutplaceError):
# class InterfaceError(CutplaceError):
# class RangeValueError(DataError):
# class DataFormatError(DataError):
# class FieldValueError(DataError):
# class CheckError(DataError):
# def __init__(self, file_path, has_column=False, has_cell=False, has_sheet=False):
# def __copy__(self):
# def advance_column(self, amount=1):
# def advance_cell(self, amount=1):
# def set_cell(self, new_cell):
# def advance_line(self, amount=1):
# def advance_sheet(self):
# def cell(self):
# def column(self):
# def line(self):
# def _get_sheet(self):
# def _set_sheet(self, new_sheet):
# def __str__(self):
# def __repr__(self):
# def __lt__(self, other):
# def __eq__(self, other):
# def create_caller_location(modules_to_ignore=None, has_column=False, has_cell=False, has_sheet=False):
# def __init__(self, message, location=None, see_also_message=None, see_also_location=None, cause=None):
# def location(self):
# def message(self):
# def see_also_message(self):
# def see_also_location(self):
# def cause(self):
# def prepend_message(self, prefix, new_location):
# def __str__(self):
#
# Path: cutplace/ranges.py
# ELLIPSIS = "\u2026"
# MAX_INTEGER = 2 ** 31 - 1
# MIN_INTEGER = -(2 ** 31)
# DEFAULT_INTEGER_RANGE_TEXT = "%d...%d" % (MIN_INTEGER, MAX_INTEGER)
# MAX_DECIMAL_TEXT = "9999999999999999999.999999999999"
# MIN_DECIMAL_TEXT = "-" + MAX_DECIMAL_TEXT
# DEFAULT_DECIMAL_RANGE_TEXT = "%s...%s" % (MIN_DECIMAL_TEXT, MAX_DECIMAL_TEXT)
# DEFAULT_PRECISION = len(MAX_DECIMAL_TEXT.split(".")[1])
# DEFAULT_SCALE = len(MAX_DECIMAL_TEXT) - 1
# def code_for_number_token(name, value, location):
# def code_for_symbolic_token(name, value, location):
# def code_for_string_token(name, value, location):
# def create_range_from_length(length_range):
# def _decimal_as_text(decimal_value, precision=DEFAULT_PRECISION):
# def __init__(self, description, default=None):
# def description(self):
# def items(self):
# def lower_limit(self):
# def upper_limit(self):
# def _repr_item(self, item):
# def __repr__(self):
# def __str__(self):
# def _items_overlap(self, some, other):
# def _item_contains(self, item, value):
# def validate(self, name, value, location=None):
# def __init__(self, description, default=None, location=None):
# def precision(self):
# def scale(self):
# def __repr__(self):
# def __str__(self):
# def _repr_item(self, item):
# def validate(self, name, value, location=None):
# class Range(object):
# class DecimalRange(Range):
#
# Path: cutplace/_tools.py
# def generated_tokens(text):
# toky = list(tokenize.generate_tokens(_compat.token_io_readline(text)))
# if len(toky) >= 2 and is_newline_token(toky[-2]) and is_eof_token(toky[-1]):
# # HACK: Remove newline that generated_tokens() adds starting with Python 3.x but not before.
# del toky[-2]
# for result in toky:
# yield result
which might include code, classes, or functions. Output only the next line. | For example ``set_property(KEY_LINE_DELIMITER, 'lf')`` results in \ |
Given the following code snippet before the placeholder: <|code_start|>#: Format name for Open Document spreadsheets (ODS).
FORMAT_ODS = "ods"
KEY_ALLOWED_CHARACTERS = "allowed_characters"
KEY_ENCODING = "encoding"
KEY_ESCAPE_CHARACTER = "escape_character"
KEY_FORMAT = "format"
KEY_HEADER = "header"
KEY_ITEM_DELIMITER = "item_delimiter"
KEY_LINE_DELIMITER = "line_delimiter"
KEY_QUOTE_CHARACTER = "quote_character"
KEY_SHEET = "sheet"
KEY_SKIP_INITIAL_SPACE = "skip_initial_space"
KEY_DECIMAL_SEPARATOR = "decimal_separator"
KEY_THOUSANDS_SEPARATOR = "thousands_separator"
_VALID_QUOTE_CHARACTERS = ['"', "'"]
_VALID_ESCAPE_CHARACTERS = ['"', "\\"]
_VALID_DECIMAL_SEPARATORS = [".", ","]
_VALID_THOUSANDS_SEPARATORS = [",", ".", ""]
_VALID_FORMATS = [FORMAT_DELIMITED, FORMAT_EXCEL, FORMAT_FIXED, FORMAT_ODS]
class DataFormat(object):
"""
General data format of a file describing the basic structure.
"""
def __init__(self, format_name, location=None):
r"""
<|code_end|>
, predict the next line using imports from the current file:
import codecs
import string
import token
import tokenize
from cutplace import _compat, _tools, errors, ranges
from cutplace._tools import generated_tokens
and context including class names, function names, and sometimes code from other files:
# Path: cutplace/_compat.py
# def text_repr(text):
# def token_io_readline(text):
# def csv_reader(source_text_stream, dialect=csv.excel, **keywords):
# def csv_writer(target_text_stream, dialect=csv.excel, **keywords):
#
# Path: cutplace/_tools.py
# LOG_LEVEL_NAME_TO_LEVEL_MAP = {
# "debug": logging.DEBUG,
# "info": logging.INFO,
# "warning": logging.WARNING,
# "error": logging.ERROR,
# "critical": logging.CRITICAL,
# }
# def mkdirs(folder):
# def validated_python_name(name, value):
# def generated_tokens(text):
# def human_readable_list(items, final_separator="or"):
# def tokenize_without_space(text):
# def token_text(toky):
# def is_newline_token(some_token):
# def is_eof_token(some_token):
# def is_comma_token(some_token):
# def with_suffix(path, suffix=""):
# def length_of_int(int_value):
#
# Path: cutplace/errors.py
# NAME_TO_ASCII_CODE_MAP = {"cr": 13, "ff": 12, "lf": 10, "tab": 9, "vt": 11}
# class Location(object):
# class CutplaceError(Exception):
# class DataError(CutplaceError):
# class InterfaceError(CutplaceError):
# class RangeValueError(DataError):
# class DataFormatError(DataError):
# class FieldValueError(DataError):
# class CheckError(DataError):
# def __init__(self, file_path, has_column=False, has_cell=False, has_sheet=False):
# def __copy__(self):
# def advance_column(self, amount=1):
# def advance_cell(self, amount=1):
# def set_cell(self, new_cell):
# def advance_line(self, amount=1):
# def advance_sheet(self):
# def cell(self):
# def column(self):
# def line(self):
# def _get_sheet(self):
# def _set_sheet(self, new_sheet):
# def __str__(self):
# def __repr__(self):
# def __lt__(self, other):
# def __eq__(self, other):
# def create_caller_location(modules_to_ignore=None, has_column=False, has_cell=False, has_sheet=False):
# def __init__(self, message, location=None, see_also_message=None, see_also_location=None, cause=None):
# def location(self):
# def message(self):
# def see_also_message(self):
# def see_also_location(self):
# def cause(self):
# def prepend_message(self, prefix, new_location):
# def __str__(self):
#
# Path: cutplace/ranges.py
# ELLIPSIS = "\u2026"
# MAX_INTEGER = 2 ** 31 - 1
# MIN_INTEGER = -(2 ** 31)
# DEFAULT_INTEGER_RANGE_TEXT = "%d...%d" % (MIN_INTEGER, MAX_INTEGER)
# MAX_DECIMAL_TEXT = "9999999999999999999.999999999999"
# MIN_DECIMAL_TEXT = "-" + MAX_DECIMAL_TEXT
# DEFAULT_DECIMAL_RANGE_TEXT = "%s...%s" % (MIN_DECIMAL_TEXT, MAX_DECIMAL_TEXT)
# DEFAULT_PRECISION = len(MAX_DECIMAL_TEXT.split(".")[1])
# DEFAULT_SCALE = len(MAX_DECIMAL_TEXT) - 1
# def code_for_number_token(name, value, location):
# def code_for_symbolic_token(name, value, location):
# def code_for_string_token(name, value, location):
# def create_range_from_length(length_range):
# def _decimal_as_text(decimal_value, precision=DEFAULT_PRECISION):
# def __init__(self, description, default=None):
# def description(self):
# def items(self):
# def lower_limit(self):
# def upper_limit(self):
# def _repr_item(self, item):
# def __repr__(self):
# def __str__(self):
# def _items_overlap(self, some, other):
# def _item_contains(self, item, value):
# def validate(self, name, value, location=None):
# def __init__(self, description, default=None, location=None):
# def precision(self):
# def scale(self):
# def __repr__(self):
# def __str__(self):
# def _repr_item(self, item):
# def validate(self, name, value, location=None):
# class Range(object):
# class DecimalRange(Range):
#
# Path: cutplace/_tools.py
# def generated_tokens(text):
# toky = list(tokenize.generate_tokens(_compat.token_io_readline(text)))
# if len(toky) >= 2 and is_newline_token(toky[-2]) and is_eof_token(toky[-1]):
# # HACK: Remove newline that generated_tokens() adds starting with Python 3.x but not before.
# del toky[-2]
# for result in toky:
# yield result
. Output only the next line. | Create a new data format. |
Predict the next line after this snippet: <|code_start|>
def standard_groupingForInj(abf,target=200):
for i in range(abf.sweeps):
abf.setSweep(i)
if abf.protoSeqY[1]==target: #first step is target pA injection
swhlab.ap.check_AP_group(abf,i)
swhlab.plot.save(abf,tag='05-grouping',resize=False)
### --- SWHLab4 protocols ---
def proto_00_01_gf(abf=exampleABF):
"""gap free recording"""
standard_inspect(abf)
def proto_00_02_egf(abf=exampleABF):
"""episodic with no epochs (virtually gap free)"""
standard_inspect(abf)
def proto_01_01_HP010(abf=exampleABF):
"""hyperpolarization step. Use to calculate tau and stuff."""
swhlab.memtest.memtest(abf) #knows how to do IC memtest
swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did
swhlab.plot.save(abf,tag="tau")
def proto_01_11_rampStep(abf=exampleABF):
"""each sweep is a ramp (of set size) which builds on the last sweep.
Used for detection of AP properties from first few APs."""
standard_inspect(abf)
swhlab.ap.detect(abf)
swhlab.ap.check_sweep(abf) #eyeball how well event detection worked
<|code_end|>
using the current file's imports:
import os
import sys
import glob
import pylab
import numpy as np
import traceback
import swhlab
from swhlab.core import common as cm #shorthand
from swhlab.indexing.indexing import genIndex
and any relevant context from other files:
# Path: swhlab/core.py
# def abfIDfromFname(fname):
# def abfProtocol(fname):
# def headerHTML(header,fname):
# def __init__(self, fname, createFolder=False):
# def setsweep(self, sweep=0, channel=0):
# def sweepList(self):
# def setsweeps(self):
# def comments_load(self):
# def generate_protocol(self):
# def get_protocol(self,sweep):
# def get_protocol_sequence(self,sweep):
# def clamp_values(self,timePoint=0):
# def epochTimes(self,nEpoch=2):
# def average(self,t1=0,t2=None,setsweep=False):
# def averageSweep(self,sweepFirst=0,sweepLast=None):
# def kernel_gaussian(self, sizeMS, sigmaMS=None, forwardOnly=False):
# def sweepYfiltered(self):
# def sweepYsmartbase(self):
# def phasicNet(self,biggestEvent=50,m1=.5,m2=None):
# def output_touch(self):
# def output_clean(self):
# def inspect(self):
# class ABF:
# Y = proto[step]['fEpochInitLevel']+proto[step]['fEpochLevelInc']*self.sweep
# I1,I2=int(t1*self.pointsPerSec),int(t2*self.pointsPerSec)
# Y=self.sweepYsmartbase()[int(m1):int(m2)]
#
# Path: swhlab/indexing/indexing.py
# class INDEX:
# def __init__(self,ABFfolder):
# def scan(self):
# def convertImages(self):
# def analyzeAll(self):
# def analyzeABF(self,ID):
# def htmlFor(self,fname):
# def html_single_basic(self,abfID,launch=False,overwrite=False):
# def html_single_plot(self,abfID,launch=False,overwrite=False):
# def html_index(self,launch=True):
# def doStuff(ABFfolder,analyze=False,convert=False,index=True,overwrite=True,
# launch=True):
# def analyzeSingle(abfFname):
# ID="UNKNOWN"
# ID=fname[:8]
# ID="UNKNOWN"
# ID=fname[:8]
# IN=INDEX(ABFfolder)
# IN=INDEX(ABFfolder)
. Output only the next line. | swhlab.plot.save(abf,tag="check") |
Given the code snippet: <|code_start|> pylab.title("average (n=%d)"%abf.sweeps)
swhlab.plot.save(abf,tag='overlay')
def standard_groupingForInj(abf,target=200):
for i in range(abf.sweeps):
abf.setSweep(i)
if abf.protoSeqY[1]==target: #first step is target pA injection
swhlab.ap.check_AP_group(abf,i)
swhlab.plot.save(abf,tag='05-grouping',resize=False)
### --- SWHLab4 protocols ---
def proto_00_01_gf(abf=exampleABF):
"""gap free recording"""
standard_inspect(abf)
def proto_00_02_egf(abf=exampleABF):
"""episodic with no epochs (virtually gap free)"""
standard_inspect(abf)
def proto_01_01_HP010(abf=exampleABF):
"""hyperpolarization step. Use to calculate tau and stuff."""
swhlab.memtest.memtest(abf) #knows how to do IC memtest
swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did
swhlab.plot.save(abf,tag="tau")
def proto_01_11_rampStep(abf=exampleABF):
"""each sweep is a ramp (of set size) which builds on the last sweep.
Used for detection of AP properties from first few APs."""
standard_inspect(abf)
<|code_end|>
, generate the next line using the imports in this file:
import os
import sys
import glob
import pylab
import numpy as np
import traceback
import swhlab
from swhlab.core import common as cm #shorthand
from swhlab.indexing.indexing import genIndex
and context (functions, classes, or occasionally code) from other files:
# Path: swhlab/core.py
# def abfIDfromFname(fname):
# def abfProtocol(fname):
# def headerHTML(header,fname):
# def __init__(self, fname, createFolder=False):
# def setsweep(self, sweep=0, channel=0):
# def sweepList(self):
# def setsweeps(self):
# def comments_load(self):
# def generate_protocol(self):
# def get_protocol(self,sweep):
# def get_protocol_sequence(self,sweep):
# def clamp_values(self,timePoint=0):
# def epochTimes(self,nEpoch=2):
# def average(self,t1=0,t2=None,setsweep=False):
# def averageSweep(self,sweepFirst=0,sweepLast=None):
# def kernel_gaussian(self, sizeMS, sigmaMS=None, forwardOnly=False):
# def sweepYfiltered(self):
# def sweepYsmartbase(self):
# def phasicNet(self,biggestEvent=50,m1=.5,m2=None):
# def output_touch(self):
# def output_clean(self):
# def inspect(self):
# class ABF:
# Y = proto[step]['fEpochInitLevel']+proto[step]['fEpochLevelInc']*self.sweep
# I1,I2=int(t1*self.pointsPerSec),int(t2*self.pointsPerSec)
# Y=self.sweepYsmartbase()[int(m1):int(m2)]
#
# Path: swhlab/indexing/indexing.py
# class INDEX:
# def __init__(self,ABFfolder):
# def scan(self):
# def convertImages(self):
# def analyzeAll(self):
# def analyzeABF(self,ID):
# def htmlFor(self,fname):
# def html_single_basic(self,abfID,launch=False,overwrite=False):
# def html_single_plot(self,abfID,launch=False,overwrite=False):
# def html_index(self,launch=True):
# def doStuff(ABFfolder,analyze=False,convert=False,index=True,overwrite=True,
# launch=True):
# def analyzeSingle(abfFname):
# ID="UNKNOWN"
# ID=fname[:8]
# ID="UNKNOWN"
# ID=fname[:8]
# IN=INDEX(ABFfolder)
# IN=INDEX(ABFfolder)
. Output only the next line. | swhlab.ap.detect(abf) |
Given the code snippet: <|code_start|>
# Uncomment the next two lines to enable the admin:
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', HomeView.as_view(), name="home"),
<|code_end|>
, generate the next line using the imports in this file:
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from django.contrib import admin
from profiles.views import (NextView, DashboardView, HomeView, NoEntriesView,
AboutView, MarkUnreadView, MarkReadView,
UnsubscribeView, subscription, ReportView)
and context (functions, classes, or occasionally code) from other files:
# Path: profiles/views.py
# class NextView(RedirectView):
# permanent = False
#
# def get_redirect_url(self, **kwargs):
# subscription = self.request.GET['subscription']
# try:
# profile = UserProfile.objects.get(next_slug=subscription)
# except Exception:
# raise Http404()
# entries = profile.entries.filter(userentrydetail__read=False)
# if not entries.exists():
# return '/noentries'
# print entries.order_by('published')
# entry = entries.order_by('published')[0]
# user_entry = entry.userentrydetail_set.get(profile=profile)
# user_entry.read = True
# user_entry.save()
# return entry.link
#
# class DashboardView(FormView):
# template_name = 'dashboard.html'
# form_class = FeedForm
# success_url = '/dashboard'
#
# def form_valid(self, form):
# user = self.request.user
# link = form.cleaned_data['link']
# if not link.startswith('http://'):
# link = 'http://{}'.format(link)
# parser = feedparser.parse(link)
# feed = parser.feed
# title = feed.title
# try:
# feed_obj = Feed.objects.get(link=link)
# except ObjectDoesNotExist:
# feed_obj = Feed(link=link, title=title)
# feed_obj.save()
# profile = user.get_profile()
# Subscription(profile=profile, feed=feed_obj).save()
# poll_feed(feed_obj)
# return super(DashboardView, self).form_valid(form)
#
# def get_context_data(self, *args, **kwargs):
# user = self.request.user
# try:
# user.get_profile()
# except:
# UserProfile(user=user).save()
# context = super(DashboardView, self).get_context_data(*args, **kwargs)
# subscriptions = Subscription.objects.filter(profile=user.get_profile())
# context['subscriptions'] = subscriptions
# return context
#
# class HomeView(TemplateView):
# template_name = 'home.html'
#
# class NoEntriesView(TemplateView):
# template_name = 'noentries.html'
#
# class AboutView(TemplateView):
# template_name = 'about.html'
#
# class MarkUnreadView(EditEntriesForFeedView):
# def dispatch(self, request, *args, **kwargs):
# feed = self._get_feed()
# self.request.user.get_profile().mark_unread(feed)
# return super(MarkUnreadView, self).dispatch(request, *args, **kwargs)
#
# class MarkReadView(EditEntriesForFeedView):
# def dispatch(self, request, *args, **kwargs):
# feed = self._get_feed()
# self.request.user.get_profile().mark_read(feed)
# return super(MarkReadView, self).dispatch(request, *args, **kwargs)
#
# class UnsubscribeView(EditEntriesForFeedView):
# def dispatch(self, request, *args, **kwargs):
# feed = self._get_feed()
# self.request.user.get_profile().unsubscribe(feed)
# return super(UnsubscribeView, self).dispatch(request, *args, **kwargs)
#
# def subscription(request):
# profile = request.user.get_profile()
# if request.method == "GET":
# subscriptions = Subscription.objects.filter(profile=profile)
# subscriptions_json = [{'id': s.feed.id,
# 'title': s.feed.title,
# 'unread_entries': profile.unread_entries(s.feed)}
# for s in subscriptions]
# return HttpResponse(json.dumps(subscriptions_json),
# content_type='application/json')
# if request.method == "POST":
# link = json.loads(request.body)['link']
# if not link.startswith('http://'):
# link = 'http://{}'.format(link)
# parser = feedparser.parse(link)
# feed = parser.feed
# try:
# title = feed.title
# except AttributeError:
# return HttpResponseBadRequest('Invalid feed')
# try:
# feed_obj = Feed.objects.get(link=link)
# except ObjectDoesNotExist:
# feed_obj = Feed(link=link, title=title)
# feed_obj.save()
# if Subscription.objects.filter(profile=profile, feed=feed_obj).exists():
# return HttpResponseBadRequest('You are already subscribed to this '
# 'feed')
# Subscription.objects.get_or_create(profile=profile, feed=feed_obj)
# try:
# poll_feed(feed_obj)
# except AttributeError:
# return HttpResponseBadRequest('Invalid feed')
# return HttpResponse()
#
# class ReportView(TemplateView):
#
# def post(self, request):
# data = json.loads(request.body)
# report_form = ReportForm({'summary': data[u'summary'],
# 'details': data[u'details']})
# if report_form.is_valid():
# report_issue(report_form.cleaned_data['summary'],
# report_form.cleaned_data['details'])
# else:
# return HttpResponseBadRequest('Form was invalid: {}'.format(report_form.errors))
# return HttpResponse("Report was submitted!")
. Output only the next line. | url(r'^openid/?', include('django_openid_auth.urls')), |
Given snippet: <|code_start|>
# Uncomment the next two lines to enable the admin:
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', HomeView.as_view(), name="home"),
url(r'^openid/?', include('django_openid_auth.urls')),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from django.contrib import admin
from profiles.views import (NextView, DashboardView, HomeView, NoEntriesView,
AboutView, MarkUnreadView, MarkReadView,
UnsubscribeView, subscription, ReportView)
and context:
# Path: profiles/views.py
# class NextView(RedirectView):
# permanent = False
#
# def get_redirect_url(self, **kwargs):
# subscription = self.request.GET['subscription']
# try:
# profile = UserProfile.objects.get(next_slug=subscription)
# except Exception:
# raise Http404()
# entries = profile.entries.filter(userentrydetail__read=False)
# if not entries.exists():
# return '/noentries'
# print entries.order_by('published')
# entry = entries.order_by('published')[0]
# user_entry = entry.userentrydetail_set.get(profile=profile)
# user_entry.read = True
# user_entry.save()
# return entry.link
#
# class DashboardView(FormView):
# template_name = 'dashboard.html'
# form_class = FeedForm
# success_url = '/dashboard'
#
# def form_valid(self, form):
# user = self.request.user
# link = form.cleaned_data['link']
# if not link.startswith('http://'):
# link = 'http://{}'.format(link)
# parser = feedparser.parse(link)
# feed = parser.feed
# title = feed.title
# try:
# feed_obj = Feed.objects.get(link=link)
# except ObjectDoesNotExist:
# feed_obj = Feed(link=link, title=title)
# feed_obj.save()
# profile = user.get_profile()
# Subscription(profile=profile, feed=feed_obj).save()
# poll_feed(feed_obj)
# return super(DashboardView, self).form_valid(form)
#
# def get_context_data(self, *args, **kwargs):
# user = self.request.user
# try:
# user.get_profile()
# except:
# UserProfile(user=user).save()
# context = super(DashboardView, self).get_context_data(*args, **kwargs)
# subscriptions = Subscription.objects.filter(profile=user.get_profile())
# context['subscriptions'] = subscriptions
# return context
#
# class HomeView(TemplateView):
# template_name = 'home.html'
#
# class NoEntriesView(TemplateView):
# template_name = 'noentries.html'
#
# class AboutView(TemplateView):
# template_name = 'about.html'
#
# class MarkUnreadView(EditEntriesForFeedView):
# def dispatch(self, request, *args, **kwargs):
# feed = self._get_feed()
# self.request.user.get_profile().mark_unread(feed)
# return super(MarkUnreadView, self).dispatch(request, *args, **kwargs)
#
# class MarkReadView(EditEntriesForFeedView):
# def dispatch(self, request, *args, **kwargs):
# feed = self._get_feed()
# self.request.user.get_profile().mark_read(feed)
# return super(MarkReadView, self).dispatch(request, *args, **kwargs)
#
# class UnsubscribeView(EditEntriesForFeedView):
# def dispatch(self, request, *args, **kwargs):
# feed = self._get_feed()
# self.request.user.get_profile().unsubscribe(feed)
# return super(UnsubscribeView, self).dispatch(request, *args, **kwargs)
#
# def subscription(request):
# profile = request.user.get_profile()
# if request.method == "GET":
# subscriptions = Subscription.objects.filter(profile=profile)
# subscriptions_json = [{'id': s.feed.id,
# 'title': s.feed.title,
# 'unread_entries': profile.unread_entries(s.feed)}
# for s in subscriptions]
# return HttpResponse(json.dumps(subscriptions_json),
# content_type='application/json')
# if request.method == "POST":
# link = json.loads(request.body)['link']
# if not link.startswith('http://'):
# link = 'http://{}'.format(link)
# parser = feedparser.parse(link)
# feed = parser.feed
# try:
# title = feed.title
# except AttributeError:
# return HttpResponseBadRequest('Invalid feed')
# try:
# feed_obj = Feed.objects.get(link=link)
# except ObjectDoesNotExist:
# feed_obj = Feed(link=link, title=title)
# feed_obj.save()
# if Subscription.objects.filter(profile=profile, feed=feed_obj).exists():
# return HttpResponseBadRequest('You are already subscribed to this '
# 'feed')
# Subscription.objects.get_or_create(profile=profile, feed=feed_obj)
# try:
# poll_feed(feed_obj)
# except AttributeError:
# return HttpResponseBadRequest('Invalid feed')
# return HttpResponse()
#
# class ReportView(TemplateView):
#
# def post(self, request):
# data = json.loads(request.body)
# report_form = ReportForm({'summary': data[u'summary'],
# 'details': data[u'details']})
# if report_form.is_valid():
# report_issue(report_form.cleaned_data['summary'],
# report_form.cleaned_data['details'])
# else:
# return HttpResponseBadRequest('Form was invalid: {}'.format(report_form.errors))
# return HttpResponse("Report was submitted!")
which might include code, classes, or functions. Output only the next line. | url(r'^admin/?', include(admin.site.urls)), |
Based on the snippet: <|code_start|>
class FeedForm(ModelForm):
class Meta:
model = Feed
fields = ['link']
widgets = {
'link': TextInput(attrs={'placeholder': 'Paste RSS address here!',
'id': 'feed-url-input'})
}
def clean(self):
<|code_end|>
, predict the immediate next line with the help of imports:
from django.forms.models import ModelForm
from django.forms.widgets import TextInput
from feeds.models import Feed
from django.core.exceptions import ValidationError
import feedparser
and context (classes, functions, sometimes code) from other files:
# Path: feeds/models.py
# class Feed(models.Model):
# title = models.CharField(max_length=200)
# link = models.CharField(max_length=1000)
#
# def __unicode__(self):
# return self.title
. Output only the next line. | cleaned_data = super(FeedForm, self).clean() |
Next line prediction: <|code_start|># Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TEST = utils.TestData(test_data.data)
class ZaqarRestTestCase(test.TestCase):
# Queues
@mock.patch.object(zaqar, 'zaqar')
def test_queue_get(self, client):
# for check test env
self.assertTrue(1 * 1 == 1)
@mock.patch.object(zaqar, 'zaqar')
def test_queue_create(self, client):
# for check test env
<|code_end|>
. Use current file imports:
(from unittest import mock
from openstack_dashboard.test import helpers as test
from openstack_dashboard.test.test_data import utils
from zaqar_ui.api.rest import zaqar
from zaqar_ui.test import test_data)
and context including class names, function names, or small code snippets from other files:
# Path: zaqar_ui/api/rest/zaqar.py
# def _convert_to_yaml(data, default_flow_style=False):
# def _load_yaml(data):
# def get(self, request, queue_name):
# def post(self, request, queue_name):
# def post(self, request, queue_name, action):
# def get(self, request):
# def delete(self, request):
# def put(self, request):
# def get(self, request, queue_name):
# def delete(self, request, queue_name):
# def put(self, request, queue_name):
# def get(self, request, queue_name):
# def post(self, request, queue_name):
# def post(self, request, queue_name, subscriber):
# def get(self, request, pool_name):
# def post(self, request, pool_name):
# def get(self, request):
# def delete(self, request):
# def put(self, request):
# def get(self, request, flavor_name):
# def post(self, request, flavor_name):
# def get(self, request):
# def delete(self, request):
# def put(self, request):
# class Queue(generic.View):
# class QueueActions(generic.View):
# class Queues(generic.View):
# class Subscriptions(generic.View):
# class Messages(generic.View):
# class Subscription(generic.View):
# class Pool(generic.View):
# class Pools(generic.View):
# class Flavor(generic.View):
# class Flavors(generic.View):
#
# Path: zaqar_ui/test/test_data.py
# def data(TEST):
. Output only the next line. | self.assertTrue(1 + 1 == 2) |
Predict the next line for this snippet: <|code_start|># Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TEST = utils.TestData(test_data.data)
class ZaqarRestTestCase(test.TestCase):
# Queues
@mock.patch.object(zaqar, 'zaqar')
def test_queue_get(self, client):
# for check test env
self.assertTrue(1 * 1 == 1)
@mock.patch.object(zaqar, 'zaqar')
<|code_end|>
with the help of current file imports:
from unittest import mock
from openstack_dashboard.test import helpers as test
from openstack_dashboard.test.test_data import utils
from zaqar_ui.api.rest import zaqar
from zaqar_ui.test import test_data
and context from other files:
# Path: zaqar_ui/api/rest/zaqar.py
# def _convert_to_yaml(data, default_flow_style=False):
# def _load_yaml(data):
# def get(self, request, queue_name):
# def post(self, request, queue_name):
# def post(self, request, queue_name, action):
# def get(self, request):
# def delete(self, request):
# def put(self, request):
# def get(self, request, queue_name):
# def delete(self, request, queue_name):
# def put(self, request, queue_name):
# def get(self, request, queue_name):
# def post(self, request, queue_name):
# def post(self, request, queue_name, subscriber):
# def get(self, request, pool_name):
# def post(self, request, pool_name):
# def get(self, request):
# def delete(self, request):
# def put(self, request):
# def get(self, request, flavor_name):
# def post(self, request, flavor_name):
# def get(self, request):
# def delete(self, request):
# def put(self, request):
# class Queue(generic.View):
# class QueueActions(generic.View):
# class Queues(generic.View):
# class Subscriptions(generic.View):
# class Messages(generic.View):
# class Subscription(generic.View):
# class Pool(generic.View):
# class Pools(generic.View):
# class Flavor(generic.View):
# class Flavors(generic.View):
#
# Path: zaqar_ui/test/test_data.py
# def data(TEST):
, which may contain function names, class names, or code. Output only the next line. | def test_queue_create(self, client): |
Next line prediction: <|code_start|># Copyright 2015 Cisco Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def _convert_to_yaml(data, default_flow_style=False):
if not data:
return ''
try:
return yaml.safe_dump(data, default_flow_style=default_flow_style)
except Exception:
return ''
def _load_yaml(data):
<|code_end|>
. Use current file imports:
(import json
import yaml
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from zaqar_ui.api import zaqar)
and context including class names, function names, or small code snippets from other files:
# Path: zaqar_ui/api/zaqar.py
# LOG = logging.getLogger(__name__)
# RESERVED_QUEUE_METADATA = ["_max_messages_post_size", "_default_message_ttl"]
# def zaqarclient(request):
# def queue_list(request, limit=None, marker=None):
# def queue_create(request, queue_name, metadata):
# def queue_delete(request, queue_name):
# def queue_update(request, queue_name, metadata):
# def queue_get(request, queue_name):
# def queue_purge(request, queue_name, resource_types):
# def message_post(request, queue_name, messages_data):
# def message_list(request, queue_name):
# def queue_signed_url(request, queue_name, paths, ttl_seconds, methods):
# def subscription_list(request, queue_name):
# def subscription_create(request, queue_name, sub_data):
# def subscription_delete(request, queue_name, sub_data):
# def subscription_update(request, queue_name, old_data, new_data):
# def pool_list(request, limit=None, marker=None):
# def pool_create(request, pool_name, params):
# def pool_delete(request, pool_name):
# def pool_update(request, pool_name, params):
# def pool_get(request, pool_name):
# def flavor_list(request, limit=None, marker=None):
# def flavor_create(request, flavor_name, params):
# def flavor_delete(request, flavor_name):
# def flavor_update(request, flavor_name, params):
# def flavor_get(request, flavor_name):
. Output only the next line. | if not data: |
Predict the next line after this snippet: <|code_start|>
response = {}
attempt = 0
while response == {}:
url = "https://api.steampowered.com/idota2match_570/getmatchhistory/v001/?key=" + SteamAPIKey + "&account_id=" \
+ str(playerID) + "&matches_requested=" + str(amount - len(matches)) + "&start_at_match_id=" + str(start_at_match_id)
if heroID is not None:
url += '&hero_id=' + str(heroID)
if tournamentGamesOnly:
url += '&tournament_games_only=1'
if gameModeID is not None: #TODO: Game Mode is broken in Steam API! Try this again when it is working.
url += '&game_mode=' + str(gameModeID)
response = requests.get(url)
response.connection.close()
response = response.json()
if response == {}:
attempt += 1
if (attempt == 30):
print('Tried %s times, cancelling API request. (Skipped counter increases)')
break
print('Failed API request (empty json), retrying in %s seconds; attempt #%s' %(2, attempt))
<|code_end|>
using the current file's imports:
import requests, time
from steamapi.steamapikey import SteamAPIKey
from reddit.botinfo import message
and any relevant context from other files:
# Path: steamapi/steamapikey.py
#
# Path: reddit/botinfo.py
. Output only the next line. | time.sleep(1) |
Using the snippet: <|code_start|>#message = True
def requestGetMatchHistory(playerID, amount=100, heroID=None, gameModeID=None, tournamentGamesOnly=False):
if message: print('[getmatchhistory] Getting matchhistory of player id: %s' %playerID)
start_at_match_id = 0
results_remaining = True
matches = []
while(results_remaining and amount-len(matches) > 0):
response = {}
attempt = 0
while response == {}:
url = "https://api.steampowered.com/idota2match_570/getmatchhistory/v001/?key=" + SteamAPIKey + "&account_id=" \
+ str(playerID) + "&matches_requested=" + str(amount - len(matches)) + "&start_at_match_id=" + str(start_at_match_id)
if heroID is not None:
url += '&hero_id=' + str(heroID)
<|code_end|>
, determine the next line of code. You have imports:
import requests, time
from steamapi.steamapikey import SteamAPIKey
from reddit.botinfo import message
and context (class names, function names, or code) available:
# Path: steamapi/steamapikey.py
#
# Path: reddit/botinfo.py
. Output only the next line. | if tournamentGamesOnly: |
Using the snippet: <|code_start|> if message: print('[loginreddit] updating heroDictionary success')
if message: print('[loginreddit] updating proPlayerDictionary')
getproplayerlist.requestGetProPlayerList()
if message: print('[loginreddit] updating proPlayerDictionary success')
#if message: print('[loginreddit] updating dota 2 items_game schema')
#getschema.requestGetSchema()
#if message: print('[loginreddit] updating dota 2 items_game schema success')
if message: print('[loginreddit] starting threads')
if message: print('[loginreddit] starting deleteBadComments thread')
t = threading.Thread(target=workerdeletebadcomments.deleteBadComments , args = (r,))
t.start()
if message: print('[loginreddit] starting findComments thread')
t = threading.Thread(target=workerfindcomments.findComments, args = (r,))
t.start()
if message: print('[loginreddit] starting deleteRequestedComments thread')
t = threading.Thread(target=workerdeleterequestedcomments.deleteRequestedComments, args = (r,))
t.start()
if message: print('[loginreddit] starting threads success')
<|code_end|>
, determine the next line of code. You have imports:
import praw
import obot
import threading
from steamapi import getheroes, getproplayerlist, getschema, getleaguelisting
from reddit import botinfo
from reddit import workerdeletebadcomments, workerfindcomments, workerdeleterequestedcomments
from reddit.botinfo import message
and context (class names, function names, or code) available:
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: steamapi/getschema.py
# def requestGetSchema():
# URL = "https://api.steampowered.com/IEconItems_570/GetSchemaURL/v1?key=" + SteamAPIKey
# URL = response['result']['items_game_url']
#
# Path: steamapi/getleaguelisting.py
# def requestGetLeagueListing():
# URL = "https://api.steampowered.com/IDOTA2Match_570/GetLeagueListing/v1?key=" + SteamAPIKey
#
# Path: reddit/botinfo.py
#
# Path: reddit/workerdeletebadcomments.py
# def deleteBadComments(r):
#
# Path: reddit/workerfindcomments.py
# def findComments(r):
#
# Path: reddit/workerdeleterequestedcomments.py
# def deleteRequestedComments(r):
#
# Path: reddit/botinfo.py
. Output only the next line. | self.r = r |
Predict the next line for this snippet: <|code_start|>#message = True
class LoginReddit:
def __init__(self):
if message: print('[loginreddit] logging in')
r = praw.Reddit(client_id=obot.client_id,
client_secret=obot.client_secret,
user_agent=obot.user_agent,
<|code_end|>
with the help of current file imports:
import praw
import obot
import threading
from steamapi import getheroes, getproplayerlist, getschema, getleaguelisting
from reddit import botinfo
from reddit import workerdeletebadcomments, workerfindcomments, workerdeleterequestedcomments
from reddit.botinfo import message
and context from other files:
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: steamapi/getschema.py
# def requestGetSchema():
# URL = "https://api.steampowered.com/IEconItems_570/GetSchemaURL/v1?key=" + SteamAPIKey
# URL = response['result']['items_game_url']
#
# Path: steamapi/getleaguelisting.py
# def requestGetLeagueListing():
# URL = "https://api.steampowered.com/IDOTA2Match_570/GetLeagueListing/v1?key=" + SteamAPIKey
#
# Path: reddit/botinfo.py
#
# Path: reddit/workerdeletebadcomments.py
# def deleteBadComments(r):
#
# Path: reddit/workerfindcomments.py
# def findComments(r):
#
# Path: reddit/workerdeleterequestedcomments.py
# def deleteRequestedComments(r):
#
# Path: reddit/botinfo.py
, which may contain function names, class names, or code. Output only the next line. | username=obot.username, |
Based on the snippet: <|code_start|>#message = True
class LoginReddit:
def __init__(self):
if message: print('[loginreddit] logging in')
<|code_end|>
, predict the immediate next line with the help of imports:
import praw
import obot
import threading
from steamapi import getheroes, getproplayerlist, getschema, getleaguelisting
from reddit import botinfo
from reddit import workerdeletebadcomments, workerfindcomments, workerdeleterequestedcomments
from reddit.botinfo import message
and context (classes, functions, sometimes code) from other files:
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: steamapi/getschema.py
# def requestGetSchema():
# URL = "https://api.steampowered.com/IEconItems_570/GetSchemaURL/v1?key=" + SteamAPIKey
# URL = response['result']['items_game_url']
#
# Path: steamapi/getleaguelisting.py
# def requestGetLeagueListing():
# URL = "https://api.steampowered.com/IDOTA2Match_570/GetLeagueListing/v1?key=" + SteamAPIKey
#
# Path: reddit/botinfo.py
#
# Path: reddit/workerdeletebadcomments.py
# def deleteBadComments(r):
#
# Path: reddit/workerfindcomments.py
# def findComments(r):
#
# Path: reddit/workerdeleterequestedcomments.py
# def deleteRequestedComments(r):
#
# Path: reddit/botinfo.py
. Output only the next line. | r = praw.Reddit(client_id=obot.client_id, |
Predict the next line for this snippet: <|code_start|>
if message: print('[loginreddit] logging successful')
if message: print('[loginreddit] updating heroDictionary')
getheroes.requestGetHeroes()
if message: print('[loginreddit] updating heroDictionary success')
if message: print('[loginreddit] updating proPlayerDictionary')
getproplayerlist.requestGetProPlayerList()
if message: print('[loginreddit] updating proPlayerDictionary success')
#if message: print('[loginreddit] updating dota 2 items_game schema')
#getschema.requestGetSchema()
#if message: print('[loginreddit] updating dota 2 items_game schema success')
if message: print('[loginreddit] starting threads')
if message: print('[loginreddit] starting deleteBadComments thread')
t = threading.Thread(target=workerdeletebadcomments.deleteBadComments , args = (r,))
t.start()
if message: print('[loginreddit] starting findComments thread')
t = threading.Thread(target=workerfindcomments.findComments, args = (r,))
t.start()
if message: print('[loginreddit] starting deleteRequestedComments thread')
t = threading.Thread(target=workerdeleterequestedcomments.deleteRequestedComments, args = (r,))
<|code_end|>
with the help of current file imports:
import praw
import obot
import threading
from steamapi import getheroes, getproplayerlist, getschema, getleaguelisting
from reddit import botinfo
from reddit import workerdeletebadcomments, workerfindcomments, workerdeleterequestedcomments
from reddit.botinfo import message
and context from other files:
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: steamapi/getschema.py
# def requestGetSchema():
# URL = "https://api.steampowered.com/IEconItems_570/GetSchemaURL/v1?key=" + SteamAPIKey
# URL = response['result']['items_game_url']
#
# Path: steamapi/getleaguelisting.py
# def requestGetLeagueListing():
# URL = "https://api.steampowered.com/IDOTA2Match_570/GetLeagueListing/v1?key=" + SteamAPIKey
#
# Path: reddit/botinfo.py
#
# Path: reddit/workerdeletebadcomments.py
# def deleteBadComments(r):
#
# Path: reddit/workerfindcomments.py
# def findComments(r):
#
# Path: reddit/workerdeleterequestedcomments.py
# def deleteRequestedComments(r):
#
# Path: reddit/botinfo.py
, which may contain function names, class names, or code. Output only the next line. | t.start() |
Based on the snippet: <|code_start|>#message = True
class LoginReddit:
def __init__(self):
if message: print('[loginreddit] logging in')
r = praw.Reddit(client_id=obot.client_id,
client_secret=obot.client_secret,
user_agent=obot.user_agent,
username=obot.username,
<|code_end|>
, predict the immediate next line with the help of imports:
import praw
import obot
import threading
from steamapi import getheroes, getproplayerlist, getschema, getleaguelisting
from reddit import botinfo
from reddit import workerdeletebadcomments, workerfindcomments, workerdeleterequestedcomments
from reddit.botinfo import message
and context (classes, functions, sometimes code) from other files:
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: steamapi/getschema.py
# def requestGetSchema():
# URL = "https://api.steampowered.com/IEconItems_570/GetSchemaURL/v1?key=" + SteamAPIKey
# URL = response['result']['items_game_url']
#
# Path: steamapi/getleaguelisting.py
# def requestGetLeagueListing():
# URL = "https://api.steampowered.com/IDOTA2Match_570/GetLeagueListing/v1?key=" + SteamAPIKey
#
# Path: reddit/botinfo.py
#
# Path: reddit/workerdeletebadcomments.py
# def deleteBadComments(r):
#
# Path: reddit/workerfindcomments.py
# def findComments(r):
#
# Path: reddit/workerdeleterequestedcomments.py
# def deleteRequestedComments(r):
#
# Path: reddit/botinfo.py
. Output only the next line. | password=obot.password) |
Given snippet: <|code_start|>#message = True
class LoginReddit:
def __init__(self):
if message: print('[loginreddit] logging in')
r = praw.Reddit(client_id=obot.client_id,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import praw
import obot
import threading
from steamapi import getheroes, getproplayerlist, getschema, getleaguelisting
from reddit import botinfo
from reddit import workerdeletebadcomments, workerfindcomments, workerdeleterequestedcomments
from reddit.botinfo import message
and context:
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: steamapi/getschema.py
# def requestGetSchema():
# URL = "https://api.steampowered.com/IEconItems_570/GetSchemaURL/v1?key=" + SteamAPIKey
# URL = response['result']['items_game_url']
#
# Path: steamapi/getleaguelisting.py
# def requestGetLeagueListing():
# URL = "https://api.steampowered.com/IDOTA2Match_570/GetLeagueListing/v1?key=" + SteamAPIKey
#
# Path: reddit/botinfo.py
#
# Path: reddit/workerdeletebadcomments.py
# def deleteBadComments(r):
#
# Path: reddit/workerfindcomments.py
# def findComments(r):
#
# Path: reddit/workerdeleterequestedcomments.py
# def deleteRequestedComments(r):
#
# Path: reddit/botinfo.py
which might include code, classes, or functions. Output only the next line. | client_secret=obot.client_secret, |
Here is a snippet: <|code_start|>
while response == {}:
if message: print('[getmatchdetails] get match details')
URL = "https://api.steampowered.com/IDOTA2Match_570/GetMatchDetails/V001/?key=" + SteamAPIKey + "&match_id=" + str(matchID)
response = requests.get(URL)
response.connection.close()
response = response.json()
# careful Steam API sometimes returns empty JSONs!
# handle this error!
if response == {}:
attempt += 1
if (attempt == 10):
print('Tried %s times, cancelling API request. (Skipped counter increases)')
if q == None:
return response
else:
q.put(response)
break
print('Failed API request, retrying in %s seconds' %(2))
print(URL)
time.sleep(attempt * 2)
continue
else:
if q == None:
return response
else:
<|code_end|>
. Write the next line using the current file imports:
import requests
import time
from steamapi.steamapikey import SteamAPIKey
and context from other files:
# Path: steamapi/steamapikey.py
, which may include functions, classes, or code. Output only the next line. | q.put(response) |
Given the code snippet: <|code_start|>message2 = False
def findComments(r):
subreddit = r.subreddit(SUBREDDIT)
cycles = 0
if message: print('[workerfindcomments] Opening SQL Database')
sql = sqlite3.connect('sql.db')
<|code_end|>
, generate the next line using the imports in this file:
import time
import sqlite3
import threading
from reddit.redditconstants import WAIT, SUBREDDIT, IGNOREAUTHORS, KEYWORDS, MAXPOSTS, CLEANCYCLES
from reddit import workeranalyzecontent
from reddit.botinfo import message
and context (functions, classes, or occasionally code) from other files:
# Path: reddit/redditconstants.py
# WAIT = 10
#
# SUBREDDIT = "AnalyzeLast100Games+Dota2"
#
# IGNOREAUTHORS = ["dotamatch", "AnalyzeLast100Games", "Lumbditest"]
#
# KEYWORDS = ["dotabuff.com/players/", "yasp.co/players/", "opendota.com/players/", "dotabuff.com/matches/", "yasp.co/matches/", "opendota.com/matches/"]
#
# MAXPOSTS = 100
#
# CLEANCYCLES = 10
#
# Path: reddit/workeranalyzecontent.py
# def analyzeContent(post, isPost):
# def RepresentsInt(s):
#
# Path: reddit/botinfo.py
. Output only the next line. | cur = sql.cursor() |
Predict the next line for this snippet: <|code_start|>
if pauthor.lower() == r.user.me():
# Don't reply to yourself, robot!
if message2: print('[workerfindcomments] will not reply to myself')
continue
if IGNOREAUTHORS != [] and any(auth2.lower() == pauthor.lower() for auth2 in IGNOREAUTHORS):
# This post was made by a ignoreauthor
if message2: print('[workerfindcomments] Post made by ignore author: ' + pauthor.lower())
continue
cur.execute('SELECT * FROM oldposts WHERE ID=?', [pid])
if cur.fetchone():
if message2: print('[workerfindcomments] already replied to comment')
continue
cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
sql.commit()
pbody = post.body.lower()
if any(key.lower() in pbody for key in KEYWORDS):
try:
#post.reply('reply success!')
if message: print('[@@@][workerfindcomments] starting analyzecontent thread')
t = threading.Thread(target=workeranalyzecontent.analyzeContent, args = (post,True,)) #isPost = True
t.start()
<|code_end|>
with the help of current file imports:
import time
import sqlite3
import threading
from reddit.redditconstants import WAIT, SUBREDDIT, IGNOREAUTHORS, KEYWORDS, MAXPOSTS, CLEANCYCLES
from reddit import workeranalyzecontent
from reddit.botinfo import message
and context from other files:
# Path: reddit/redditconstants.py
# WAIT = 10
#
# SUBREDDIT = "AnalyzeLast100Games+Dota2"
#
# IGNOREAUTHORS = ["dotamatch", "AnalyzeLast100Games", "Lumbditest"]
#
# KEYWORDS = ["dotabuff.com/players/", "yasp.co/players/", "opendota.com/players/", "dotabuff.com/matches/", "yasp.co/matches/", "opendota.com/matches/"]
#
# MAXPOSTS = 100
#
# CLEANCYCLES = 10
#
# Path: reddit/workeranalyzecontent.py
# def analyzeContent(post, isPost):
# def RepresentsInt(s):
#
# Path: reddit/botinfo.py
, which may contain function names, class names, or code. Output only the next line. | except: |
Predict the next line for this snippet: <|code_start|>message2 = False
def findComments(r):
subreddit = r.subreddit(SUBREDDIT)
cycles = 0
if message: print('[workerfindcomments] Opening SQL Database')
sql = sqlite3.connect('sql.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(id TEXT)')
time.sleep(2)
while True:
try:
if message: print('[workerfindcomments] Searching %s' % SUBREDDIT)
posts = subreddit.stream.comments()
for post in posts:
pid = post.id
try:
pauthor = post.author.name
<|code_end|>
with the help of current file imports:
import time
import sqlite3
import threading
from reddit.redditconstants import WAIT, SUBREDDIT, IGNOREAUTHORS, KEYWORDS, MAXPOSTS, CLEANCYCLES
from reddit import workeranalyzecontent
from reddit.botinfo import message
and context from other files:
# Path: reddit/redditconstants.py
# WAIT = 10
#
# SUBREDDIT = "AnalyzeLast100Games+Dota2"
#
# IGNOREAUTHORS = ["dotamatch", "AnalyzeLast100Games", "Lumbditest"]
#
# KEYWORDS = ["dotabuff.com/players/", "yasp.co/players/", "opendota.com/players/", "dotabuff.com/matches/", "yasp.co/matches/", "opendota.com/matches/"]
#
# MAXPOSTS = 100
#
# CLEANCYCLES = 10
#
# Path: reddit/workeranalyzecontent.py
# def analyzeContent(post, isPost):
# def RepresentsInt(s):
#
# Path: reddit/botinfo.py
, which may contain function names, class names, or code. Output only the next line. | except AttributeError: |
Given the following code snippet before the placeholder: <|code_start|>
for post in posts:
pid = post.id
try:
pauthor = post.author.name
except AttributeError:
if message2: print('[workerfindcomments] author is deleted, don\' care about this post')
continue
if pauthor.lower() == r.user.me():
# Don't reply to yourself, robot!
if message2: print('[workerfindcomments] will not reply to myself')
continue
if IGNOREAUTHORS != [] and any(auth2.lower() == pauthor.lower() for auth2 in IGNOREAUTHORS):
# This post was made by a ignoreauthor
if message2: print('[workerfindcomments] Post made by ignore author: ' + pauthor.lower())
continue
cur.execute('SELECT * FROM oldposts WHERE ID=?', [pid])
if cur.fetchone():
if message2: print('[workerfindcomments] already replied to comment')
continue
<|code_end|>
, predict the next line using imports from the current file:
import time
import sqlite3
import threading
from reddit.redditconstants import WAIT, SUBREDDIT, IGNOREAUTHORS, KEYWORDS, MAXPOSTS, CLEANCYCLES
from reddit import workeranalyzecontent
from reddit.botinfo import message
and context including class names, function names, and sometimes code from other files:
# Path: reddit/redditconstants.py
# WAIT = 10
#
# SUBREDDIT = "AnalyzeLast100Games+Dota2"
#
# IGNOREAUTHORS = ["dotamatch", "AnalyzeLast100Games", "Lumbditest"]
#
# KEYWORDS = ["dotabuff.com/players/", "yasp.co/players/", "opendota.com/players/", "dotabuff.com/matches/", "yasp.co/matches/", "opendota.com/matches/"]
#
# MAXPOSTS = 100
#
# CLEANCYCLES = 10
#
# Path: reddit/workeranalyzecontent.py
# def analyzeContent(post, isPost):
# def RepresentsInt(s):
#
# Path: reddit/botinfo.py
. Output only the next line. | cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) |
Given snippet: <|code_start|>message2 = False
def findComments(r):
subreddit = r.subreddit(SUBREDDIT)
cycles = 0
if message: print('[workerfindcomments] Opening SQL Database')
sql = sqlite3.connect('sql.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(id TEXT)')
time.sleep(2)
while True:
try:
if message: print('[workerfindcomments] Searching %s' % SUBREDDIT)
posts = subreddit.stream.comments()
for post in posts:
pid = post.id
try:
pauthor = post.author.name
except AttributeError:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import time
import sqlite3
import threading
from reddit.redditconstants import WAIT, SUBREDDIT, IGNOREAUTHORS, KEYWORDS, MAXPOSTS, CLEANCYCLES
from reddit import workeranalyzecontent
from reddit.botinfo import message
and context:
# Path: reddit/redditconstants.py
# WAIT = 10
#
# SUBREDDIT = "AnalyzeLast100Games+Dota2"
#
# IGNOREAUTHORS = ["dotamatch", "AnalyzeLast100Games", "Lumbditest"]
#
# KEYWORDS = ["dotabuff.com/players/", "yasp.co/players/", "opendota.com/players/", "dotabuff.com/matches/", "yasp.co/matches/", "opendota.com/matches/"]
#
# MAXPOSTS = 100
#
# CLEANCYCLES = 10
#
# Path: reddit/workeranalyzecontent.py
# def analyzeContent(post, isPost):
# def RepresentsInt(s):
#
# Path: reddit/botinfo.py
which might include code, classes, or functions. Output only the next line. | if message2: print('[workerfindcomments] author is deleted, don\' care about this post') |
Here is a snippet: <|code_start|> if IGNOREAUTHORS != [] and any(auth2.lower() == pauthor.lower() for auth2 in IGNOREAUTHORS):
# This post was made by a ignoreauthor
if message2: print('[workerfindcomments] Post made by ignore author: ' + pauthor.lower())
continue
cur.execute('SELECT * FROM oldposts WHERE ID=?', [pid])
if cur.fetchone():
if message2: print('[workerfindcomments] already replied to comment')
continue
cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
sql.commit()
pbody = post.body.lower()
if any(key.lower() in pbody for key in KEYWORDS):
try:
#post.reply('reply success!')
if message: print('[@@@][workerfindcomments] starting analyzecontent thread')
t = threading.Thread(target=workeranalyzecontent.analyzeContent, args = (post,True,)) #isPost = True
t.start()
except:
if message: print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
if message: print('[workerfindcomments] bot could not reply')
if message: print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
cycles += 1
<|code_end|>
. Write the next line using the current file imports:
import time
import sqlite3
import threading
from reddit.redditconstants import WAIT, SUBREDDIT, IGNOREAUTHORS, KEYWORDS, MAXPOSTS, CLEANCYCLES
from reddit import workeranalyzecontent
from reddit.botinfo import message
and context from other files:
# Path: reddit/redditconstants.py
# WAIT = 10
#
# SUBREDDIT = "AnalyzeLast100Games+Dota2"
#
# IGNOREAUTHORS = ["dotamatch", "AnalyzeLast100Games", "Lumbditest"]
#
# KEYWORDS = ["dotabuff.com/players/", "yasp.co/players/", "opendota.com/players/", "dotabuff.com/matches/", "yasp.co/matches/", "opendota.com/matches/"]
#
# MAXPOSTS = 100
#
# CLEANCYCLES = 10
#
# Path: reddit/workeranalyzecontent.py
# def analyzeContent(post, isPost):
# def RepresentsInt(s):
#
# Path: reddit/botinfo.py
, which may include functions, classes, or code. Output only the next line. | except: |
Based on the snippet: <|code_start|>message2 = False
def findComments(r):
subreddit = r.subreddit(SUBREDDIT)
cycles = 0
if message: print('[workerfindcomments] Opening SQL Database')
sql = sqlite3.connect('sql.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(id TEXT)')
time.sleep(2)
while True:
try:
if message: print('[workerfindcomments] Searching %s' % SUBREDDIT)
posts = subreddit.stream.comments()
for post in posts:
pid = post.id
try:
<|code_end|>
, predict the immediate next line with the help of imports:
import time
import sqlite3
import threading
from reddit.redditconstants import WAIT, SUBREDDIT, IGNOREAUTHORS, KEYWORDS, MAXPOSTS, CLEANCYCLES
from reddit import workeranalyzecontent
from reddit.botinfo import message
and context (classes, functions, sometimes code) from other files:
# Path: reddit/redditconstants.py
# WAIT = 10
#
# SUBREDDIT = "AnalyzeLast100Games+Dota2"
#
# IGNOREAUTHORS = ["dotamatch", "AnalyzeLast100Games", "Lumbditest"]
#
# KEYWORDS = ["dotabuff.com/players/", "yasp.co/players/", "opendota.com/players/", "dotabuff.com/matches/", "yasp.co/matches/", "opendota.com/matches/"]
#
# MAXPOSTS = 100
#
# CLEANCYCLES = 10
#
# Path: reddit/workeranalyzecontent.py
# def analyzeContent(post, isPost):
# def RepresentsInt(s):
#
# Path: reddit/botinfo.py
. Output only the next line. | pauthor = post.author.name |
Predict the next line for this snippet: <|code_start|> response = response.json()
print(response)
global dota2schema
URL = response['result']['items_game_url']
with open ("items_game_url.txt", "r") as text_file:
data=text_file.read()
print(data)
print(URL)
if (data != URL):
response = requests.get(URL)
response.connection.close()
response = response.text
dota2schema = vdf.loads(response)
with open('dota2schema.txt', 'w') as outfile:
json.dump(dota2schema, outfile)
with open("items_game_url.txt", "w") as text_file:
text_file.write(URL)
else:
with open ("dota2schema.txt", "r") as text_file:
<|code_end|>
with the help of current file imports:
import requests
import vdf
import json
from steamapi.steamapikey import SteamAPIKey
and context from other files:
# Path: steamapi/steamapikey.py
, which may contain function names, class names, or code. Output only the next line. | data=text_file.read() |
Given snippet: <|code_start|>#message = True
def deleteRequestedComments(r):
while True:
try:
print('[workerdeleterequestedcomments] checking mail with DELETION subject')
unread = r.inbox.unread()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import time
from reddit.redditconstants import MAXPOSTS, WAIT
from reddit.redditconstants import PRIVILEDGEDAUTHORS
and context:
# Path: reddit/redditconstants.py
# MAXPOSTS = 100
#
# WAIT = 10
#
# Path: reddit/redditconstants.py
# PRIVILEDGEDAUTHORS = ['ReaverXai', 'm4rx', 'klopjobacid', 'Decency', '0Hellspawn0', 'wykrhm', 'crimson589',
# 'Intolerable', 'lestye', 'intolerable-bot', 'D2TournamentThreads', 'AutoModerator',
# 'coronaria', 'leafeator', 'lumbdi']
which might include code, classes, or functions. Output only the next line. | for msg in unread: |
Given snippet: <|code_start|> countryFlag = '[](/%s)' %proPlayerDictionary[playerID]['country_code']
else: countryFlag = ''
playerName = '%.12s' %proPlayerDictionary[playerID]['name'] #trimming name length because of some people
result += '[DB](http://dotabuff.com/players/%s "Dotabuff: Lookup people\'s match history")/' \
'[OD](http://opendota.com/players/%s "OpenDota: Provides free replay analysis")/' \
'[STRATZ](https://stratz.com/player/%s "STRATZ: Every match, every player, every stat. Free.") %s' \
'[%s](#proplayer "name: %s, team name: %s, is locked: %s, sponsor: %s")' %(playerID, playerID, playerID,
countryFlag,
playerName,
proPlayerDictionary[playerID]['name'],
proPlayerDictionary[playerID]['team_name'],
proPlayerDictionary[playerID]['is_locked'],
proPlayerDictionary[playerID]['sponsor'])
result = result.replace('|','').replace('\\','').replace('*','')
return result
else:
if playerSummariesJson==None:
result = '[DB](http://dotabuff.com/players/%s "Dotabuff: Lookup people\'s match history")/' \
'[OD](http://opendota.com/players/%s "OpenDota: Provides free replay analysis")/' \
'[STRATZ](https://stratz.com/player/%s "STRATZ: Every match, every player, every stat. Free.")' %(playerID, playerID, playerID)
else:
for player in playerSummariesJson['response']['players']:
if int(player['steam32id']) == playerID:
try:
playerName = '%.12s' %player['personaname']
except:
playerName = ''
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from steamapi import getproplayerlist
from odotaapi import getodplayerdetails
and context:
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: odotaapi/getodplayerdetails.py
# def getODPlayerDetails(playerID, q=None):
# URL = 'https://api.opendota.com/api/players/' + str(playerID)
which might include code, classes, or functions. Output only the next line. | break |
Predict the next line for this snippet: <|code_start|>
keyValues = ['name', 'description', 'tournament_url', 'itemdef']
response = {}
attempt = 0
while response == {}:
URL = "https://api.steampowered.com/IDOTA2Match_570/GetLeagueListing/v1?key=" + SteamAPIKey
response = requests.get(URL)
response.connection.close()
response = response.json()
if response == {}:
attempt += 1
if (attempt == 5):
print('Tried %s times, cancelling API request. (Skipped counter increases)')
break
print('Failed API request, retrying in %s seconds' %(attempt * 2))
time.sleep(attempt * 2)
continue
else:
break
global leagueListingDictionary
<|code_end|>
with the help of current file imports:
import requests
import time
from steamapi.steamapikey import SteamAPIKey
and context from other files:
# Path: steamapi/steamapikey.py
, which may contain function names, class names, or code. Output only the next line. | for league in response['result']['leagues']: |
Here is a snippet: <|code_start|> 'hero_id': player.get('hero_id', None)}
introTemplate = '####	\n#####	 Hover to view [chat log](https://www.opendota.com/matches/{matchid}/chat)\n######	\n\n' \
'This chat log was provided by [**OpenDota**](https://www.opendota.com/matches/{matchid}/chat)\'s free replay parsing.\n\n'
chatHeader = 'Player | Time | Message\n' \
':-- | :-- | :--\n'
chatLineTemplate = '[](/hero-{heroName}) {playerName} | {time} | {message}\n'
intro = introTemplate.format(matchid=matchOdotaJson['match_id'])
print(chatLog)
chatResult = chatHeader
for i in range(0, len(chatLog)):
time = timeconverter.durationTimeConverter(chatLog[i]['time'])
heroName = heroDictionary[players[chatLog[i]['player_slot']]['hero_id']]
if players[chatLog[i]['player_slot']]['personaname'] is not None:
playerName = '%.9s' %players[chatLog[i]['player_slot']]['personaname']
else:
playerName = ''
if chatLog[i]['type'] == 'chat':
message = chatLog[i]['key']
elif chatLog[i]['type'] == 'chatwheel':
chatwheelFile = 'data/chatwheel.json'
chatwheel = dataIO.load_json(chatwheelFile)
message = '▶ ' + chatwheel[chatLog[i]['key']]
chatResult += chatLineTemplate.format(heroName=heroName, playerName=playerName, time=time, message = message)
<|code_end|>
. Write the next line using the current file imports:
from converter import timeconverter
from steamapi.getheroes import heroDictionary
from dataIO import dataIO
and context from other files:
# Path: converter/timeconverter.py
# def unixTimeConverter(timeVar):
# def durationTimeConverter(timeVar):
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
, which may include functions, classes, or code. Output only the next line. | return (intro + chatResult + '\n---\n\n') |
Based on the snippet: <|code_start|>
def displayResult(matchJson, playerSummariesJson):
introTemplate = '####	\n#####	 ' \
'Hover to view match ID: {matchid} [DB](http://www.dotabuff.com/matches/{matchid})/' \
'[OD](https://www.opendota.com/matches/{matchid})/' \
'[STRATZ](https://stratz.com/match/{matchid})' \
'\n######	\n\n' \
'[**{teamwinner} wins {winnerkills}-{loserkills} @ {time}**](#lumbdi "{additionalinformation}")\n\n'
tableTemplate = 'Lvl | Hero | Player| K/D/A | LH/D | XPM | GPM | HD | HH | TD\n' \
':--|:--:|:--|:--|:--|:--|:--|:--|:--|:--\n'
tableLineTemplate = '{level}|{hero}|{account}|{kda}|{lhd}|{xpm}|{gpm}|{hd}|{hh}|{td}\n'
dividerTemplate = '{level}||↑Radiant↑ ↓Dire↓ |{kda}|{lhd}|{xpm}|{gpm}|{hd}|{hh}|{td}\n'
outtro = '\n\n---\n\n'
#print(introTemplate + tableTemplate + tableLineTemplate + outtroTemplate)
matchID = matchJson['result']["match_id"]
<|code_end|>
, predict the immediate next line with the help of imports:
from steamapi.getproplayerlist import proPlayerDictionary
from steamapi.getheroes import heroDictionary
from steamapi.getgameitems import itemDictionary
from converter import timeconverter, playerconverter
and context (classes, functions, sometimes code) from other files:
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getgameitems.py
# def requestGetGameItems():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: converter/timeconverter.py
# def unixTimeConverter(timeVar):
# def durationTimeConverter(timeVar):
#
# Path: converter/playerconverter.py
# def playerConverter(playerID, playerSummariesJson=None, includeMMR=False):
# def getMMR(playerID):
# MMR = None
# MMR = getMMR(playerID)
# MMR = {}
. Output only the next line. | if matchJson['result']['radiant_win']: |
Using the snippet: <|code_start|>
def displayResult(matchJson, playerSummariesJson):
introTemplate = '####	\n#####	 ' \
'Hover to view match ID: {matchid} [DB](http://www.dotabuff.com/matches/{matchid})/' \
'[OD](https://www.opendota.com/matches/{matchid})/' \
<|code_end|>
, determine the next line of code. You have imports:
from steamapi.getproplayerlist import proPlayerDictionary
from steamapi.getheroes import heroDictionary
from steamapi.getgameitems import itemDictionary
from converter import timeconverter, playerconverter
and context (class names, function names, or code) available:
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getgameitems.py
# def requestGetGameItems():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: converter/timeconverter.py
# def unixTimeConverter(timeVar):
# def durationTimeConverter(timeVar):
#
# Path: converter/playerconverter.py
# def playerConverter(playerID, playerSummariesJson=None, includeMMR=False):
# def getMMR(playerID):
# MMR = None
# MMR = getMMR(playerID)
# MMR = {}
. Output only the next line. | '[STRATZ](https://stratz.com/match/{matchid})' \ |
Based on the snippet: <|code_start|>#message = True
heroDictionary = {}
heroDictionaryDotabuff = {}
def requestGetHeroes():
if message: print('[getheroes] request get heroes...')
URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
response = requests.get(URL)
response.connection.close()
response = response.json()
global heroDictionary
global heroDictionaryDotabuff
for heroID in response['result']['heroes']:
# print str(heroID['id']) + " " + heroID['name']
<|code_end|>
, predict the immediate next line with the help of imports:
import requests
from steamapi.steamapikey import SteamAPIKey
from reddit.botinfo import message
and context (classes, functions, sometimes code) from other files:
# Path: steamapi/steamapikey.py
#
# Path: reddit/botinfo.py
. Output only the next line. | heroID['localized_name'] = heroID['localized_name'].lower().replace(" ", "-").replace("'", "").replace("_", "") |
Given the following code snippet before the placeholder: <|code_start|>#message = True
proPlayerDictionary = {}
playerOnLeaderboard = {}
def requestGetProPlayerList():
keyValues = ['locked_until', 'team_id', 'sponsor', 'is_locked', 'fantasy_role', 'team_tag',
'name', 'country_code', 'is_pro', 'team_name']
URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
response = requests.get(URL)
response.connection.close()
response = response.json()
global proPlayerDictionary
for player in response['player_infos']:
<|code_end|>
, predict the next line using imports from the current file:
import requests
from steamapi.steamapikey import SteamAPIKey
and context including class names, function names, and sometimes code from other files:
# Path: steamapi/steamapikey.py
. Output only the next line. | proPlayerDictionary[player['account_id']] = {} |
Given snippet: <|code_start|>#message = True
itemDictionary = {}
def requestGetGameItems():
if message: print('[getgameitems] request get items...')
URL = "https://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key=" + SteamAPIKey + "&language=en_us"
response = requests.get(URL)
response.connection.close()
response = response.json()
global heroDictionary
for itemID in response['result']['items']:
# print str(heroID['id']) + " " + heroID['name']
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import requests
from steamapi.steamapikey import SteamAPIKey
from reddit.botinfo import message
and context:
# Path: steamapi/steamapikey.py
#
# Path: reddit/botinfo.py
which might include code, classes, or functions. Output only the next line. | itemID['localized_name'] = itemID['localized_name'].lower().replace(" ", "").replace("-", "").replace("_", "").replace("'", "") |
Given snippet: <|code_start|>#message = True
itemDictionary = {}
def requestGetGameItems():
if message: print('[getgameitems] request get items...')
URL = "https://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key=" + SteamAPIKey + "&language=en_us"
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import requests
from steamapi.steamapikey import SteamAPIKey
from reddit.botinfo import message
and context:
# Path: steamapi/steamapikey.py
#
# Path: reddit/botinfo.py
which might include code, classes, or functions. Output only the next line. | response = requests.get(URL) |
Predict the next line after this snippet: <|code_start|> queryParameters = m.group('queryParameters')
website = m.group('website')
heroID = None
gameModeID = None
if queryParameters != None and (website == 'opendota.com' or website == 'yasp.co'):
try:
n = re.search(patternHero, queryParameters, re.I)
if n != None:
heroID = n.group('heroID')
n = re.search(patternGameMode, queryParameters, re.I)
if n != None:
gameModeID = n.group('gameModeID')
except:
print('[workeranalyzecontent] OpenDota hero filtering crashed, needs investigation')
if queryParameters != None and (website == 'dotabuff.com'):
try:
n = re.search(patternHero, queryParameters, re.I)
if n != None:
heroID = n.group('heroID')
reverseDBHeroDictionary = {v:k for k,v in getheroes.heroDictionaryDotabuff.items()}
heroID = reverseDBHeroDictionary[heroID]
except:
print('[workeranalyzecontent] Dotabuff hero filtering crashed, needs investigation')
try:
partialReply += str(averagelastxgames.averageLastXGames(int(playerID), amount=100, detailedAnalysis=False, heroID=heroID, gameModeID=gameModeID, getMMR=True))
commandCounter += 1
analyzedPlayers.append(playerID)
except:
print('[workeranalyzecontent] Could not analyze player.')
<|code_end|>
using the current file's imports:
from botcommands import averagelastxgames, match, odotachat
from steamapi import getheroes
from reddit.botinfo import message,botName
from steamapi import getheroes
import time
import re
and any relevant context from other files:
# Path: botcommands/averagelastxgames.py
# def averageLastXGames(playerID, amount=100, detailedAnalysis=False, heroID=None, gameModeID=None, tournamentGamesOnly=False, getMMR=True):
# def analyzeMatches(playerID, detailedMatches):
# def addStatsTogether(storage, player, addTogether):
# def testBit(int_type, offset):
# def amountOfTowersDestroyed(tower_status):
# def amountOfBarracksDestroyed(barracks_status):
#
# Path: botcommands/match.py
# def match(matchID):
#
#
# t = threading.Thread(target=requestparsematch.requestParseMatch, args = (matchID,))
# t.start()
#
# matchJson = getmatchdetails.getMatchDetails(matchID)
#
# accountIDs = []
# for player in matchJson['result']['players']:
# accountIDs.append(player['account_id'])
#
# playerSummariesJson = getplayersummaries.getPlayerSummaries(accountIDs)
#
# reply = drmatch.displayResult(matchJson, playerSummariesJson)
#
# return reply
#
# Path: botcommands/odotachat.py
# def odotaChat(matchID):
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: reddit/botinfo.py
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
. Output only the next line. | else: |
Given snippet: <|code_start|> heroID = n.group('heroID')
reverseDBHeroDictionary = {v:k for k,v in getheroes.heroDictionaryDotabuff.items()}
heroID = reverseDBHeroDictionary[heroID]
except:
print('[workeranalyzecontent] Dotabuff hero filtering crashed, needs investigation')
try:
partialReply += str(averagelastxgames.averageLastXGames(int(playerID), amount=100, detailedAnalysis=False, heroID=heroID, gameModeID=gameModeID, getMMR=True))
commandCounter += 1
analyzedPlayers.append(playerID)
except:
print('[workeranalyzecontent] Could not analyze player.')
else:
break
except:
print('[workeranalyzecontent] failed to average last x games on')
try:
pattern = '(yasp\.co|dotabuff\.com|opendota\.com)\/matches\/(?P<matchID>\d{1,10})(\/(?P<tab>\w+))?'
for m in re.finditer(pattern, text, re.I):
if commandCounter < 3: #Reddit has character limit, only have room for 3 match analysis
matchID = m.group('matchID')
tab = m.group('tab')
if matchID not in analyzedMatches:
if tab == 'chat':
partialReply += str(odotachat.odotaChat(matchID))
else:
partialReply += str(match.match(matchID))
commandCounter += 1
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from botcommands import averagelastxgames, match, odotachat
from steamapi import getheroes
from reddit.botinfo import message,botName
from steamapi import getheroes
import time
import re
and context:
# Path: botcommands/averagelastxgames.py
# def averageLastXGames(playerID, amount=100, detailedAnalysis=False, heroID=None, gameModeID=None, tournamentGamesOnly=False, getMMR=True):
# def analyzeMatches(playerID, detailedMatches):
# def addStatsTogether(storage, player, addTogether):
# def testBit(int_type, offset):
# def amountOfTowersDestroyed(tower_status):
# def amountOfBarracksDestroyed(barracks_status):
#
# Path: botcommands/match.py
# def match(matchID):
#
#
# t = threading.Thread(target=requestparsematch.requestParseMatch, args = (matchID,))
# t.start()
#
# matchJson = getmatchdetails.getMatchDetails(matchID)
#
# accountIDs = []
# for player in matchJson['result']['players']:
# accountIDs.append(player['account_id'])
#
# playerSummariesJson = getplayersummaries.getPlayerSummaries(accountIDs)
#
# reply = drmatch.displayResult(matchJson, playerSummariesJson)
#
# return reply
#
# Path: botcommands/odotachat.py
# def odotaChat(matchID):
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: reddit/botinfo.py
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
which might include code, classes, or functions. Output only the next line. | analyzedMatches.append(matchID) |
Given snippet: <|code_start|> partialReply += str(match.match(matchID))
commandCounter += 1
analyzedMatches.append(matchID)
else:
break
except:
print('[workeranalyzecontent] failed to get match')
replyIntro = ''
replyEnd = '[^^source](http://github.com/NNTin/Reply-Dota-2-Reddit) ^^on ^^github, [^^message](https://www.reddit.com/message/compose/?to=lumbdi) ^^the ^^owner'
replyEnd = '[^^source](http://github.com/NNTin/Reply-Dota-2-Reddit) ^^on ^^github, [^^message](https://www.reddit.com/message/compose/?to=lumbdi) ^^the ^^owner ^^on [^^Discord](https://discord.gg/Dkg79tc)'
reply = replyIntro + partialReply + replyEnd
if(commandCounter != 0):
i = 0
while i < 20:
i += 1
try:
if isPost:
my_new_comment = post.reply(reply)
print('reply success')
j = 0
while j < 20:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from botcommands import averagelastxgames, match, odotachat
from steamapi import getheroes
from reddit.botinfo import message,botName
from steamapi import getheroes
import time
import re
and context:
# Path: botcommands/averagelastxgames.py
# def averageLastXGames(playerID, amount=100, detailedAnalysis=False, heroID=None, gameModeID=None, tournamentGamesOnly=False, getMMR=True):
# def analyzeMatches(playerID, detailedMatches):
# def addStatsTogether(storage, player, addTogether):
# def testBit(int_type, offset):
# def amountOfTowersDestroyed(tower_status):
# def amountOfBarracksDestroyed(barracks_status):
#
# Path: botcommands/match.py
# def match(matchID):
#
#
# t = threading.Thread(target=requestparsematch.requestParseMatch, args = (matchID,))
# t.start()
#
# matchJson = getmatchdetails.getMatchDetails(matchID)
#
# accountIDs = []
# for player in matchJson['result']['players']:
# accountIDs.append(player['account_id'])
#
# playerSummariesJson = getplayersummaries.getPlayerSummaries(accountIDs)
#
# reply = drmatch.displayResult(matchJson, playerSummariesJson)
#
# return reply
#
# Path: botcommands/odotachat.py
# def odotaChat(matchID):
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: reddit/botinfo.py
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
which might include code, classes, or functions. Output only the next line. | j += 1 |
Given the code snippet: <|code_start|> partialReply += str(odotachat.odotaChat(matchID))
else:
partialReply += str(match.match(matchID))
commandCounter += 1
analyzedMatches.append(matchID)
else:
break
except:
print('[workeranalyzecontent] failed to get match')
replyIntro = ''
replyEnd = '[^^source](http://github.com/NNTin/Reply-Dota-2-Reddit) ^^on ^^github, [^^message](https://www.reddit.com/message/compose/?to=lumbdi) ^^the ^^owner'
replyEnd = '[^^source](http://github.com/NNTin/Reply-Dota-2-Reddit) ^^on ^^github, [^^message](https://www.reddit.com/message/compose/?to=lumbdi) ^^the ^^owner ^^on [^^Discord](https://discord.gg/Dkg79tc)'
reply = replyIntro + partialReply + replyEnd
if(commandCounter != 0):
i = 0
while i < 20:
i += 1
try:
if isPost:
my_new_comment = post.reply(reply)
print('reply success')
<|code_end|>
, generate the next line using the imports in this file:
from botcommands import averagelastxgames, match, odotachat
from steamapi import getheroes
from reddit.botinfo import message,botName
from steamapi import getheroes
import time
import re
and context (functions, classes, or occasionally code) from other files:
# Path: botcommands/averagelastxgames.py
# def averageLastXGames(playerID, amount=100, detailedAnalysis=False, heroID=None, gameModeID=None, tournamentGamesOnly=False, getMMR=True):
# def analyzeMatches(playerID, detailedMatches):
# def addStatsTogether(storage, player, addTogether):
# def testBit(int_type, offset):
# def amountOfTowersDestroyed(tower_status):
# def amountOfBarracksDestroyed(barracks_status):
#
# Path: botcommands/match.py
# def match(matchID):
#
#
# t = threading.Thread(target=requestparsematch.requestParseMatch, args = (matchID,))
# t.start()
#
# matchJson = getmatchdetails.getMatchDetails(matchID)
#
# accountIDs = []
# for player in matchJson['result']['players']:
# accountIDs.append(player['account_id'])
#
# playerSummariesJson = getplayersummaries.getPlayerSummaries(accountIDs)
#
# reply = drmatch.displayResult(matchJson, playerSummariesJson)
#
# return reply
#
# Path: botcommands/odotachat.py
# def odotaChat(matchID):
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: reddit/botinfo.py
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
. Output only the next line. | j = 0 |
Given the code snippet: <|code_start|> if isPost:
text = post.body.lower()
else:
print('this is a thread')
partialReply = ''
analyzedMatches = []
analyzedPlayers = []
commandCounter = 0
try:
pattern = '(?P<website>yasp\.co|dotabuff\.com|opendota\.com)\/players\/(?P<playerID>\d{1,9})((\/(?P<tab>\w+))?((?P<queryParameters>\S+))?)?'
patternHero = '[?&](hero|hero_id)=(?P<heroID>[\w\-]+)'
#TODO: check if gameMode filtering is working, as of 2016-11-01 query parameter broken in SteamAPI
patternGameMode = '[?&]game_mode=(?P<gameModeID>\w+)'
for m in re.finditer(pattern, text, re.I):
if commandCounter < 3: #Reddit has character limit, only have room for 3 player analysis
playerID = m.group('playerID')
if playerID not in analyzedPlayers:
queryParameters = m.group('queryParameters')
website = m.group('website')
heroID = None
gameModeID = None
if queryParameters != None and (website == 'opendota.com' or website == 'yasp.co'):
try:
n = re.search(patternHero, queryParameters, re.I)
if n != None:
<|code_end|>
, generate the next line using the imports in this file:
from botcommands import averagelastxgames, match, odotachat
from steamapi import getheroes
from reddit.botinfo import message,botName
from steamapi import getheroes
import time
import re
and context (functions, classes, or occasionally code) from other files:
# Path: botcommands/averagelastxgames.py
# def averageLastXGames(playerID, amount=100, detailedAnalysis=False, heroID=None, gameModeID=None, tournamentGamesOnly=False, getMMR=True):
# def analyzeMatches(playerID, detailedMatches):
# def addStatsTogether(storage, player, addTogether):
# def testBit(int_type, offset):
# def amountOfTowersDestroyed(tower_status):
# def amountOfBarracksDestroyed(barracks_status):
#
# Path: botcommands/match.py
# def match(matchID):
#
#
# t = threading.Thread(target=requestparsematch.requestParseMatch, args = (matchID,))
# t.start()
#
# matchJson = getmatchdetails.getMatchDetails(matchID)
#
# accountIDs = []
# for player in matchJson['result']['players']:
# accountIDs.append(player['account_id'])
#
# playerSummariesJson = getplayersummaries.getPlayerSummaries(accountIDs)
#
# reply = drmatch.displayResult(matchJson, playerSummariesJson)
#
# return reply
#
# Path: botcommands/odotachat.py
# def odotaChat(matchID):
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: reddit/botinfo.py
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
. Output only the next line. | heroID = n.group('heroID') |
Continue the code snippet: <|code_start|> pattern = '(?P<website>yasp\.co|dotabuff\.com|opendota\.com)\/players\/(?P<playerID>\d{1,9})((\/(?P<tab>\w+))?((?P<queryParameters>\S+))?)?'
patternHero = '[?&](hero|hero_id)=(?P<heroID>[\w\-]+)'
#TODO: check if gameMode filtering is working, as of 2016-11-01 query parameter broken in SteamAPI
patternGameMode = '[?&]game_mode=(?P<gameModeID>\w+)'
for m in re.finditer(pattern, text, re.I):
if commandCounter < 3: #Reddit has character limit, only have room for 3 player analysis
playerID = m.group('playerID')
if playerID not in analyzedPlayers:
queryParameters = m.group('queryParameters')
website = m.group('website')
heroID = None
gameModeID = None
if queryParameters != None and (website == 'opendota.com' or website == 'yasp.co'):
try:
n = re.search(patternHero, queryParameters, re.I)
if n != None:
heroID = n.group('heroID')
n = re.search(patternGameMode, queryParameters, re.I)
if n != None:
gameModeID = n.group('gameModeID')
except:
print('[workeranalyzecontent] OpenDota hero filtering crashed, needs investigation')
if queryParameters != None and (website == 'dotabuff.com'):
try:
n = re.search(patternHero, queryParameters, re.I)
if n != None:
heroID = n.group('heroID')
reverseDBHeroDictionary = {v:k for k,v in getheroes.heroDictionaryDotabuff.items()}
heroID = reverseDBHeroDictionary[heroID]
<|code_end|>
. Use current file imports:
from botcommands import averagelastxgames, match, odotachat
from steamapi import getheroes
from reddit.botinfo import message,botName
from steamapi import getheroes
import time
import re
and context (classes, functions, or code) from other files:
# Path: botcommands/averagelastxgames.py
# def averageLastXGames(playerID, amount=100, detailedAnalysis=False, heroID=None, gameModeID=None, tournamentGamesOnly=False, getMMR=True):
# def analyzeMatches(playerID, detailedMatches):
# def addStatsTogether(storage, player, addTogether):
# def testBit(int_type, offset):
# def amountOfTowersDestroyed(tower_status):
# def amountOfBarracksDestroyed(barracks_status):
#
# Path: botcommands/match.py
# def match(matchID):
#
#
# t = threading.Thread(target=requestparsematch.requestParseMatch, args = (matchID,))
# t.start()
#
# matchJson = getmatchdetails.getMatchDetails(matchID)
#
# accountIDs = []
# for player in matchJson['result']['players']:
# accountIDs.append(player['account_id'])
#
# playerSummariesJson = getplayersummaries.getPlayerSummaries(accountIDs)
#
# reply = drmatch.displayResult(matchJson, playerSummariesJson)
#
# return reply
#
# Path: botcommands/odotachat.py
# def odotaChat(matchID):
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: reddit/botinfo.py
#
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
. Output only the next line. | except: |
Based on the snippet: <|code_start|>
def match(matchID):
t = threading.Thread(target=requestparsematch.requestParseMatch, args = (matchID,))
t.start()
matchJson = getmatchdetails.getMatchDetails(matchID)
<|code_end|>
, predict the immediate next line with the help of imports:
from steamapi import getmatchdetails, getplayersummaries
from displayreddit import drmatch
from odotaapi import requestparsematch
import threading
and context (classes, functions, sometimes code) from other files:
# Path: steamapi/getmatchdetails.py
# def getMatchDetails(matchID, q=None):
# URL = "https://api.steampowered.com/IDOTA2Match_570/GetMatchDetails/V001/?key=" + SteamAPIKey + "&match_id=" + str(matchID)
#
# Path: steamapi/getplayersummaries.py
# def getPlayerSummaries(accountIDs):
# URL = "https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v2/?key=" + SteamAPIKey + "&steamids=" + accountIDString
#
# Path: displayreddit/drmatch.py
# def displayResult(matchJson, playerSummariesJson):
#
# Path: odotaapi/requestparsematch.py
# def requestParseMatch(matchID, holdUntilParsed=False):
# def isParsed(jobId):
. Output only the next line. | accountIDs = [] |
Predict the next line after this snippet: <|code_start|>
def match(matchID):
t = threading.Thread(target=requestparsematch.requestParseMatch, args = (matchID,))
t.start()
matchJson = getmatchdetails.getMatchDetails(matchID)
accountIDs = []
for player in matchJson['result']['players']:
<|code_end|>
using the current file's imports:
from steamapi import getmatchdetails, getplayersummaries
from displayreddit import drmatch
from odotaapi import requestparsematch
import threading
and any relevant context from other files:
# Path: steamapi/getmatchdetails.py
# def getMatchDetails(matchID, q=None):
# URL = "https://api.steampowered.com/IDOTA2Match_570/GetMatchDetails/V001/?key=" + SteamAPIKey + "&match_id=" + str(matchID)
#
# Path: steamapi/getplayersummaries.py
# def getPlayerSummaries(accountIDs):
# URL = "https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v2/?key=" + SteamAPIKey + "&steamids=" + accountIDString
#
# Path: displayreddit/drmatch.py
# def displayResult(matchJson, playerSummariesJson):
#
# Path: odotaapi/requestparsematch.py
# def requestParseMatch(matchID, holdUntilParsed=False):
# def isParsed(jobId):
. Output only the next line. | accountIDs.append(player['account_id']) |
Given the code snippet: <|code_start|>
def match(matchID):
t = threading.Thread(target=requestparsematch.requestParseMatch, args = (matchID,))
t.start()
matchJson = getmatchdetails.getMatchDetails(matchID)
<|code_end|>
, generate the next line using the imports in this file:
from steamapi import getmatchdetails, getplayersummaries
from displayreddit import drmatch
from odotaapi import requestparsematch
import threading
and context (functions, classes, or occasionally code) from other files:
# Path: steamapi/getmatchdetails.py
# def getMatchDetails(matchID, q=None):
# URL = "https://api.steampowered.com/IDOTA2Match_570/GetMatchDetails/V001/?key=" + SteamAPIKey + "&match_id=" + str(matchID)
#
# Path: steamapi/getplayersummaries.py
# def getPlayerSummaries(accountIDs):
# URL = "https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v2/?key=" + SteamAPIKey + "&steamids=" + accountIDString
#
# Path: displayreddit/drmatch.py
# def displayResult(matchJson, playerSummariesJson):
#
# Path: odotaapi/requestparsematch.py
# def requestParseMatch(matchID, holdUntilParsed=False):
# def isParsed(jobId):
. Output only the next line. | accountIDs = [] |
Given snippet: <|code_start|>
def match(matchID):
t = threading.Thread(target=requestparsematch.requestParseMatch, args = (matchID,))
t.start()
matchJson = getmatchdetails.getMatchDetails(matchID)
accountIDs = []
for player in matchJson['result']['players']:
accountIDs.append(player['account_id'])
playerSummariesJson = getplayersummaries.getPlayerSummaries(accountIDs)
reply = drmatch.displayResult(matchJson, playerSummariesJson)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from steamapi import getmatchdetails, getplayersummaries
from displayreddit import drmatch
from odotaapi import requestparsematch
import threading
and context:
# Path: steamapi/getmatchdetails.py
# def getMatchDetails(matchID, q=None):
# URL = "https://api.steampowered.com/IDOTA2Match_570/GetMatchDetails/V001/?key=" + SteamAPIKey + "&match_id=" + str(matchID)
#
# Path: steamapi/getplayersummaries.py
# def getPlayerSummaries(accountIDs):
# URL = "https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v2/?key=" + SteamAPIKey + "&steamids=" + accountIDString
#
# Path: displayreddit/drmatch.py
# def displayResult(matchJson, playerSummariesJson):
#
# Path: odotaapi/requestparsematch.py
# def requestParseMatch(matchID, holdUntilParsed=False):
# def isParsed(jobId):
which might include code, classes, or functions. Output only the next line. | return reply |
Based on the snippet: <|code_start|> attempt = 0
while response == {}:
URL = 'https://api.steampowered.com/IDOTA2MatchStats_570/GetRealtimeStats/v1?key=' + SteamAPIKey + '&server_steam_id=' + str(serverSteamID)
print(URL)
response = requests.get(URL)
response.connection.close()
response = response.json()
if response == {}:
attempt += 1
if (attempt == 5):
print('Tried %s times, cancelling API request. (Skipped counter increases)')
break
print('Failed API request, retrying in %s seconds' %(attempt * 2))
time.sleep(attempt * 2)
continue
else:
break
# careful Steam API sometimes returns empty JSONs!
# handle this error!
except:
response = {}
# future, retry until it works!
<|code_end|>
, predict the immediate next line with the help of imports:
import requests, time
from steamapi.steamapikey import SteamAPIKey
and context (classes, functions, sometimes code) from other files:
# Path: steamapi/steamapikey.py
. Output only the next line. | return response |
Given snippet: <|code_start|>
def odotaChat(matchID):
requestparsematch.requestParseMatch(matchID, holdUntilParsed=True)
matchJson = getodmatchdetails.getODMatchDetails(matchID)
if matchJson['chat'] is not None:
reply = drodotachat.displayResult(matchJson)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from odotaapi import getodmatchdetails, requestparsematch
from displayreddit import drodotachat
and context:
# Path: odotaapi/getodmatchdetails.py
# def getODMatchDetails(matchID, q=None):
# URL = 'https://api.opendota.com/api/matches/' + str(matchID)
#
# Path: odotaapi/requestparsematch.py
# def requestParseMatch(matchID, holdUntilParsed=False):
# def isParsed(jobId):
#
# Path: displayreddit/drodotachat.py
# def displayResult(matchOdotaJson):
which might include code, classes, or functions. Output only the next line. | return reply |
Given the code snippet: <|code_start|>
def odotaChat(matchID):
requestparsematch.requestParseMatch(matchID, holdUntilParsed=True)
matchJson = getodmatchdetails.getODMatchDetails(matchID)
if matchJson['chat'] is not None:
reply = drodotachat.displayResult(matchJson)
<|code_end|>
, generate the next line using the imports in this file:
from odotaapi import getodmatchdetails, requestparsematch
from displayreddit import drodotachat
and context (functions, classes, or occasionally code) from other files:
# Path: odotaapi/getodmatchdetails.py
# def getODMatchDetails(matchID, q=None):
# URL = 'https://api.opendota.com/api/matches/' + str(matchID)
#
# Path: odotaapi/requestparsematch.py
# def requestParseMatch(matchID, holdUntilParsed=False):
# def isParsed(jobId):
#
# Path: displayreddit/drodotachat.py
# def displayResult(matchOdotaJson):
. Output only the next line. | return reply |
Based on the snippet: <|code_start|>
#from steamapi.getheroes import heroDictionary, requestGetHeroes
def printTableLine(name, source, divider):
keyValues = ['kills', 'deaths', 'assists', 'last_hits', 'denies', 'gold_per_min', 'xp_per_min', 'hero_damage', 'tower_damage', 'hero_healing']
#'level', 'gold', 'gold_spent',
tableLine = '**%s**' %name
for key in keyValues:
tableLine = tableLine + ' | ' + str(round((source[key]/divider), 2))
tableLine = tableLine + ' | ' + str(source.get('leaver_status', 0))
#TODO provide match ids of leaver games (fuck this shit)
return tableLine + '\n'
def printHeroLine(name, playedHeroes):
sortedPlayedHeroes = sorted(playedHeroes.keys(), key=lambda x:playedHeroes[x].get('count', 0), reverse=1)
i = 0
resultHeroes = str(name) + ' | '
for heroID in sortedPlayedHeroes:
if (playedHeroes[heroID]['count'] == 0):
break
heroValues = (playedHeroes[heroID]['count'],heroDictionary[heroID],playedHeroes[heroID]['wins'],playedHeroes[heroID]['count'] - playedHeroes[heroID]['wins'], round(100 * playedHeroes[heroID]['wins'] / playedHeroes[heroID]['count'],2))
resultHeroes = resultHeroes + '%sx[](/hero-%s "%s wins, %s losses (%s' %heroValues + '%' + ')") '
i = i + 1
if (i == 8):
<|code_end|>
, predict the immediate next line with the help of imports:
from steamapi.getheroes import heroDictionary
from steamapi.getproplayerlist import proPlayerDictionary
from misc.idnamedict import gameMode
from converter import playerconverter
and context (classes, functions, sometimes code) from other files:
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: misc/idnamedict.py
# def gameMode(gameModeID):
# return {
# -1: 'skipped',
# 0 : 'Unknown',
# 1 : 'All Pick',
# 2 : 'Captains Mode',
# 3 : 'Random Draft',
# 4 : 'Single Draft',
# 5 : 'All Random',
# 6 : '?? INTRO/DEATH ??',
# 7 : 'The Diretide',
# 8 : 'Reverse Captains Mode',
# 9 : 'Greeviling',
# 10 : 'Tutorial',
# 11 : 'Mid Only',
# 12 : 'Least Played',
# 13 : 'New Player Pool',
# 14 : 'Compendium Matchmaking',
# 15 : 'Custom',
# 16 : 'Captains Draft',
# 17 : 'Balanced Draft',
# 18 : 'Ability Draft',
# 19 : '?? Event ??',
# 20 : 'All Random Death Match',
# 21 : '1vs1 Solo Mid',
# 22 : 'Ranked All Pick',
# }.get(gameModeID, 'Unkown')
#
# Path: converter/playerconverter.py
# def playerConverter(playerID, playerSummariesJson=None, includeMMR=False):
# def getMMR(playerID):
# MMR = None
# MMR = getMMR(playerID)
# MMR = {}
. Output only the next line. | break |
Using the snippet: <|code_start|>
playedModes[gameMode(i)] = len(analysis['general'][gameMode(i)])
sortedModes = sorted(playedModes.keys(), key=lambda x:playedModes[x], reverse=1)
modeString = ''
for mode in sortedModes:
modeString = modeString + ', %s %s' %(playedModes[mode], mode)
skippedMessage = ''
if(len(analysis['general']['skipped']) != 0):
skippedMessage = ' (%s skipped)' %len(analysis['general']['skipped'])
intro = '####	\n#####	 Hover to view player analysis %s\n######	\n\n' %youDescription
if MMR != None:
MMRText = ''
for key in MMR.keys():
MMRText = MMRText + '%s MMR **%s**, ' %(key, MMR[key])
MMRText = MMRText[:-2]
intro = intro + 'Player MMR (powered by OpenDota): %s. \n' %MMRText
intro = intro + 'Analyzed a total of %s matches%s. (%s wins' %(len(detailedMatches), skippedMessage, len(analysis['general']['wins']))
intro = intro + modeString
intro = intro + ') '
intro = intro + ' \n[Hover over links to display more information.](#a "%s")\n\n' %averageInformation
resultTable = 'average | kills | deaths | assists | last hits | denies | gpm | xpm | hero damage | tower damage | hero healing | leaver count (total)\n'
<|code_end|>
, determine the next line of code. You have imports:
from steamapi.getheroes import heroDictionary
from steamapi.getproplayerlist import proPlayerDictionary
from misc.idnamedict import gameMode
from converter import playerconverter
and context (class names, function names, or code) available:
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: misc/idnamedict.py
# def gameMode(gameModeID):
# return {
# -1: 'skipped',
# 0 : 'Unknown',
# 1 : 'All Pick',
# 2 : 'Captains Mode',
# 3 : 'Random Draft',
# 4 : 'Single Draft',
# 5 : 'All Random',
# 6 : '?? INTRO/DEATH ??',
# 7 : 'The Diretide',
# 8 : 'Reverse Captains Mode',
# 9 : 'Greeviling',
# 10 : 'Tutorial',
# 11 : 'Mid Only',
# 12 : 'Least Played',
# 13 : 'New Player Pool',
# 14 : 'Compendium Matchmaking',
# 15 : 'Custom',
# 16 : 'Captains Draft',
# 17 : 'Balanced Draft',
# 18 : 'Ability Draft',
# 19 : '?? Event ??',
# 20 : 'All Random Death Match',
# 21 : '1vs1 Solo Mid',
# 22 : 'Ranked All Pick',
# }.get(gameModeID, 'Unkown')
#
# Path: converter/playerconverter.py
# def playerConverter(playerID, playerSummariesJson=None, includeMMR=False):
# def getMMR(playerID):
# MMR = None
# MMR = getMMR(playerID)
# MMR = {}
. Output only the next line. | resultTable = resultTable + '-------|-----|------|-------|---------|------|---|---|-----------|------------|------------|--------------------\n' |
Next line prediction: <|code_start|>
#from steamapi.getheroes import heroDictionary, requestGetHeroes
def printTableLine(name, source, divider):
keyValues = ['kills', 'deaths', 'assists', 'last_hits', 'denies', 'gold_per_min', 'xp_per_min', 'hero_damage', 'tower_damage', 'hero_healing']
#'level', 'gold', 'gold_spent',
tableLine = '**%s**' %name
for key in keyValues:
tableLine = tableLine + ' | ' + str(round((source[key]/divider), 2))
tableLine = tableLine + ' | ' + str(source.get('leaver_status', 0))
#TODO provide match ids of leaver games (fuck this shit)
return tableLine + '\n'
<|code_end|>
. Use current file imports:
(from steamapi.getheroes import heroDictionary
from steamapi.getproplayerlist import proPlayerDictionary
from misc.idnamedict import gameMode
from converter import playerconverter)
and context including class names, function names, or small code snippets from other files:
# Path: steamapi/getheroes.py
# def requestGetHeroes():
# URL = "https://api.steampowered.com/IEconDOTA2_570/GetHeroes/v0001/?key=" + SteamAPIKey + "&language=en_us"
#
# Path: steamapi/getproplayerlist.py
# def requestGetProPlayerList():
# URL = "https://api.steampowered.com/IDOTA2Fantasy_570/GetProPlayerList/v1?key=" + SteamAPIKey
#
# Path: misc/idnamedict.py
# def gameMode(gameModeID):
# return {
# -1: 'skipped',
# 0 : 'Unknown',
# 1 : 'All Pick',
# 2 : 'Captains Mode',
# 3 : 'Random Draft',
# 4 : 'Single Draft',
# 5 : 'All Random',
# 6 : '?? INTRO/DEATH ??',
# 7 : 'The Diretide',
# 8 : 'Reverse Captains Mode',
# 9 : 'Greeviling',
# 10 : 'Tutorial',
# 11 : 'Mid Only',
# 12 : 'Least Played',
# 13 : 'New Player Pool',
# 14 : 'Compendium Matchmaking',
# 15 : 'Custom',
# 16 : 'Captains Draft',
# 17 : 'Balanced Draft',
# 18 : 'Ability Draft',
# 19 : '?? Event ??',
# 20 : 'All Random Death Match',
# 21 : '1vs1 Solo Mid',
# 22 : 'Ranked All Pick',
# }.get(gameModeID, 'Unkown')
#
# Path: converter/playerconverter.py
# def playerConverter(playerID, playerSummariesJson=None, includeMMR=False):
# def getMMR(playerID):
# MMR = None
# MMR = getMMR(playerID)
# MMR = {}
. Output only the next line. | def printHeroLine(name, playedHeroes): |
Predict the next line for this snippet: <|code_start|>
def getPlayerSummaries(accountIDs):
accountIDString = ''
for accountID in accountIDs:
accountIDString += str(76561197960265728 + accountID) + ','
<|code_end|>
with the help of current file imports:
import requests
import time
from steamapi.steamapikey import SteamAPIKey
and context from other files:
# Path: steamapi/steamapikey.py
, which may contain function names, class names, or code. Output only the next line. | accountIDString = accountIDString[:-1] |
Continue the code snippet: <|code_start|> """Insert multiple blocks. If a block already exists, the data is
appended. blocks must be a list of tuples where each tuple consists
of (namespace, offset, key, data)"""
start = 0
bulk_insert = self.bulk_insert
blocks_len = len(blocks)
select = 'SELECT ?,?,?,"",0'
query = 'INSERT OR IGNORE INTO gauged_data (namespace, offset, ' \
'`key`, data, flags) '
execute = self.cursor.execute
while start < blocks_len:
rows = blocks[start:start+bulk_insert]
params = []
for namespace, offset, key, _, _ in rows:
params.extend((namespace, offset, key))
insert = (select + ' UNION ') * (len(rows) - 1) + select
execute(query + insert, params)
start += bulk_insert
for namespace, offset, key, data, flags in blocks:
execute('UPDATE gauged_data SET data = CAST(data || ? AS BLOB),'
'flags = ? WHERE namespace = ? AND offset = ? AND '
'`key` = ?', (data, flags, namespace, offset, key))
def block_offset_bounds(self, namespace):
"""Get the minimum and maximum block offset for the specified
namespace"""
cursor = self.cursor
cursor.execute('SELECT MIN(offset), MAX(offset) '
'FROM gauged_statistics '
'WHERE namespace = ?', (namespace,))
<|code_end|>
. Use current file imports:
from collections import OrderedDict
from .interface import DriverInterface
and context (classes, functions, or code) from other files:
# Path: gauged/drivers/interface.py
# class DriverInterface(object):
#
# MAX_KEY = 1024
#
# def create_schema(self):
# raise NotImplementedError
#
# def clear_schema(self):
# raise NotImplementedError
#
# def drop_schema(self):
# raise NotImplementedError
#
# def prepare_migrations(self):
# raise NotImplementedError
#
# def keys(self, namespace, prefix=None, limit=None, offset=None):
# raise NotImplementedError
#
# def lookup_ids(self, keys):
# raise NotImplementedError
#
# def get_block(self, namespace, offset, key):
# raise NotImplementedError
#
# def insert_keys(self, keys):
# raise NotImplementedError
#
# def replace_blocks(self, blocks):
# raise NotImplementedError
#
# def insert_or_append_blocks(self, blocks):
# raise NotImplementedError
#
# def commit(self):
# raise NotImplementedError
#
# def block_offset_bounds(self, namespace):
# raise NotImplementedError
#
# def set_metadata(self, metadata, replace=True):
# raise NotImplementedError
#
# def get_metadata(self, key):
# raise NotImplementedError
#
# def set_writer_position(self, name, timestamp):
# raise NotImplementedError
#
# def get_writer_position(self, name):
# raise NotImplementedError
#
# def get_namespaces(self):
# raise NotImplementedError
#
# def remove_namespace(self, namespace):
# raise NotImplementedError
#
# def clear_from(self, offset, timestamp):
# raise NotImplementedError
#
# def clear_key_after(self, key, namespace, offset=None, timestamp=None):
# raise NotImplementedError
#
# def clear_key_before(self, key, namespace, offset=None, timestamp=None):
# raise NotImplementedError
#
# def get_cache(self, namespace, query_hash, length, start, end):
# pass
#
# def add_cache(self, namespace, key, query_hash, length, cache):
# pass
#
# def remove_cache(self, namespace, key=None):
# pass
#
# def add_namespace_statistics(self, namespace, offset,
# data_points, byte_count):
# raise NotImplementedError
#
# def get_namespace_statistics(self, namespace, start_offset, end_offset):
# raise NotImplementedError
. Output only the next line. | return cursor.fetchone() |
Predict the next line for this snippet: <|code_start|>"""
Gauged
https://github.com/chriso/gauged (MIT Licensed)
Copyright 2014 (c) Chris O'Hara <cohara87@gmail.com>
"""
class TimeSeries(object):
"""A representation of a time series with a fixed interval"""
def __init__(self, points):
"""Initialise the time series. `points` is expected to be either a list
of tuples where each tuple represents a point (timestamp, value), or a
dict where the keys are timestamps. Timestamps are expected to be in
milliseconds"""
if isinstance(points, dict):
points = points.items()
self.points = sorted(points)
<|code_end|>
with the help of current file imports:
from ..utilities import table_repr, to_datetime
and context from other files:
# Path: gauged/utilities.py
# def table_repr(columns, rows, data, padding=2):
# """Generate a table for cli output"""
# padding = ' ' * padding
# column_lengths = [len(column) for column in columns]
# for row in rows:
# for i, column in enumerate(columns):
# item = str(data[row][column])
# column_lengths[i] = max(len(item), column_lengths[i])
# max_row_length = max(len(row) for row in rows) if len(rows) else 0
# table_row = ' ' * max_row_length
# for i, column in enumerate(columns):
# table_row += padding + column.rjust(column_lengths[i])
# table_rows = [table_row]
# for row in rows:
# table_row = row.rjust(max_row_length)
# for i, column in enumerate(columns):
# item = str(data[row][column])
# table_row += padding + item.rjust(column_lengths[i])
# table_rows.append(table_row)
# return '\n'.join(table_rows)
#
# def to_datetime(milliseconds):
# """Convert a timestamp in milliseconds to a datetime"""
# return datetime.fromtimestamp(milliseconds // 1000)
, which may contain function names, class names, or code. Output only the next line. | @property |
Predict the next line after this snippet: <|code_start|>"""
Gauged
https://github.com/chriso/gauged (MIT Licensed)
Copyright 2014 (c) Chris O'Hara <cohara87@gmail.com>
"""
class TimeSeries(object):
"""A representation of a time series with a fixed interval"""
def __init__(self, points):
"""Initialise the time series. `points` is expected to be either a list
of tuples where each tuple represents a point (timestamp, value), or a
dict where the keys are timestamps. Timestamps are expected to be in
milliseconds"""
if isinstance(points, dict):
points = points.items()
self.points = sorted(points)
@property
<|code_end|>
using the current file's imports:
from ..utilities import table_repr, to_datetime
and any relevant context from other files:
# Path: gauged/utilities.py
# def table_repr(columns, rows, data, padding=2):
# """Generate a table for cli output"""
# padding = ' ' * padding
# column_lengths = [len(column) for column in columns]
# for row in rows:
# for i, column in enumerate(columns):
# item = str(data[row][column])
# column_lengths[i] = max(len(item), column_lengths[i])
# max_row_length = max(len(row) for row in rows) if len(rows) else 0
# table_row = ' ' * max_row_length
# for i, column in enumerate(columns):
# table_row += padding + column.rjust(column_lengths[i])
# table_rows = [table_row]
# for row in rows:
# table_row = row.rjust(max_row_length)
# for i, column in enumerate(columns):
# item = str(data[row][column])
# table_row += padding + item.rjust(column_lengths[i])
# table_rows.append(table_row)
# return '\n'.join(table_rows)
#
# def to_datetime(milliseconds):
# """Convert a timestamp in milliseconds to a datetime"""
# return datetime.fromtimestamp(milliseconds // 1000)
. Output only the next line. | def timestamps(self): |
Here is a snippet: <|code_start|> 'flags = VALUES(flags)'
execute = self.cursor.execute
to_buffer = self.to_buffer
while start < blocks_len:
rows = blocks[start:start+bulk_insert]
params = []
for namespace, offset, key, data, flags in rows:
params.extend((namespace, offset, key, to_buffer(data), flags))
insert = (row + ',') * (len(rows) - 1) + row
execute(query + insert + post, params)
start += bulk_insert
def block_offset_bounds(self, namespace):
"""Get the minimum and maximum block offset for the specified
namespace"""
cursor = self.cursor
cursor.execute('SELECT CONVERT(MIN(offset), UNSIGNED),'
'CONVERT(MAX(offset), UNSIGNED) '
'FROM gauged_statistics WHERE namespace = %s',
(namespace,))
return cursor.fetchone()
def set_metadata(self, metadata, replace=True):
params = [param for params in metadata.iteritems() for param in params]
query = 'REPLACE' if replace else 'INSERT IGNORE'
query += ' INTO gauged_metadata VALUES (%s,%s)'
query += ',(%s,%s)' * (len(metadata) - 1)
self.cursor.execute(query, params)
self.db.commit()
<|code_end|>
. Write the next line using the current file imports:
from collections import OrderedDict
from warnings import filterwarnings
from .interface import DriverInterface
and context from other files:
# Path: gauged/drivers/interface.py
# class DriverInterface(object):
#
# MAX_KEY = 1024
#
# def create_schema(self):
# raise NotImplementedError
#
# def clear_schema(self):
# raise NotImplementedError
#
# def drop_schema(self):
# raise NotImplementedError
#
# def prepare_migrations(self):
# raise NotImplementedError
#
# def keys(self, namespace, prefix=None, limit=None, offset=None):
# raise NotImplementedError
#
# def lookup_ids(self, keys):
# raise NotImplementedError
#
# def get_block(self, namespace, offset, key):
# raise NotImplementedError
#
# def insert_keys(self, keys):
# raise NotImplementedError
#
# def replace_blocks(self, blocks):
# raise NotImplementedError
#
# def insert_or_append_blocks(self, blocks):
# raise NotImplementedError
#
# def commit(self):
# raise NotImplementedError
#
# def block_offset_bounds(self, namespace):
# raise NotImplementedError
#
# def set_metadata(self, metadata, replace=True):
# raise NotImplementedError
#
# def get_metadata(self, key):
# raise NotImplementedError
#
# def set_writer_position(self, name, timestamp):
# raise NotImplementedError
#
# def get_writer_position(self, name):
# raise NotImplementedError
#
# def get_namespaces(self):
# raise NotImplementedError
#
# def remove_namespace(self, namespace):
# raise NotImplementedError
#
# def clear_from(self, offset, timestamp):
# raise NotImplementedError
#
# def clear_key_after(self, key, namespace, offset=None, timestamp=None):
# raise NotImplementedError
#
# def clear_key_before(self, key, namespace, offset=None, timestamp=None):
# raise NotImplementedError
#
# def get_cache(self, namespace, query_hash, length, start, end):
# pass
#
# def add_cache(self, namespace, key, query_hash, length, cache):
# pass
#
# def remove_cache(self, namespace, key=None):
# pass
#
# def add_namespace_statistics(self, namespace, offset,
# data_points, byte_count):
# raise NotImplementedError
#
# def get_namespace_statistics(self, namespace, start_offset, end_offset):
# raise NotImplementedError
, which may include functions, classes, or code. Output only the next line. | def get_metadata(self, key): |
Here is a snippet: <|code_start|> """Clear all gauged data"""
execute = self.cursor.execute
execute("""TRUNCATE gauged_data;
TRUNCATE gauged_keys RESTART IDENTITY;
TRUNCATE gauged_writer_history;
TRUNCATE gauged_cache;
TRUNCATE gauged_statistics""")
self.db.commit()
def drop_schema(self):
"""Drop all gauged tables"""
try:
self.cursor.execute("""
DROP TABLE IF EXISTS gauged_data;
DROP TABLE IF EXISTS gauged_keys;
DROP TABLE IF EXISTS gauged_writer_history;
DROP TABLE IF EXISTS gauged_cache;
DROP TABLE IF EXISTS gauged_statistics;
DROP TABLE IF EXISTS gauged_metadata""")
self.db.commit()
except self.psycopg2.InternalError: # pragma: no cover
self.db.rollback()
def prepare_migrations(self):
migrations = OrderedDict()
migrations['0.4.1'] = ''
migrations['1.0.0'] = [
'TRUNCATE gauged_cache',
'ALTER TABLE gauged_cache ADD COLUMN key bigint NOT NULL'
]
<|code_end|>
. Write the next line using the current file imports:
from collections import OrderedDict
from .interface import DriverInterface
and context from other files:
# Path: gauged/drivers/interface.py
# class DriverInterface(object):
#
# MAX_KEY = 1024
#
# def create_schema(self):
# raise NotImplementedError
#
# def clear_schema(self):
# raise NotImplementedError
#
# def drop_schema(self):
# raise NotImplementedError
#
# def prepare_migrations(self):
# raise NotImplementedError
#
# def keys(self, namespace, prefix=None, limit=None, offset=None):
# raise NotImplementedError
#
# def lookup_ids(self, keys):
# raise NotImplementedError
#
# def get_block(self, namespace, offset, key):
# raise NotImplementedError
#
# def insert_keys(self, keys):
# raise NotImplementedError
#
# def replace_blocks(self, blocks):
# raise NotImplementedError
#
# def insert_or_append_blocks(self, blocks):
# raise NotImplementedError
#
# def commit(self):
# raise NotImplementedError
#
# def block_offset_bounds(self, namespace):
# raise NotImplementedError
#
# def set_metadata(self, metadata, replace=True):
# raise NotImplementedError
#
# def get_metadata(self, key):
# raise NotImplementedError
#
# def set_writer_position(self, name, timestamp):
# raise NotImplementedError
#
# def get_writer_position(self, name):
# raise NotImplementedError
#
# def get_namespaces(self):
# raise NotImplementedError
#
# def remove_namespace(self, namespace):
# raise NotImplementedError
#
# def clear_from(self, offset, timestamp):
# raise NotImplementedError
#
# def clear_key_after(self, key, namespace, offset=None, timestamp=None):
# raise NotImplementedError
#
# def clear_key_before(self, key, namespace, offset=None, timestamp=None):
# raise NotImplementedError
#
# def get_cache(self, namespace, query_hash, length, start, end):
# pass
#
# def add_cache(self, namespace, key, query_hash, length, cache):
# pass
#
# def remove_cache(self, namespace, key=None):
# pass
#
# def add_namespace_statistics(self, namespace, offset,
# data_points, byte_count):
# raise NotImplementedError
#
# def get_namespace_statistics(self, namespace, start_offset, end_offset):
# raise NotImplementedError
, which may include functions, classes, or code. Output only the next line. | return migrations |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
class Limit_test(TestCase):
def test_less(self):
less = limit.__dict__['__limit_less']
assert less(42, 42)
assert not less(42, 41)
assert less(1e99, resource.RLIM_INFINITY)
assert less(resource.RLIM_INFINITY, resource.RLIM_INFINITY)
<|code_end|>
, predict the next line using imports from the current file:
from unittest import TestCase
from problemtools.run import limit
import resource
and context including class names, function names, and sometimes code from other files:
# Path: problemtools/run/limit.py
# def check_limit_capabilities(logger):
# def try_limit(limit, soft, hard):
# def __limit_less(lim1, lim2):
. Output only the next line. | assert not less(resource.RLIM_INFINITY, 1e99) |
Given the following code snippet before the placeholder: <|code_start|> languages.Language('id', vals)
vals['compile'] = 'echo {nonexistent}'
with pytest.raises(languages.LanguageConfigError):
languages.Language('id', vals)
def test_missing_run(self):
vals = self.__language_dict()
del vals['run']
with pytest.raises(languages.LanguageConfigError):
languages.Language('id', vals)
def test_invalid_run(self):
vals = self.__language_dict()
vals['run'] = ['python3', '{mainfile}']
with pytest.raises(languages.LanguageConfigError):
languages.Language('id', vals)
vals['run'] = 'echo {nonexistent}'
with pytest.raises(languages.LanguageConfigError):
languages.Language('id', vals)
def test_good_entrypoints(self):
vals = self.__language_dict()
vals['compile'] = 'echo {binary}'
vals['run'] = 'echo {binary}'
languages.Language('id', vals)
<|code_end|>
, predict the next line using imports from the current file:
from unittest import TestCase
from problemtools import languages
import pytest
import os
import re
and context including class names, function names, and sometimes code from other files:
# Path: problemtools/languages.py
# class LanguageConfigError(Exception):
# class Language(object):
# class Languages(object):
# __KEYS = ['name', 'priority', 'files', 'shebang', 'compile', 'run']
# __VARIABLES = ['path', 'files', 'binary', 'mainfile', 'mainclass', 'Mainclass', 'memlim']
# def __init__(self, lang_id, lang_spec):
# def get_source_files(self, file_list):
# def update(self, values):
# def __check(self):
# def __variables_in_command(cmd):
# def __matches_shebang(self, filename):
# def __init__(self, data=None):
# def detect_language(self, file_list):
# def update(self, data):
# def load_language_config():
. Output only the next line. | vals['compile'] = 'echo {mainfile}' |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
def config_paths_mock():
return [os.path.join(os.path.dirname(__file__), 'config1'),
os.path.join(os.path.dirname(__file__), 'config2')]
def test_load_basic_config(monkeypatch):
monkeypatch.setattr(config, '__config_file_paths', config_paths_mock)
conf = config.load_config('test.yaml')
assert conf == {'prop1': 'hello', 'prop2': 5}
<|code_end|>
with the help of current file imports:
import pytest
import os
from problemtools import config
and context from other files:
# Path: problemtools/config.py
# class ConfigError(Exception):
# def load_config(configuration_file):
# def __config_file_paths():
# def __update_dict(orig, update):
, which may contain function names, class names, or code. Output only the next line. | def test_load_updated_config(monkeypatch): |
Next line prediction: <|code_start|> from_email='no-reply@indabom.com',
subject_template_name='indabom/password-reset-subject.txt',
email_template_name='indabom/password-reset-email.html'),
name='password_reset'),
path('password-reset/done/', auth_views.PasswordResetDoneView.as_view(
template_name='indabom/password-reset-done.html'),
name='password_reset_done'),
path('password-reset/confirm/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(
template_name='indabom/password-reset-confirm.html'),
name='password_reset_confirm'),
path('password-reset/complete/', auth_views.PasswordResetCompleteView.as_view(
template_name='indabom/password-reset-complete.html'), name='password_reset_complete'),
path('about/', views.About.as_view(), name=views.About.name),
path('learn-more/', views.LearnMore.as_view(), name=views.LearnMore.name),
path('privacy-policy/', views.PrivacyPolicy.as_view(), name=views.PrivacyPolicy.name),
path('terms-and-conditions/', views.TermsAndConditions.as_view(), name=views.TermsAndConditions.name),
path('install/', views.Install.as_view(), name=views.Install.name),
path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='sitemap'),
path('robots.txt', TemplateView.as_view(template_name='robots.txt', content_type="text/plain"), name="robots-file"),
path('stripe/', include('djstripe.urls', namespace='djstripe')),
path('checkout/', login_required(views.Checkout.as_view()), name=views.Checkout.name),
path('checkout-success/', views.CheckoutSuccess.as_view(), name=views.CheckoutSuccess.name),
path('checkout-cancelled/', views.CheckoutCancelled.as_view(), name=views.CheckoutCancelled.name),
path('stripe-manage/', views.stripe_manage, name='stripe-manage'),
path('explorer/', include('explorer.urls')),
]
<|code_end|>
. Use current file imports:
(from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.contrib.sitemaps.views import sitemap
from django.urls import path
from django.views.generic import TemplateView
from . import views
from .sitemaps import StaticViewSitemap)
and context including class names, function names, or small code snippets from other files:
# Path: indabom/sitemaps.py
# class StaticViewSitemap(sitemaps.Sitemap):
# priority = 0.8
# changefreq = 'weekly'
#
# def items(self):
# return ['index', 'about', 'install', 'learn-more', ]
#
# def location(self, item):
# return reverse(item)
. Output only the next line. | handler404 = 'indabom.views.handler404' |
Given snippet: <|code_start|># the GNU Affero General Public License (AGPL) as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version of the AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
#
# Imports ###########################################################
# Classes ###########################################################
class MentoringTableBlockTest(MentoringBaseTest):
def test_mentoring_table(self):
# Initially, the table should be blank, with just the titles
table = self.go_to_page('Table 2', css_selector='.mentoring-table')
headers = table.find_elements_by_css_selector('th')
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0].text, 'Header Test 1')
self.assertEqual(headers[1].text, 'Header Test 2')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from mentoring.test_base import MentoringBaseTest
and context:
# Path: mentoring/test_base.py
# class MentoringBaseTest(SeleniumTest):
#
# def setUp(self):
# super(MentoringBaseTest, self).setUp()
#
# # Use test scenarios
# self.browser.get(self.live_server_url) # Needed to load tests once
# scenarios.SCENARIOS.clear()
# scenarios_list = load_scenarios_from_path('../tests/xml')
# for identifier, title, xml in scenarios_list:
# scenarios.add_xml_scenario(identifier, title, xml)
# self.addCleanup(scenarios.remove_scenario, identifier)
#
# # Suzy opens the browser to visit the workbench
# self.browser.get(self.live_server_url)
#
# # She knows it's the site by the header
# header1 = self.browser.find_element_by_css_selector('h1')
# self.assertEqual(header1.text, 'XBlock scenarios')
#
# def go_to_page(self, page_name, css_selector='div.mentoring'):
# """
# Navigate to the page `page_name`, as listed on the workbench home
# Returns the DOM element on the visited page located by the `css_selector`
# """
# self.browser.get(self.live_server_url)
# self.browser.find_element_by_link_text(page_name).click()
# time.sleep(1)
# mentoring = self.browser.find_element_by_css_selector(css_selector)
# return mentoring
which might include code, classes, or functions. Output only the next line. | rows = table.find_elements_by_css_selector('td') |
Using the snippet: <|code_start|># Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version of the AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
#
# Imports ###########################################################
# Classes ###########################################################
class MentoringBaseTest(SeleniumTest):
def setUp(self):
super(MentoringBaseTest, self).setUp()
# Use test scenarios
self.browser.get(self.live_server_url) # Needed to load tests once
scenarios.SCENARIOS.clear()
scenarios_list = load_scenarios_from_path('../tests/xml')
<|code_end|>
, determine the next line of code. You have imports:
import time
from workbench import scenarios
from workbench.test.selenium_test import SeleniumTest
from .utils import load_scenarios_from_path
and context (class names, function names, or code) available:
# Path: mentoring/utils.py
# def load_scenarios_from_path(scenarios_path):
# """
# Load all xml files contained in a specified directory, as workbench scenarios
# """
# return get_scenarios_from_path(scenarios_path, include_identifier=True)
. Output only the next line. | for identifier, title, xml in scenarios_list: |
Next line prediction: <|code_start|> }
# for handling duplicate entries
dataUniqe = dict()
# find all entries:
# suche <div class="houses">
parking_houses = soup.find_all('div', class_='houses')
for parking_group in parking_houses :
parking_lots = parking_group.find_all('li')
for one_lot in parking_lots :
parking_name = one_lot.find('a').text
if ( not parking_name in dataUniqe ) :
dataUniqe[parking_name] = 1 # add this to the list
lot = geodata.lot(parking_name)
parking_state = 'open'
parking_free = 0
try :
parking_free = int(one_lot.find('span', class_='free-text').text.split()[0])
except :
parking_state = 'nodata'
data["lots"].append({
"name": parking_name,
"free": parking_free,
"total": lot.total,
"address": lot.address,
"coords": lot.coords,
"state": parking_state,
"lot_type": lot.type,
<|code_end|>
. Use current file imports:
(from bs4 import BeautifulSoup
from park_api.geodata import GeoData
from park_api.util import utc_now)
and context including class names, function names, or small code snippets from other files:
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
#
# Path: park_api/util.py
# def utc_now():
# """
# Returns the current UTC time in ISO format.
#
# :return:
# """
# return datetime.utcnow().replace(microsecond=0).isoformat()
. Output only the next line. | "id": lot.id, |
Given the code snippet: <|code_start|> # for handling duplicate entries
dataUniqe = dict()
# find all entries:
# suche <div class="houses">
parking_houses = soup.find_all('div', class_='houses')
for parking_group in parking_houses :
parking_lots = parking_group.find_all('li')
for one_lot in parking_lots :
parking_name = one_lot.find('a').text
if ( not parking_name in dataUniqe ) :
dataUniqe[parking_name] = 1 # add this to the list
lot = geodata.lot(parking_name)
parking_state = 'open'
parking_free = 0
try :
parking_free = int(one_lot.find('span', class_='free-text').text.split()[0])
except :
parking_state = 'nodata'
data["lots"].append({
"name": parking_name,
"free": parking_free,
"total": lot.total,
"address": lot.address,
"coords": lot.coords,
"state": parking_state,
"lot_type": lot.type,
"id": lot.id,
<|code_end|>
, generate the next line using the imports in this file:
from bs4 import BeautifulSoup
from park_api.geodata import GeoData
from park_api.util import utc_now
and context (functions, classes, or occasionally code) from other files:
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
#
# Path: park_api/util.py
# def utc_now():
# """
# Returns the current UTC time in ISO format.
#
# :return:
# """
# return datetime.utcnow().replace(microsecond=0).isoformat()
. Output only the next line. | "forecast": False |
Predict the next line for this snippet: <|code_start|>state_map = {
"frei": "open",
"geschlossen": "closed",
"besetzt": "open"
}
geodata = GeoData(__file__)
def parse_html(html):
soup = BeautifulSoup(html, "html.parser")
lot_table_trs = soup.select("div#parkingList table")[0].find_all("tr")
date_field = soup.find(id="lastRefresh").text.strip()
data = {
"last_updated": convert_date(date_field, "%d.%m.%Y %H:%M Uhr"),
"lots": []
}
for tr in lot_table_trs[1:-1]:
tds = tr.find_all("td")
type_and_name = process_name(tds[0].text.strip())
lot = geodata.lot(tds[0].text.strip())
data["lots"].append({
"name": type_and_name[1].strip("\n"),
"lot_type": type_and_name[0],
"free": int(tds[1].text),
"total": lot.total,
"state": state_map.get(tds[2].text, ""),
<|code_end|>
with the help of current file imports:
from bs4 import BeautifulSoup
from park_api.util import convert_date
from park_api.geodata import GeoData
and context from other files:
# Path: park_api/util.py
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
# """
# Convert a date into a ISO formatted UTC date string.
# Timezone defaults to Europe/Berlin.
#
# :param date_string:
# :param date_format:
# :param timezone:
# :return:
# """
# last_updated = datetime.strptime(date_string, date_format)
# local_timezone = pytz.timezone(timezone)
# last_updated = local_timezone.localize(last_updated, is_dst=None)
# last_updated = last_updated.astimezone(pytz.utc).replace(tzinfo=None)
#
# return last_updated.replace(microsecond=0).isoformat()
#
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
, which may contain function names, class names, or code. Output only the next line. | "coords": lot.coords, |
Continue the code snippet: <|code_start|> "geschlossen": "closed",
"besetzt": "open"
}
geodata = GeoData(__file__)
def parse_html(html):
soup = BeautifulSoup(html, "html.parser")
lot_table_trs = soup.select("div#parkingList table")[0].find_all("tr")
date_field = soup.find(id="lastRefresh").text.strip()
data = {
"last_updated": convert_date(date_field, "%d.%m.%Y %H:%M Uhr"),
"lots": []
}
for tr in lot_table_trs[1:-1]:
tds = tr.find_all("td")
type_and_name = process_name(tds[0].text.strip())
lot = geodata.lot(tds[0].text.strip())
data["lots"].append({
"name": type_and_name[1].strip("\n"),
"lot_type": type_and_name[0],
"free": int(tds[1].text),
"total": lot.total,
"state": state_map.get(tds[2].text, ""),
"coords": lot.coords,
"id": lot.id,
<|code_end|>
. Use current file imports:
from bs4 import BeautifulSoup
from park_api.util import convert_date
from park_api.geodata import GeoData
and context (classes, functions, or code) from other files:
# Path: park_api/util.py
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
# """
# Convert a date into a ISO formatted UTC date string.
# Timezone defaults to Europe/Berlin.
#
# :param date_string:
# :param date_format:
# :param timezone:
# :return:
# """
# last_updated = datetime.strptime(date_string, date_format)
# local_timezone = pytz.timezone(timezone)
# last_updated = local_timezone.localize(last_updated, is_dst=None)
# last_updated = last_updated.astimezone(pytz.utc).replace(tzinfo=None)
#
# return last_updated.replace(microsecond=0).isoformat()
#
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
. Output only the next line. | "forecast": False |
Predict the next line for this snippet: <|code_start|> self.city[1],
self.city[2],
self.city[3],
self.city[4],
json["source"],
json["public"],
self.city[7],
self.city[8])
def _process_feature(self, feature):
props = feature["properties"]
_type = props.get("type", None)
name = props["name"]
lng, lat = self._coords(feature)
if _type == "city":
self.city = self._city_from_props(name, lng, lat, props)
else:
lot = self._lot_from_props(name, lng, lat, props)
self.lots[name] = lot
def _city_from_props(self, name, lng, lat, props):
url = props.get("url", None)
source = props.get("source", None)
headers = props.get("headers", {})
active_support = props.get("active_support", None)
attribution = props.get("attribution", None)
return City(name,
self.city_name,
lng,
lat,
<|code_end|>
with the help of current file imports:
import os
import calendar
import json
from datetime import date
from collections import namedtuple
from park_api import env
from park_api.util import remove_special_chars
and context from other files:
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/util.py
# def remove_special_chars(string):
# """
# Remove any umlauts, spaces and punctuation from a string.
#
# :param string:
# :return:
# """
# replacements = {
# "ä": "ae",
# "ö": "oe",
# "ü": "ue",
# "ß": "ss",
# "-": "",
# " ": "",
# ".": "",
# ",": "",
# "'": "",
# "\"": "",
# "/": "",
# "\\": "",
# "\n": "",
# "\t": ""
# }
# for repl in replacements.keys():
# string = string.replace(repl, replacements[repl])
# return string
, which may contain function names, class names, or code. Output only the next line. | url, |
Continue the code snippet: <|code_start|> private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
try:
with open(private_path) as p:
self.private_data = json.load(p)
self._process_private(self.private_data)
except FileNotFoundError:
self.private_data = None
def _process_json(self, json):
self.lots = {}
self.city = None
for f in json["features"]:
self._process_feature(f)
if self.city is None:
self.city = City(self.city_name,
self.city_name,
None,
None,
None,
None,
None,
None)
def _process_private(self, json):
if self.city:
self.city = City(self.city[0],
self.city[1],
self.city[2],
self.city[3],
self.city[4],
<|code_end|>
. Use current file imports:
import os
import calendar
import json
from datetime import date
from collections import namedtuple
from park_api import env
from park_api.util import remove_special_chars
and context (classes, functions, or code) from other files:
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/util.py
# def remove_special_chars(string):
# """
# Remove any umlauts, spaces and punctuation from a string.
#
# :param string:
# :return:
# """
# replacements = {
# "ä": "ae",
# "ö": "oe",
# "ü": "ue",
# "ß": "ss",
# "-": "",
# " ": "",
# ".": "",
# ",": "",
# "'": "",
# "\"": "",
# "/": "",
# "\\": "",
# "\n": "",
# "\t": ""
# }
# for repl in replacements.keys():
# string = string.replace(repl, replacements[repl])
# return string
. Output only the next line. | json["source"], |
Predict the next line after this snippet: <|code_start|>
def main():
log_path = os.path.join(env.APP_ROOT, "log", env.ENV + ".log")
log_handler = RotatingFileHandler(log_path,
maxBytes=1000000,
backupCount=1)
<|code_end|>
using the current file's imports:
import logging
import os
from logging.handlers import RotatingFileHandler
from park_api import env, db
from park_api.app import app
and any relevant context from other files:
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
#
# Path: park_api/app.py
# def user_agent(request):
# def init_static():
# def update_cache(city):
# def get_meta():
# def get_api_status():
# def get_lots(city):
# def get_longtime_forecast(city, lot_id):
# def make_coffee():
. Output only the next line. | formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s ") |
Predict the next line for this snippet: <|code_start|>
def main():
log_path = os.path.join(env.APP_ROOT, "log", env.ENV + ".log")
log_handler = RotatingFileHandler(log_path,
maxBytes=1000000,
backupCount=1)
<|code_end|>
with the help of current file imports:
import logging
import os
from logging.handlers import RotatingFileHandler
from park_api import env, db
from park_api.app import app
and context from other files:
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
#
# Path: park_api/app.py
# def user_agent(request):
# def init_static():
# def update_cache(city):
# def get_meta():
# def get_api_status():
# def get_lots(city):
# def get_longtime_forecast(city, lot_id):
# def make_coffee():
, which may contain function names, class names, or code. Output only the next line. | formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s ") |
Here is a snippet: <|code_start|>def parse_html(html):
# BeautifulSoup is a great and easy way to parse the html and find the bits and pieces we're looking for.
soup = BeautifulSoup(html, "html.parser")
data = {
"last_updated": '',
# URL for the page where the scraper can gather the data
"lots": []
}
try :
# <div class="container-fluid"
parking_data = soup.find( 'div', class_='container-fluid')
# Letzte Aktualisierung: 04.07.2019 11:03:00
last_updated = convert_date( parking_data.find('h5').text, 'Letzte Aktualisierung: %d.%m.%Y %H:%M:%S')
data["last_updated"] = last_updated
except :
# if the service is unavailable (did happen in one of my tests):
data["last_updated"] = utc_now()
# return data
parking_lots = parking_data.find_all('div', class_='well')
for one_parking_lot in parking_lots :
parking_name = one_parking_lot.find('b').text.strip()
lot = geodata.lot(parking_name)
parking_free = 0
try :
parking_status = 'open'
parking_free = int(one_parking_lot.find_all('div', role='progressbar')[1].find('b').text.strip())
<|code_end|>
. Write the next line using the current file imports:
from bs4 import BeautifulSoup
from park_api.util import convert_date
from park_api.geodata import GeoData
from park_api.util import utc_now
and context from other files:
# Path: park_api/util.py
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
# """
# Convert a date into a ISO formatted UTC date string.
# Timezone defaults to Europe/Berlin.
#
# :param date_string:
# :param date_format:
# :param timezone:
# :return:
# """
# last_updated = datetime.strptime(date_string, date_format)
# local_timezone = pytz.timezone(timezone)
# last_updated = local_timezone.localize(last_updated, is_dst=None)
# last_updated = last_updated.astimezone(pytz.utc).replace(tzinfo=None)
#
# return last_updated.replace(microsecond=0).isoformat()
#
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
#
# Path: park_api/util.py
# def utc_now():
# """
# Returns the current UTC time in ISO format.
#
# :return:
# """
# return datetime.utcnow().replace(microsecond=0).isoformat()
, which may include functions, classes, or code. Output only the next line. | except : |
Using the snippet: <|code_start|> parking_data = soup.find( 'div', class_='container-fluid')
# Letzte Aktualisierung: 04.07.2019 11:03:00
last_updated = convert_date( parking_data.find('h5').text, 'Letzte Aktualisierung: %d.%m.%Y %H:%M:%S')
data["last_updated"] = last_updated
except :
# if the service is unavailable (did happen in one of my tests):
data["last_updated"] = utc_now()
# return data
parking_lots = parking_data.find_all('div', class_='well')
for one_parking_lot in parking_lots :
parking_name = one_parking_lot.find('b').text.strip()
lot = geodata.lot(parking_name)
parking_free = 0
try :
parking_status = 'open'
parking_free = int(one_parking_lot.find_all('div', role='progressbar')[1].find('b').text.strip())
except :
parking_status = 'nodata'
data["lots"].append({
"name": parking_name,
"free": parking_free,
"total": lot.total,
"address": lot.address,
"coords": lot.coords,
"state": parking_status,
"lot_type": lot.type,
"id": lot.id,
"forecast": False
<|code_end|>
, determine the next line of code. You have imports:
from bs4 import BeautifulSoup
from park_api.util import convert_date
from park_api.geodata import GeoData
from park_api.util import utc_now
and context (class names, function names, or code) available:
# Path: park_api/util.py
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
# """
# Convert a date into a ISO formatted UTC date string.
# Timezone defaults to Europe/Berlin.
#
# :param date_string:
# :param date_format:
# :param timezone:
# :return:
# """
# last_updated = datetime.strptime(date_string, date_format)
# local_timezone = pytz.timezone(timezone)
# last_updated = local_timezone.localize(last_updated, is_dst=None)
# last_updated = last_updated.astimezone(pytz.utc).replace(tzinfo=None)
#
# return last_updated.replace(microsecond=0).isoformat()
#
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
#
# Path: park_api/util.py
# def utc_now():
# """
# Returns the current UTC time in ISO format.
#
# :return:
# """
# return datetime.utcnow().replace(microsecond=0).isoformat()
. Output only the next line. | }) |
Predict the next line for this snippet: <|code_start|>
# BeautifulSoup is a great and easy way to parse the html and find the bits and pieces we're looking for.
soup = BeautifulSoup(html, "html.parser")
data = {
"last_updated": '',
# URL for the page where the scraper can gather the data
"lots": []
}
try :
# <div class="container-fluid"
parking_data = soup.find( 'div', class_='container-fluid')
# Letzte Aktualisierung: 04.07.2019 11:03:00
last_updated = convert_date( parking_data.find('h5').text, 'Letzte Aktualisierung: %d.%m.%Y %H:%M:%S')
data["last_updated"] = last_updated
except :
# if the service is unavailable (did happen in one of my tests):
data["last_updated"] = utc_now()
# return data
parking_lots = parking_data.find_all('div', class_='well')
for one_parking_lot in parking_lots :
parking_name = one_parking_lot.find('b').text.strip()
lot = geodata.lot(parking_name)
parking_free = 0
try :
parking_status = 'open'
parking_free = int(one_parking_lot.find_all('div', role='progressbar')[1].find('b').text.strip())
except :
<|code_end|>
with the help of current file imports:
from bs4 import BeautifulSoup
from park_api.util import convert_date
from park_api.geodata import GeoData
from park_api.util import utc_now
and context from other files:
# Path: park_api/util.py
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
# """
# Convert a date into a ISO formatted UTC date string.
# Timezone defaults to Europe/Berlin.
#
# :param date_string:
# :param date_format:
# :param timezone:
# :return:
# """
# last_updated = datetime.strptime(date_string, date_format)
# local_timezone = pytz.timezone(timezone)
# last_updated = local_timezone.localize(last_updated, is_dst=None)
# last_updated = last_updated.astimezone(pytz.utc).replace(tzinfo=None)
#
# return last_updated.replace(microsecond=0).isoformat()
#
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
#
# Path: park_api/util.py
# def utc_now():
# """
# Returns the current UTC time in ISO format.
#
# :return:
# """
# return datetime.utcnow().replace(microsecond=0).isoformat()
, which may contain function names, class names, or code. Output only the next line. | parking_status = 'nodata' |
Next line prediction: <|code_start|># from park_api.util import utc_now
# This loads the geodata for this city if <city>.geojson exists in the same directory as this file.
# No need to remove this if there's no geodata (yet), everything will still work.
geodata = GeoData(__file__)
# This function is called by the scraper and given the data of the page specified as source in geojson above.
# It's supposed to return a dictionary containing everything the current spec expects. Tests will fail if it doesn't ;)
def parse_html(html):
# BeautifulSoup is a great and easy way to parse the html and find the bits and pieces we're looking for.
soup = BeautifulSoup(html, "html.parser")
data = {
"last_updated": '',
# URL for the page where the scraper can gather the data
"lots": []
}
# <b>Stand: 13.08.2019 16:40:00 Uhr</b> (Aktualisierung alle 60 Sekunden)<br>
data['last_updated'] = convert_date( soup.find('b').text, 'Stand: %d.%m.%Y %H:%M:%S Uhr')
entries = soup.find( 'table', class_= 'tabellenformat')
entries_rows = entries.find_all( 'tr' )
# first line: header
<|code_end|>
. Use current file imports:
(from bs4 import BeautifulSoup
from park_api.util import convert_date
from park_api.geodata import GeoData)
and context including class names, function names, or small code snippets from other files:
# Path: park_api/util.py
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
# """
# Convert a date into a ISO formatted UTC date string.
# Timezone defaults to Europe/Berlin.
#
# :param date_string:
# :param date_format:
# :param timezone:
# :return:
# """
# last_updated = datetime.strptime(date_string, date_format)
# local_timezone = pytz.timezone(timezone)
# last_updated = local_timezone.localize(last_updated, is_dst=None)
# last_updated = last_updated.astimezone(pytz.utc).replace(tzinfo=None)
#
# return last_updated.replace(microsecond=0).isoformat()
#
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
. Output only the next line. | for one_entry in entries_rows[1:] : |
Given snippet: <|code_start|># from park_api.util import utc_now
# This loads the geodata for this city if <city>.geojson exists in the same directory as this file.
# No need to remove this if there's no geodata (yet), everything will still work.
geodata = GeoData(__file__)
# This function is called by the scraper and given the data of the page specified as source in geojson above.
# It's supposed to return a dictionary containing everything the current spec expects. Tests will fail if it doesn't ;)
def parse_html(html):
# BeautifulSoup is a great and easy way to parse the html and find the bits and pieces we're looking for.
soup = BeautifulSoup(html, "html.parser")
data = {
"last_updated": '',
# URL for the page where the scraper can gather the data
"lots": []
}
# <b>Stand: 13.08.2019 16:40:00 Uhr</b> (Aktualisierung alle 60 Sekunden)<br>
data['last_updated'] = convert_date( soup.find('b').text, 'Stand: %d.%m.%Y %H:%M:%S Uhr')
entries = soup.find( 'table', class_= 'tabellenformat')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from bs4 import BeautifulSoup
from park_api.util import convert_date
from park_api.geodata import GeoData
and context:
# Path: park_api/util.py
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
# """
# Convert a date into a ISO formatted UTC date string.
# Timezone defaults to Europe/Berlin.
#
# :param date_string:
# :param date_format:
# :param timezone:
# :return:
# """
# last_updated = datetime.strptime(date_string, date_format)
# local_timezone = pytz.timezone(timezone)
# last_updated = local_timezone.localize(last_updated, is_dst=None)
# last_updated = last_updated.astimezone(pytz.utc).replace(tzinfo=None)
#
# return last_updated.replace(microsecond=0).isoformat()
#
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
which might include code, classes, or functions. Output only the next line. | entries_rows = entries.find_all( 'tr' ) |
Predict the next line after this snippet: <|code_start|>
class ScraperTestCase(unittest.TestCase):
def setUp(self):
db.setup()
@requests_mock.Mocker()
<|code_end|>
using the current file's imports:
import os
import unittest
import helpers
import requests
import requests_mock
from park_api import env, scraper, db
and any relevant context from other files:
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/scraper.py
# HEADERS = {
# "User-Agent": "ParkAPI v%s - Info: %s" %
# (env.SERVER_VERSION, env.SOURCE_REPOSITORY),
# }
# def get_html(city):
# def parse_html(city, html):
# def add_metadata(data):
# def save_data_to_db(cursor, parking_data, city):
# def _live(module):
# def scrape_city(module):
# def main():
#
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
. Output only the next line. | def test_insert(self, mock): |
Next line prediction: <|code_start|>
class ScraperTestCase(unittest.TestCase):
def setUp(self):
db.setup()
@requests_mock.Mocker()
<|code_end|>
. Use current file imports:
(import os
import unittest
import helpers
import requests
import requests_mock
from park_api import env, scraper, db)
and context including class names, function names, or small code snippets from other files:
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/scraper.py
# HEADERS = {
# "User-Agent": "ParkAPI v%s - Info: %s" %
# (env.SERVER_VERSION, env.SOURCE_REPOSITORY),
# }
# def get_html(city):
# def parse_html(city, html):
# def add_metadata(data):
# def save_data_to_db(cursor, parking_data, city):
# def _live(module):
# def scrape_city(module):
# def main():
#
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
. Output only the next line. | def test_insert(self, mock): |
Predict the next line for this snippet: <|code_start|>
# Falls das hier jemals einer von den Menschen
# hinter OpenDataZürich lesen sollte: Ihr seid so toll <3
geodata = GeoData(__file__)
def parse_html(xml_data):
feed = feedparser.parse(xml_data)
try:
last_updated = feed["entries"][0]["updated"]
<|code_end|>
with the help of current file imports:
import feedparser
from park_api.geodata import GeoData
from park_api.util import utc_now
and context from other files:
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
#
# Path: park_api/util.py
# def utc_now():
# """
# Returns the current UTC time in ISO format.
#
# :return:
# """
# return datetime.utcnow().replace(microsecond=0).isoformat()
, which may contain function names, class names, or code. Output only the next line. | except KeyError: |
Next line prediction: <|code_start|>
def parse_html(xml_data):
feed = feedparser.parse(xml_data)
try:
last_updated = feed["entries"][0]["updated"]
except KeyError:
last_updated = utc_now()
data = {
"lots": [],
# remove trailing timezone for consensistency
"last_updated": last_updated.replace("Z", "")
}
for entry in feed["entries"]:
summary = parse_summary(entry["summary"])
title_elements = parse_title(entry["title"])
lot_identifier = (title_elements[2] + " " + title_elements[0]).strip()
lot = geodata.lot(lot_identifier)
data["lots"].append({
"name": title_elements[0],
"address": title_elements[1],
"id": lot.id,
"state": summary[0],
"free": summary[1],
"total": lot.total,
"coords": lot.coords,
<|code_end|>
. Use current file imports:
(import feedparser
from park_api.geodata import GeoData
from park_api.util import utc_now)
and context including class names, function names, or small code snippets from other files:
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
#
# Path: park_api/util.py
# def utc_now():
# """
# Returns the current UTC time in ISO format.
#
# :return:
# """
# return datetime.utcnow().replace(microsecond=0).isoformat()
. Output only the next line. | "forecast": False, |
Predict the next line after this snippet: <|code_start|>
LOT_COUNTS_PER_CITY = {}
def get_most_lots_from_known_data(city, lot_name):
"""
Get the total value from the highest known value in the last saved JSON.
This is useful for cities that don't publish
total number of spaces for a parking lot.
Caveats:
- Returns 0 if not found.
- If a lot name exists twice only the last value is returned.
:param city:
:param lot_name:
:return:
"""
global LOT_COUNTS_PER_CITY
# FIXME ugly work around, this should be really fixed in a different way
lot_counts = LOT_COUNTS_PER_CITY.get(city, {})
if lot_counts == {}:
with db.cursor() as cursor:
sql = """
SELECT data FROM parkapi
WHERE city=%s
<|code_end|>
using the current file's imports:
import pytz
from datetime import datetime
from park_api import db
and any relevant context from other files:
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
. Output only the next line. | ORDER BY timestamp_downloaded DESC LIMIT 600; |
Given the code snippet: <|code_start|>
# This loads the geodata for this city if <city>.geojson exists in the same directory as this file.
# No need to remove this if there's no geodata (yet), everything will still work.
geodata = GeoData(__file__)
# This function is called by the scraper and given the data of the page specified as source in geojson above.
# It's supposed to return a dictionary containing everything the current spec expects. Tests will fail if it doesn't ;)
def parse_html(source_json):
parsed_json = json.loads(source_json)
features = parsed_json['features']
# last_updated is the date when the data on the page was last updated, it should be listed on most pages
last_updated = ""
data = {
# URL for the page where the scraper can gather the data
"lots": []
}
for feature in features:
lot_name = feature['properties']['park_name']
<|code_end|>
, generate the next line using the imports in this file:
from park_api.util import convert_date
from park_api.geodata import GeoData
import json
and context (functions, classes, or occasionally code) from other files:
# Path: park_api/util.py
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
# """
# Convert a date into a ISO formatted UTC date string.
# Timezone defaults to Europe/Berlin.
#
# :param date_string:
# :param date_format:
# :param timezone:
# :return:
# """
# last_updated = datetime.strptime(date_string, date_format)
# local_timezone = pytz.timezone(timezone)
# last_updated = local_timezone.localize(last_updated, is_dst=None)
# last_updated = last_updated.astimezone(pytz.utc).replace(tzinfo=None)
#
# return last_updated.replace(microsecond=0).isoformat()
#
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
. Output only the next line. | lot_free = int(feature['properties']['obs_free']) |
Predict the next line after this snippet: <|code_start|>geodata = GeoData(__file__)
# This function is called by the scraper and given the data of the page specified as source in geojson above.
# It's supposed to return a dictionary containing everything the current spec expects. Tests will fail if it doesn't ;)
def parse_html(source_json):
parsed_json = json.loads(source_json)
features = parsed_json['features']
# last_updated is the date when the data on the page was last updated, it should be listed on most pages
last_updated = ""
data = {
# URL for the page where the scraper can gather the data
"lots": []
}
for feature in features:
lot_name = feature['properties']['park_name']
lot_free = int(feature['properties']['obs_free'])
lot_total = int(feature['properties']['obs_max'])
obs_ts = feature['properties']['obs_ts'].split('.')[0]
if last_updated < obs_ts:
last_updated = obs_ts
# please be careful about the state only being allowed to contain either open, closed or nodata
# should the page list other states, please map these into the three listed possibilities
state = "nodata"
<|code_end|>
using the current file's imports:
from park_api.util import convert_date
from park_api.geodata import GeoData
import json
and any relevant context from other files:
# Path: park_api/util.py
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
# """
# Convert a date into a ISO formatted UTC date string.
# Timezone defaults to Europe/Berlin.
#
# :param date_string:
# :param date_format:
# :param timezone:
# :return:
# """
# last_updated = datetime.strptime(date_string, date_format)
# local_timezone = pytz.timezone(timezone)
# last_updated = local_timezone.localize(last_updated, is_dst=None)
# last_updated = last_updated.astimezone(pytz.utc).replace(tzinfo=None)
#
# return last_updated.replace(microsecond=0).isoformat()
#
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
. Output only the next line. | if feature['properties']['obs_state'] == "1": |
Next line prediction: <|code_start|>#!/usr/bin/env python
HEADERS = {
"User-Agent": "ParkAPI v%s - Info: %s" %
(env.SERVER_VERSION, env.SOURCE_REPOSITORY),
<|code_end|>
. Use current file imports:
(import json
import traceback
import requests
from bs4 import BeautifulSoup
from park_api import util, env, db)
and context including class names, function names, or small code snippets from other files:
# Path: park_api/util.py
# LOT_COUNTS_PER_CITY = {}
# def get_most_lots_from_known_data(city, lot_name):
# def utc_now():
# def remove_special_chars(string):
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
#
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
. Output only the next line. | } |
Here is a snippet: <|code_start|> soup = BeautifulSoup(r.text, "html.parser")
meta_content = soup.find("meta", {"http-equiv": "content-type"})
if meta_content is not None:
encoding = meta_content["content"].split("=")[-1]
r.encoding = encoding
return r.text
def parse_html(city, html):
"""Use a city module to parse its html"""
return city.parse_html(html)
def add_metadata(data):
"""Adds metadata to a scraped output dict"""
data["last_downloaded"] = util.utc_now()
return data
def save_data_to_db(cursor, parking_data, city):
"""Save the data given into the Postgres DB."""
timestamp_updated = parking_data["last_updated"]
timestamp_downloaded = util.utc_now()
json_data = json.dumps(parking_data)
sql = """
INSERT INTO parkapi(
timestamp_updated,
timestamp_downloaded,
city,
<|code_end|>
. Write the next line using the current file imports:
import json
import traceback
import requests
from bs4 import BeautifulSoup
from park_api import util, env, db
and context from other files:
# Path: park_api/util.py
# LOT_COUNTS_PER_CITY = {}
# def get_most_lots_from_known_data(city, lot_name):
# def utc_now():
# def remove_special_chars(string):
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
#
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
, which may include functions, classes, or code. Output only the next line. | data) |
Using the snippet: <|code_start|> meta_content = soup.find("meta", {"http-equiv": "content-type"})
if meta_content is not None:
encoding = meta_content["content"].split("=")[-1]
r.encoding = encoding
return r.text
def parse_html(city, html):
"""Use a city module to parse its html"""
return city.parse_html(html)
def add_metadata(data):
"""Adds metadata to a scraped output dict"""
data["last_downloaded"] = util.utc_now()
return data
def save_data_to_db(cursor, parking_data, city):
"""Save the data given into the Postgres DB."""
timestamp_updated = parking_data["last_updated"]
timestamp_downloaded = util.utc_now()
json_data = json.dumps(parking_data)
sql = """
INSERT INTO parkapi(
timestamp_updated,
timestamp_downloaded,
city,
data)
<|code_end|>
, determine the next line of code. You have imports:
import json
import traceback
import requests
from bs4 import BeautifulSoup
from park_api import util, env, db
and context (class names, function names, or code) available:
# Path: park_api/util.py
# LOT_COUNTS_PER_CITY = {}
# def get_most_lots_from_known_data(city, lot_name):
# def utc_now():
# def remove_special_chars(string):
# def convert_date(date_string, date_format, timezone="Europe/Berlin"):
#
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
. Output only the next line. | VALUES (%(updated)s, %(downloaded)s, %(city)s, %(data)s) |
Based on the snippet: <|code_start|> break
with open(path, 'rb') as f:
city = importlib.import_module("park_api.cities." + city)
return city.parse_html(f.read().decode('utf-8', 'replace'))
def get_tests():
modpath = os.path.join(env.APP_ROOT, "park_api", "cities")
strip_py = lambda name: ".".join(name.split(".")[:-1])
for (dirpath, dirnames, filenames) in os.walk(modpath):
return tuple(map(strip_py, filter(security.file_is_allowed, filenames)))
@ddt.ddt
class CityTestCase(unittest.TestCase):
def setUp(self):
db.setup()
def sanity_check(self, city_name, city):
self.assertIn("lots", city)
self.assertIn("last_updated", city)
last_updated = datetime.strptime(city["last_updated"],
"%Y-%m-%dT%H:%M:%S")
self.assertIsInstance(last_updated, datetime)
self.assertTrue(len(city["lots"]) > 0)
for lot in city["lots"]:
self.assertIn("name", lot)
self.assertIn("coords", lot)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import unittest
import ddt
import helpers
import importlib
from datetime import datetime
from park_api import db, env, security
and context (classes, functions, sometimes code) from other files:
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
#
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/security.py
# def file_is_allowed(file):
. Output only the next line. | self.assertIn("state", lot) |
Next line prediction: <|code_start|>
def scrape_city(city):
allowed_extensions = [".html", ".json", ".xml"]
for extension in allowed_extensions:
path = os.path.join(helpers.TEST_ROOT,
"fixtures",
city.lower() + extension)
if os.path.isfile(path):
break
with open(path, 'rb') as f:
<|code_end|>
. Use current file imports:
(import os
import unittest
import ddt
import helpers
import importlib
from datetime import datetime
from park_api import db, env, security)
and context including class names, function names, or small code snippets from other files:
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
#
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/security.py
# def file_is_allowed(file):
. Output only the next line. | city = importlib.import_module("park_api.cities." + city) |
Given the code snippet: <|code_start|>
def scrape_city(city):
allowed_extensions = [".html", ".json", ".xml"]
for extension in allowed_extensions:
path = os.path.join(helpers.TEST_ROOT,
"fixtures",
city.lower() + extension)
if os.path.isfile(path):
break
with open(path, 'rb') as f:
city = importlib.import_module("park_api.cities." + city)
return city.parse_html(f.read().decode('utf-8', 'replace'))
def get_tests():
modpath = os.path.join(env.APP_ROOT, "park_api", "cities")
strip_py = lambda name: ".".join(name.split(".")[:-1])
for (dirpath, dirnames, filenames) in os.walk(modpath):
return tuple(map(strip_py, filter(security.file_is_allowed, filenames)))
@ddt.ddt
<|code_end|>
, generate the next line using the imports in this file:
import os
import unittest
import ddt
import helpers
import importlib
from datetime import datetime
from park_api import db, env, security
and context (functions, classes, or occasionally code) from other files:
# Path: park_api/db.py
# POOL = None
# POOL = ThreadedConnectionPool(1, 20,
# database=u.path[1:],
# user=u.username,
# password=u.password,
# host=u.hostname,
# port=u.port)
# def setup(url=env.DATABASE_URI):
# def cursor(commit=False):
#
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
#
# Path: park_api/security.py
# def file_is_allowed(file):
. Output only the next line. | class CityTestCase(unittest.TestCase): |
Based on the snippet: <|code_start|>
API_VERSION = '1.0'
SERVER_VERSION = '0.3.0'
SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
SERVER_CONF = None
ENV = None
SUPPORTED_CITIES = None
DATABASE = {}
DEFAULT_CONFIGURATION = {
"port": 5000,
"host": "::1",
"debug": False,
"live_scrape": True,
"database_uri": "postgres:///park_api",
}
def is_production():
return ENV == "production"
def is_development():
return ENV == "development"
def is_testing():
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import importlib
import configparser
import sys
import subprocess
import logging
from park_api import structs, security
and context (classes, functions, sometimes code) from other files:
# Path: park_api/structs.py
#
# Path: park_api/security.py
# def file_is_allowed(file):
. Output only the next line. | return ENV == "testing" |
Next line prediction: <|code_start|>
API_VERSION = '1.0'
SERVER_VERSION = '0.3.0'
SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
<|code_end|>
. Use current file imports:
(import os
import importlib
import configparser
import sys
import subprocess
import logging
from park_api import structs, security)
and context including class names, function names, or small code snippets from other files:
# Path: park_api/structs.py
#
# Path: park_api/security.py
# def file_is_allowed(file):
. Output only the next line. | SERVER_CONF = None |
Here is a snippet: <|code_start|>def parse_html(xml_data):
feed = feedparser.parse(xml_data)
try:
last_updated = feed["entries"][0]["updated"]
last_updated = datetime.strptime(last_updated[5:25], "%d %b %Y %H:%M:%S").isoformat()
except KeyError:
last_updated = utc_now()
data = {
"lots": [],
"last_updated": last_updated
}
for entry in feed["entries"]:
summary = parse_summary(entry["summary"])
title_elements = parse_title(entry["title"])
lot_identifier = html.unescape((title_elements[2] + " " + title_elements[0]).strip())
lot = geodata.lot(lot_identifier)
data["lots"].append({
"name": html.unescape(title_elements[0]),
"address": lot.address,
"id": html.unescape(lot.id),
"state": "open",
"free": summary[1],
"total": lot.total,
<|code_end|>
. Write the next line using the current file imports:
import feedparser
import html
from datetime import datetime
from park_api.geodata import GeoData
from park_api.util import utc_now
and context from other files:
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
#
# Path: park_api/util.py
# def utc_now():
# """
# Returns the current UTC time in ISO format.
#
# :return:
# """
# return datetime.utcnow().replace(microsecond=0).isoformat()
, which may include functions, classes, or code. Output only the next line. | "coords": lot.coords, |
Predict the next line after this snippet: <|code_start|>
geodata = GeoData(__file__)
def parse_html(xml_data):
feed = feedparser.parse(xml_data)
try:
last_updated = feed["entries"][0]["updated"]
last_updated = datetime.strptime(last_updated[5:25], "%d %b %Y %H:%M:%S").isoformat()
except KeyError:
last_updated = utc_now()
data = {
"lots": [],
"last_updated": last_updated
}
for entry in feed["entries"]:
summary = parse_summary(entry["summary"])
title_elements = parse_title(entry["title"])
lot_identifier = html.unescape((title_elements[2] + " " + title_elements[0]).strip())
lot = geodata.lot(lot_identifier)
data["lots"].append({
"name": html.unescape(title_elements[0]),
"address": lot.address,
<|code_end|>
using the current file's imports:
import feedparser
import html
from datetime import datetime
from park_api.geodata import GeoData
from park_api.util import utc_now
and any relevant context from other files:
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
#
# Path: park_api/util.py
# def utc_now():
# """
# Returns the current UTC time in ISO format.
#
# :return:
# """
# return datetime.utcnow().replace(microsecond=0).isoformat()
. Output only the next line. | "id": html.unescape(lot.id), |
Predict the next line for this snippet: <|code_start|>
def main():
backend = get_backend(env.DATABASE_URI)
migrations = read_migrations(os.path.join(env.APP_ROOT, "schema/db"))
<|code_end|>
with the help of current file imports:
import os
from yoyo import read_migrations, get_backend
from park_api import env
and context from other files:
# Path: park_api/env.py
# API_VERSION = '1.0'
# SERVER_VERSION = '0.3.0'
# SOURCE_REPOSITORY = 'https://github.com/offenesdresden/ParkAPI'
# APP_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
# SERVER_CONF = None
# ENV = None
# SUPPORTED_CITIES = None
# DATABASE = {}
# DEFAULT_CONFIGURATION = {
# "port": 5000,
# "host": "::1",
# "debug": False,
# "live_scrape": True,
# "database_uri": "postgres:///park_api",
# }
# SUPPORTED_CITIES = load_cities()
# ENV = os.getenv("env", "development")
# SERVER_CONF = structs.ServerConf(host=raw_config.get('host'),
# port=raw_config.getint("port"),
# debug=raw_config.getboolean("debug"))
# LIVE_SCRAPE = raw_config.getboolean("live_scrape")
# DATABASE_URI = raw_config.get("database_uri")
# SERVER_VERSION = '0.3.{0}'.format(rev)
# def is_production():
# def is_development():
# def is_testing():
# def is_staging():
# def load_cities():
# def supported_cities():
# def load_config():
# def determine_server_version():
, which may contain function names, class names, or code. Output only the next line. | backend.apply_migrations(migrations) |
Next line prediction: <|code_start|> count = int(member.find('app:stellplaetze_gesamt').string)
except AttributeError:
pass
free = 0
state = "nodata"
situation = member.find('app:situation')
if situation and situation.string != "keine Auslastungsdaten":
free = int(member.find('app:frei').string)
status = member.find('app:status').string
if status == "frei" or status == "besetzt":
state = "open"
else:
state = "closed"
lot_type = member.find('app:art').string
if lot_type == "Straßenrand":
lot_type = "Parkplatz"
lot_id = member.find('app:id').string
address = ""
try:
address = member.find('app:einfahrt').string
except AttributeError:
try:
address = member.find('app:strasse').string
try:
address += " " + member.find('app:hausnr').string
except (AttributeError, TypeError):
pass
except AttributeError:
pass
<|code_end|>
. Use current file imports:
(from bs4 import BeautifulSoup
from park_api.geodata import GeoData
import utm)
and context including class names, function names, or small code snippets from other files:
# Path: park_api/geodata.py
# class GeoData:
# def __init__(self, city):
# json_file = city[:-3] + ".geojson"
# self.city_name = os.path.basename(city[:-3])
# json_path = os.path.join(env.APP_ROOT, "park_api", "cities", json_file)
# try:
# with open(json_path) as f:
# self._process_json(json.load(f))
# except FileNotFoundError:
# self.lots = {}
# private_file = city[:-3] + ".json"
# private_path = os.path.join(env.APP_ROOT, "park_api", "cities", private_file)
# try:
# with open(private_path) as p:
# self.private_data = json.load(p)
# self._process_private(self.private_data)
# except FileNotFoundError:
# self.private_data = None
#
# def _process_json(self, json):
# self.lots = {}
# self.city = None
# for f in json["features"]:
# self._process_feature(f)
# if self.city is None:
# self.city = City(self.city_name,
# self.city_name,
# None,
# None,
# None,
# None,
# None,
# None)
#
# def _process_private(self, json):
# if self.city:
# self.city = City(self.city[0],
# self.city[1],
# self.city[2],
# self.city[3],
# self.city[4],
# json["source"],
# json["public"],
# self.city[7],
# self.city[8])
#
# def _process_feature(self, feature):
# props = feature["properties"]
# _type = props.get("type", None)
# name = props["name"]
# lng, lat = self._coords(feature)
# if _type == "city":
# self.city = self._city_from_props(name, lng, lat, props)
# else:
# lot = self._lot_from_props(name, lng, lat, props)
# self.lots[name] = lot
#
# def _city_from_props(self, name, lng, lat, props):
# url = props.get("url", None)
# source = props.get("source", None)
# headers = props.get("headers", {})
# active_support = props.get("active_support", None)
# attribution = props.get("attribution", None)
# return City(name,
# self.city_name,
# lng,
# lat,
# url,
# source,
# headers,
# source,
# active_support,
# attribution)
#
# def _lot_from_props(self, name, lng, lat, props):
# address = props.get("address", None)
# total = props.get("total", 0)
# if "total_by_weekday" in props.keys():
# weekday = calendar.day_name[date.today().weekday()]
# if weekday in props.get("total_by_weekday"):
# total = props.get("total_by_weekday").get(weekday)
# _type = props.get("type", None)
# _aux = props.get("aux", None)
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, _type, lng, lat, address, total, _aux)
#
# def _coords(self, feature):
# geometry = feature.get("geometry", None)
# if geometry is None:
# return None, None
# else:
# lng, lat = geometry["coordinates"]
# return lng, lat
#
# def lot(self, name):
# lot = self.lots.get(name, None)
# if lot is None:
# _id = generate_id(self.city_name + name)
# return Lot(name, _id, None, None, None, None, 0, None)
# return lot
. Output only the next line. | coord_member = member.find('gml:pos') |