| Instruction | output_code |
|---|---|
Predict the next line for this snippet: <|code_start|>
try:
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
except ImportError:
magic = None
magic_bufsz = None
# we limit to 250 characters as we do not want to accept arbitrarily long
# filenames. other than that, there is no specific reason we could not
# also take more (or less).
<|code_end|>
with the help of current file imports:
import re
import time
import mimetypes
import magic as magic_module
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
and context from other files:
# Path: src/bepasty/constants.py
# FILENAME = 'filename'
# TYPE = 'type'
# TYPE_HINT = 'type-hint'
# LOCKED = 'locked'
# SIZE = 'size'
# COMPLETE = 'complete'
# HASH = 'hash'
# TIMESTAMP_UPLOAD = 'timestamp-upload'
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
# ID = 'id' # storage name
# FOREVER = -1
# TRANSACTION_ID = 'Transaction-ID' # keep in sync with bepasty-cli
#
# Path: src/bepasty/utils/name.py
# class ItemName(str):
# def __new__(cls, uid):
# return str(uid)
#
# @classmethod
# def create(cls, storage, length=ID_LENGTH, max_length=2 * ID_LENGTH, max_tries=10):
# """
# create a unique item name in storage, wanted name length is <length>.
#
# we try at most <max_tries> times to find a unique name of a specific length -
# if we do not succeed, we increase name length and try again.
# if we can't find a unique name even for longer lengths up to max_length,
# we'll raise RuntimeError.
# """
# name = None # avoid false alarm about reference before assignment
# while length <= max_length:
# tries = 0
# while tries < max_tries:
# name = make_id(length)
# if name not in storage:
# break
# tries += 1
# if tries < max_tries:
# # we found a name, break out of outer while also
# break
# length += 1
# if length > max_length:
# raise RuntimeError("no unique names available")
# return cls(name)
#
# Path: src/bepasty/utils/decorators.py
# def threaded(func):
# """
# decorator to run a function asynchronously (in a thread)
#
# be careful: do not access flask threadlocals in f!
# """
# def wrapper(*args, **kwargs):
# t = Thread(target=func, args=args, kwargs=kwargs)
# t.start()
# return wrapper
#
# Path: src/bepasty/utils/hashing.py
# SIZE = 1024 * 1024
# def compute_hash(data, size):
, which may contain function names, class names, or code. Output only the next line. | MAX_FILENAME_LENGTH = 250 |
Here is a snippet: <|code_start|>
try:
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
except ImportError:
magic = None
magic_bufsz = None
# we limit to 250 characters as we do not want to accept arbitrarily long
# filenames. other than that, there is no specific reason we could not
# also take more (or less).
<|code_end|>
. Write the next line using the current file imports:
import re
import time
import mimetypes
import magic as magic_module
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
and context from other files:
# Path: src/bepasty/constants.py
# FILENAME = 'filename'
# TYPE = 'type'
# TYPE_HINT = 'type-hint'
# LOCKED = 'locked'
# SIZE = 'size'
# COMPLETE = 'complete'
# HASH = 'hash'
# TIMESTAMP_UPLOAD = 'timestamp-upload'
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
# ID = 'id' # storage name
# FOREVER = -1
# TRANSACTION_ID = 'Transaction-ID' # keep in sync with bepasty-cli
#
# Path: src/bepasty/utils/name.py
# class ItemName(str):
# def __new__(cls, uid):
# return str(uid)
#
# @classmethod
# def create(cls, storage, length=ID_LENGTH, max_length=2 * ID_LENGTH, max_tries=10):
# """
# create a unique item name in storage, wanted name length is <length>.
#
# we try at most <max_tries> times to find a unique name of a specific length -
# if we do not succeed, we increase name length and try again.
# if we can't find a unique name even for longer lengths up to max_length,
# we'll raise RuntimeError.
# """
# name = None # avoid false alarm about reference before assignment
# while length <= max_length:
# tries = 0
# while tries < max_tries:
# name = make_id(length)
# if name not in storage:
# break
# tries += 1
# if tries < max_tries:
# # we found a name, break out of outer while also
# break
# length += 1
# if length > max_length:
# raise RuntimeError("no unique names available")
# return cls(name)
#
# Path: src/bepasty/utils/decorators.py
# def threaded(func):
# """
# decorator to run a function asynchronously (in a thread)
#
# be careful: do not access flask threadlocals in f!
# """
# def wrapper(*args, **kwargs):
# t = Thread(target=func, args=args, kwargs=kwargs)
# t.start()
# return wrapper
#
# Path: src/bepasty/utils/hashing.py
# SIZE = 1024 * 1024
# def compute_hash(data, size):
, which may include functions, classes, or code. Output only the next line. | MAX_FILENAME_LENGTH = 250 |
Predict the next line after this snippet: <|code_start|>
try:
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
except ImportError:
magic = None
<|code_end|>
using the current file's imports:
import re
import time
import mimetypes
import magic as magic_module
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
and any relevant context from other files:
# Path: src/bepasty/constants.py
# FILENAME = 'filename'
# TYPE = 'type'
# TYPE_HINT = 'type-hint'
# LOCKED = 'locked'
# SIZE = 'size'
# COMPLETE = 'complete'
# HASH = 'hash'
# TIMESTAMP_UPLOAD = 'timestamp-upload'
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
# ID = 'id' # storage name
# FOREVER = -1
# TRANSACTION_ID = 'Transaction-ID' # keep in sync with bepasty-cli
#
# Path: src/bepasty/utils/name.py
# class ItemName(str):
# def __new__(cls, uid):
# return str(uid)
#
# @classmethod
# def create(cls, storage, length=ID_LENGTH, max_length=2 * ID_LENGTH, max_tries=10):
# """
# create a unique item name in storage, wanted name length is <length>.
#
# we try at most <max_tries> times to find a unique name of a specific length -
# if we do not succeed, we increase name length and try again.
# if we can't find a unique name even for longer lengths up to max_length,
# we'll raise RuntimeError.
# """
# name = None # avoid false alarm about reference before assignment
# while length <= max_length:
# tries = 0
# while tries < max_tries:
# name = make_id(length)
# if name not in storage:
# break
# tries += 1
# if tries < max_tries:
# # we found a name, break out of outer while also
# break
# length += 1
# if length > max_length:
# raise RuntimeError("no unique names available")
# return cls(name)
#
# Path: src/bepasty/utils/decorators.py
# def threaded(func):
# """
# decorator to run a function asynchronously (in a thread)
#
# be careful: do not access flask threadlocals in f!
# """
# def wrapper(*args, **kwargs):
# t = Thread(target=func, args=args, kwargs=kwargs)
# t.start()
# return wrapper
#
# Path: src/bepasty/utils/hashing.py
# SIZE = 1024 * 1024
# def compute_hash(data, size):
. Output only the next line. | magic_bufsz = None |
Predict the next line after this snippet: <|code_start|>
try:
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
except ImportError:
magic = None
magic_bufsz = None
# we limit to 250 characters as we do not want to accept arbitrarily long
# filenames. other than that, there is no specific reason we could not
# also take more (or less).
MAX_FILENAME_LENGTH = 250
class Upload:
_filename_re = re.compile(r'[^a-zA-Z0-9 *+:;.,_-]+')
<|code_end|>
using the current file's imports:
import re
import time
import mimetypes
import magic as magic_module
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
and any relevant context from other files:
# Path: src/bepasty/constants.py
# FILENAME = 'filename'
# TYPE = 'type'
# TYPE_HINT = 'type-hint'
# LOCKED = 'locked'
# SIZE = 'size'
# COMPLETE = 'complete'
# HASH = 'hash'
# TIMESTAMP_UPLOAD = 'timestamp-upload'
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
# ID = 'id' # storage name
# FOREVER = -1
# TRANSACTION_ID = 'Transaction-ID' # keep in sync with bepasty-cli
#
# Path: src/bepasty/utils/name.py
# class ItemName(str):
# def __new__(cls, uid):
# return str(uid)
#
# @classmethod
# def create(cls, storage, length=ID_LENGTH, max_length=2 * ID_LENGTH, max_tries=10):
# """
# create a unique item name in storage, wanted name length is <length>.
#
# we try at most <max_tries> times to find a unique name of a specific length -
# if we do not succeed, we increase name length and try again.
# if we can't find a unique name even for longer lengths up to max_length,
# we'll raise RuntimeError.
# """
# name = None # avoid false alarm about reference before assignment
# while length <= max_length:
# tries = 0
# while tries < max_tries:
# name = make_id(length)
# if name not in storage:
# break
# tries += 1
# if tries < max_tries:
# # we found a name, break out of outer while also
# break
# length += 1
# if length > max_length:
# raise RuntimeError("no unique names available")
# return cls(name)
#
# Path: src/bepasty/utils/decorators.py
# def threaded(func):
# """
# decorator to run a function asynchronously (in a thread)
#
# be careful: do not access flask threadlocals in f!
# """
# def wrapper(*args, **kwargs):
# t = Thread(target=func, args=args, kwargs=kwargs)
# t.start()
# return wrapper
#
# Path: src/bepasty/utils/hashing.py
# SIZE = 1024 * 1024
# def compute_hash(data, size):
. Output only the next line. | _type_re = re.compile(r'[^a-zA-Z0-9/+.-]+') |
Predict the next line for this snippet: <|code_start|>
try:
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
except ImportError:
magic = None
magic_bufsz = None
# we limit to 250 characters as we do not want to accept arbitrarily long
# filenames. other than that, there is no specific reason we could not
# also take more (or less).
MAX_FILENAME_LENGTH = 250
class Upload:
_filename_re = re.compile(r'[^a-zA-Z0-9 *+:;.,_-]+')
_type_re = re.compile(r'[^a-zA-Z0-9/+.-]+')
<|code_end|>
with the help of current file imports:
import re
import time
import mimetypes
import magic as magic_module
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
and context from other files:
# Path: src/bepasty/constants.py
# FILENAME = 'filename'
# TYPE = 'type'
# TYPE_HINT = 'type-hint'
# LOCKED = 'locked'
# SIZE = 'size'
# COMPLETE = 'complete'
# HASH = 'hash'
# TIMESTAMP_UPLOAD = 'timestamp-upload'
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
# ID = 'id' # storage name
# FOREVER = -1
# TRANSACTION_ID = 'Transaction-ID' # keep in sync with bepasty-cli
#
# Path: src/bepasty/utils/name.py
# class ItemName(str):
# def __new__(cls, uid):
# return str(uid)
#
# @classmethod
# def create(cls, storage, length=ID_LENGTH, max_length=2 * ID_LENGTH, max_tries=10):
# """
# create a unique item name in storage, wanted name length is <length>.
#
# we try at most <max_tries> times to find a unique name of a specific length -
# if we do not succeed, we increase name length and try again.
# if we can't find a unique name even for longer lengths up to max_length,
# we'll raise RuntimeError.
# """
# name = None # avoid false alarm about reference before assignment
# while length <= max_length:
# tries = 0
# while tries < max_tries:
# name = make_id(length)
# if name not in storage:
# break
# tries += 1
# if tries < max_tries:
# # we found a name, break out of outer while also
# break
# length += 1
# if length > max_length:
# raise RuntimeError("no unique names available")
# return cls(name)
#
# Path: src/bepasty/utils/decorators.py
# def threaded(func):
# """
# decorator to run a function asynchronously (in a thread)
#
# be careful: do not access flask threadlocals in f!
# """
# def wrapper(*args, **kwargs):
# t = Thread(target=func, args=args, kwargs=kwargs)
# t.start()
# return wrapper
#
# Path: src/bepasty/utils/hashing.py
# SIZE = 1024 * 1024
# def compute_hash(data, size):
, which may contain function names, class names, or code. Output only the next line. | @classmethod |
Based on the snippet: <|code_start|>
try:
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
except ImportError:
magic = None
magic_bufsz = None
# we limit to 250 characters as we do not want to accept arbitrarily long
# filenames. other than that, there is no specific reason we could not
# also take more (or less).
<|code_end|>
, predict the immediate next line with the help of imports:
import re
import time
import mimetypes
import magic as magic_module
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
and context (classes, functions, sometimes code) from other files:
# Path: src/bepasty/constants.py
# FILENAME = 'filename'
# TYPE = 'type'
# TYPE_HINT = 'type-hint'
# LOCKED = 'locked'
# SIZE = 'size'
# COMPLETE = 'complete'
# HASH = 'hash'
# TIMESTAMP_UPLOAD = 'timestamp-upload'
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
# ID = 'id' # storage name
# FOREVER = -1
# TRANSACTION_ID = 'Transaction-ID' # keep in sync with bepasty-cli
#
# Path: src/bepasty/utils/name.py
# class ItemName(str):
# def __new__(cls, uid):
# return str(uid)
#
# @classmethod
# def create(cls, storage, length=ID_LENGTH, max_length=2 * ID_LENGTH, max_tries=10):
# """
# create a unique item name in storage, wanted name length is <length>.
#
# we try at most <max_tries> times to find a unique name of a specific length -
# if we do not succeed, we increase name length and try again.
# if we can't find a unique name even for longer lengths up to max_length,
# we'll raise RuntimeError.
# """
# name = None # avoid false alarm about reference before assignment
# while length <= max_length:
# tries = 0
# while tries < max_tries:
# name = make_id(length)
# if name not in storage:
# break
# tries += 1
# if tries < max_tries:
# # we found a name, break out of outer while also
# break
# length += 1
# if length > max_length:
# raise RuntimeError("no unique names available")
# return cls(name)
#
# Path: src/bepasty/utils/decorators.py
# def threaded(func):
# """
# decorator to run a function asynchronously (in a thread)
#
# be careful: do not access flask threadlocals in f!
# """
# def wrapper(*args, **kwargs):
# t = Thread(target=func, args=args, kwargs=kwargs)
# t.start()
# return wrapper
#
# Path: src/bepasty/utils/hashing.py
# SIZE = 1024 * 1024
# def compute_hash(data, size):
. Output only the next line. | MAX_FILENAME_LENGTH = 250 |
Given snippet: <|code_start|>
try:
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
except ImportError:
magic = None
magic_bufsz = None
# we limit to 250 characters as we do not want to accept arbitrarily long
# filenames. other than that, there is no specific reason we could not
# also take more (or less).
MAX_FILENAME_LENGTH = 250
class Upload:
_filename_re = re.compile(r'[^a-zA-Z0-9 *+:;.,_-]+')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
import time
import mimetypes
import magic as magic_module
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
and context:
# Path: src/bepasty/constants.py
# FILENAME = 'filename'
# TYPE = 'type'
# TYPE_HINT = 'type-hint'
# LOCKED = 'locked'
# SIZE = 'size'
# COMPLETE = 'complete'
# HASH = 'hash'
# TIMESTAMP_UPLOAD = 'timestamp-upload'
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
# ID = 'id' # storage name
# FOREVER = -1
# TRANSACTION_ID = 'Transaction-ID' # keep in sync with bepasty-cli
#
# Path: src/bepasty/utils/name.py
# class ItemName(str):
# def __new__(cls, uid):
# return str(uid)
#
# @classmethod
# def create(cls, storage, length=ID_LENGTH, max_length=2 * ID_LENGTH, max_tries=10):
# """
# create a unique item name in storage, wanted name length is <length>.
#
# we try at most <max_tries> times to find a unique name of a specific length -
# if we do not succeed, we increase name length and try again.
# if we can't find a unique name even for longer lengths up to max_length,
# we'll raise RuntimeError.
# """
# name = None # avoid false alarm about reference before assignment
# while length <= max_length:
# tries = 0
# while tries < max_tries:
# name = make_id(length)
# if name not in storage:
# break
# tries += 1
# if tries < max_tries:
# # we found a name, break out of outer while also
# break
# length += 1
# if length > max_length:
# raise RuntimeError("no unique names available")
# return cls(name)
#
# Path: src/bepasty/utils/decorators.py
# def threaded(func):
# """
# decorator to run a function asynchronously (in a thread)
#
# be careful: do not access flask threadlocals in f!
# """
# def wrapper(*args, **kwargs):
# t = Thread(target=func, args=args, kwargs=kwargs)
# t.start()
# return wrapper
#
# Path: src/bepasty/utils/hashing.py
# SIZE = 1024 * 1024
# def compute_hash(data, size):
which might include code, classes, or functions. Output only the next line. | _type_re = re.compile(r'[^a-zA-Z0-9/+.-]+') |
Here is a snippet: <|code_start|>
try:
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
except ImportError:
magic = None
<|code_end|>
. Write the next line using the current file imports:
import re
import time
import mimetypes
import magic as magic_module
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
and context from other files:
# Path: src/bepasty/constants.py
# FILENAME = 'filename'
# TYPE = 'type'
# TYPE_HINT = 'type-hint'
# LOCKED = 'locked'
# SIZE = 'size'
# COMPLETE = 'complete'
# HASH = 'hash'
# TIMESTAMP_UPLOAD = 'timestamp-upload'
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
# ID = 'id' # storage name
# FOREVER = -1
# TRANSACTION_ID = 'Transaction-ID' # keep in sync with bepasty-cli
#
# Path: src/bepasty/utils/name.py
# class ItemName(str):
# def __new__(cls, uid):
# return str(uid)
#
# @classmethod
# def create(cls, storage, length=ID_LENGTH, max_length=2 * ID_LENGTH, max_tries=10):
# """
# create a unique item name in storage, wanted name length is <length>.
#
# we try at most <max_tries> times to find a unique name of a specific length -
# if we do not succeed, we increase name length and try again.
# if we can't find a unique name even for longer lengths up to max_length,
# we'll raise RuntimeError.
# """
# name = None # avoid false alarm about reference before assignment
# while length <= max_length:
# tries = 0
# while tries < max_tries:
# name = make_id(length)
# if name not in storage:
# break
# tries += 1
# if tries < max_tries:
# # we found a name, break out of outer while also
# break
# length += 1
# if length > max_length:
# raise RuntimeError("no unique names available")
# return cls(name)
#
# Path: src/bepasty/utils/decorators.py
# def threaded(func):
# """
# decorator to run a function asynchronously (in a thread)
#
# be careful: do not access flask threadlocals in f!
# """
# def wrapper(*args, **kwargs):
# t = Thread(target=func, args=args, kwargs=kwargs)
# t.start()
# return wrapper
#
# Path: src/bepasty/utils/hashing.py
# SIZE = 1024 * 1024
# def compute_hash(data, size):
, which may include functions, classes, or code. Output only the next line. | magic_bufsz = None |
Given the following code snippet before the placeholder: <|code_start|>
try:
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
except ImportError:
<|code_end|>
, predict the next line using imports from the current file:
import re
import time
import mimetypes
import magic as magic_module
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
and context including class names, function names, and sometimes code from other files:
# Path: src/bepasty/constants.py
# FILENAME = 'filename'
# TYPE = 'type'
# TYPE_HINT = 'type-hint'
# LOCKED = 'locked'
# SIZE = 'size'
# COMPLETE = 'complete'
# HASH = 'hash'
# TIMESTAMP_UPLOAD = 'timestamp-upload'
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
# ID = 'id' # storage name
# FOREVER = -1
# TRANSACTION_ID = 'Transaction-ID' # keep in sync with bepasty-cli
#
# Path: src/bepasty/utils/name.py
# class ItemName(str):
# def __new__(cls, uid):
# return str(uid)
#
# @classmethod
# def create(cls, storage, length=ID_LENGTH, max_length=2 * ID_LENGTH, max_tries=10):
# """
# create a unique item name in storage, wanted name length is <length>.
#
# we try at most <max_tries> times to find a unique name of a specific length -
# if we do not succeed, we increase name length and try again.
# if we can't find a unique name even for longer lengths up to max_length,
# we'll raise RuntimeError.
# """
# name = None # avoid false alarm about reference before assignment
# while length <= max_length:
# tries = 0
# while tries < max_tries:
# name = make_id(length)
# if name not in storage:
# break
# tries += 1
# if tries < max_tries:
# # we found a name, break out of outer while also
# break
# length += 1
# if length > max_length:
# raise RuntimeError("no unique names available")
# return cls(name)
#
# Path: src/bepasty/utils/decorators.py
# def threaded(func):
# """
# decorator to run a function asynchronously (in a thread)
#
# be careful: do not access flask threadlocals in f!
# """
# def wrapper(*args, **kwargs):
# t = Thread(target=func, args=args, kwargs=kwargs)
# t.start()
# return wrapper
#
# Path: src/bepasty/utils/hashing.py
# SIZE = 1024 * 1024
# def compute_hash(data, size):
. Output only the next line. | magic = None |
Based on the snippet: <|code_start|>
try:
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
<|code_end|>
, predict the immediate next line with the help of imports:
import re
import time
import mimetypes
import magic as magic_module
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
and context (classes, functions, sometimes code) from other files:
# Path: src/bepasty/constants.py
# FILENAME = 'filename'
# TYPE = 'type'
# TYPE_HINT = 'type-hint'
# LOCKED = 'locked'
# SIZE = 'size'
# COMPLETE = 'complete'
# HASH = 'hash'
# TIMESTAMP_UPLOAD = 'timestamp-upload'
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
# ID = 'id' # storage name
# FOREVER = -1
# TRANSACTION_ID = 'Transaction-ID' # keep in sync with bepasty-cli
#
# Path: src/bepasty/utils/name.py
# class ItemName(str):
# def __new__(cls, uid):
# return str(uid)
#
# @classmethod
# def create(cls, storage, length=ID_LENGTH, max_length=2 * ID_LENGTH, max_tries=10):
# """
# create a unique item name in storage, wanted name length is <length>.
#
# we try at most <max_tries> times to find a unique name of a specific length -
# if we do not succeed, we increase name length and try again.
# if we can't find a unique name even for longer lengths up to max_length,
# we'll raise RuntimeError.
# """
# name = None # avoid false alarm about reference before assignment
# while length <= max_length:
# tries = 0
# while tries < max_tries:
# name = make_id(length)
# if name not in storage:
# break
# tries += 1
# if tries < max_tries:
# # we found a name, break out of outer while also
# break
# length += 1
# if length > max_length:
# raise RuntimeError("no unique names available")
# return cls(name)
#
# Path: src/bepasty/utils/decorators.py
# def threaded(func):
# """
# decorator to run a function asynchronously (in a thread)
#
# be careful: do not access flask threadlocals in f!
# """
# def wrapper(*args, **kwargs):
# t = Thread(target=func, args=args, kwargs=kwargs)
# t.start()
# return wrapper
#
# Path: src/bepasty/utils/hashing.py
# SIZE = 1024 * 1024
# def compute_hash(data, size):
. Output only the next line. | except ImportError: |
Using the snippet: <|code_start|> raise
try:
need_close = True
if not item.meta[COMPLETE]:
return self.err_incomplete(item, 'Upload incomplete. Try again later.')
if item.meta[LOCKED] and not may(ADMIN):
raise Forbidden()
if delete_if_lifetime_over(item, name):
raise NotFound()
need_close = False
finally:
if need_close:
item.close()
return self.response(item, name)
class InlineView(DownloadView):
content_disposition = 'inline' # to trigger viewing in browser, for some types
class ThumbnailView(InlineView):
thumbnail_size = 192, 108
thumbnail_type = 'jpeg' # png, jpeg
thumbnail_data = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="108" height="108" viewBox="0 0 108 108" xmlns="http://www.w3.org/2000/svg">
<|code_end|>
, determine the next line of code. You have imports:
import errno
import os
import time
import PIL
from io import BytesIO
from PIL import Image
from flask import Response, current_app, render_template, stream_with_context
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from ..constants import COMPLETE, FILENAME, LOCKED, SIZE, TIMESTAMP_DOWNLOAD, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.permissions import ADMIN, READ, may
and context (class names, function names, or code) available:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# SIZE = 'size'
#
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# READ = 'read'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
. Output only the next line. | <rect x="1" y="1" width="106" height="106" fill="whitesmoke" stroke-width="2" stroke="blue" /> |
Predict the next line after this snippet: <|code_start|>
try:
need_close = True
if not item.meta[COMPLETE]:
return self.err_incomplete(item, 'Upload incomplete. Try again later.')
if item.meta[LOCKED] and not may(ADMIN):
raise Forbidden()
if delete_if_lifetime_over(item, name):
raise NotFound()
need_close = False
finally:
if need_close:
item.close()
return self.response(item, name)
class InlineView(DownloadView):
content_disposition = 'inline' # to trigger viewing in browser, for some types
class ThumbnailView(InlineView):
thumbnail_size = 192, 108
thumbnail_type = 'jpeg' # png, jpeg
thumbnail_data = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="108" height="108" viewBox="0 0 108 108" xmlns="http://www.w3.org/2000/svg">
<rect x="1" y="1" width="106" height="106" fill="whitesmoke" stroke-width="2" stroke="blue" />
<|code_end|>
using the current file's imports:
import errno
import os
import time
import PIL
from io import BytesIO
from PIL import Image
from flask import Response, current_app, render_template, stream_with_context
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from ..constants import COMPLETE, FILENAME, LOCKED, SIZE, TIMESTAMP_DOWNLOAD, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.permissions import ADMIN, READ, may
and any relevant context from other files:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# SIZE = 'size'
#
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# READ = 'read'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
. Output only the next line. | <line x1="1" y1="1" x2="106" y2="106" stroke="blue" stroke-width="2" /> |
Predict the next line after this snippet: <|code_start|> dispo = self.content_disposition
if dispo != 'attachment':
# no simple download, so we must be careful about XSS
if ct.startswith("text/"):
ct = 'text/plain' # only send simple plain text
ret = Response(stream_with_context(self.stream(item, 0, item.data.size)))
ret.headers['Content-Disposition'] = '{}; filename="{}"'.format(
dispo, item.meta[FILENAME])
ret.headers['Content-Length'] = item.meta[SIZE]
ret.headers['Content-Type'] = ct
ret.headers['X-Content-Type-Options'] = 'nosniff' # yes, we really mean it
return ret
def get(self, name):
if not may(READ):
raise Forbidden()
try:
item = current_app.storage.openwrite(name)
except OSError as e:
if e.errno == errno.ENOENT:
raise NotFound()
raise
try:
need_close = True
if not item.meta[COMPLETE]:
return self.err_incomplete(item, 'Upload incomplete. Try again later.')
if item.meta[LOCKED] and not may(ADMIN):
<|code_end|>
using the current file's imports:
import errno
import os
import time
import PIL
from io import BytesIO
from PIL import Image
from flask import Response, current_app, render_template, stream_with_context
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from ..constants import COMPLETE, FILENAME, LOCKED, SIZE, TIMESTAMP_DOWNLOAD, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.permissions import ADMIN, READ, may
and any relevant context from other files:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# SIZE = 'size'
#
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# READ = 'read'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
. Output only the next line. | raise Forbidden() |
Next line prediction: <|code_start|> ret = Response(stream_with_context(self.stream(item, 0, item.data.size)))
ret.headers['Content-Disposition'] = '{}; filename="{}"'.format(
dispo, item.meta[FILENAME])
ret.headers['Content-Length'] = item.meta[SIZE]
ret.headers['Content-Type'] = ct
ret.headers['X-Content-Type-Options'] = 'nosniff' # yes, we really mean it
return ret
def get(self, name):
if not may(READ):
raise Forbidden()
try:
item = current_app.storage.openwrite(name)
except OSError as e:
if e.errno == errno.ENOENT:
raise NotFound()
raise
try:
need_close = True
if not item.meta[COMPLETE]:
return self.err_incomplete(item, 'Upload incomplete. Try again later.')
if item.meta[LOCKED] and not may(ADMIN):
raise Forbidden()
if delete_if_lifetime_over(item, name):
raise NotFound()
need_close = False
finally:
<|code_end|>
. Use current file imports:
(import errno
import os
import time
import PIL
from io import BytesIO
from PIL import Image
from flask import Response, current_app, render_template, stream_with_context
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from ..constants import COMPLETE, FILENAME, LOCKED, SIZE, TIMESTAMP_DOWNLOAD, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.permissions import ADMIN, READ, may)
and context including class names, function names, or small code snippets from other files:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# SIZE = 'size'
#
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# READ = 'read'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
. Output only the next line. | if need_close: |
Given the following code snippet before the placeholder: <|code_start|>
if delete_if_lifetime_over(item, name):
raise NotFound()
need_close = False
finally:
if need_close:
item.close()
return self.response(item, name)
class InlineView(DownloadView):
content_disposition = 'inline' # to trigger viewing in browser, for some types
class ThumbnailView(InlineView):
thumbnail_size = 192, 108
thumbnail_type = 'jpeg' # png, jpeg
thumbnail_data = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="108" height="108" viewBox="0 0 108 108" xmlns="http://www.w3.org/2000/svg">
<rect x="1" y="1" width="106" height="106" fill="whitesmoke" stroke-width="2" stroke="blue" />
<line x1="1" y1="1" x2="106" y2="106" stroke="blue" stroke-width="2" />
<line x1="1" y1="106" x2="106" y2="0" stroke="blue" stroke-width="2" />
</svg>""".strip().encode()
def err_incomplete(self, item, error):
return b'', 409 # conflict
def response(self, item, name):
<|code_end|>
, predict the next line using imports from the current file:
import errno
import os
import time
import PIL
from io import BytesIO
from PIL import Image
from flask import Response, current_app, render_template, stream_with_context
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from ..constants import COMPLETE, FILENAME, LOCKED, SIZE, TIMESTAMP_DOWNLOAD, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.permissions import ADMIN, READ, may
and context including class names, function names, and sometimes code from other files:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# SIZE = 'size'
#
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# READ = 'read'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
. Output only the next line. | if PIL is None: |
Given snippet: <|code_start|> def response(self, item, name):
if PIL is None:
# looks like PIL / Pillow is not available
return b'', 501 # not implemented
sz = item.meta[SIZE]
fn = item.meta[FILENAME]
ct = item.meta[TYPE]
if not ct.startswith("image/"):
# return a placeholder thumbnail for unsupported item types
ret = Response(self.thumbnail_data)
ret.headers['Content-Length'] = len(self.thumbnail_data)
ret.headers['Content-Type'] = 'image/svg+xml'
ret.headers['X-Content-Type-Options'] = 'nosniff' # yes, we really mean it
return ret
# compute thumbnail data "on the fly"
with BytesIO(item.data.read(sz, 0)) as img_bio, BytesIO() as thumbnail_bio:
with Image.open(img_bio) as img:
img.thumbnail(self.thumbnail_size)
img.save(thumbnail_bio, self.thumbnail_type)
thumbnail_data = thumbnail_bio.getvalue()
name, ext = os.path.splitext(fn)
thumbnail_fn = '%s-thumb.%s' % (name, self.thumbnail_type)
ret = Response(thumbnail_data)
ret.headers['Content-Disposition'] = '{}; filename="{}"'.format(
self.content_disposition, thumbnail_fn)
ret.headers['Content-Length'] = len(thumbnail_data)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import errno
import os
import time
import PIL
from io import BytesIO
from PIL import Image
from flask import Response, current_app, render_template, stream_with_context
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from ..constants import COMPLETE, FILENAME, LOCKED, SIZE, TIMESTAMP_DOWNLOAD, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.permissions import ADMIN, READ, may
and context:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# SIZE = 'size'
#
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# READ = 'read'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
which might include code, classes, or functions. Output only the next line. | ret.headers['Content-Type'] = 'image/%s' % self.thumbnail_type |
Using the snippet: <|code_start|>
def get(self, name):
if not may(READ):
raise Forbidden()
try:
item = current_app.storage.openwrite(name)
except OSError as e:
if e.errno == errno.ENOENT:
raise NotFound()
raise
try:
need_close = True
if not item.meta[COMPLETE]:
return self.err_incomplete(item, 'Upload incomplete. Try again later.')
if item.meta[LOCKED] and not may(ADMIN):
raise Forbidden()
if delete_if_lifetime_over(item, name):
raise NotFound()
need_close = False
finally:
if need_close:
item.close()
return self.response(item, name)
class InlineView(DownloadView):
<|code_end|>
, determine the next line of code. You have imports:
import errno
import os
import time
import PIL
from io import BytesIO
from PIL import Image
from flask import Response, current_app, render_template, stream_with_context
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from ..constants import COMPLETE, FILENAME, LOCKED, SIZE, TIMESTAMP_DOWNLOAD, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.permissions import ADMIN, READ, may
and context (class names, function names, or code) available:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# SIZE = 'size'
#
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# READ = 'read'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
. Output only the next line. | content_disposition = 'inline' # to trigger viewing in browser, for some types |
Given snippet: <|code_start|> if ct.startswith("text/"):
ct = 'text/plain' # only send simple plain text
ret = Response(stream_with_context(self.stream(item, 0, item.data.size)))
ret.headers['Content-Disposition'] = '{}; filename="{}"'.format(
dispo, item.meta[FILENAME])
ret.headers['Content-Length'] = item.meta[SIZE]
ret.headers['Content-Type'] = ct
ret.headers['X-Content-Type-Options'] = 'nosniff' # yes, we really mean it
return ret
def get(self, name):
if not may(READ):
raise Forbidden()
try:
item = current_app.storage.openwrite(name)
except OSError as e:
if e.errno == errno.ENOENT:
raise NotFound()
raise
try:
need_close = True
if not item.meta[COMPLETE]:
return self.err_incomplete(item, 'Upload incomplete. Try again later.')
if item.meta[LOCKED] and not may(ADMIN):
raise Forbidden()
if delete_if_lifetime_over(item, name):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import errno
import os
import time
import PIL
from io import BytesIO
from PIL import Image
from flask import Response, current_app, render_template, stream_with_context
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from ..constants import COMPLETE, FILENAME, LOCKED, SIZE, TIMESTAMP_DOWNLOAD, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.permissions import ADMIN, READ, may
and context:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# SIZE = 'size'
#
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# READ = 'read'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
which might include code, classes, or functions. Output only the next line. | raise NotFound() |
Given the following code snippet before the placeholder: <|code_start|> item = current_app.storage.openwrite(name)
except OSError as e:
if e.errno == errno.ENOENT:
raise NotFound()
raise
try:
need_close = True
if not item.meta[COMPLETE]:
return self.err_incomplete(item, 'Upload incomplete. Try again later.')
if item.meta[LOCKED] and not may(ADMIN):
raise Forbidden()
if delete_if_lifetime_over(item, name):
raise NotFound()
need_close = False
finally:
if need_close:
item.close()
return self.response(item, name)
class InlineView(DownloadView):
content_disposition = 'inline' # to trigger viewing in browser, for some types
class ThumbnailView(InlineView):
thumbnail_size = 192, 108
<|code_end|>
, predict the next line using imports from the current file:
import errno
import os
import time
import PIL
from io import BytesIO
from PIL import Image
from flask import Response, current_app, render_template, stream_with_context
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from ..constants import COMPLETE, FILENAME, LOCKED, SIZE, TIMESTAMP_DOWNLOAD, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.permissions import ADMIN, READ, may
and context including class names, function names, and sometimes code from other files:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# SIZE = 'size'
#
# TIMESTAMP_DOWNLOAD = 'timestamp-download'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# READ = 'read'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
. Output only the next line. | thumbnail_type = 'jpeg' # png, jpeg |
Predict the next line after this snippet: <|code_start|>
class ModifyView(MethodView):
def error(self, item, error):
return render_template('error.html', heading=item.meta[FILENAME], body=error), 409
def response(self, name):
return redirect_next_referrer('bepasty.display', name=name)
def get_params(self):
return {
FILENAME: request.form.get('filename'),
TYPE: request.form.get('contenttype'),
}
<|code_end|>
using the current file's imports:
import errno
from flask import current_app, request, render_template
from flask.views import MethodView
from werkzeug.exceptions import Forbidden, NotFound
from ..constants import COMPLETE, FILENAME, LOCKED, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.http import redirect_next_referrer
from ..utils.permissions import ADMIN, CREATE, may
from ..utils.upload import Upload
and any relevant context from other files:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/http.py
# def redirect_next_referrer(endpoint, **values):
# return redirect(_redirect_target_url(request.form, True, endpoint, **values))
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# CREATE = 'create'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
#
# Path: src/bepasty/utils/upload.py
# class Upload:
# _filename_re = re.compile(r'[^a-zA-Z0-9 *+:;.,_-]+')
# _type_re = re.compile(r'[^a-zA-Z0-9/+.-]+')
#
# @classmethod
# def filter_size(cls, i):
# """
# Filter size.
# Check for advertised size.
# """
# try:
# i = int(i)
# except (ValueError, TypeError):
# raise BadRequest(description='Size is invalid')
# if i > current_app.config['MAX_ALLOWED_FILE_SIZE']:
# raise RequestEntityTooLarge()
# return i
#
# @classmethod
# def filter_filename(cls, filename, storage_name, content_type, content_type_hint):
# """
# Filter filename.
# Only allow some basic characters and shorten to 50 characters.
# """
# # Make up filename if we don't have one
# if not filename:
# if not content_type:
# content_type = content_type_hint
# # note: stdlib mimetypes.guess_extension is total crap
# if content_type.startswith("text/"):
# ext = ".txt"
# else:
# ext = ".bin"
# filename = storage_name + ext
# return cls._filename_re.sub('', filename)[:MAX_FILENAME_LENGTH]
#
# @classmethod
# def filter_type(cls, ct, ct_hint, filename=None):
# """
# Filter Content-Type
# Only allow some basic characters and shorten to 50 characters.
#
# Return value:
# tuple[0] - content-type string
# tuple[1] - whether tuple[0] is hint or not
# True: content-type is just a hint
# False: content-type is not a hint, was specified by user
# """
# if not ct and filename:
# ct, encoding = mimetypes.guess_type(filename)
# if not ct:
# return ct_hint, True
# return cls._type_re.sub('', ct)[:50], False
#
# @classmethod
# def meta_new(cls, item, input_size, input_filename, input_type,
# input_type_hint, storage_name, maxlife_stamp=FOREVER):
# item.meta[FILENAME] = cls.filter_filename(
# input_filename, storage_name, input_type, input_type_hint
# )
# item.meta[SIZE] = cls.filter_size(input_size)
# ct, hint = cls.filter_type(input_type, input_type_hint, input_filename)
# item.meta[TYPE] = ct
# item.meta[TYPE_HINT] = hint
# item.meta[TIMESTAMP_UPLOAD] = int(time.time())
# item.meta[TIMESTAMP_DOWNLOAD] = 0
# item.meta[LOCKED] = current_app.config['UPLOAD_LOCKED']
# item.meta[COMPLETE] = False
# item.meta[HASH] = ''
# item.meta[TIMESTAMP_MAX_LIFE] = maxlife_stamp
#
# @classmethod
# def meta_complete(cls, item, file_hash):
# # update TYPE by python-magic if not decided yet
# if item.meta.pop(TYPE_HINT, False):
# if magic and current_app.config.get('USE_PYTHON_MAGIC', False):
# if item.meta[TYPE] == 'application/octet-stream':
# item.meta[TYPE] = magic.from_buffer(item.data.read(magic_bufsz, 0))
# item.meta[COMPLETE] = True
# item.meta[HASH] = file_hash
#
# @staticmethod
# def data(item, f, size_input, offset=0):
# """
# Copy data from temp file into storage.
# """
# read_length = 16 * 1024
# size_written = 0
# hasher = hash_new()
#
# while True:
# read_length = min(read_length, size_input)
# if size_input == 0:
# break
#
# buf = f.read(read_length)
# if not buf:
# # Should not happen, we already checked the size
# raise RuntimeError
#
# item.data.write(buf, offset + size_written)
# hasher.update(buf)
#
# len_buf = len(buf)
# size_written += len_buf
# size_input -= len_buf
#
# return size_written, hasher.hexdigest()
. Output only the next line. | def post(self, name): |
Predict the next line after this snippet: <|code_start|>
class ModifyView(MethodView):
def error(self, item, error):
return render_template('error.html', heading=item.meta[FILENAME], body=error), 409
def response(self, name):
return redirect_next_referrer('bepasty.display', name=name)
def get_params(self):
return {
FILENAME: request.form.get('filename'),
TYPE: request.form.get('contenttype'),
}
def post(self, name):
if not may(CREATE):
raise Forbidden()
try:
with current_app.storage.openwrite(name) as item:
if not item.meta[COMPLETE] and not may(ADMIN):
<|code_end|>
using the current file's imports:
import errno
from flask import current_app, request, render_template
from flask.views import MethodView
from werkzeug.exceptions import Forbidden, NotFound
from ..constants import COMPLETE, FILENAME, LOCKED, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.http import redirect_next_referrer
from ..utils.permissions import ADMIN, CREATE, may
from ..utils.upload import Upload
and any relevant context from other files:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/http.py
# def redirect_next_referrer(endpoint, **values):
# return redirect(_redirect_target_url(request.form, True, endpoint, **values))
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# CREATE = 'create'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
#
# Path: src/bepasty/utils/upload.py
# class Upload:
# _filename_re = re.compile(r'[^a-zA-Z0-9 *+:;.,_-]+')
# _type_re = re.compile(r'[^a-zA-Z0-9/+.-]+')
#
# @classmethod
# def filter_size(cls, i):
# """
# Filter size.
# Check for advertised size.
# """
# try:
# i = int(i)
# except (ValueError, TypeError):
# raise BadRequest(description='Size is invalid')
# if i > current_app.config['MAX_ALLOWED_FILE_SIZE']:
# raise RequestEntityTooLarge()
# return i
#
# @classmethod
# def filter_filename(cls, filename, storage_name, content_type, content_type_hint):
# """
# Filter filename.
# Only allow some basic characters and shorten to 50 characters.
# """
# # Make up filename if we don't have one
# if not filename:
# if not content_type:
# content_type = content_type_hint
# # note: stdlib mimetypes.guess_extension is total crap
# if content_type.startswith("text/"):
# ext = ".txt"
# else:
# ext = ".bin"
# filename = storage_name + ext
# return cls._filename_re.sub('', filename)[:MAX_FILENAME_LENGTH]
#
# @classmethod
# def filter_type(cls, ct, ct_hint, filename=None):
# """
# Filter Content-Type
# Only allow some basic characters and shorten to 50 characters.
#
# Return value:
# tuple[0] - content-type string
# tuple[1] - whether tuple[0] is hint or not
# True: content-type is just a hint
# False: content-type is not a hint, was specified by user
# """
# if not ct and filename:
# ct, encoding = mimetypes.guess_type(filename)
# if not ct:
# return ct_hint, True
# return cls._type_re.sub('', ct)[:50], False
#
# @classmethod
# def meta_new(cls, item, input_size, input_filename, input_type,
# input_type_hint, storage_name, maxlife_stamp=FOREVER):
# item.meta[FILENAME] = cls.filter_filename(
# input_filename, storage_name, input_type, input_type_hint
# )
# item.meta[SIZE] = cls.filter_size(input_size)
# ct, hint = cls.filter_type(input_type, input_type_hint, input_filename)
# item.meta[TYPE] = ct
# item.meta[TYPE_HINT] = hint
# item.meta[TIMESTAMP_UPLOAD] = int(time.time())
# item.meta[TIMESTAMP_DOWNLOAD] = 0
# item.meta[LOCKED] = current_app.config['UPLOAD_LOCKED']
# item.meta[COMPLETE] = False
# item.meta[HASH] = ''
# item.meta[TIMESTAMP_MAX_LIFE] = maxlife_stamp
#
# @classmethod
# def meta_complete(cls, item, file_hash):
# # update TYPE by python-magic if not decided yet
# if item.meta.pop(TYPE_HINT, False):
# if magic and current_app.config.get('USE_PYTHON_MAGIC', False):
# if item.meta[TYPE] == 'application/octet-stream':
# item.meta[TYPE] = magic.from_buffer(item.data.read(magic_bufsz, 0))
# item.meta[COMPLETE] = True
# item.meta[HASH] = file_hash
#
# @staticmethod
# def data(item, f, size_input, offset=0):
# """
# Copy data from temp file into storage.
# """
# read_length = 16 * 1024
# size_written = 0
# hasher = hash_new()
#
# while True:
# read_length = min(read_length, size_input)
# if size_input == 0:
# break
#
# buf = f.read(read_length)
# if not buf:
# # Should not happen, we already checked the size
# raise RuntimeError
#
# item.data.write(buf, offset + size_written)
# hasher.update(buf)
#
# len_buf = len(buf)
# size_written += len_buf
# size_input -= len_buf
#
# return size_written, hasher.hexdigest()
. Output only the next line. | error = 'Upload incomplete. Try again later.' |
Continue the code snippet: <|code_start|>
class ModifyView(MethodView):
def error(self, item, error):
return render_template('error.html', heading=item.meta[FILENAME], body=error), 409
def response(self, name):
return redirect_next_referrer('bepasty.display', name=name)
def get_params(self):
return {
FILENAME: request.form.get('filename'),
TYPE: request.form.get('contenttype'),
<|code_end|>
. Use current file imports:
import errno
from flask import current_app, request, render_template
from flask.views import MethodView
from werkzeug.exceptions import Forbidden, NotFound
from ..constants import COMPLETE, FILENAME, LOCKED, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.http import redirect_next_referrer
from ..utils.permissions import ADMIN, CREATE, may
from ..utils.upload import Upload
and context (classes, functions, or code) from other files:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/http.py
# def redirect_next_referrer(endpoint, **values):
# return redirect(_redirect_target_url(request.form, True, endpoint, **values))
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# CREATE = 'create'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
#
# Path: src/bepasty/utils/upload.py
# class Upload:
# _filename_re = re.compile(r'[^a-zA-Z0-9 *+:;.,_-]+')
# _type_re = re.compile(r'[^a-zA-Z0-9/+.-]+')
#
# @classmethod
# def filter_size(cls, i):
# """
# Filter size.
# Check for advertised size.
# """
# try:
# i = int(i)
# except (ValueError, TypeError):
# raise BadRequest(description='Size is invalid')
# if i > current_app.config['MAX_ALLOWED_FILE_SIZE']:
# raise RequestEntityTooLarge()
# return i
#
# @classmethod
# def filter_filename(cls, filename, storage_name, content_type, content_type_hint):
# """
# Filter filename.
# Only allow some basic characters and shorten to 50 characters.
# """
# # Make up filename if we don't have one
# if not filename:
# if not content_type:
# content_type = content_type_hint
# # note: stdlib mimetypes.guess_extension is total crap
# if content_type.startswith("text/"):
# ext = ".txt"
# else:
# ext = ".bin"
# filename = storage_name + ext
# return cls._filename_re.sub('', filename)[:MAX_FILENAME_LENGTH]
#
# @classmethod
# def filter_type(cls, ct, ct_hint, filename=None):
# """
# Filter Content-Type
# Only allow some basic characters and shorten to 50 characters.
#
# Return value:
# tuple[0] - content-type string
# tuple[1] - whether tuple[0] is hint or not
# True: content-type is just a hint
# False: content-type is not a hint, was specified by user
# """
# if not ct and filename:
# ct, encoding = mimetypes.guess_type(filename)
# if not ct:
# return ct_hint, True
# return cls._type_re.sub('', ct)[:50], False
#
# @classmethod
# def meta_new(cls, item, input_size, input_filename, input_type,
# input_type_hint, storage_name, maxlife_stamp=FOREVER):
# item.meta[FILENAME] = cls.filter_filename(
# input_filename, storage_name, input_type, input_type_hint
# )
# item.meta[SIZE] = cls.filter_size(input_size)
# ct, hint = cls.filter_type(input_type, input_type_hint, input_filename)
# item.meta[TYPE] = ct
# item.meta[TYPE_HINT] = hint
# item.meta[TIMESTAMP_UPLOAD] = int(time.time())
# item.meta[TIMESTAMP_DOWNLOAD] = 0
# item.meta[LOCKED] = current_app.config['UPLOAD_LOCKED']
# item.meta[COMPLETE] = False
# item.meta[HASH] = ''
# item.meta[TIMESTAMP_MAX_LIFE] = maxlife_stamp
#
# @classmethod
# def meta_complete(cls, item, file_hash):
# # update TYPE by python-magic if not decided yet
# if item.meta.pop(TYPE_HINT, False):
# if magic and current_app.config.get('USE_PYTHON_MAGIC', False):
# if item.meta[TYPE] == 'application/octet-stream':
# item.meta[TYPE] = magic.from_buffer(item.data.read(magic_bufsz, 0))
# item.meta[COMPLETE] = True
# item.meta[HASH] = file_hash
#
# @staticmethod
# def data(item, f, size_input, offset=0):
# """
# Copy data from temp file into storage.
# """
# read_length = 16 * 1024
# size_written = 0
# hasher = hash_new()
#
# while True:
# read_length = min(read_length, size_input)
# if size_input == 0:
# break
#
# buf = f.read(read_length)
# if not buf:
# # Should not happen, we already checked the size
# raise RuntimeError
#
# item.data.write(buf, offset + size_written)
# hasher.update(buf)
#
# len_buf = len(buf)
# size_written += len_buf
# size_input -= len_buf
#
# return size_written, hasher.hexdigest()
. Output only the next line. | } |
Predict the next line after this snippet: <|code_start|>
class ModifyView(MethodView):
def error(self, item, error):
return render_template('error.html', heading=item.meta[FILENAME], body=error), 409
def response(self, name):
return redirect_next_referrer('bepasty.display', name=name)
def get_params(self):
return {
FILENAME: request.form.get('filename'),
TYPE: request.form.get('contenttype'),
}
def post(self, name):
if not may(CREATE):
raise Forbidden()
try:
<|code_end|>
using the current file's imports:
import errno
from flask import current_app, request, render_template
from flask.views import MethodView
from werkzeug.exceptions import Forbidden, NotFound
from ..constants import COMPLETE, FILENAME, LOCKED, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.http import redirect_next_referrer
from ..utils.permissions import ADMIN, CREATE, may
from ..utils.upload import Upload
and any relevant context from other files:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/http.py
# def redirect_next_referrer(endpoint, **values):
# return redirect(_redirect_target_url(request.form, True, endpoint, **values))
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# CREATE = 'create'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
#
# Path: src/bepasty/utils/upload.py
# class Upload:
# _filename_re = re.compile(r'[^a-zA-Z0-9 *+:;.,_-]+')
# _type_re = re.compile(r'[^a-zA-Z0-9/+.-]+')
#
# @classmethod
# def filter_size(cls, i):
# """
# Filter size.
# Check for advertised size.
# """
# try:
# i = int(i)
# except (ValueError, TypeError):
# raise BadRequest(description='Size is invalid')
# if i > current_app.config['MAX_ALLOWED_FILE_SIZE']:
# raise RequestEntityTooLarge()
# return i
#
# @classmethod
# def filter_filename(cls, filename, storage_name, content_type, content_type_hint):
# """
# Filter filename.
# Only allow some basic characters and shorten to 50 characters.
# """
# # Make up filename if we don't have one
# if not filename:
# if not content_type:
# content_type = content_type_hint
# # note: stdlib mimetypes.guess_extension is total crap
# if content_type.startswith("text/"):
# ext = ".txt"
# else:
# ext = ".bin"
# filename = storage_name + ext
# return cls._filename_re.sub('', filename)[:MAX_FILENAME_LENGTH]
#
# @classmethod
# def filter_type(cls, ct, ct_hint, filename=None):
# """
# Filter Content-Type
# Only allow some basic characters and shorten to 50 characters.
#
# Return value:
# tuple[0] - content-type string
# tuple[1] - whether tuple[0] is hint or not
# True: content-type is just a hint
# False: content-type is not a hint, was specified by user
# """
# if not ct and filename:
# ct, encoding = mimetypes.guess_type(filename)
# if not ct:
# return ct_hint, True
# return cls._type_re.sub('', ct)[:50], False
#
# @classmethod
# def meta_new(cls, item, input_size, input_filename, input_type,
# input_type_hint, storage_name, maxlife_stamp=FOREVER):
# item.meta[FILENAME] = cls.filter_filename(
# input_filename, storage_name, input_type, input_type_hint
# )
# item.meta[SIZE] = cls.filter_size(input_size)
# ct, hint = cls.filter_type(input_type, input_type_hint, input_filename)
# item.meta[TYPE] = ct
# item.meta[TYPE_HINT] = hint
# item.meta[TIMESTAMP_UPLOAD] = int(time.time())
# item.meta[TIMESTAMP_DOWNLOAD] = 0
# item.meta[LOCKED] = current_app.config['UPLOAD_LOCKED']
# item.meta[COMPLETE] = False
# item.meta[HASH] = ''
# item.meta[TIMESTAMP_MAX_LIFE] = maxlife_stamp
#
# @classmethod
# def meta_complete(cls, item, file_hash):
# # update TYPE by python-magic if not decided yet
# if item.meta.pop(TYPE_HINT, False):
# if magic and current_app.config.get('USE_PYTHON_MAGIC', False):
# if item.meta[TYPE] == 'application/octet-stream':
# item.meta[TYPE] = magic.from_buffer(item.data.read(magic_bufsz, 0))
# item.meta[COMPLETE] = True
# item.meta[HASH] = file_hash
#
# @staticmethod
# def data(item, f, size_input, offset=0):
# """
# Copy data from temp file into storage.
# """
# read_length = 16 * 1024
# size_written = 0
# hasher = hash_new()
#
# while True:
# read_length = min(read_length, size_input)
# if size_input == 0:
# break
#
# buf = f.read(read_length)
# if not buf:
# # Should not happen, we already checked the size
# raise RuntimeError
#
# item.data.write(buf, offset + size_written)
# hasher.update(buf)
#
# len_buf = len(buf)
# size_written += len_buf
# size_input -= len_buf
#
# return size_written, hasher.hexdigest()
. Output only the next line. | with current_app.storage.openwrite(name) as item: |
Continue the code snippet: <|code_start|>
class ModifyView(MethodView):
def error(self, item, error):
return render_template('error.html', heading=item.meta[FILENAME], body=error), 409
def response(self, name):
return redirect_next_referrer('bepasty.display', name=name)
def get_params(self):
return {
FILENAME: request.form.get('filename'),
TYPE: request.form.get('contenttype'),
}
<|code_end|>
. Use current file imports:
import errno
from flask import current_app, request, render_template
from flask.views import MethodView
from werkzeug.exceptions import Forbidden, NotFound
from ..constants import COMPLETE, FILENAME, LOCKED, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.http import redirect_next_referrer
from ..utils.permissions import ADMIN, CREATE, may
from ..utils.upload import Upload
and context (classes, functions, or code) from other files:
# Path: src/bepasty/constants.py
# COMPLETE = 'complete'
#
# FILENAME = 'filename'
#
# LOCKED = 'locked'
#
# TYPE = 'type'
#
# Path: src/bepasty/utils/date_funcs.py
# def delete_if_lifetime_over(item, name):
# """
# :return: True if file was deleted
# """
# if 0 < item.meta[TIMESTAMP_MAX_LIFE] < time.time():
# try:
# current_app.storage.remove(name)
# except OSError:
# pass
# return True
# return False
#
# Path: src/bepasty/utils/http.py
# def redirect_next_referrer(endpoint, **values):
# return redirect(_redirect_target_url(request.form, True, endpoint, **values))
#
# Path: src/bepasty/utils/permissions.py
# ADMIN = 'admin'
#
# CREATE = 'create'
#
# def may(permission):
# """
# check whether the current user has the permission <permission>
# """
# return permission in flaskg.permissions
#
# Path: src/bepasty/utils/upload.py
# class Upload:
# _filename_re = re.compile(r'[^a-zA-Z0-9 *+:;.,_-]+')
# _type_re = re.compile(r'[^a-zA-Z0-9/+.-]+')
#
# @classmethod
# def filter_size(cls, i):
# """
# Filter size.
# Check for advertised size.
# """
# try:
# i = int(i)
# except (ValueError, TypeError):
# raise BadRequest(description='Size is invalid')
# if i > current_app.config['MAX_ALLOWED_FILE_SIZE']:
# raise RequestEntityTooLarge()
# return i
#
# @classmethod
# def filter_filename(cls, filename, storage_name, content_type, content_type_hint):
# """
# Filter filename.
# Only allow some basic characters and shorten to 50 characters.
# """
# # Make up filename if we don't have one
# if not filename:
# if not content_type:
# content_type = content_type_hint
# # note: stdlib mimetypes.guess_extension is total crap
# if content_type.startswith("text/"):
# ext = ".txt"
# else:
# ext = ".bin"
# filename = storage_name + ext
# return cls._filename_re.sub('', filename)[:MAX_FILENAME_LENGTH]
#
# @classmethod
# def filter_type(cls, ct, ct_hint, filename=None):
# """
# Filter Content-Type
# Only allow some basic characters and shorten to 50 characters.
#
# Return value:
# tuple[0] - content-type string
# tuple[1] - whether tuple[0] is hint or not
# True: content-type is just a hint
# False: content-type is not a hint, was specified by user
# """
# if not ct and filename:
# ct, encoding = mimetypes.guess_type(filename)
# if not ct:
# return ct_hint, True
# return cls._type_re.sub('', ct)[:50], False
#
# @classmethod
# def meta_new(cls, item, input_size, input_filename, input_type,
# input_type_hint, storage_name, maxlife_stamp=FOREVER):
# item.meta[FILENAME] = cls.filter_filename(
# input_filename, storage_name, input_type, input_type_hint
# )
# item.meta[SIZE] = cls.filter_size(input_size)
# ct, hint = cls.filter_type(input_type, input_type_hint, input_filename)
# item.meta[TYPE] = ct
# item.meta[TYPE_HINT] = hint
# item.meta[TIMESTAMP_UPLOAD] = int(time.time())
# item.meta[TIMESTAMP_DOWNLOAD] = 0
# item.meta[LOCKED] = current_app.config['UPLOAD_LOCKED']
# item.meta[COMPLETE] = False
# item.meta[HASH] = ''
# item.meta[TIMESTAMP_MAX_LIFE] = maxlife_stamp
#
# @classmethod
# def meta_complete(cls, item, file_hash):
# # update TYPE by python-magic if not decided yet
# if item.meta.pop(TYPE_HINT, False):
# if magic and current_app.config.get('USE_PYTHON_MAGIC', False):
# if item.meta[TYPE] == 'application/octet-stream':
# item.meta[TYPE] = magic.from_buffer(item.data.read(magic_bufsz, 0))
# item.meta[COMPLETE] = True
# item.meta[HASH] = file_hash
#
# @staticmethod
# def data(item, f, size_input, offset=0):
# """
# Copy data from temp file into storage.
# """
# read_length = 16 * 1024
# size_written = 0
# hasher = hash_new()
#
# while True:
# read_length = min(read_length, size_input)
# if size_input == 0:
# break
#
# buf = f.read(read_length)
# if not buf:
# # Should not happen, we already checked the size
# raise RuntimeError
#
# item.data.write(buf, offset + size_written)
# hasher.update(buf)
#
# len_buf = len(buf)
# size_written += len_buf
# size_input -= len_buf
#
# return size_written, hasher.hexdigest()
. Output only the next line. | def post(self, name): |
Next line prediction: <|code_start|>
def get_maxlife(data, underscore):
unit_key = 'maxlife_unit' if underscore else 'maxlife-unit'
unit_default = 'MONTHS'
unit = data.get(unit_key, unit_default).upper()
value_key = 'maxlife_value' if underscore else 'maxlife-value'
value_default = '1'
try:
value = int(data.get(value_key, value_default))
except (ValueError, TypeError):
raise BadRequest(description=f'{value_key} header is incorrect')
<|code_end|>
. Use current file imports:
(import time
from flask import current_app
from werkzeug.exceptions import BadRequest
from ..constants import FOREVER, TIMESTAMP_MAX_LIFE)
and context including class names, function names, or small code snippets from other files:
# Path: src/bepasty/constants.py
# FOREVER = -1
#
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
. Output only the next line. | try: |
Predict the next line after this snippet: <|code_start|>
def get_maxlife(data, underscore):
unit_key = 'maxlife_unit' if underscore else 'maxlife-unit'
unit_default = 'MONTHS'
<|code_end|>
using the current file's imports:
import time
from flask import current_app
from werkzeug.exceptions import BadRequest
from ..constants import FOREVER, TIMESTAMP_MAX_LIFE
and any relevant context from other files:
# Path: src/bepasty/constants.py
# FOREVER = -1
#
# TIMESTAMP_MAX_LIFE = 'timestamp-max-life'
. Output only the next line. | unit = data.get(unit_key, unit_default).upper() |
Given snippet: <|code_start|>
sensor_to_node = Table(
'sensor__sensor_to_node',
postgres_base.metadata,
Column('sensor', String, ForeignKey('sensor__sensor_metadata.name')),
Column('network', String),
Column('node', String),
ForeignKeyConstraint(
['network', 'node'],
['sensor__node_metadata.sensor_network', 'sensor__node_metadata.id']
)
)
feature_to_network = Table(
'sensor__feature_to_network',
postgres_base.metadata,
Column('feature', String, ForeignKey('sensor__feature_metadata.name')),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json
from geoalchemy2 import Geometry
from sqlalchemy import BigInteger, Boolean, Column, DateTime, Float, ForeignKey, ForeignKeyConstraint, String, Table, \
func as sqla_fn
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, JSONB
from sqlalchemy.orm import relationship
from plenario.database import postgres_base, postgres_engine, postgres_session, redshift_base
and context:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
which might include code, classes, or functions. Output only the next line. | Column('network', String, ForeignKey('sensor__network_metadata.name')) |
Continue the code snippet: <|code_start|>feature_to_network = Table(
'sensor__feature_to_network',
postgres_base.metadata,
Column('feature', String, ForeignKey('sensor__feature_metadata.name')),
Column('network', String, ForeignKey('sensor__network_metadata.name'))
)
def knn(lng, lat, k, network, sensors):
"""Execute a spatial query to select k nearest neighbors given some point.
:param lng: (float) longitude
:param lat: (float) latitude
:param k: (int) number of results to return
:returns: (list) of nearest k neighbors
"""
# Convert lng-lat to geojson point
point = "'" + json.dumps({
'type': 'Point',
'coordinates': [lng, lat]
}) + "'"
# How many to limit the initial bounding box query to
k_10 = k * 10
# Based off snippet provided on pg 253 of PostGIS In Action (2nd Edition)
query = """
WITH bbox_results AS (
SELECT
node,
<|code_end|>
. Use current file imports:
import json
from geoalchemy2 import Geometry
from sqlalchemy import BigInteger, Boolean, Column, DateTime, Float, ForeignKey, ForeignKeyConstraint, String, Table, \
func as sqla_fn
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, JSONB
from sqlalchemy.orm import relationship
from plenario.database import postgres_base, postgres_engine, postgres_session, redshift_base
and context (classes, functions, or code) from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
. Output only the next line. | location, |
Continue the code snippet: <|code_start|>
sensor_to_node = Table(
'sensor__sensor_to_node',
postgres_base.metadata,
Column('sensor', String, ForeignKey('sensor__sensor_metadata.name')),
Column('network', String),
Column('node', String),
ForeignKeyConstraint(
<|code_end|>
. Use current file imports:
import json
from geoalchemy2 import Geometry
from sqlalchemy import BigInteger, Boolean, Column, DateTime, Float, ForeignKey, ForeignKeyConstraint, String, Table, \
func as sqla_fn
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, JSONB
from sqlalchemy.orm import relationship
from plenario.database import postgres_base, postgres_engine, postgres_session, redshift_base
and context (classes, functions, or code) from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
. Output only the next line. | ['network', 'node'], |
Given the following code snippet before the placeholder: <|code_start|>
sensor_to_node = Table(
'sensor__sensor_to_node',
postgres_base.metadata,
Column('sensor', String, ForeignKey('sensor__sensor_metadata.name')),
Column('network', String),
Column('node', String),
ForeignKeyConstraint(
['network', 'node'],
['sensor__node_metadata.sensor_network', 'sensor__node_metadata.id']
)
)
<|code_end|>
, predict the next line using imports from the current file:
import json
from geoalchemy2 import Geometry
from sqlalchemy import BigInteger, Boolean, Column, DateTime, Float, ForeignKey, ForeignKeyConstraint, String, Table, \
func as sqla_fn
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, JSONB
from sqlalchemy.orm import relationship
from plenario.database import postgres_base, postgres_engine, postgres_session, redshift_base
and context including class names, function names, and sometimes code from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
. Output only the next line. | feature_to_network = Table( |
Using the snippet: <|code_start|># Returns 10 rows.
FLU_FILTER_SIMPLE2 = '{"op": "eq", "col": "day", "val": "Wednesday"}'
# Returns 1 row.
FLU_FILTER_COMPOUND_AND = FLU_BASE + '{"op": "and", "val": [' + \
FLU_FILTER_SIMPLE + ', ' + \
FLU_FILTER_SIMPLE2 + ']}'
# Returns 13 rows.
FLU_FILTER_COMPOUND_OR = FLU_BASE + '{"op": "or", "val": [' + \
FLU_FILTER_SIMPLE + ', ' + \
FLU_FILTER_SIMPLE2 + ']}'
# Returns 4 rows.
FLU_FILTER_NESTED = '{"op": "and", "val": [' \
' {"op": "ge", "col": "date", "val": "2013-11-01"},' \
' {"op": "or", "val": [' + \
FLU_FILTER_SIMPLE + ', ' + \
FLU_FILTER_SIMPLE2 + \
' ]' \
' }' \
']}'
def get_escaped_geojson(fname):
pwd = os.path.dirname(os.path.realpath(__file__))
rect_path = os.path.join(pwd, '../fixtures', fname)
with open(rect_path, 'r') as rect_json:
query_rect = rect_json.read()
escaped_query_rect = urllib.parse.quote(query_rect)
return escaped_query_rect
<|code_end|>
, determine the next line of code. You have imports:
import json
import os
import urllib.request, urllib.parse, urllib.error
import csv
from io import StringIO
from tests.fixtures.base_test import BasePlenarioTest, fixtures_path
and context (class names, function names, or code) available:
# Path: tests/fixtures/base_test.py
# FIXTURE_PATH = pwd
# def ingest_point_fixture(fixture_meta, fname):
# def drop_tables(table_names):
# def __init__(self, human_name, file_name):
# def setUpClass(cls, shutdown=False):
# def ingest_shapes(cls):
# def ingest_points(cls):
# def ingest_fixture(fixture):
# def tearDownClass(cls):
# class ShapeFixture(object):
# class BasePlenarioTest(unittest.TestCase):
. Output only the next line. | def get_loop_rect(): |
Given the code snippet: <|code_start|>
class ShapeETL:
def __init__(self, meta, source_path=None):
self.source_path = source_path
self.table_name = meta.dataset_name
self.source_url = meta.source_url
self.meta = meta
def add(self):
staging_name = 'staging_{}'.format(self.table_name)
with ETLFile(self.source_path, self.source_url, interpret_as='bytes') as file_helper:
handle = open(file_helper.handle.name, "rb")
<|code_end|>
, generate the next line using the imports in this file:
import zipfile
from sqlalchemy.exc import ProgrammingError
from plenario.database import postgres_engine, postgres_session
from plenario.etl.common import ETLFile, add_unique_hash
from plenario.utils.shapefile import import_shapefile
and context (functions, classes, or occasionally code) from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/etl/common.py
# class ETLFile(object):
# """
# Encapsulates whether a file has been downloaded temporarily
# or is coming from the local file system.
# If initialized with source_path, it opens file on local filesystem.
# If initialized with source_url, it attempts to download file.
#
# Implements context manager interface with __enter__ and __exit__.
# """
# def __init__(self, source_path=None, source_url=None, interpret_as='text'):
#
# logger.info('Begin.')
# logger.info('source_path: {}'.format(source_path))
# logger.info('source_url: {}'.format(source_url))
# logger.info('interpret_as: {}'.format(interpret_as))
# if source_path and source_url:
# raise RuntimeError('ETLFile takes exactly one of source_path and source_url. Both were given.')
#
# if not source_path and not source_url:
# raise RuntimeError('ETLFile takes exactly one of source_path and source_url. Neither were given.')
#
# self.interpret_as = interpret_as
# self.source_path = source_path
# self.source_url = source_url
# self.is_local = bool(source_path)
# self._handle = None
# logger.info('End')
#
# def __enter__(self):
# """
# Assigns an open file object to self.file_handle
# """
# logger.info('Begin.')
# if self.is_local:
# logger.debug('self.is_local: True')
# file_type = 'rb' if self.interpret_as == 'bytes' else 'r'
# self.handle = open(self.source_path, file_type)
# else:
# logger.debug('self.is_local: False')
# self._download_temp_file(self.source_url)
#
# # Return the whole ETLFile so that the `with foo as bar:` syntax looks right.
# return self
#
# # Users of the class were seeking to 0 all the time after they grabbed the handle.
# # Moved it here so clients are always pointed to 0 when they get handle
# @property
# def handle(self):
# self._handle.seek(0)
# return self._handle
#
# @handle.setter
# def handle(self, val):
# self._handle = val
#
# def __exit__(self, exc_type, exc_val, exc_tb):
# # If self.handle is to a file that was already on the file system,
# # .close() acts as we expect.
# # If self.handle is to a TemporaryFile that we downloaded for this purpose,
# # .close() also deletes it from the filesystem.
# self.handle.close()
#
# def _download_temp_file(self, url):
# """
# Download file to local data directory.
# :param url: url from where file should be downloaded
# :type url: str
# :raises: IOError
# """
#
# logger.info('Begin. (url: {})'.format(url))
# # The file might be big, so stream it in chunks.
# # I'd like to enforce a timeout, but some big datasets
# # take more than a minute to start streaming.
# # Maybe add timeout as a parameter.
# file_stream_request = requests.get(url, stream=True)
# # Raise an exception if we didn't get a 200
# file_stream_request.raise_for_status()
#
# # Make this temporary file our file handle
# self.handle = tempfile.NamedTemporaryFile()
#
# # Download and write to disk in 1MB chunks.
# for chunk in file_stream_request.iter_content(chunk_size=1024*1024):
# if chunk:
# self._handle.write(chunk)
# self._handle.flush()
# logger.info('End.')
#
# def add_unique_hash(table_name):
# """
# Adds an md5 hash column of the preexisting columns
# and removes duplicate rows from a table.
# :param table_name: Name of table to add hash to.
# """
#
# logger.info('Begin (table_name: {})'.format(table_name))
# add_hash = '''
# DROP TABLE IF EXISTS temp;
# CREATE TABLE temp AS
# SELECT DISTINCT *,
# md5(CAST(("{table_name}".*)AS text))
# AS hash FROM "{table_name}";
# DROP TABLE "{table_name}";
# ALTER TABLE temp RENAME TO "{table_name}";
# ALTER TABLE "{table_name}" ADD PRIMARY KEY (hash);
# '''.format(table_name=table_name)
#
# try:
# postgres_engine.execute(add_hash)
# except Exception as e:
# raise PlenarioETLError(repr(e) +
# '\n Failed to deduplicate with ' + add_hash)
# logger.info('End.')
#
# Path: plenario/utils/shapefile.py
# def import_shapefile(shapefile_zip, table_name):
# """Given a zipped shapefile, try to insert it into the database.
#
# :param shapefile_zip: The zipped shapefile.
# :type shapefile_zip: A Python zipfile.ZipFile object
# """
# try:
# with Shapefile(shapefile_zip) as shape:
# shape.insert_in_database(table_name)
# except ShapefileError as e:
# raise e
# except Exception as e:
# raise ShapefileError("Shapefile import failed.\n{}".format(repr(e)))
. Output only the next line. | with zipfile.ZipFile(handle) as shapefile_zip: |
Next line prediction: <|code_start|>
class ShapeETL:
def __init__(self, meta, source_path=None):
self.source_path = source_path
self.table_name = meta.dataset_name
self.source_url = meta.source_url
self.meta = meta
def add(self):
staging_name = 'staging_{}'.format(self.table_name)
with ETLFile(self.source_path, self.source_url, interpret_as='bytes') as file_helper:
handle = open(file_helper.handle.name, "rb")
with zipfile.ZipFile(handle) as shapefile_zip:
import_shapefile(shapefile_zip, staging_name)
add_unique_hash(staging_name)
<|code_end|>
. Use current file imports:
(import zipfile
from sqlalchemy.exc import ProgrammingError
from plenario.database import postgres_engine, postgres_session
from plenario.etl.common import ETLFile, add_unique_hash
from plenario.utils.shapefile import import_shapefile)
and context including class names, function names, or small code snippets from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/etl/common.py
# class ETLFile(object):
# """
# Encapsulates whether a file has been downloaded temporarily
# or is coming from the local file system.
# If initialized with source_path, it opens file on local filesystem.
# If initialized with source_url, it attempts to download file.
#
# Implements context manager interface with __enter__ and __exit__.
# """
# def __init__(self, source_path=None, source_url=None, interpret_as='text'):
#
# logger.info('Begin.')
# logger.info('source_path: {}'.format(source_path))
# logger.info('source_url: {}'.format(source_url))
# logger.info('interpret_as: {}'.format(interpret_as))
# if source_path and source_url:
# raise RuntimeError('ETLFile takes exactly one of source_path and source_url. Both were given.')
#
# if not source_path and not source_url:
# raise RuntimeError('ETLFile takes exactly one of source_path and source_url. Neither were given.')
#
# self.interpret_as = interpret_as
# self.source_path = source_path
# self.source_url = source_url
# self.is_local = bool(source_path)
# self._handle = None
# logger.info('End')
#
# def __enter__(self):
# """
# Assigns an open file object to self.file_handle
# """
# logger.info('Begin.')
# if self.is_local:
# logger.debug('self.is_local: True')
# file_type = 'rb' if self.interpret_as == 'bytes' else 'r'
# self.handle = open(self.source_path, file_type)
# else:
# logger.debug('self.is_local: False')
# self._download_temp_file(self.source_url)
#
# # Return the whole ETLFile so that the `with foo as bar:` syntax looks right.
# return self
#
# # Users of the class were seeking to 0 all the time after they grabbed the handle.
# # Moved it here so clients are always pointed to 0 when they get handle
# @property
# def handle(self):
# self._handle.seek(0)
# return self._handle
#
# @handle.setter
# def handle(self, val):
# self._handle = val
#
# def __exit__(self, exc_type, exc_val, exc_tb):
# # If self.handle is to a file that was already on the file system,
# # .close() acts as we expect.
# # If self.handle is to a TemporaryFile that we downloaded for this purpose,
# # .close() also deletes it from the filesystem.
# self.handle.close()
#
# def _download_temp_file(self, url):
# """
# Download file to local data directory.
# :param url: url from where file should be downloaded
# :type url: str
# :raises: IOError
# """
#
# logger.info('Begin. (url: {})'.format(url))
# # The file might be big, so stream it in chunks.
# # I'd like to enforce a timeout, but some big datasets
# # take more than a minute to start streaming.
# # Maybe add timeout as a parameter.
# file_stream_request = requests.get(url, stream=True)
# # Raise an exception if we didn't get a 200
# file_stream_request.raise_for_status()
#
# # Make this temporary file our file handle
# self.handle = tempfile.NamedTemporaryFile()
#
# # Download and write to disk in 1MB chunks.
# for chunk in file_stream_request.iter_content(chunk_size=1024*1024):
# if chunk:
# self._handle.write(chunk)
# self._handle.flush()
# logger.info('End.')
#
# def add_unique_hash(table_name):
# """
# Adds an md5 hash column of the preexisting columns
# and removes duplicate rows from a table.
# :param table_name: Name of table to add hash to.
# """
#
# logger.info('Begin (table_name: {})'.format(table_name))
# add_hash = '''
# DROP TABLE IF EXISTS temp;
# CREATE TABLE temp AS
# SELECT DISTINCT *,
# md5(CAST(("{table_name}".*)AS text))
# AS hash FROM "{table_name}";
# DROP TABLE "{table_name}";
# ALTER TABLE temp RENAME TO "{table_name}";
# ALTER TABLE "{table_name}" ADD PRIMARY KEY (hash);
# '''.format(table_name=table_name)
#
# try:
# postgres_engine.execute(add_hash)
# except Exception as e:
# raise PlenarioETLError(repr(e) +
# '\n Failed to deduplicate with ' + add_hash)
# logger.info('End.')
#
# Path: plenario/utils/shapefile.py
# def import_shapefile(shapefile_zip, table_name):
# """Given a zipped shapefile, try to insert it into the database.
#
# :param shapefile_zip: The zipped shapefile.
# :type shapefile_zip: A Python zipfile.ZipFile object
# """
# try:
# with Shapefile(shapefile_zip) as shape:
# shape.insert_in_database(table_name)
# except ShapefileError as e:
# raise e
# except Exception as e:
# raise ShapefileError("Shapefile import failed.\n{}".format(repr(e)))
. Output only the next line. | try: |
Given the code snippet: <|code_start|>
def get_size_in_degrees(meters, latitude):
earth_circumference = 40041000.0 # meters, average circumference
degrees_per_meter = 360.0 / earth_circumference
degrees_at_equator = meters * degrees_per_meter
latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
degrees_x = degrees_at_equator * latitude_correction
<|code_end|>
, generate the next line using the imports in this file:
import csv
import math
import boto3
from collections import namedtuple
from slugify import slugify as _slugify
from sqlalchemy import Table
from plenario.settings import ADMIN_EMAILS, AWS_ACCESS_KEY, AWS_REGION_NAME, AWS_SECRET_KEY, MAIL_USERNAME
from plenario.utils.typeinference import normalize_column_type
and context (functions, classes, or occasionally code) from other files:
# Path: plenario/settings.py
# ADMIN_EMAILS = _admin_emails.split(',')
#
# AWS_ACCESS_KEY = get('AWS_ACCESS_KEY', '')
#
# AWS_REGION_NAME = get('AWS_REGION_NAME', 'us-east-1')
#
# AWS_SECRET_KEY = get('AWS_SECRET_KEY', '')
#
# MAIL_USERNAME = get('MAIL_USERNAME', '')
#
# Path: plenario/utils/typeinference.py
# def normalize_column_type(l):
# """Given a sequence of values in a column (l),
# guess its type.
#
# :param l: A column
# :return: (col_type, null_values)
# where col_type is a SQLAlchemy TypeEngine
# and null_values is a boolean
# representing whether nulls of any kind were detected.
# """
# null_values = False
#
# # Convert "NA", "N/A", etc. to null types.
# for i, x in enumerate(l):
# if x is not None and x.lower() in NULL_VALUES:
# l[i] = None
# null_values = True
#
# # Are they boolean?
# try:
# for i, x in enumerate(l):
# if x == '' or x is None:
# raise ValueError('Not boolean')
# elif x.lower() in TRUE_VALUES:
# continue
# elif x.lower() in FALSE_VALUES:
# continue
# else:
# raise ValueError('Not boolean')
#
# return Boolean, null_values
# except ValueError:
# pass
#
# # Are they integers?
# try:
# normal_types_set = set()
# add = normal_types_set.add
# for i, x in enumerate(l):
# if x == '' or x is None:
# continue
#
# int_x = int(x.replace(',', ''))
#
# if x[0] == '0' and int(x) != 0:
# raise TypeError('Integer is padded with 0s, so treat it as a string instead.')
# if x.isspace():
# raise TypeError('Integer is nothing but spaces so falling back to string')
#
# if 9000000000000000000 > int_x > 1000000000:
# add(BigInteger)
# elif 1000000000 > int_x:
# add(Integer)
# else:
# raise ValueError
#
# if BigInteger in normal_types_set:
# return BigInteger, null_values
# else:
# return Integer, null_values
#
# except TypeError:
# pass
# except ValueError:
# pass
#
# # Are they floats?
# try:
#
# for i, x in enumerate(l):
# if x == '' or x is None:
# continue
#
# float_x = float(x.replace(',', ''))
#
# return Float, null_values
# except ValueError:
# pass
#
# # Are they datetimes?
# try:
# normal_types_set = set()
# add = normal_types_set.add
# ampm = False
# for i, x in enumerate(l):
# if x == '' or x is None:
# add(NoneType)
# continue
#
# d = parse(x, default=DEFAULT_DATETIME)
#
# # Is it only a time?
# if d.date() == NULL_DATE:
# add(TIME)
#
# # Is it only a date?
# elif d.time() == NULL_TIME:
# add(Date)
#
# # It must be a date and time
# else:
# add(TIMESTAMP)
#
# if 'am' in x.lower():
# ampm = True
#
# if 'pm' in x.lower():
# ampm = True
#
# normal_types_set.discard(NoneType)
#
# # If a mix of dates and datetimes, up-convert dates to datetimes
# if normal_types_set == set([TIMESTAMP, Date]):
# normal_types_set = set([TIMESTAMP])
# # Datetimes and times don't mix -- fallback to using strings
# elif normal_types_set == set([TIMESTAMP, TIME]):
# normal_types_set = set([String])
# # Dates and times don't mix -- fallback to using strings
# elif normal_types_set == set([Date, TIME]):
# normal_types_set = set([String])
# elif normal_types_set == set([TIME]) and ampm:
# normal_types_set = set([String])
#
# return normal_types_set.pop(), null_values
# except ValueError:
# pass
# except TypeError: # https://bugs.launchpad.net/dateutil/+bug/1247643
# pass
#
# # Don't know what they are, so they must just be strings
# return String, null_values
. Output only the next line. | degrees_y = degrees_at_equator |
Next line prediction: <|code_start|>
def get_size_in_degrees(meters, latitude):
earth_circumference = 40041000.0 # meters, average circumference
degrees_per_meter = 360.0 / earth_circumference
degrees_at_equator = meters * degrees_per_meter
latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
degrees_x = degrees_at_equator * latitude_correction
<|code_end|>
. Use current file imports:
(import csv
import math
import boto3
from collections import namedtuple
from slugify import slugify as _slugify
from sqlalchemy import Table
from plenario.settings import ADMIN_EMAILS, AWS_ACCESS_KEY, AWS_REGION_NAME, AWS_SECRET_KEY, MAIL_USERNAME
from plenario.utils.typeinference import normalize_column_type)
and context including class names, function names, or small code snippets from other files:
# Path: plenario/settings.py
# ADMIN_EMAILS = _admin_emails.split(',')
#
# AWS_ACCESS_KEY = get('AWS_ACCESS_KEY', '')
#
# AWS_REGION_NAME = get('AWS_REGION_NAME', 'us-east-1')
#
# AWS_SECRET_KEY = get('AWS_SECRET_KEY', '')
#
# MAIL_USERNAME = get('MAIL_USERNAME', '')
#
# Path: plenario/utils/typeinference.py
# def normalize_column_type(l):
# """Given a sequence of values in a column (l),
# guess its type.
#
# :param l: A column
# :return: (col_type, null_values)
# where col_type is a SQLAlchemy TypeEngine
# and null_values is a boolean
# representing whether nulls of any kind were detected.
# """
# null_values = False
#
# # Convert "NA", "N/A", etc. to null types.
# for i, x in enumerate(l):
# if x is not None and x.lower() in NULL_VALUES:
# l[i] = None
# null_values = True
#
# # Are they boolean?
# try:
# for i, x in enumerate(l):
# if x == '' or x is None:
# raise ValueError('Not boolean')
# elif x.lower() in TRUE_VALUES:
# continue
# elif x.lower() in FALSE_VALUES:
# continue
# else:
# raise ValueError('Not boolean')
#
# return Boolean, null_values
# except ValueError:
# pass
#
# # Are they integers?
# try:
# normal_types_set = set()
# add = normal_types_set.add
# for i, x in enumerate(l):
# if x == '' or x is None:
# continue
#
# int_x = int(x.replace(',', ''))
#
# if x[0] == '0' and int(x) != 0:
# raise TypeError('Integer is padded with 0s, so treat it as a string instead.')
# if x.isspace():
# raise TypeError('Integer is nothing but spaces so falling back to string')
#
# if 9000000000000000000 > int_x > 1000000000:
# add(BigInteger)
# elif 1000000000 > int_x:
# add(Integer)
# else:
# raise ValueError
#
# if BigInteger in normal_types_set:
# return BigInteger, null_values
# else:
# return Integer, null_values
#
# except TypeError:
# pass
# except ValueError:
# pass
#
# # Are they floats?
# try:
#
# for i, x in enumerate(l):
# if x == '' or x is None:
# continue
#
# float_x = float(x.replace(',', ''))
#
# return Float, null_values
# except ValueError:
# pass
#
# # Are they datetimes?
# try:
# normal_types_set = set()
# add = normal_types_set.add
# ampm = False
# for i, x in enumerate(l):
# if x == '' or x is None:
# add(NoneType)
# continue
#
# d = parse(x, default=DEFAULT_DATETIME)
#
# # Is it only a time?
# if d.date() == NULL_DATE:
# add(TIME)
#
# # Is it only a date?
# elif d.time() == NULL_TIME:
# add(Date)
#
# # It must be a date and time
# else:
# add(TIMESTAMP)
#
# if 'am' in x.lower():
# ampm = True
#
# if 'pm' in x.lower():
# ampm = True
#
# normal_types_set.discard(NoneType)
#
# # If a mix of dates and datetimes, up-convert dates to datetimes
# if normal_types_set == set([TIMESTAMP, Date]):
# normal_types_set = set([TIMESTAMP])
# # Datetimes and times don't mix -- fallback to using strings
# elif normal_types_set == set([TIMESTAMP, TIME]):
# normal_types_set = set([String])
# # Dates and times don't mix -- fallback to using strings
# elif normal_types_set == set([Date, TIME]):
# normal_types_set = set([String])
# elif normal_types_set == set([TIME]) and ampm:
# normal_types_set = set([String])
#
# return normal_types_set.pop(), null_values
# except ValueError:
# pass
# except TypeError: # https://bugs.launchpad.net/dateutil/+bug/1247643
# pass
#
# # Don't know what they are, so they must just be strings
# return String, null_values
. Output only the next line. | degrees_y = degrees_at_equator |
Given snippet: <|code_start|>
def get_size_in_degrees(meters, latitude):
earth_circumference = 40041000.0 # meters, average circumference
degrees_per_meter = 360.0 / earth_circumference
degrees_at_equator = meters * degrees_per_meter
latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import csv
import math
import boto3
from collections import namedtuple
from slugify import slugify as _slugify
from sqlalchemy import Table
from plenario.settings import ADMIN_EMAILS, AWS_ACCESS_KEY, AWS_REGION_NAME, AWS_SECRET_KEY, MAIL_USERNAME
from plenario.utils.typeinference import normalize_column_type
and context:
# Path: plenario/settings.py
# ADMIN_EMAILS = _admin_emails.split(',')
#
# AWS_ACCESS_KEY = get('AWS_ACCESS_KEY', '')
#
# AWS_REGION_NAME = get('AWS_REGION_NAME', 'us-east-1')
#
# AWS_SECRET_KEY = get('AWS_SECRET_KEY', '')
#
# MAIL_USERNAME = get('MAIL_USERNAME', '')
#
# Path: plenario/utils/typeinference.py
# def normalize_column_type(l):
# """Given a sequence of values in a column (l),
# guess its type.
#
# :param l: A column
# :return: (col_type, null_values)
# where col_type is a SQLAlchemy TypeEngine
# and null_values is a boolean
# representing whether nulls of any kind were detected.
# """
# null_values = False
#
# # Convert "NA", "N/A", etc. to null types.
# for i, x in enumerate(l):
# if x is not None and x.lower() in NULL_VALUES:
# l[i] = None
# null_values = True
#
# # Are they boolean?
# try:
# for i, x in enumerate(l):
# if x == '' or x is None:
# raise ValueError('Not boolean')
# elif x.lower() in TRUE_VALUES:
# continue
# elif x.lower() in FALSE_VALUES:
# continue
# else:
# raise ValueError('Not boolean')
#
# return Boolean, null_values
# except ValueError:
# pass
#
# # Are they integers?
# try:
# normal_types_set = set()
# add = normal_types_set.add
# for i, x in enumerate(l):
# if x == '' or x is None:
# continue
#
# int_x = int(x.replace(',', ''))
#
# if x[0] == '0' and int(x) != 0:
# raise TypeError('Integer is padded with 0s, so treat it as a string instead.')
# if x.isspace():
# raise TypeError('Integer is nothing but spaces so falling back to string')
#
# if 9000000000000000000 > int_x > 1000000000:
# add(BigInteger)
# elif 1000000000 > int_x:
# add(Integer)
# else:
# raise ValueError
#
# if BigInteger in normal_types_set:
# return BigInteger, null_values
# else:
# return Integer, null_values
#
# except TypeError:
# pass
# except ValueError:
# pass
#
# # Are they floats?
# try:
#
# for i, x in enumerate(l):
# if x == '' or x is None:
# continue
#
# float_x = float(x.replace(',', ''))
#
# return Float, null_values
# except ValueError:
# pass
#
# # Are they datetimes?
# try:
# normal_types_set = set()
# add = normal_types_set.add
# ampm = False
# for i, x in enumerate(l):
# if x == '' or x is None:
# add(NoneType)
# continue
#
# d = parse(x, default=DEFAULT_DATETIME)
#
# # Is it only a time?
# if d.date() == NULL_DATE:
# add(TIME)
#
# # Is it only a date?
# elif d.time() == NULL_TIME:
# add(Date)
#
# # It must be a date and time
# else:
# add(TIMESTAMP)
#
# if 'am' in x.lower():
# ampm = True
#
# if 'pm' in x.lower():
# ampm = True
#
# normal_types_set.discard(NoneType)
#
# # If a mix of dates and datetimes, up-convert dates to datetimes
# if normal_types_set == set([TIMESTAMP, Date]):
# normal_types_set = set([TIMESTAMP])
# # Datetimes and times don't mix -- fallback to using strings
# elif normal_types_set == set([TIMESTAMP, TIME]):
# normal_types_set = set([String])
# # Dates and times don't mix -- fallback to using strings
# elif normal_types_set == set([Date, TIME]):
# normal_types_set = set([String])
# elif normal_types_set == set([TIME]) and ampm:
# normal_types_set = set([String])
#
# return normal_types_set.pop(), null_values
# except ValueError:
# pass
# except TypeError: # https://bugs.launchpad.net/dateutil/+bug/1247643
# pass
#
# # Don't know what they are, so they must just be strings
# return String, null_values
which might include code, classes, or functions. Output only the next line. | degrees_x = degrees_at_equator * latitude_correction |
Continue the code snippet: <|code_start|>
def get_size_in_degrees(meters, latitude):
earth_circumference = 40041000.0 # meters, average circumference
degrees_per_meter = 360.0 / earth_circumference
degrees_at_equator = meters * degrees_per_meter
latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
degrees_x = degrees_at_equator * latitude_correction
degrees_y = degrees_at_equator
return degrees_x, degrees_y
ColumnInfo = namedtuple('ColumnInfo', 'name type_ has_nulls')
<|code_end|>
. Use current file imports:
import csv
import math
import boto3
from collections import namedtuple
from slugify import slugify as _slugify
from sqlalchemy import Table
from plenario.settings import ADMIN_EMAILS, AWS_ACCESS_KEY, AWS_REGION_NAME, AWS_SECRET_KEY, MAIL_USERNAME
from plenario.utils.typeinference import normalize_column_type
and context (classes, functions, or code) from other files:
# Path: plenario/settings.py
# ADMIN_EMAILS = _admin_emails.split(',')
#
# AWS_ACCESS_KEY = get('AWS_ACCESS_KEY', '')
#
# AWS_REGION_NAME = get('AWS_REGION_NAME', 'us-east-1')
#
# AWS_SECRET_KEY = get('AWS_SECRET_KEY', '')
#
# MAIL_USERNAME = get('MAIL_USERNAME', '')
#
# Path: plenario/utils/typeinference.py
# def normalize_column_type(l):
# """Given a sequence of values in a column (l),
# guess its type.
#
# :param l: A column
# :return: (col_type, null_values)
# where col_type is a SQLAlchemy TypeEngine
# and null_values is a boolean
# representing whether nulls of any kind were detected.
# """
# null_values = False
#
# # Convert "NA", "N/A", etc. to null types.
# for i, x in enumerate(l):
# if x is not None and x.lower() in NULL_VALUES:
# l[i] = None
# null_values = True
#
# # Are they boolean?
# try:
# for i, x in enumerate(l):
# if x == '' or x is None:
# raise ValueError('Not boolean')
# elif x.lower() in TRUE_VALUES:
# continue
# elif x.lower() in FALSE_VALUES:
# continue
# else:
# raise ValueError('Not boolean')
#
# return Boolean, null_values
# except ValueError:
# pass
#
# # Are they integers?
# try:
# normal_types_set = set()
# add = normal_types_set.add
# for i, x in enumerate(l):
# if x == '' or x is None:
# continue
#
# int_x = int(x.replace(',', ''))
#
# if x[0] == '0' and int(x) != 0:
# raise TypeError('Integer is padded with 0s, so treat it as a string instead.')
# if x.isspace():
# raise TypeError('Integer is nothing but spaces so falling back to string')
#
# if 9000000000000000000 > int_x > 1000000000:
# add(BigInteger)
# elif 1000000000 > int_x:
# add(Integer)
# else:
# raise ValueError
#
# if BigInteger in normal_types_set:
# return BigInteger, null_values
# else:
# return Integer, null_values
#
# except TypeError:
# pass
# except ValueError:
# pass
#
# # Are they floats?
# try:
#
# for i, x in enumerate(l):
# if x == '' or x is None:
# continue
#
# float_x = float(x.replace(',', ''))
#
# return Float, null_values
# except ValueError:
# pass
#
# # Are they datetimes?
# try:
# normal_types_set = set()
# add = normal_types_set.add
# ampm = False
# for i, x in enumerate(l):
# if x == '' or x is None:
# add(NoneType)
# continue
#
# d = parse(x, default=DEFAULT_DATETIME)
#
# # Is it only a time?
# if d.date() == NULL_DATE:
# add(TIME)
#
# # Is it only a date?
# elif d.time() == NULL_TIME:
# add(Date)
#
# # It must be a date and time
# else:
# add(TIMESTAMP)
#
# if 'am' in x.lower():
# ampm = True
#
# if 'pm' in x.lower():
# ampm = True
#
# normal_types_set.discard(NoneType)
#
# # If a mix of dates and datetimes, up-convert dates to datetimes
# if normal_types_set == set([TIMESTAMP, Date]):
# normal_types_set = set([TIMESTAMP])
# # Datetimes and times don't mix -- fallback to using strings
# elif normal_types_set == set([TIMESTAMP, TIME]):
# normal_types_set = set([String])
# # Dates and times don't mix -- fallback to using strings
# elif normal_types_set == set([Date, TIME]):
# normal_types_set = set([String])
# elif normal_types_set == set([TIME]) and ampm:
# normal_types_set = set([String])
#
# return normal_types_set.pop(), null_values
# except ValueError:
# pass
# except TypeError: # https://bugs.launchpad.net/dateutil/+bug/1247643
# pass
#
# # Don't know what they are, so they must just be strings
# return String, null_values
. Output only the next line. | def infer_csv_columns(inp): |
Next line prediction: <|code_start|>
def get_size_in_degrees(meters, latitude):
earth_circumference = 40041000.0 # meters, average circumference
degrees_per_meter = 360.0 / earth_circumference
degrees_at_equator = meters * degrees_per_meter
latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
degrees_x = degrees_at_equator * latitude_correction
degrees_y = degrees_at_equator
return degrees_x, degrees_y
<|code_end|>
. Use current file imports:
(import csv
import math
import boto3
from collections import namedtuple
from slugify import slugify as _slugify
from sqlalchemy import Table
from plenario.settings import ADMIN_EMAILS, AWS_ACCESS_KEY, AWS_REGION_NAME, AWS_SECRET_KEY, MAIL_USERNAME
from plenario.utils.typeinference import normalize_column_type)
and context including class names, function names, or small code snippets from other files:
# Path: plenario/settings.py
# ADMIN_EMAILS = _admin_emails.split(',')
#
# AWS_ACCESS_KEY = get('AWS_ACCESS_KEY', '')
#
# AWS_REGION_NAME = get('AWS_REGION_NAME', 'us-east-1')
#
# AWS_SECRET_KEY = get('AWS_SECRET_KEY', '')
#
# MAIL_USERNAME = get('MAIL_USERNAME', '')
#
# Path: plenario/utils/typeinference.py
# def normalize_column_type(l):
# """Given a sequence of values in a column (l),
# guess its type.
#
# :param l: A column
# :return: (col_type, null_values)
# where col_type is a SQLAlchemy TypeEngine
# and null_values is a boolean
# representing whether nulls of any kind were detected.
# """
# null_values = False
#
# # Convert "NA", "N/A", etc. to null types.
# for i, x in enumerate(l):
# if x is not None and x.lower() in NULL_VALUES:
# l[i] = None
# null_values = True
#
# # Are they boolean?
# try:
# for i, x in enumerate(l):
# if x == '' or x is None:
# raise ValueError('Not boolean')
# elif x.lower() in TRUE_VALUES:
# continue
# elif x.lower() in FALSE_VALUES:
# continue
# else:
# raise ValueError('Not boolean')
#
# return Boolean, null_values
# except ValueError:
# pass
#
# # Are they integers?
# try:
# normal_types_set = set()
# add = normal_types_set.add
# for i, x in enumerate(l):
# if x == '' or x is None:
# continue
#
# int_x = int(x.replace(',', ''))
#
# if x[0] == '0' and int(x) != 0:
# raise TypeError('Integer is padded with 0s, so treat it as a string instead.')
# if x.isspace():
# raise TypeError('Integer is nothing but spaces so falling back to string')
#
# if 9000000000000000000 > int_x > 1000000000:
# add(BigInteger)
# elif 1000000000 > int_x:
# add(Integer)
# else:
# raise ValueError
#
# if BigInteger in normal_types_set:
# return BigInteger, null_values
# else:
# return Integer, null_values
#
# except TypeError:
# pass
# except ValueError:
# pass
#
# # Are they floats?
# try:
#
# for i, x in enumerate(l):
# if x == '' or x is None:
# continue
#
# float_x = float(x.replace(',', ''))
#
# return Float, null_values
# except ValueError:
# pass
#
# # Are they datetimes?
# try:
# normal_types_set = set()
# add = normal_types_set.add
# ampm = False
# for i, x in enumerate(l):
# if x == '' or x is None:
# add(NoneType)
# continue
#
# d = parse(x, default=DEFAULT_DATETIME)
#
# # Is it only a time?
# if d.date() == NULL_DATE:
# add(TIME)
#
# # Is it only a date?
# elif d.time() == NULL_TIME:
# add(Date)
#
# # It must be a date and time
# else:
# add(TIMESTAMP)
#
# if 'am' in x.lower():
# ampm = True
#
# if 'pm' in x.lower():
# ampm = True
#
# normal_types_set.discard(NoneType)
#
# # If a mix of dates and datetimes, up-convert dates to datetimes
# if normal_types_set == set([TIMESTAMP, Date]):
# normal_types_set = set([TIMESTAMP])
# # Datetimes and times don't mix -- fallback to using strings
# elif normal_types_set == set([TIMESTAMP, TIME]):
# normal_types_set = set([String])
# # Dates and times don't mix -- fallback to using strings
# elif normal_types_set == set([Date, TIME]):
# normal_types_set = set([String])
# elif normal_types_set == set([TIME]) and ampm:
# normal_types_set = set([String])
#
# return normal_types_set.pop(), null_values
# except ValueError:
# pass
# except TypeError: # https://bugs.launchpad.net/dateutil/+bug/1247643
# pass
#
# # Don't know what they are, so they must just be strings
# return String, null_values
. Output only the next line. | ColumnInfo = namedtuple('ColumnInfo', 'name type_ has_nulls') |
Predict the next line for this snippet: <|code_start|>
bcrypt = Bcrypt()
class ShapeMetadata(postgres_base):
__tablename__ = 'meta_shape'
dataset_name = Column(String, primary_key=True)
human_name = Column(String, nullable=False)
source_url = Column(String)
view_url = Column(String)
date_added = Column(Date, nullable=False)
# Organization that published this dataset
attribution = Column(String)
description = Column(Text)
update_freq = Column(String(100), nullable=False)
# Who submitted this dataset?
contributor_name = Column(String)
contributor_organization = Column(String)
<|code_end|>
with the help of current file imports:
from datetime import datetime
from flask_bcrypt import Bcrypt
from geoalchemy2 import Geometry
from sqlalchemy import Boolean, Column, Date, Integer, String, Table, Text, func, select
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.types import NullType
from plenario.database import postgres_base, postgres_session
from plenario.utils.helpers import slugify
and context from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def slugify(text: str, delimiter: str = '_') -> str:
# return _slugify(text, separator=delimiter)
, which may contain function names, class names, or code. Output only the next line. | contributor_email = Column(String) |
Continue the code snippet: <|code_start|>
dataset_name = Column(String, primary_key=True)
human_name = Column(String, nullable=False)
source_url = Column(String)
view_url = Column(String)
date_added = Column(Date, nullable=False)
# Organization that published this dataset
attribution = Column(String)
description = Column(Text)
update_freq = Column(String(100), nullable=False)
# Who submitted this dataset?
contributor_name = Column(String)
contributor_organization = Column(String)
contributor_email = Column(String)
# Has an admin signed off on it?
approved_status = Column(Boolean)
# We always ingest geometric data as 4326
bbox = Column(Geometry('POLYGON', srid=4326))
# How many shape records are present?
num_shapes = Column(Integer)
# False when admin first submits metadata.
# Will become true if ETL completes successfully.
is_ingested = Column(Boolean, nullable=False)
# foreign key of celery task responsible for shapefile's ingestion
celery_task_id = Column(String)
<|code_end|>
. Use current file imports:
from datetime import datetime
from flask_bcrypt import Bcrypt
from geoalchemy2 import Geometry
from sqlalchemy import Boolean, Column, Date, Integer, String, Table, Text, func, select
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.types import NullType
from plenario.database import postgres_base, postgres_session
from plenario.utils.helpers import slugify
and context (classes, functions, or code) from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def slugify(text: str, delimiter: str = '_') -> str:
# return _slugify(text, separator=delimiter)
. Output only the next line. | @classmethod |
Continue the code snippet: <|code_start|> finally:
# Extract every column's info.
fields_list = []
for col in table.columns:
if not isinstance(col.type, NullType):
# Don't report our internal-use columns
if col.name in {'geom', 'ogc_fid', 'hash'}:
continue
field_object = {
'field_name': col.name,
'field_type': str(col.type)
}
fields_list.append(field_object)
dataset['columns'] = fields_list
return listing
@classmethod
def tablenames(cls):
return [x.dataset_name for x in postgres_session.query(ShapeMetadata.dataset_name).all()]
@staticmethod
def add_intersections_to_index(listing, geom):
# For each dataset_name in the listing,
# get a count of intersections
# and replace num_geoms
for row in listing:
name = row['dataset_name']
num_intersections_query = """
SELECT count(g.geom) as num_geoms
<|code_end|>
. Use current file imports:
from datetime import datetime
from flask_bcrypt import Bcrypt
from geoalchemy2 import Geometry
from sqlalchemy import Boolean, Column, Date, Integer, String, Table, Text, func, select
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.types import NullType
from plenario.database import postgres_base, postgres_session
from plenario.utils.helpers import slugify
and context (classes, functions, or code) from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def slugify(text: str, delimiter: str = '_') -> str:
# return _slugify(text, separator=delimiter)
. Output only the next line. | FROM "{dataset_name}" as g |
Given the code snippet: <|code_start|>
def make_error(msg, status_code):
resp = {
'meta': {
<|code_end|>
, generate the next line using the imports in this file:
import json
from flask import make_response, request
from plenario.api.common import unknown_object_json_handler
and context (functions, classes, or occasionally code) from other files:
# Path: plenario/api/common.py
# def unknown_object_json_handler(obj):
# """When trying to dump values into JSON, sometimes the json.dumps() method
# finds values that it cannot serialize. This converts those objects from
# a non-serializable format to a serializable one.
#
# :param obj: object that json is trying to convert
# :returns: converted object
# """
# if type(obj) == Table:
# return obj.name
# elif isinstance(obj, date):
# return obj.isoformat()
# elif isinstance(obj, datetime):
# return obj.isoformat()
# elif isinstance(obj, time):
# return obj.isoformat()
# elif isinstance(obj, MetaTable):
# return obj.__tablename__
# else:
# raise ValueError('{0} cannot be parsed into JSON. \n'
# '{0} is of type: {1}.'.format(obj, type(obj)))
. Output only the next line. | }, |
Predict the next line after this snippet: <|code_start|># TODO: refactor this whole massive kludge into an `auth` package -- too much going on in here
auth = Blueprint('auth', __name__)
login_manager = LoginManager()
csrf = CSRFProtect()
<|code_end|>
using the current file's imports:
import json
from functools import wraps
from flask import Blueprint, render_template, redirect, request, url_for, flash, session as flask_session
from flask_login import LoginManager, login_user, logout_user, login_required
from flask_wtf import Form, CSRFProtect
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email
from plenario.database import postgres_session as db_session
from plenario.models import User
and any relevant context from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/models/User.py
# class User(postgres_base):
# __tablename__ = 'plenario_user'
#
# id = Column(String(36), default=get_uuid, primary_key=True)
# name = Column(String, nullable=False, unique=True)
# email = Column(String, nullable=False)
# _password = Column('password', String(60), nullable=False)
#
# def _get_password(self):
# return self._password
#
# def _set_password(self, value):
# self._password = bcrypt.generate_password_hash(value).decode('utf-8')
#
# password = property(_get_password, _set_password)
# password = synonym('_password', descriptor=password)
#
# def __init__(self, name, password, email):
# self.name = name
# self.password = password
# self.email = email
#
# @classmethod
# def get_by_username(cls, name):
# return postgres_session.query(cls).filter(cls.name == name).first()
#
# @classmethod
# def check_password(cls, name, value):
# user = cls.get_by_username(name)
# if not user:
# return False
# return bcrypt.check_password_hash(user.password, value)
#
# def is_authenticated(self):
# return True
#
# def is_active(self):
# return True
#
# def is_anonymous(self):
# return False
#
# def get_id(self):
# return self.id
. Output only the next line. | def check_admin_status(): |
Given snippet: <|code_start|># TODO: refactor this whole massive kludge into an `auth` package -- too much going on in here
auth = Blueprint('auth', __name__)
login_manager = LoginManager()
csrf = CSRFProtect()
def check_admin_status():
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json
from functools import wraps
from flask import Blueprint, render_template, redirect, request, url_for, flash, session as flask_session
from flask_login import LoginManager, login_user, logout_user, login_required
from flask_wtf import Form, CSRFProtect
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email
from plenario.database import postgres_session as db_session
from plenario.models import User
and context:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/models/User.py
# class User(postgres_base):
# __tablename__ = 'plenario_user'
#
# id = Column(String(36), default=get_uuid, primary_key=True)
# name = Column(String, nullable=False, unique=True)
# email = Column(String, nullable=False)
# _password = Column('password', String(60), nullable=False)
#
# def _get_password(self):
# return self._password
#
# def _set_password(self, value):
# self._password = bcrypt.generate_password_hash(value).decode('utf-8')
#
# password = property(_get_password, _set_password)
# password = synonym('_password', descriptor=password)
#
# def __init__(self, name, password, email):
# self.name = name
# self.password = password
# self.email = email
#
# @classmethod
# def get_by_username(cls, name):
# return postgres_session.query(cls).filter(cls.name == name).first()
#
# @classmethod
# def check_password(cls, name, value):
# user = cls.get_by_username(name)
# if not user:
# return False
# return bcrypt.check_password_hash(user.password, value)
#
# def is_authenticated(self):
# return True
#
# def is_active(self):
# return True
#
# def is_anonymous(self):
# return False
#
# def get_id(self):
# return self.id
which might include code, classes, or functions. Output only the next line. | def decorator(f): |
Given the following code snippet before the placeholder: <|code_start|>
session = postgres_session()
objects = []
def redshift_table_exists(table_name):
"""Make an inexpensive query to the database. It the table does not exist,
the query will cause a ProgrammingError.
:param table_name: (string) table name
:returns (bool) true if the table exists, false otherwise"""
try:
redshift_engine.execute("select '{}'::regclass".format(table_name))
return True
except ProgrammingError:
return False
class TestFeatureMeta(unittest.TestCase):
@classmethod
def setUpClass(cls):
network = NetworkMeta(name='test')
feature = FeatureMeta(
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from sqlalchemy.exc import ProgrammingError, IntegrityError, InvalidRequestError
from plenario.database import redshift_engine
from plenario.database import postgres_session as postgres_session
from plenario.models.SensorNetwork import NetworkMeta, FeatureMeta
and context including class names, function names, and sometimes code from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/models/SensorNetwork.py
# class NetworkMeta(postgres_base):
# __tablename__ = 'sensor__network_metadata'
#
# name = Column(String, primary_key=True)
# nodes = relationship('NodeMeta')
# info = Column(JSONB)
#
# @staticmethod
# def index():
# networks = postgres_session.query(NetworkMeta)
# return [network.name.lower() for network in networks]
#
# def __repr__(self):
# return '<Network {!r}>'.format(self.name)
#
# def tree(self):
# sensor_tree_fn = sqla_fn.network_tree(self.name)
# sensor_tree_result_proxy = self.query.session.execute(sensor_tree_fn)
# return sensor_tree_result_proxy.scalar()
#
# def sensors(self) -> set:
#
# keys = []
# for sensor in self.tree().values():
# keys += sensor
#
# return set(keys)
#
# def features(self):
#
# keys = []
# for sensor in self.tree().values():
# for feature in sensor.values():
# keys += feature.values()
#
# return set([k.split('.')[0] for k in keys])
#
# class FeatureMeta(postgres_base):
# __tablename__ = 'sensor__feature_metadata'
#
# name = Column(String, primary_key=True)
# networks = relationship('NetworkMeta', secondary='sensor__feature_to_network')
# observed_properties = Column(JSONB)
#
# def types(self):
# """Return a dictionary with the properties mapped to their types.
# """
# return {e['name']: e['type'] for e in self.observed_properties}
#
# def sensors(self) -> set:
# """Return the set of sensors that report on this feature.
# """
# results = set()
# for network in self.networks:
# for node in network.tree().values():
# for sensor, properties in node.items():
# if self.name in {p.split('.')[0] for p in properties}:
# results.add(sensor)
#
# return results
#
# @staticmethod
# def index(network_name=None):
# features = []
# for node in postgres_session.query(NodeMeta).all():
# if network_name is None or node.sensor_network.lower() == network_name.lower():
# for sensor in node.sensors:
# for prop in sensor.observed_properties.values():
# features.append(prop.split('.')[0].lower())
# return list(set(features))
#
# @staticmethod
# def properties_of(feature):
# query = postgres_session.query(FeatureMeta.observed_properties).filter(
# FeatureMeta.name == feature)
# return [feature + '.' + prop['name'] for prop in query.first().observed_properties]
#
# def mirror(self):
# """Create feature tables in redshift for all the networks associated
# with this feature.
# """
# for network in self.networks:
# self._mirror(network.name)
#
# def _mirror(self, network_name: str):
# """Create a feature table in redshift for the specified network.
# """
# columns = []
# for feature in self.observed_properties:
# column_name = feature['name']
# column_type = database_types[feature['type'].upper()]
# columns.append(Column(column_name, column_type, default=None))
#
# redshift_table = Table(
# '{}__{}'.format(network_name, self.name),
# redshift_base.metadata,
# Column('node_id', String, primary_key=True),
# Column('datetime', DateTime, primary_key=True),
# Column('meta_id', Float, nullable=False),
# Column('sensor', String, nullable=False),
# *columns,
# redshift_distkey='datetime',
# redshift_sortkey='datetime'
# )
#
# redshift_table.create()
#
# def __repr__(self):
# return '<Feature {!r}>'.format(self.name)
. Output only the next line. | name='foo', |
Predict the next line after this snippet: <|code_start|>
session = postgres_session()
objects = []
def redshift_table_exists(table_name):
"""Make an inexpensive query to the database. It the table does not exist,
the query will cause a ProgrammingError.
:param table_name: (string) table name
:returns (bool) true if the table exists, false otherwise"""
try:
redshift_engine.execute("select '{}'::regclass".format(table_name))
return True
<|code_end|>
using the current file's imports:
import unittest
from sqlalchemy.exc import ProgrammingError, IntegrityError, InvalidRequestError
from plenario.database import redshift_engine
from plenario.database import postgres_session as postgres_session
from plenario.models.SensorNetwork import NetworkMeta, FeatureMeta
and any relevant context from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/models/SensorNetwork.py
# class NetworkMeta(postgres_base):
# __tablename__ = 'sensor__network_metadata'
#
# name = Column(String, primary_key=True)
# nodes = relationship('NodeMeta')
# info = Column(JSONB)
#
# @staticmethod
# def index():
# networks = postgres_session.query(NetworkMeta)
# return [network.name.lower() for network in networks]
#
# def __repr__(self):
# return '<Network {!r}>'.format(self.name)
#
# def tree(self):
# sensor_tree_fn = sqla_fn.network_tree(self.name)
# sensor_tree_result_proxy = self.query.session.execute(sensor_tree_fn)
# return sensor_tree_result_proxy.scalar()
#
# def sensors(self) -> set:
#
# keys = []
# for sensor in self.tree().values():
# keys += sensor
#
# return set(keys)
#
# def features(self):
#
# keys = []
# for sensor in self.tree().values():
# for feature in sensor.values():
# keys += feature.values()
#
# return set([k.split('.')[0] for k in keys])
#
# class FeatureMeta(postgres_base):
# __tablename__ = 'sensor__feature_metadata'
#
# name = Column(String, primary_key=True)
# networks = relationship('NetworkMeta', secondary='sensor__feature_to_network')
# observed_properties = Column(JSONB)
#
# def types(self):
# """Return a dictionary with the properties mapped to their types.
# """
# return {e['name']: e['type'] for e in self.observed_properties}
#
# def sensors(self) -> set:
# """Return the set of sensors that report on this feature.
# """
# results = set()
# for network in self.networks:
# for node in network.tree().values():
# for sensor, properties in node.items():
# if self.name in {p.split('.')[0] for p in properties}:
# results.add(sensor)
#
# return results
#
# @staticmethod
# def index(network_name=None):
# features = []
# for node in postgres_session.query(NodeMeta).all():
# if network_name is None or node.sensor_network.lower() == network_name.lower():
# for sensor in node.sensors:
# for prop in sensor.observed_properties.values():
# features.append(prop.split('.')[0].lower())
# return list(set(features))
#
# @staticmethod
# def properties_of(feature):
# query = postgres_session.query(FeatureMeta.observed_properties).filter(
# FeatureMeta.name == feature)
# return [feature + '.' + prop['name'] for prop in query.first().observed_properties]
#
# def mirror(self):
# """Create feature tables in redshift for all the networks associated
# with this feature.
# """
# for network in self.networks:
# self._mirror(network.name)
#
# def _mirror(self, network_name: str):
# """Create a feature table in redshift for the specified network.
# """
# columns = []
# for feature in self.observed_properties:
# column_name = feature['name']
# column_type = database_types[feature['type'].upper()]
# columns.append(Column(column_name, column_type, default=None))
#
# redshift_table = Table(
# '{}__{}'.format(network_name, self.name),
# redshift_base.metadata,
# Column('node_id', String, primary_key=True),
# Column('datetime', DateTime, primary_key=True),
# Column('meta_id', Float, nullable=False),
# Column('sensor', String, nullable=False),
# *columns,
# redshift_distkey='datetime',
# redshift_sortkey='datetime'
# )
#
# redshift_table.create()
#
# def __repr__(self):
# return '<Feature {!r}>'.format(self.name)
. Output only the next line. | except ProgrammingError: |
Predict the next line for this snippet: <|code_start|>
session = postgres_session()
objects = []
def redshift_table_exists(table_name):
"""Make an inexpensive query to the database. It the table does not exist,
the query will cause a ProgrammingError.
:param table_name: (string) table name
:returns (bool) true if the table exists, false otherwise"""
try:
redshift_engine.execute("select '{}'::regclass".format(table_name))
return True
except ProgrammingError:
return False
class TestFeatureMeta(unittest.TestCase):
@classmethod
def setUpClass(cls):
network = NetworkMeta(name='test')
feature = FeatureMeta(
<|code_end|>
with the help of current file imports:
import unittest
from sqlalchemy.exc import ProgrammingError, IntegrityError, InvalidRequestError
from plenario.database import redshift_engine
from plenario.database import postgres_session as postgres_session
from plenario.models.SensorNetwork import NetworkMeta, FeatureMeta
and context from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/models/SensorNetwork.py
# class NetworkMeta(postgres_base):
# __tablename__ = 'sensor__network_metadata'
#
# name = Column(String, primary_key=True)
# nodes = relationship('NodeMeta')
# info = Column(JSONB)
#
# @staticmethod
# def index():
# networks = postgres_session.query(NetworkMeta)
# return [network.name.lower() for network in networks]
#
# def __repr__(self):
# return '<Network {!r}>'.format(self.name)
#
# def tree(self):
# sensor_tree_fn = sqla_fn.network_tree(self.name)
# sensor_tree_result_proxy = self.query.session.execute(sensor_tree_fn)
# return sensor_tree_result_proxy.scalar()
#
# def sensors(self) -> set:
#
# keys = []
# for sensor in self.tree().values():
# keys += sensor
#
# return set(keys)
#
# def features(self):
#
# keys = []
# for sensor in self.tree().values():
# for feature in sensor.values():
# keys += feature.values()
#
# return set([k.split('.')[0] for k in keys])
#
# class FeatureMeta(postgres_base):
# __tablename__ = 'sensor__feature_metadata'
#
# name = Column(String, primary_key=True)
# networks = relationship('NetworkMeta', secondary='sensor__feature_to_network')
# observed_properties = Column(JSONB)
#
# def types(self):
# """Return a dictionary with the properties mapped to their types.
# """
# return {e['name']: e['type'] for e in self.observed_properties}
#
# def sensors(self) -> set:
# """Return the set of sensors that report on this feature.
# """
# results = set()
# for network in self.networks:
# for node in network.tree().values():
# for sensor, properties in node.items():
# if self.name in {p.split('.')[0] for p in properties}:
# results.add(sensor)
#
# return results
#
# @staticmethod
# def index(network_name=None):
# features = []
# for node in postgres_session.query(NodeMeta).all():
# if network_name is None or node.sensor_network.lower() == network_name.lower():
# for sensor in node.sensors:
# for prop in sensor.observed_properties.values():
# features.append(prop.split('.')[0].lower())
# return list(set(features))
#
# @staticmethod
# def properties_of(feature):
# query = postgres_session.query(FeatureMeta.observed_properties).filter(
# FeatureMeta.name == feature)
# return [feature + '.' + prop['name'] for prop in query.first().observed_properties]
#
# def mirror(self):
# """Create feature tables in redshift for all the networks associated
# with this feature.
# """
# for network in self.networks:
# self._mirror(network.name)
#
# def _mirror(self, network_name: str):
# """Create a feature table in redshift for the specified network.
# """
# columns = []
# for feature in self.observed_properties:
# column_name = feature['name']
# column_type = database_types[feature['type'].upper()]
# columns.append(Column(column_name, column_type, default=None))
#
# redshift_table = Table(
# '{}__{}'.format(network_name, self.name),
# redshift_base.metadata,
# Column('node_id', String, primary_key=True),
# Column('datetime', DateTime, primary_key=True),
# Column('meta_id', Float, nullable=False),
# Column('sensor', String, nullable=False),
# *columns,
# redshift_distkey='datetime',
# redshift_sortkey='datetime'
# )
#
# redshift_table.create()
#
# def __repr__(self):
# return '<Feature {!r}>'.format(self.name)
, which may contain function names, class names, or code. Output only the next line. | name='foo', |
Given the code snippet: <|code_start|>
session = postgres_session()
objects = []
def redshift_table_exists(table_name):
"""Make an inexpensive query to the database. It the table does not exist,
the query will cause a ProgrammingError.
:param table_name: (string) table name
:returns (bool) true if the table exists, false otherwise"""
<|code_end|>
, generate the next line using the imports in this file:
import unittest
from sqlalchemy.exc import ProgrammingError, IntegrityError, InvalidRequestError
from plenario.database import redshift_engine
from plenario.database import postgres_session as postgres_session
from plenario.models.SensorNetwork import NetworkMeta, FeatureMeta
and context (functions, classes, or occasionally code) from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/models/SensorNetwork.py
# class NetworkMeta(postgres_base):
# __tablename__ = 'sensor__network_metadata'
#
# name = Column(String, primary_key=True)
# nodes = relationship('NodeMeta')
# info = Column(JSONB)
#
# @staticmethod
# def index():
# networks = postgres_session.query(NetworkMeta)
# return [network.name.lower() for network in networks]
#
# def __repr__(self):
# return '<Network {!r}>'.format(self.name)
#
# def tree(self):
# sensor_tree_fn = sqla_fn.network_tree(self.name)
# sensor_tree_result_proxy = self.query.session.execute(sensor_tree_fn)
# return sensor_tree_result_proxy.scalar()
#
# def sensors(self) -> set:
#
# keys = []
# for sensor in self.tree().values():
# keys += sensor
#
# return set(keys)
#
# def features(self):
#
# keys = []
# for sensor in self.tree().values():
# for feature in sensor.values():
# keys += feature.values()
#
# return set([k.split('.')[0] for k in keys])
#
# class FeatureMeta(postgres_base):
# __tablename__ = 'sensor__feature_metadata'
#
# name = Column(String, primary_key=True)
# networks = relationship('NetworkMeta', secondary='sensor__feature_to_network')
# observed_properties = Column(JSONB)
#
# def types(self):
# """Return a dictionary with the properties mapped to their types.
# """
# return {e['name']: e['type'] for e in self.observed_properties}
#
# def sensors(self) -> set:
# """Return the set of sensors that report on this feature.
# """
# results = set()
# for network in self.networks:
# for node in network.tree().values():
# for sensor, properties in node.items():
# if self.name in {p.split('.')[0] for p in properties}:
# results.add(sensor)
#
# return results
#
# @staticmethod
# def index(network_name=None):
# features = []
# for node in postgres_session.query(NodeMeta).all():
# if network_name is None or node.sensor_network.lower() == network_name.lower():
# for sensor in node.sensors:
# for prop in sensor.observed_properties.values():
# features.append(prop.split('.')[0].lower())
# return list(set(features))
#
# @staticmethod
# def properties_of(feature):
# query = postgres_session.query(FeatureMeta.observed_properties).filter(
# FeatureMeta.name == feature)
# return [feature + '.' + prop['name'] for prop in query.first().observed_properties]
#
# def mirror(self):
# """Create feature tables in redshift for all the networks associated
# with this feature.
# """
# for network in self.networks:
# self._mirror(network.name)
#
# def _mirror(self, network_name: str):
# """Create a feature table in redshift for the specified network.
# """
# columns = []
# for feature in self.observed_properties:
# column_name = feature['name']
# column_type = database_types[feature['type'].upper()]
# columns.append(Column(column_name, column_type, default=None))
#
# redshift_table = Table(
# '{}__{}'.format(network_name, self.name),
# redshift_base.metadata,
# Column('node_id', String, primary_key=True),
# Column('datetime', DateTime, primary_key=True),
# Column('meta_id', Float, nullable=False),
# Column('sensor', String, nullable=False),
# *columns,
# redshift_distkey='datetime',
# redshift_sortkey='datetime'
# )
#
# redshift_table.create()
#
# def __repr__(self):
# return '<Feature {!r}>'.format(self.name)
. Output only the next line. | try: |
Given the following code snippet before the placeholder: <|code_start|>
class ShapefileError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
<|code_end|>
, predict the next line using imports from the current file:
import os
import shutil
import tempfile
from plenario.utils.ogr2ogr import OgrError, import_shapefile_to_table
and context including class names, function names, and sometimes code from other files:
# Path: plenario/utils/ogr2ogr.py
# class OgrError(Exception):
# def __init__(self, message):
# Exception.__init__(self, message)
# self.message = message
#
# def import_shapefile_to_table(component_path, table_name):
# """
# :param component_path: Path to unzipped shapefile components and the shared name of all components. So if folder
# contains foo.shp, foo.prj, foo.dbf, then component_path is path/to/dir/foo. foo.shp and foo.prj must be present.
# :param table_name: Name that we want table to have in the database
# """
#
# args = ['ogr2ogr',
# '-f', 'PostgreSQL', # Use the PostgreSQL driver. Documentation here: http://www.gdal.org/drv_pg.html
#
# '-lco', 'PRECISION=no', # Many .dbf files don't obey their precision headers.
# # So importing as precision-marked types like NUMERIC(width, precision) often fails.
# # Instead, import as INTEGER, VARCHAR, FLOAT8.
#
# '-nlt', 'PROMOTE_TO_MULTI', # Import all lines and polygons as multilines and multipolygons
# # We don't know if the source shapefiles will have multi or non-multi geometries,
# # so we need to import the most inclusive set of types.
# '-s_srs', component_path + '.prj', # Derive source SRID from Well Known Text in .prj
# '-t_srs', 'EPSG:4326', # Always convert to 4326
# postgres_connection_arg,
# component_path + '.shp', # Point to .shp so that ogr2ogr knows it's importing a Shapefile.
# '-nln', table_name, # (n)ew (l)ayer (n)ame. Set the name of the new table.
# '-lco', 'GEOMETRY_NAME=geom'] # Always name the geometry column 'geom'
#
# subprocess.check_output(args)
. Output only the next line. | self.message = message |
Using the snippet: <|code_start|>
class ShapefileError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
<|code_end|>
, determine the next line of code. You have imports:
import os
import shutil
import tempfile
from plenario.utils.ogr2ogr import OgrError, import_shapefile_to_table
and context (class names, function names, or code) available:
# Path: plenario/utils/ogr2ogr.py
# class OgrError(Exception):
# def __init__(self, message):
# Exception.__init__(self, message)
# self.message = message
#
# def import_shapefile_to_table(component_path, table_name):
# """
# :param component_path: Path to unzipped shapefile components and the shared name of all components. So if folder
# contains foo.shp, foo.prj, foo.dbf, then component_path is path/to/dir/foo. foo.shp and foo.prj must be present.
# :param table_name: Name that we want table to have in the database
# """
#
# args = ['ogr2ogr',
# '-f', 'PostgreSQL', # Use the PostgreSQL driver. Documentation here: http://www.gdal.org/drv_pg.html
#
# '-lco', 'PRECISION=no', # Many .dbf files don't obey their precision headers.
# # So importing as precision-marked types like NUMERIC(width, precision) often fails.
# # Instead, import as INTEGER, VARCHAR, FLOAT8.
#
# '-nlt', 'PROMOTE_TO_MULTI', # Import all lines and polygons as multilines and multipolygons
# # We don't know if the source shapefiles will have multi or non-multi geometries,
# # so we need to import the most inclusive set of types.
# '-s_srs', component_path + '.prj', # Derive source SRID from Well Known Text in .prj
# '-t_srs', 'EPSG:4326', # Always convert to 4326
# postgres_connection_arg,
# component_path + '.shp', # Point to .shp so that ogr2ogr knows it's importing a Shapefile.
# '-nln', table_name, # (n)ew (l)ayer (n)ame. Set the name of the new table.
# '-lco', 'GEOMETRY_NAME=geom'] # Always name the geometry column 'geom'
#
# subprocess.check_output(args)
. Output only the next line. | def import_shapefile(shapefile_zip, table_name): |
Next line prediction: <|code_start|> extend_existing=True
)
stations_table = Table(
'weather_stations',
postgres_base.metadata,
autoload=True,
autoload_with=engine,
extend_existing=True
)
valid_query, query_clauses, resp, status_code = make_query(weather_table,
raw_query_params)
if valid_query:
resp['meta']['status'] = 'ok'
base_query = postgres_session.query(weather_table, stations_table)
base_query = base_query.join(
stations_table,
weather_table.c.wban_code == stations_table.c.wban_code
)
for clause in query_clauses:
base_query = base_query.filter(clause)
try:
base_query = base_query.order_by(
getattr(weather_table.c, 'date').desc()
)
except AttributeError:
<|code_end|>
. Use current file imports:
(import json
import shapely.geometry
import shapely.wkb
import sqlalchemy as sa
from flask import jsonify, make_response, request
from sqlalchemy import Table, func
from sqlalchemy.exc import SQLAlchemyError
from plenario.api.common import CACHE_TIMEOUT, RESPONSE_LIMIT, cache, crossdomain, date_json_handler, make_cache_key
from plenario.api.response import make_error
from plenario.database import postgres_base, postgres_engine as engine, postgres_session
from plenario.utils.helpers import get_size_in_degrees
from plenario.tasks import update_weather)
and context including class names, function names, or small code snippets from other files:
# Path: plenario/api/common.py
# RESPONSE_LIMIT = 1000
# CACHE_TIMEOUT = 60 * 60 * 6
# def unknown_object_json_handler(obj):
# def date_json_handler(obj):
# def crossdomain(origin=None, methods=None, headers=None,
# max_age=21600, attach_to_all=True,
# automatic_options=True): # pragma: no cover
# def get_methods():
# def decorator(f):
# def wrapped_function(*args, **kwargs):
# def make_cache_key(*args, **kwargs):
# def make_csv(data):
# def extract_first_geometry_fragment(geojson):
# def make_fragment_str(geojson_fragment, buffer=100):
#
# Path: plenario/api/response.py
# def make_error(msg, status_code, arguments=None):
#
# if not arguments:
# arguments = request.args
#
# resp = {
# 'meta': {
# 'status': 'error',
# 'message': msg,
# 'query': arguments
# },
# 'objects': [],
# }
#
# response = jsonify(resp)
# response.status_code = status_code
# return response
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
. Output only the next line. | base_query = base_query.order_by( |
Given the following code snippet before the placeholder: <|code_start|>
stations_table = Table(
'weather_stations',
postgres_base.metadata,
autoload=True,
autoload_with=engine,
extend_existing=True
)
valid_query, query_clauses, resp, status_code = make_query(weather_table,
raw_query_params)
if valid_query:
resp['meta']['status'] = 'ok'
base_query = postgres_session.query(weather_table, stations_table)
base_query = base_query.join(
stations_table,
weather_table.c.wban_code == stations_table.c.wban_code
)
for clause in query_clauses:
base_query = base_query.filter(clause)
try:
base_query = base_query.order_by(
getattr(weather_table.c, 'date').desc()
)
except AttributeError:
base_query = base_query.order_by(
getattr(weather_table.c, 'datetime').desc()
<|code_end|>
, predict the next line using imports from the current file:
import json
import shapely.geometry
import shapely.wkb
import sqlalchemy as sa
from flask import jsonify, make_response, request
from sqlalchemy import Table, func
from sqlalchemy.exc import SQLAlchemyError
from plenario.api.common import CACHE_TIMEOUT, RESPONSE_LIMIT, cache, crossdomain, date_json_handler, make_cache_key
from plenario.api.response import make_error
from plenario.database import postgres_base, postgres_engine as engine, postgres_session
from plenario.utils.helpers import get_size_in_degrees
from plenario.tasks import update_weather
and context including class names, function names, and sometimes code from other files:
# Path: plenario/api/common.py
# RESPONSE_LIMIT = 1000
# CACHE_TIMEOUT = 60 * 60 * 6
# def unknown_object_json_handler(obj):
# def date_json_handler(obj):
# def crossdomain(origin=None, methods=None, headers=None,
# max_age=21600, attach_to_all=True,
# automatic_options=True): # pragma: no cover
# def get_methods():
# def decorator(f):
# def wrapped_function(*args, **kwargs):
# def make_cache_key(*args, **kwargs):
# def make_csv(data):
# def extract_first_geometry_fragment(geojson):
# def make_fragment_str(geojson_fragment, buffer=100):
#
# Path: plenario/api/response.py
# def make_error(msg, status_code, arguments=None):
#
# if not arguments:
# arguments = request.args
#
# resp = {
# 'meta': {
# 'status': 'error',
# 'message': msg,
# 'query': arguments
# },
# 'objects': [],
# }
#
# response = jsonify(resp)
# response.status_code = status_code
# return response
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
. Output only the next line. | ) |
Given snippet: <|code_start|>
for clause in query_clauses:
print(('weather_stations(): filtering on clause', clause))
base_query = base_query.filter(clause)
values = [r for r in base_query.all()]
fieldnames = [f for f in list(stations_table.columns.keys())]
for value in values:
d = {f: getattr(value, f) for f in fieldnames}
loc = bytes(value.location.data)
d['location'] = shapely.wkb.loads(loc).__geo_interface__
resp['objects'].append(d)
resp['meta']['query'] = raw_query_params
resp = make_response(
json.dumps(resp, default=date_json_handler),
status_code
)
resp.headers['Content-Type'] = 'application/json'
return resp
@cache.cached(timeout=CACHE_TIMEOUT, key_prefix=make_cache_key)
@crossdomain(origin='*')
def weather(table):
raw_query_params = request.args.copy()
weather_table = Table(
'dat_weather_observations_{}'.format(table),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json
import shapely.geometry
import shapely.wkb
import sqlalchemy as sa
from flask import jsonify, make_response, request
from sqlalchemy import Table, func
from sqlalchemy.exc import SQLAlchemyError
from plenario.api.common import CACHE_TIMEOUT, RESPONSE_LIMIT, cache, crossdomain, date_json_handler, make_cache_key
from plenario.api.response import make_error
from plenario.database import postgres_base, postgres_engine as engine, postgres_session
from plenario.utils.helpers import get_size_in_degrees
from plenario.tasks import update_weather
and context:
# Path: plenario/api/common.py
# RESPONSE_LIMIT = 1000
# CACHE_TIMEOUT = 60 * 60 * 6
# def unknown_object_json_handler(obj):
# def date_json_handler(obj):
# def crossdomain(origin=None, methods=None, headers=None,
# max_age=21600, attach_to_all=True,
# automatic_options=True): # pragma: no cover
# def get_methods():
# def decorator(f):
# def wrapped_function(*args, **kwargs):
# def make_cache_key(*args, **kwargs):
# def make_csv(data):
# def extract_first_geometry_fragment(geojson):
# def make_fragment_str(geojson_fragment, buffer=100):
#
# Path: plenario/api/response.py
# def make_error(msg, status_code, arguments=None):
#
# if not arguments:
# arguments = request.args
#
# resp = {
# 'meta': {
# 'status': 'error',
# 'message': msg,
# 'query': arguments
# },
# 'objects': [],
# }
#
# response = jsonify(resp)
# response.status_code = status_code
# return response
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
which might include code, classes, or functions. Output only the next line. | postgres_base.metadata, |
Given snippet: <|code_start|>
valid_query, query_clauses, resp, status_code = make_query(weather_table,
raw_query_params)
if valid_query:
resp['meta']['status'] = 'ok'
base_query = postgres_session.query(weather_table, stations_table)
base_query = base_query.join(
stations_table,
weather_table.c.wban_code == stations_table.c.wban_code
)
for clause in query_clauses:
base_query = base_query.filter(clause)
try:
base_query = base_query.order_by(
getattr(weather_table.c, 'date').desc()
)
except AttributeError:
base_query = base_query.order_by(
getattr(weather_table.c, 'datetime').desc()
)
base_query = base_query.limit(RESPONSE_LIMIT)
if raw_query_params.get('offset'):
offset = raw_query_params['offset']
base_query = base_query.offset(int(offset))
values = [r for r in base_query.all()]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json
import shapely.geometry
import shapely.wkb
import sqlalchemy as sa
from flask import jsonify, make_response, request
from sqlalchemy import Table, func
from sqlalchemy.exc import SQLAlchemyError
from plenario.api.common import CACHE_TIMEOUT, RESPONSE_LIMIT, cache, crossdomain, date_json_handler, make_cache_key
from plenario.api.response import make_error
from plenario.database import postgres_base, postgres_engine as engine, postgres_session
from plenario.utils.helpers import get_size_in_degrees
from plenario.tasks import update_weather
and context:
# Path: plenario/api/common.py
# RESPONSE_LIMIT = 1000
# CACHE_TIMEOUT = 60 * 60 * 6
# def unknown_object_json_handler(obj):
# def date_json_handler(obj):
# def crossdomain(origin=None, methods=None, headers=None,
# max_age=21600, attach_to_all=True,
# automatic_options=True): # pragma: no cover
# def get_methods():
# def decorator(f):
# def wrapped_function(*args, **kwargs):
# def make_cache_key(*args, **kwargs):
# def make_csv(data):
# def extract_first_geometry_fragment(geojson):
# def make_fragment_str(geojson_fragment, buffer=100):
#
# Path: plenario/api/response.py
# def make_error(msg, status_code, arguments=None):
#
# if not arguments:
# arguments = request.args
#
# resp = {
# 'meta': {
# 'status': 'error',
# 'message': msg,
# 'query': arguments
# },
# 'objects': [],
# }
#
# response = jsonify(resp)
# response.status_code = status_code
# return response
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
which might include code, classes, or functions. Output only the next line. | weather_fields = list(weather_table.columns.keys()) |
Given snippet: <|code_start|> except AttributeError:
base_query = base_query.order_by(
getattr(weather_table.c, 'datetime').desc()
)
base_query = base_query.limit(RESPONSE_LIMIT)
if raw_query_params.get('offset'):
offset = raw_query_params['offset']
base_query = base_query.offset(int(offset))
values = [r for r in base_query.all()]
weather_fields = list(weather_table.columns.keys())
station_fields = list(stations_table.columns.keys())
weather_data = {}
station_data = {}
for value in values:
wd = {f: getattr(value, f) for f in weather_fields}
sd = {f: getattr(value, f) for f in station_fields}
if weather_data.get(value.wban_code):
weather_data[value.wban_code].append(wd)
else:
weather_data[value.wban_code] = [wd]
loc = bytes(value.location.data)
sd['location'] = shapely.wkb.loads(loc).__geo_interface__
station_data[value.wban_code] = sd
for station_id in list(weather_data.keys()):
d = {
'station_info': station_data[station_id],
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json
import shapely.geometry
import shapely.wkb
import sqlalchemy as sa
from flask import jsonify, make_response, request
from sqlalchemy import Table, func
from sqlalchemy.exc import SQLAlchemyError
from plenario.api.common import CACHE_TIMEOUT, RESPONSE_LIMIT, cache, crossdomain, date_json_handler, make_cache_key
from plenario.api.response import make_error
from plenario.database import postgres_base, postgres_engine as engine, postgres_session
from plenario.utils.helpers import get_size_in_degrees
from plenario.tasks import update_weather
and context:
# Path: plenario/api/common.py
# RESPONSE_LIMIT = 1000
# CACHE_TIMEOUT = 60 * 60 * 6
# def unknown_object_json_handler(obj):
# def date_json_handler(obj):
# def crossdomain(origin=None, methods=None, headers=None,
# max_age=21600, attach_to_all=True,
# automatic_options=True): # pragma: no cover
# def get_methods():
# def decorator(f):
# def wrapped_function(*args, **kwargs):
# def make_cache_key(*args, **kwargs):
# def make_csv(data):
# def extract_first_geometry_fragment(geojson):
# def make_fragment_str(geojson_fragment, buffer=100):
#
# Path: plenario/api/response.py
# def make_error(msg, status_code, arguments=None):
#
# if not arguments:
# arguments = request.args
#
# resp = {
# 'meta': {
# 'status': 'error',
# 'message': msg,
# 'query': arguments
# },
# 'objects': [],
# }
#
# response = jsonify(resp)
# response.status_code = status_code
# return response
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
which might include code, classes, or functions. Output only the next line. | 'observations': weather_data[station_id], |
Here is a snippet: <|code_start|> stations_table = Table(
'weather_stations',
postgres_base.metadata,
autoload=True,
autoload_with=engine,
extend_existing=True
)
valid_query, query_clauses, resp, status_code = make_query(stations_table, raw_query_params)
if valid_query:
resp['meta']['status'] = 'ok'
base_query = postgres_session.query(stations_table)
for clause in query_clauses:
print(('weather_stations(): filtering on clause', clause))
base_query = base_query.filter(clause)
values = [r for r in base_query.all()]
fieldnames = [f for f in list(stations_table.columns.keys())]
for value in values:
d = {f: getattr(value, f) for f in fieldnames}
loc = bytes(value.location.data)
d['location'] = shapely.wkb.loads(loc).__geo_interface__
resp['objects'].append(d)
resp['meta']['query'] = raw_query_params
resp = make_response(
json.dumps(resp, default=date_json_handler),
<|code_end|>
. Write the next line using the current file imports:
import json
import shapely.geometry
import shapely.wkb
import sqlalchemy as sa
from flask import jsonify, make_response, request
from sqlalchemy import Table, func
from sqlalchemy.exc import SQLAlchemyError
from plenario.api.common import CACHE_TIMEOUT, RESPONSE_LIMIT, cache, crossdomain, date_json_handler, make_cache_key
from plenario.api.response import make_error
from plenario.database import postgres_base, postgres_engine as engine, postgres_session
from plenario.utils.helpers import get_size_in_degrees
from plenario.tasks import update_weather
and context from other files:
# Path: plenario/api/common.py
# RESPONSE_LIMIT = 1000
# CACHE_TIMEOUT = 60 * 60 * 6
# def unknown_object_json_handler(obj):
# def date_json_handler(obj):
# def crossdomain(origin=None, methods=None, headers=None,
# max_age=21600, attach_to_all=True,
# automatic_options=True): # pragma: no cover
# def get_methods():
# def decorator(f):
# def wrapped_function(*args, **kwargs):
# def make_cache_key(*args, **kwargs):
# def make_csv(data):
# def extract_first_geometry_fragment(geojson):
# def make_fragment_str(geojson_fragment, buffer=100):
#
# Path: plenario/api/response.py
# def make_error(msg, status_code, arguments=None):
#
# if not arguments:
# arguments = request.args
#
# resp = {
# 'meta': {
# 'status': 'error',
# 'message': msg,
# 'query': arguments
# },
# 'objects': [],
# }
#
# response = jsonify(resp)
# response.status_code = status_code
# return response
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
, which may include functions, classes, or code. Output only the next line. | status_code |
Predict the next line for this snippet: <|code_start|> values = [r for r in base_query.all()]
weather_fields = list(weather_table.columns.keys())
station_fields = list(stations_table.columns.keys())
weather_data = {}
station_data = {}
for value in values:
wd = {f: getattr(value, f) for f in weather_fields}
sd = {f: getattr(value, f) for f in station_fields}
if weather_data.get(value.wban_code):
weather_data[value.wban_code].append(wd)
else:
weather_data[value.wban_code] = [wd]
loc = bytes(value.location.data)
sd['location'] = shapely.wkb.loads(loc).__geo_interface__
station_data[value.wban_code] = sd
for station_id in list(weather_data.keys()):
d = {
'station_info': station_data[station_id],
'observations': weather_data[station_id],
}
resp['objects'].append(d)
resp['meta']['total'] = sum([len(r['observations']) for r in resp['objects']])
resp['meta']['query'] = raw_query_params
resp = make_response(
json.dumps(resp, default=date_json_handler),
status_code
)
<|code_end|>
with the help of current file imports:
import json
import shapely.geometry
import shapely.wkb
import sqlalchemy as sa
from flask import jsonify, make_response, request
from sqlalchemy import Table, func
from sqlalchemy.exc import SQLAlchemyError
from plenario.api.common import CACHE_TIMEOUT, RESPONSE_LIMIT, cache, crossdomain, date_json_handler, make_cache_key
from plenario.api.response import make_error
from plenario.database import postgres_base, postgres_engine as engine, postgres_session
from plenario.utils.helpers import get_size_in_degrees
from plenario.tasks import update_weather
and context from other files:
# Path: plenario/api/common.py
# RESPONSE_LIMIT = 1000
# CACHE_TIMEOUT = 60 * 60 * 6
# def unknown_object_json_handler(obj):
# def date_json_handler(obj):
# def crossdomain(origin=None, methods=None, headers=None,
# max_age=21600, attach_to_all=True,
# automatic_options=True): # pragma: no cover
# def get_methods():
# def decorator(f):
# def wrapped_function(*args, **kwargs):
# def make_cache_key(*args, **kwargs):
# def make_csv(data):
# def extract_first_geometry_fragment(geojson):
# def make_fragment_str(geojson_fragment, buffer=100):
#
# Path: plenario/api/response.py
# def make_error(msg, status_code, arguments=None):
#
# if not arguments:
# arguments = request.args
#
# resp = {
# 'meta': {
# 'status': 'error',
# 'message': msg,
# 'query': arguments
# },
# 'objects': [],
# }
#
# response = jsonify(resp)
# response.status_code = status_code
# return response
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
, which may contain function names, class names, or code. Output only the next line. | resp.headers['Content-Type'] = 'application/json' |
Here is a snippet: <|code_start|> )
base_query = base_query.limit(RESPONSE_LIMIT)
if raw_query_params.get('offset'):
offset = raw_query_params['offset']
base_query = base_query.offset(int(offset))
values = [r for r in base_query.all()]
weather_fields = list(weather_table.columns.keys())
station_fields = list(stations_table.columns.keys())
weather_data = {}
station_data = {}
for value in values:
wd = {f: getattr(value, f) for f in weather_fields}
sd = {f: getattr(value, f) for f in station_fields}
if weather_data.get(value.wban_code):
weather_data[value.wban_code].append(wd)
else:
weather_data[value.wban_code] = [wd]
loc = bytes(value.location.data)
sd['location'] = shapely.wkb.loads(loc).__geo_interface__
station_data[value.wban_code] = sd
for station_id in list(weather_data.keys()):
d = {
'station_info': station_data[station_id],
'observations': weather_data[station_id],
}
resp['objects'].append(d)
<|code_end|>
. Write the next line using the current file imports:
import json
import shapely.geometry
import shapely.wkb
import sqlalchemy as sa
from flask import jsonify, make_response, request
from sqlalchemy import Table, func
from sqlalchemy.exc import SQLAlchemyError
from plenario.api.common import CACHE_TIMEOUT, RESPONSE_LIMIT, cache, crossdomain, date_json_handler, make_cache_key
from plenario.api.response import make_error
from plenario.database import postgres_base, postgres_engine as engine, postgres_session
from plenario.utils.helpers import get_size_in_degrees
from plenario.tasks import update_weather
and context from other files:
# Path: plenario/api/common.py
# RESPONSE_LIMIT = 1000
# CACHE_TIMEOUT = 60 * 60 * 6
# def unknown_object_json_handler(obj):
# def date_json_handler(obj):
# def crossdomain(origin=None, methods=None, headers=None,
# max_age=21600, attach_to_all=True,
# automatic_options=True): # pragma: no cover
# def get_methods():
# def decorator(f):
# def wrapped_function(*args, **kwargs):
# def make_cache_key(*args, **kwargs):
# def make_csv(data):
# def extract_first_geometry_fragment(geojson):
# def make_fragment_str(geojson_fragment, buffer=100):
#
# Path: plenario/api/response.py
# def make_error(msg, status_code, arguments=None):
#
# if not arguments:
# arguments = request.args
#
# resp = {
# 'meta': {
# 'status': 'error',
# 'message': msg,
# 'query': arguments
# },
# 'objects': [],
# }
#
# response = jsonify(resp)
# response.status_code = status_code
# return response
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
, which may include functions, classes, or code. Output only the next line. | resp['meta']['total'] = sum([len(r['observations']) for r in resp['objects']]) |
Based on the snippet: <|code_start|>
@cache.cached(timeout=CACHE_TIMEOUT, key_prefix=make_cache_key)
@crossdomain(origin='*')
def weather_stations():
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import shapely.geometry
import shapely.wkb
import sqlalchemy as sa
from flask import jsonify, make_response, request
from sqlalchemy import Table, func
from sqlalchemy.exc import SQLAlchemyError
from plenario.api.common import CACHE_TIMEOUT, RESPONSE_LIMIT, cache, crossdomain, date_json_handler, make_cache_key
from plenario.api.response import make_error
from plenario.database import postgres_base, postgres_engine as engine, postgres_session
from plenario.utils.helpers import get_size_in_degrees
from plenario.tasks import update_weather
and context (classes, functions, sometimes code) from other files:
# Path: plenario/api/common.py
# RESPONSE_LIMIT = 1000
# CACHE_TIMEOUT = 60 * 60 * 6
# def unknown_object_json_handler(obj):
# def date_json_handler(obj):
# def crossdomain(origin=None, methods=None, headers=None,
# max_age=21600, attach_to_all=True,
# automatic_options=True): # pragma: no cover
# def get_methods():
# def decorator(f):
# def wrapped_function(*args, **kwargs):
# def make_cache_key(*args, **kwargs):
# def make_csv(data):
# def extract_first_geometry_fragment(geojson):
# def make_fragment_str(geojson_fragment, buffer=100):
#
# Path: plenario/api/response.py
# def make_error(msg, status_code, arguments=None):
#
# if not arguments:
# arguments = request.args
#
# resp = {
# 'meta': {
# 'status': 'error',
# 'message': msg,
# 'query': arguments
# },
# 'objects': [],
# }
#
# response = jsonify(resp)
# response.status_code = status_code
# return response
#
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
. Output only the next line. | raw_query_params = request.args.copy() |
Given the following code snippet before the placeholder: <|code_start|>
logger = getLogger(__name__)
postgres_engine = create_engine(DATABASE_CONN)
postgres_session = scoped_session(sessionmaker(bind=postgres_engine))
postgres_base = declarative_base(bind=postgres_engine)
postgres_base.query = postgres_session.query_property()
redshift_engine = create_engine(REDSHIFT_CONN, max_overflow=-1)
redshift_session = scoped_session(sessionmaker(bind=redshift_engine, autocommit=True))
redshift_base = declarative_base(bind=redshift_engine)
redshift_base.query = redshift_session.query_property()
<|code_end|>
, predict the next line using imports from the current file:
import subprocess
from contextlib import contextmanager
from logging import getLogger
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from plenario.settings import DATABASE_CONN, REDSHIFT_CONN
and context including class names, function names, and sometimes code from other files:
# Path: plenario/settings.py
# DATABASE_CONN = 'postgresql://{}:{}@{}:{}/{}'.format(DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
#
# REDSHIFT_CONN = 'postgresql://{}:{}@{}:{}/{}'.format(RS_USER, RS_PASSWORD, RS_HOST, RS_PORT, RS_NAME)
. Output only the next line. | def create_database(bind: Engine, database: str) -> None: |
Predict the next line for this snippet: <|code_start|>
logger = getLogger(__name__)
postgres_engine = create_engine(DATABASE_CONN)
postgres_session = scoped_session(sessionmaker(bind=postgres_engine))
postgres_base = declarative_base(bind=postgres_engine)
postgres_base.query = postgres_session.query_property()
redshift_engine = create_engine(REDSHIFT_CONN, max_overflow=-1)
redshift_session = scoped_session(sessionmaker(bind=redshift_engine, autocommit=True))
<|code_end|>
with the help of current file imports:
import subprocess
from contextlib import contextmanager
from logging import getLogger
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from plenario.settings import DATABASE_CONN, REDSHIFT_CONN
and context from other files:
# Path: plenario/settings.py
# DATABASE_CONN = 'postgresql://{}:{}@{}:{}/{}'.format(DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
#
# REDSHIFT_CONN = 'postgresql://{}:{}@{}:{}/{}'.format(RS_USER, RS_PASSWORD, RS_HOST, RS_PORT, RS_NAME)
, which may contain function names, class names, or code. Output only the next line. | redshift_base = declarative_base(bind=redshift_engine) |
Predict the next line after this snippet: <|code_start|>
def test_hopeless_url(self):
self.assertRaises(RuntimeError, process_suggestion,
'https://www.google.com/')
class SubmitCSVTests(unittest.TestCase):
def test_socrata_url(self):
sub = process_suggestion('https://data.cityofchicago.org/'
'Health-Human-Services/'
'Flu-Shot-Clinic-Locations-2013/g5vx-5vqf')
self.assertEqual(sub.file_url,
'https://data.cityofchicago.org/api/views/'
'g5vx-5vqf/rows.csv?accessType=DOWNLOAD')
expected_names = {'Date', 'Start Time', 'End Time', 'Day', 'Event',
'Event Type', 'Address', 'City', 'State', 'Zip',
'Phone', 'Community Area Number',
'Community Area Name', 'Ward',
'Latitude', 'Longitude', 'Location'}
observed_names = {c.name for c in sub.columns}
self.assertEqual(expected_names, observed_names)
expected_attribution = 'City of Chicago'
expected_description = 'List of Chicago Department of Public Health free flu clinics offered throughout the city. For more information about the flu, go to http://bit.ly/9uNhqG.'
expected_human_name = 'Flu Shot Clinic Locations - 2013'
self.assertEqual(sub.description_meta.description, expected_description)
self.assertEqual(sub.description_meta.attribution, expected_attribution)
self.assertEqual(sub.description_meta.human_name, expected_human_name)
<|code_end|>
using the current file's imports:
import unittest
from plenario.views import process_suggestion
and any relevant context from other files:
# Path: plenario/views.py
# def process_suggestion(url, is_shapefile=False):
# _assert_reachable(url)
# if SocrataSuggestion.is_socrata_url(url):
# suggestion = SocrataSuggestion(url, is_shapefile)
# else:
# suggestion = GenericSuggestion(url, is_shapefile)
# return suggestion
. Output only the next line. | def test_non_socrata_url(self): |
Given the following code snippet before the placeholder: <|code_start|>
# from http://stackoverflow.com/questions/7490660/converting-wind-direction-in-angles-to-text-words
def degToCardinal(num):
val = int((num / 22.5) + .5)
arr = ["N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE", "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW"]
<|code_end|>
, predict the next line using imports from the current file:
import calendar
import csv
import operator
import os
import re
import sys
import tarfile
import zipfile
import requests
import sqlalchemy
from datetime import date, datetime, timedelta
from ftplib import FTP
from io import StringIO
from dateutil import parser, relativedelta
from geoalchemy2 import Geometry
from metar.metar import ParserError
from sqlalchemy import BigInteger, Column, Date, DateTime, Float, Integer, String, Table, and_, distinct, select, text
from sqlalchemy.dialects.postgresql import ARRAY
from plenario.database import postgres_base, postgres_engine as engine
from plenario.settings import DATA_DIR
from .weather_metar import getAllCurrentWeather, getCurrentWeather, getMetar, getMetarVals
and context including class names, function names, and sometimes code from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/settings.py
# DATA_DIR = '/tmp'
#
# Path: plenario/utils/weather_metar.py
# def getAllCurrentWeather():
# all_metar_url = 'http://aviationweather.gov/adds/dataserver_current/current/metars.cache.xml'
# return raw_metars_from_url(all_metar_url)
# # all_calls = all_callSigns()
# # all_metars = []
# # for i in range(0, len(all_calls), 1000):
# # calls_range = all_calls[i:(i+1000)]
# # metars = getCurrentWeather(call_signs=calls_range)
# # all_metars.extend(metars)
# #
# # print "getAllCurrentWeather(): total metar collection is length", len(all_metars)
#
# def getCurrentWeather(call_signs=None, wban_codes=None, all_stations=False, wban2callsigns=None):
# xml_METAR_url = 'http://aviationweather.gov/adds/dataserver_current/httpparam?datasource=metars&requesttype=retrieve&format=xml&hoursBeforeNow=1.25'
# # Example of multiple stations: https://aviationweather.gov/adds/dataserver_current/httpparam?datasource=metars&requesttype=retrieve&format=xml&hoursBeforeNow=1.25&stationString=KORD,KMDW
#
# if (all_stations == True):
# # We should grab our list from weather_stations and only ask for 100 at a time.
# # print "all_callSigns is ", all_callSigns()
# # print "len(all_callSigns) is ", len(all_callSigns())
# # XXXXXX TOOO
# pass
# elif (call_signs and wban_codes):
# print("error: define only call_signs or wban_codes and not both")
# elif (wban_codes):
# # Convert all wban_codes to call_signs
# if (wban2callsigns):
# call_signs = []
# for wban in wban_codes:
# if wban in wban2callsigns:
# call_signs.append(wban2callsigns[wban])
# else:
# call_signs = []
# for wban_code in wban_codes:
# call_sign = wban2CallSign(wban_code)
# if (call_sign):
# call_signs.append(call_sign)
#
# if (call_signs):
# # OK, we have call signs now
# xml_METAR_url += '&stationString='
# xml_METAR_url += ','.join([x.upper() for x in call_signs])
# else:
# # XXXXXX: doing all stations
# pass
#
# print(("xml_METAR_url: '%s'" % xml_METAR_url))
# return raw_metars_from_url(xml_METAR_url)
#
# def getMetar(metar_string):
# m = Metar(metar_string)
# return m
#
# def getMetarVals(metar):
# wban_code = getWban(metar)
# call_sign = metar.station_id
# datetime = metar.time
# sky_condition, sky_condition_top = getSkyCondition(metar)
# visibility = getVisibility(metar)
# weather_types = getWeatherTypes(metar)
# f = getTempFahrenheit(metar)
# dp = getDewpointFahrenheit(metar)
# wind_speed, wind_direction_int, wind_direction_cardinal, wind_gust = getWind(metar)
# pressure = getPressure(metar)
# pressure_sea_level = getPressureSeaLevel(metar)
# # XXX do snow depth ("Usually found in the 06 and 18Z observations.")
# # (XXX: snow depth not found in current metar parse, but could be wrong.)
# precip_1hr, precip_3hr, precip_6hr, precip_24hr = getPrecip(metar)
#
# # print "wban: ", wban_code
# # print "datetime: ", datetime
# # print "sky_condition: ", sky_condition
# # print "sky_condition_top: ", sky_condition_top
# # print "weather_types: ", weather_types
# # print "temp: " , f, "F"
# # print "dewpoint: ", dp, "F"
# # print "wind speed:", wind_speed, "MPH", "wind_direction: ", wind_direction_int, "wind_direction_cardinal:", wind_direction_cardinal
# # print "pressure: ", pressure, "IN"
# # print "pressure (sea_level): ", pressure_sea_level, "IN"
# # print "precip (1hr, 3hr, 6hr, 24hr):", precip_1hr, precip_3hr, precip_6hr, precip_24hr
#
# return [wban_code, call_sign, datetime, sky_condition, sky_condition_top,
# visibility, weather_types, f, dp,
# wind_speed, wind_direction_int, wind_direction_cardinal, wind_gust,
# pressure, pressure_sea_level,
# precip_1hr, precip_3hr, precip_6hr, precip_24hr]
. Output only the next line. | return arr[(val % 16)] |
Using the snippet: <|code_start|>
# from http://stackoverflow.com/questions/7490660/converting-wind-direction-in-angles-to-text-words
def degToCardinal(num):
val = int((num / 22.5) + .5)
arr = ["N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE", "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW"]
return arr[(val % 16)]
class WeatherError(Exception):
<|code_end|>
, determine the next line of code. You have imports:
import calendar
import csv
import operator
import os
import re
import sys
import tarfile
import zipfile
import requests
import sqlalchemy
from datetime import date, datetime, timedelta
from ftplib import FTP
from io import StringIO
from dateutil import parser, relativedelta
from geoalchemy2 import Geometry
from metar.metar import ParserError
from sqlalchemy import BigInteger, Column, Date, DateTime, Float, Integer, String, Table, and_, distinct, select, text
from sqlalchemy.dialects.postgresql import ARRAY
from plenario.database import postgres_base, postgres_engine as engine
from plenario.settings import DATA_DIR
from .weather_metar import getAllCurrentWeather, getCurrentWeather, getMetar, getMetarVals
and context (class names, function names, or code) available:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/settings.py
# DATA_DIR = '/tmp'
#
# Path: plenario/utils/weather_metar.py
# def getAllCurrentWeather():
# all_metar_url = 'http://aviationweather.gov/adds/dataserver_current/current/metars.cache.xml'
# return raw_metars_from_url(all_metar_url)
# # all_calls = all_callSigns()
# # all_metars = []
# # for i in range(0, len(all_calls), 1000):
# # calls_range = all_calls[i:(i+1000)]
# # metars = getCurrentWeather(call_signs=calls_range)
# # all_metars.extend(metars)
# #
# # print "getAllCurrentWeather(): total metar collection is length", len(all_metars)
#
# def getCurrentWeather(call_signs=None, wban_codes=None, all_stations=False, wban2callsigns=None):
# xml_METAR_url = 'http://aviationweather.gov/adds/dataserver_current/httpparam?datasource=metars&requesttype=retrieve&format=xml&hoursBeforeNow=1.25'
# # Example of multiple stations: https://aviationweather.gov/adds/dataserver_current/httpparam?datasource=metars&requesttype=retrieve&format=xml&hoursBeforeNow=1.25&stationString=KORD,KMDW
#
# if (all_stations == True):
# # We should grab our list from weather_stations and only ask for 100 at a time.
# # print "all_callSigns is ", all_callSigns()
# # print "len(all_callSigns) is ", len(all_callSigns())
# # XXXXXX TOOO
# pass
# elif (call_signs and wban_codes):
# print("error: define only call_signs or wban_codes and not both")
# elif (wban_codes):
# # Convert all wban_codes to call_signs
# if (wban2callsigns):
# call_signs = []
# for wban in wban_codes:
# if wban in wban2callsigns:
# call_signs.append(wban2callsigns[wban])
# else:
# call_signs = []
# for wban_code in wban_codes:
# call_sign = wban2CallSign(wban_code)
# if (call_sign):
# call_signs.append(call_sign)
#
# if (call_signs):
# # OK, we have call signs now
# xml_METAR_url += '&stationString='
# xml_METAR_url += ','.join([x.upper() for x in call_signs])
# else:
# # XXXXXX: doing all stations
# pass
#
# print(("xml_METAR_url: '%s'" % xml_METAR_url))
# return raw_metars_from_url(xml_METAR_url)
#
# def getMetar(metar_string):
# m = Metar(metar_string)
# return m
#
# def getMetarVals(metar):
# wban_code = getWban(metar)
# call_sign = metar.station_id
# datetime = metar.time
# sky_condition, sky_condition_top = getSkyCondition(metar)
# visibility = getVisibility(metar)
# weather_types = getWeatherTypes(metar)
# f = getTempFahrenheit(metar)
# dp = getDewpointFahrenheit(metar)
# wind_speed, wind_direction_int, wind_direction_cardinal, wind_gust = getWind(metar)
# pressure = getPressure(metar)
# pressure_sea_level = getPressureSeaLevel(metar)
# # XXX do snow depth ("Usually found in the 06 and 18Z observations.")
# # (XXX: snow depth not found in current metar parse, but could be wrong.)
# precip_1hr, precip_3hr, precip_6hr, precip_24hr = getPrecip(metar)
#
# # print "wban: ", wban_code
# # print "datetime: ", datetime
# # print "sky_condition: ", sky_condition
# # print "sky_condition_top: ", sky_condition_top
# # print "weather_types: ", weather_types
# # print "temp: " , f, "F"
# # print "dewpoint: ", dp, "F"
# # print "wind speed:", wind_speed, "MPH", "wind_direction: ", wind_direction_int, "wind_direction_cardinal:", wind_direction_cardinal
# # print "pressure: ", pressure, "IN"
# # print "pressure (sea_level): ", pressure_sea_level, "IN"
# # print "precip (1hr, 3hr, 6hr, 24hr):", precip_1hr, precip_3hr, precip_6hr, precip_24hr
#
# return [wban_code, call_sign, datetime, sky_condition, sky_condition_top,
# visibility, weather_types, f, dp,
# wind_speed, wind_direction_int, wind_direction_cardinal, wind_gust,
# pressure, pressure_sea_level,
# precip_1hr, precip_3hr, precip_6hr, precip_24hr]
. Output only the next line. | def __init__(self, message): |
Given snippet: <|code_start|>
# from http://stackoverflow.com/questions/7490660/converting-wind-direction-in-angles-to-text-words
def degToCardinal(num):
val = int((num / 22.5) + .5)
arr = ["N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE", "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW"]
return arr[(val % 16)]
class WeatherError(Exception):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import calendar
import csv
import operator
import os
import re
import sys
import tarfile
import zipfile
import requests
import sqlalchemy
from datetime import date, datetime, timedelta
from ftplib import FTP
from io import StringIO
from dateutil import parser, relativedelta
from geoalchemy2 import Geometry
from metar.metar import ParserError
from sqlalchemy import BigInteger, Column, Date, DateTime, Float, Integer, String, Table, and_, distinct, select, text
from sqlalchemy.dialects.postgresql import ARRAY
from plenario.database import postgres_base, postgres_engine as engine
from plenario.settings import DATA_DIR
from .weather_metar import getAllCurrentWeather, getCurrentWeather, getMetar, getMetarVals
and context:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/settings.py
# DATA_DIR = '/tmp'
#
# Path: plenario/utils/weather_metar.py
# def getAllCurrentWeather():
# all_metar_url = 'http://aviationweather.gov/adds/dataserver_current/current/metars.cache.xml'
# return raw_metars_from_url(all_metar_url)
# # all_calls = all_callSigns()
# # all_metars = []
# # for i in range(0, len(all_calls), 1000):
# # calls_range = all_calls[i:(i+1000)]
# # metars = getCurrentWeather(call_signs=calls_range)
# # all_metars.extend(metars)
# #
# # print "getAllCurrentWeather(): total metar collection is length", len(all_metars)
#
# def getCurrentWeather(call_signs=None, wban_codes=None, all_stations=False, wban2callsigns=None):
# xml_METAR_url = 'http://aviationweather.gov/adds/dataserver_current/httpparam?datasource=metars&requesttype=retrieve&format=xml&hoursBeforeNow=1.25'
# # Example of multiple stations: https://aviationweather.gov/adds/dataserver_current/httpparam?datasource=metars&requesttype=retrieve&format=xml&hoursBeforeNow=1.25&stationString=KORD,KMDW
#
# if (all_stations == True):
# # We should grab our list from weather_stations and only ask for 100 at a time.
# # print "all_callSigns is ", all_callSigns()
# # print "len(all_callSigns) is ", len(all_callSigns())
# # XXXXXX TOOO
# pass
# elif (call_signs and wban_codes):
# print("error: define only call_signs or wban_codes and not both")
# elif (wban_codes):
# # Convert all wban_codes to call_signs
# if (wban2callsigns):
# call_signs = []
# for wban in wban_codes:
# if wban in wban2callsigns:
# call_signs.append(wban2callsigns[wban])
# else:
# call_signs = []
# for wban_code in wban_codes:
# call_sign = wban2CallSign(wban_code)
# if (call_sign):
# call_signs.append(call_sign)
#
# if (call_signs):
# # OK, we have call signs now
# xml_METAR_url += '&stationString='
# xml_METAR_url += ','.join([x.upper() for x in call_signs])
# else:
# # XXXXXX: doing all stations
# pass
#
# print(("xml_METAR_url: '%s'" % xml_METAR_url))
# return raw_metars_from_url(xml_METAR_url)
#
# def getMetar(metar_string):
# m = Metar(metar_string)
# return m
#
# def getMetarVals(metar):
# wban_code = getWban(metar)
# call_sign = metar.station_id
# datetime = metar.time
# sky_condition, sky_condition_top = getSkyCondition(metar)
# visibility = getVisibility(metar)
# weather_types = getWeatherTypes(metar)
# f = getTempFahrenheit(metar)
# dp = getDewpointFahrenheit(metar)
# wind_speed, wind_direction_int, wind_direction_cardinal, wind_gust = getWind(metar)
# pressure = getPressure(metar)
# pressure_sea_level = getPressureSeaLevel(metar)
# # XXX do snow depth ("Usually found in the 06 and 18Z observations.")
# # (XXX: snow depth not found in current metar parse, but could be wrong.)
# precip_1hr, precip_3hr, precip_6hr, precip_24hr = getPrecip(metar)
#
# # print "wban: ", wban_code
# # print "datetime: ", datetime
# # print "sky_condition: ", sky_condition
# # print "sky_condition_top: ", sky_condition_top
# # print "weather_types: ", weather_types
# # print "temp: " , f, "F"
# # print "dewpoint: ", dp, "F"
# # print "wind speed:", wind_speed, "MPH", "wind_direction: ", wind_direction_int, "wind_direction_cardinal:", wind_direction_cardinal
# # print "pressure: ", pressure, "IN"
# # print "pressure (sea_level): ", pressure_sea_level, "IN"
# # print "precip (1hr, 3hr, 6hr, 24hr):", precip_1hr, precip_3hr, precip_6hr, precip_24hr
#
# return [wban_code, call_sign, datetime, sky_condition, sky_condition_top,
# visibility, weather_types, f, dp,
# wind_speed, wind_direction_int, wind_direction_cardinal, wind_gust,
# pressure, pressure_sea_level,
# precip_1hr, precip_3hr, precip_6hr, precip_24hr]
which might include code, classes, or functions. Output only the next line. | def __init__(self, message): |
Here is a snippet: <|code_start|> # limited to 50 chars elsewhere
dataset_name = Column(String(100), nullable=False)
human_name = Column(String(255), nullable=False)
description = Column(Text)
source_url = Column(String(255))
source_url_hash = Column(String(32), primary_key=True)
view_url = Column(String(255))
attribution = Column(String(255))
# Spatial and temporal boundaries of observations in this dataset
obs_from = Column(Date)
obs_to = Column(Date)
bbox = Column(Geometry('POLYGON', srid=4326))
# TODO: Add restriction list ['daily' etc.]
update_freq = Column(String(100), nullable=False)
last_update = Column(DateTime)
date_added = Column(DateTime)
# The names of our "special" fields
observed_date = Column(String, nullable=False)
latitude = Column(String)
longitude = Column(String)
location = Column(String)
# if False, then do not display without first getting administrator approval
approved_status = Column(Boolean)
contributor_name = Column(String)
contributor_organization = Column(String)
contributor_email = Column(String)
result_ids = Column(ARRAY(String))
column_names = Column(JSONB) # {'<COLUMN_NAME>': '<COLUMN_TYPE>'}
def __init__(self, url, human_name, observed_date,
<|code_end|>
. Write the next line using the current file imports:
import json
import sqlalchemy as sa
from collections import namedtuple
from datetime import datetime
from hashlib import md5
from itertools import groupby
from operator import itemgetter
from flask_bcrypt import Bcrypt
from geoalchemy2 import Geometry
from shapely.geometry import shape
from sqlalchemy import Boolean, Column, Date, DateTime, String, Table, Text, func, select
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
from sqlalchemy.exc import ProgrammingError
from plenario.database import postgres_base, postgres_session
from plenario.utils.helpers import get_size_in_degrees, slugify
and context from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
#
# def slugify(text: str, delimiter: str = '_') -> str:
# return _slugify(text, separator=delimiter)
, which may include functions, classes, or code. Output only the next line. | approved_status=False, update_freq='yearly', |
Using the snippet: <|code_start|> description = Column(Text)
source_url = Column(String(255))
source_url_hash = Column(String(32), primary_key=True)
view_url = Column(String(255))
attribution = Column(String(255))
# Spatial and temporal boundaries of observations in this dataset
obs_from = Column(Date)
obs_to = Column(Date)
bbox = Column(Geometry('POLYGON', srid=4326))
# TODO: Add restriction list ['daily' etc.]
update_freq = Column(String(100), nullable=False)
last_update = Column(DateTime)
date_added = Column(DateTime)
# The names of our "special" fields
observed_date = Column(String, nullable=False)
latitude = Column(String)
longitude = Column(String)
location = Column(String)
# if False, then do not display without first getting administrator approval
approved_status = Column(Boolean)
contributor_name = Column(String)
contributor_organization = Column(String)
contributor_email = Column(String)
result_ids = Column(ARRAY(String))
column_names = Column(JSONB) # {'<COLUMN_NAME>': '<COLUMN_TYPE>'}
def __init__(self, url, human_name, observed_date,
approved_status=False, update_freq='yearly',
latitude=None, longitude=None, location=None,
attribution=None, description=None,
<|code_end|>
, determine the next line of code. You have imports:
import json
import sqlalchemy as sa
from collections import namedtuple
from datetime import datetime
from hashlib import md5
from itertools import groupby
from operator import itemgetter
from flask_bcrypt import Bcrypt
from geoalchemy2 import Geometry
from shapely.geometry import shape
from sqlalchemy import Boolean, Column, Date, DateTime, String, Table, Text, func, select
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
from sqlalchemy.exc import ProgrammingError
from plenario.database import postgres_base, postgres_session
from plenario.utils.helpers import get_size_in_degrees, slugify
and context (class names, function names, or code) available:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
#
# def slugify(text: str, delimiter: str = '_') -> str:
# return _slugify(text, separator=delimiter)
. Output only the next line. | column_names=None, |
Given the code snippet: <|code_start|>
bcrypt = Bcrypt()
class MetaTable(postgres_base):
__tablename__ = 'meta_master'
# limited to 50 chars elsewhere
<|code_end|>
, generate the next line using the imports in this file:
import json
import sqlalchemy as sa
from collections import namedtuple
from datetime import datetime
from hashlib import md5
from itertools import groupby
from operator import itemgetter
from flask_bcrypt import Bcrypt
from geoalchemy2 import Geometry
from shapely.geometry import shape
from sqlalchemy import Boolean, Column, Date, DateTime, String, Table, Text, func, select
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
from sqlalchemy.exc import ProgrammingError
from plenario.database import postgres_base, postgres_session
from plenario.utils.helpers import get_size_in_degrees, slugify
and context (functions, classes, or occasionally code) from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
#
# def slugify(text: str, delimiter: str = '_') -> str:
# return _slugify(text, separator=delimiter)
. Output only the next line. | dataset_name = Column(String(100), nullable=False) |
Based on the snippet: <|code_start|> __tablename__ = 'meta_master'
# limited to 50 chars elsewhere
dataset_name = Column(String(100), nullable=False)
human_name = Column(String(255), nullable=False)
description = Column(Text)
source_url = Column(String(255))
source_url_hash = Column(String(32), primary_key=True)
view_url = Column(String(255))
attribution = Column(String(255))
# Spatial and temporal boundaries of observations in this dataset
obs_from = Column(Date)
obs_to = Column(Date)
bbox = Column(Geometry('POLYGON', srid=4326))
# TODO: Add restriction list ['daily' etc.]
update_freq = Column(String(100), nullable=False)
last_update = Column(DateTime)
date_added = Column(DateTime)
# The names of our "special" fields
observed_date = Column(String, nullable=False)
latitude = Column(String)
longitude = Column(String)
location = Column(String)
# if False, then do not display without first getting administrator approval
approved_status = Column(Boolean)
contributor_name = Column(String)
contributor_organization = Column(String)
contributor_email = Column(String)
result_ids = Column(ARRAY(String))
column_names = Column(JSONB) # {'<COLUMN_NAME>': '<COLUMN_TYPE>'}
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import sqlalchemy as sa
from collections import namedtuple
from datetime import datetime
from hashlib import md5
from itertools import groupby
from operator import itemgetter
from flask_bcrypt import Bcrypt
from geoalchemy2 import Geometry
from shapely.geometry import shape
from sqlalchemy import Boolean, Column, Date, DateTime, String, Table, Text, func, select
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
from sqlalchemy.exc import ProgrammingError
from plenario.database import postgres_base, postgres_session
from plenario.utils.helpers import get_size_in_degrees, slugify
and context (classes, functions, sometimes code) from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/utils/helpers.py
# def get_size_in_degrees(meters, latitude):
# earth_circumference = 40041000.0 # meters, average circumference
# degrees_per_meter = 360.0 / earth_circumference
#
# degrees_at_equator = meters * degrees_per_meter
#
# latitude_correction = 1.0 / math.cos(latitude * (math.pi / 180.0))
#
# degrees_x = degrees_at_equator * latitude_correction
# degrees_y = degrees_at_equator
#
# return degrees_x, degrees_y
#
# def slugify(text: str, delimiter: str = '_') -> str:
# return _slugify(text, separator=delimiter)
. Output only the next line. | def __init__(self, url, human_name, observed_date, |
Given the following code snippet before the placeholder: <|code_start|>#!/usr/bin/env python
"""Script for migrating the contents of celery_taskmeta into etl_task."""
engine = create_engine(DATABASE_CONN)
def main():
rp = engine.execute("""
<|code_end|>
, predict the next line using imports from the current file:
import traceback
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError, ProgrammingError
from plenario.settings import DATABASE_CONN
and context including class names, function names, and sometimes code from other files:
# Path: plenario/settings.py
# DATABASE_CONN = 'postgresql://{}:{}@{}:{}/{}'.format(DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
. Output only the next line. | select * from meta_shape as ms natural join celery_taskmeta as ct |
Next line prediction: <|code_start|> name="sensor_03",
observed_properties={"temperature": "temperature.temperature"}
)
node = NodeMeta(
id="test_node",
address='Nichols Bridgeway, Chicago, IL 60601, USA',
sensor_network="test_network",
sensors=[sensor_01, sensor_02, sensor_03],
location="0101000020E6100000A4A7C821E2E755C07C48F8DEDFF04440",
)
node_2 = NodeMeta(
id="node_2",
sensor_network="test_network",
sensors=[sensor_01, sensor_02],
location="0101000020E6100000A4A7C821E2E755C07C48F8DEDFF04440",
)
network = NetworkMeta(
name="test_network",
nodes=[node],
)
network_02 = NetworkMeta(
name="test_network_other",
)
feature_01 = FeatureMeta(
name="temperature",
<|code_end|>
. Use current file imports:
(import os
import signal
import subprocess
from datetime import datetime
from random import randint, random
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError, ProgrammingError
from plenario.database import psql
from plenario.settings import DB_USER, DB_PASSWORD, DB_HOST, DB_NAME, DB_PORT
from plenario.models.SensorNetwork import *)
and context including class names, function names, or small code snippets from other files:
# Path: plenario/database.py
# def psql(path: str) -> None:
# """Use psql to run a file at some path.
# """
# logger.info('[plenario] Psql file %s' % path)
# command = 'psql {} -f {}'.format(DATABASE_CONN, path)
# subprocess.check_call(command, shell=True)
#
# Path: plenario/settings.py
# DB_USER = get('POSTGRES_USER', 'postgres')
#
# DB_PASSWORD = get('POSTGRES_PASSWORD', 'password')
#
# DB_HOST = get('POSTGRES_HOST', 'localhost')
#
# DB_NAME = get('POSTGRES_DB', 'plenario_test')
#
# DB_PORT = get('POSTGRES_PORT', 5432)
. Output only the next line. | observed_properties=[{"type": "float", "name": "temperature"}], |
Here is a snippet: <|code_start|>
class Fixtures:
def _run_with_connection(self, query):
conn = self.engine.connect()
try:
print(query)
conn.execute("commit")
conn.execute(query)
except ProgrammingError as err:
print(str(err))
finally:
conn.close()
def _create_foi_table(self, table_schema):
"""A postgres friendly version of the redshift method that shares
the same name.
:param table_schema: (dict) {"name": "<name>", "properties": [ ... ]}
:returns: None"""
print("create table {}".format(table_schema["name"]))
create_table = ("CREATE TABLE {} ("
"\"node_id\" VARCHAR NOT NULL,"
"datetime TIMESTAMP WITHOUT TIME ZONE NOT NULL,"
<|code_end|>
. Write the next line using the current file imports:
import os
import signal
import subprocess
from datetime import datetime
from random import randint, random
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError, ProgrammingError
from plenario.database import psql
from plenario.settings import DB_USER, DB_PASSWORD, DB_HOST, DB_NAME, DB_PORT
from plenario.models.SensorNetwork import *
and context from other files:
# Path: plenario/database.py
# def psql(path: str) -> None:
# """Use psql to run a file at some path.
# """
# logger.info('[plenario] Psql file %s' % path)
# command = 'psql {} -f {}'.format(DATABASE_CONN, path)
# subprocess.check_call(command, shell=True)
#
# Path: plenario/settings.py
# DB_USER = get('POSTGRES_USER', 'postgres')
#
# DB_PASSWORD = get('POSTGRES_PASSWORD', 'password')
#
# DB_HOST = get('POSTGRES_HOST', 'localhost')
#
# DB_NAME = get('POSTGRES_DB', 'plenario_test')
#
# DB_PORT = get('POSTGRES_PORT', 5432)
, which may include functions, classes, or code. Output only the next line. | "\"meta_id\" DOUBLE PRECISION NOT NULL," |
Using the snippet: <|code_start|> print(query)
conn.execute("commit")
conn.execute(query)
except ProgrammingError as err:
print(str(err))
finally:
conn.close()
def _create_foi_table(self, table_schema):
"""A postgres friendly version of the redshift method that shares
the same name.
:param table_schema: (dict) {"name": "<name>", "properties": [ ... ]}
:returns: None"""
print("create table {}".format(table_schema["name"]))
create_table = ("CREATE TABLE {} ("
"\"node_id\" VARCHAR NOT NULL,"
"datetime TIMESTAMP WITHOUT TIME ZONE NOT NULL,"
"\"meta_id\" DOUBLE PRECISION NOT NULL,"
"\"sensor\" VARCHAR NOT NULL,"
"").format(table_schema["name"])
for i, prop in enumerate(table_schema["properties"]):
create_table += '"{}" {} '.format(prop['name'], prop['type'])
create_table += "," if i != len(table_schema["properties"]) - 1 else ""
create_table += ')'
self.rs_engine.execute(create_table)
<|code_end|>
, determine the next line of code. You have imports:
import os
import signal
import subprocess
from datetime import datetime
from random import randint, random
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError, ProgrammingError
from plenario.database import psql
from plenario.settings import DB_USER, DB_PASSWORD, DB_HOST, DB_NAME, DB_PORT
from plenario.models.SensorNetwork import *
and context (class names, function names, or code) available:
# Path: plenario/database.py
# def psql(path: str) -> None:
# """Use psql to run a file at some path.
# """
# logger.info('[plenario] Psql file %s' % path)
# command = 'psql {} -f {}'.format(DATABASE_CONN, path)
# subprocess.check_call(command, shell=True)
#
# Path: plenario/settings.py
# DB_USER = get('POSTGRES_USER', 'postgres')
#
# DB_PASSWORD = get('POSTGRES_PASSWORD', 'password')
#
# DB_HOST = get('POSTGRES_HOST', 'localhost')
#
# DB_NAME = get('POSTGRES_DB', 'plenario_test')
#
# DB_PORT = get('POSTGRES_PORT', 5432)
. Output only the next line. | def __init__(self): |
Using the snippet: <|code_start|> name="sensor_03",
observed_properties={"temperature": "temperature.temperature"}
)
node = NodeMeta(
id="test_node",
address='Nichols Bridgeway, Chicago, IL 60601, USA',
sensor_network="test_network",
sensors=[sensor_01, sensor_02, sensor_03],
location="0101000020E6100000A4A7C821E2E755C07C48F8DEDFF04440",
)
node_2 = NodeMeta(
id="node_2",
sensor_network="test_network",
sensors=[sensor_01, sensor_02],
location="0101000020E6100000A4A7C821E2E755C07C48F8DEDFF04440",
)
network = NetworkMeta(
name="test_network",
nodes=[node],
)
network_02 = NetworkMeta(
name="test_network_other",
)
feature_01 = FeatureMeta(
name="temperature",
<|code_end|>
, determine the next line of code. You have imports:
import os
import signal
import subprocess
from datetime import datetime
from random import randint, random
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError, ProgrammingError
from plenario.database import psql
from plenario.settings import DB_USER, DB_PASSWORD, DB_HOST, DB_NAME, DB_PORT
from plenario.models.SensorNetwork import *
and context (class names, function names, or code) available:
# Path: plenario/database.py
# def psql(path: str) -> None:
# """Use psql to run a file at some path.
# """
# logger.info('[plenario] Psql file %s' % path)
# command = 'psql {} -f {}'.format(DATABASE_CONN, path)
# subprocess.check_call(command, shell=True)
#
# Path: plenario/settings.py
# DB_USER = get('POSTGRES_USER', 'postgres')
#
# DB_PASSWORD = get('POSTGRES_PASSWORD', 'password')
#
# DB_HOST = get('POSTGRES_HOST', 'localhost')
#
# DB_NAME = get('POSTGRES_DB', 'plenario_test')
#
# DB_PORT = get('POSTGRES_PORT', 5432)
. Output only the next line. | observed_properties=[{"type": "float", "name": "temperature"}], |
Based on the snippet: <|code_start|> node = NodeMeta(
id="test_node",
address='Nichols Bridgeway, Chicago, IL 60601, USA',
sensor_network="test_network",
sensors=[sensor_01, sensor_02, sensor_03],
location="0101000020E6100000A4A7C821E2E755C07C48F8DEDFF04440",
)
node_2 = NodeMeta(
id="node_2",
sensor_network="test_network",
sensors=[sensor_01, sensor_02],
location="0101000020E6100000A4A7C821E2E755C07C48F8DEDFF04440",
)
network = NetworkMeta(
name="test_network",
nodes=[node],
)
network_02 = NetworkMeta(
name="test_network_other",
)
feature_01 = FeatureMeta(
name="temperature",
observed_properties=[{"type": "float", "name": "temperature"}],
networks=[network]
)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import signal
import subprocess
from datetime import datetime
from random import randint, random
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError, ProgrammingError
from plenario.database import psql
from plenario.settings import DB_USER, DB_PASSWORD, DB_HOST, DB_NAME, DB_PORT
from plenario.models.SensorNetwork import *
and context (classes, functions, sometimes code) from other files:
# Path: plenario/database.py
# def psql(path: str) -> None:
# """Use psql to run a file at some path.
# """
# logger.info('[plenario] Psql file %s' % path)
# command = 'psql {} -f {}'.format(DATABASE_CONN, path)
# subprocess.check_call(command, shell=True)
#
# Path: plenario/settings.py
# DB_USER = get('POSTGRES_USER', 'postgres')
#
# DB_PASSWORD = get('POSTGRES_PASSWORD', 'password')
#
# DB_HOST = get('POSTGRES_HOST', 'localhost')
#
# DB_NAME = get('POSTGRES_DB', 'plenario_test')
#
# DB_PORT = get('POSTGRES_PORT', 5432)
. Output only the next line. | feature_02 = FeatureMeta( |
Here is a snippet: <|code_start|>
postgres_connection_arg = 'PG:host={} user={} port={} dbname={} password={}'.format(
DB_HOST, DB_USER, DB_PORT, DB_NAME, DB_PASSWORD)
class OgrError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
<|code_end|>
. Write the next line using the current file imports:
import os
import shutil
import subprocess
import tempfile
import zipfile
from plenario.settings import DB_HOST, DB_NAME, DB_PASSWORD, DB_PORT, DB_USER
and context from other files:
# Path: plenario/settings.py
# DB_HOST = get('POSTGRES_HOST', 'localhost')
#
# DB_NAME = get('POSTGRES_DB', 'plenario_test')
#
# DB_PASSWORD = get('POSTGRES_PASSWORD', 'password')
#
# DB_PORT = get('POSTGRES_PORT', 5432)
#
# DB_USER = get('POSTGRES_USER', 'postgres')
, which may include functions, classes, or code. Output only the next line. | self.message = message |
Based on the snippet: <|code_start|>
def validate_sensor_properties(observed_properties):
if not observed_properties:
raise ValidationError('No observed properties were provided!')
features = defaultdict(list)
for feature in postgres_session.query(FeatureMeta).all():
for property_dict in feature.observed_properties:
features[feature.name].append(property_dict['name'])
for feature_property in list(observed_properties.values()):
try:
feat, prop = feature_property.split('.')
except ValueError:
raise ValidationError('Feature specified without property: {}'
.format(feature_property))
if feat not in features:
raise ValidationError('Bad FOI name: {!r}'.format(feat))
if prop not in features[feat]:
raise ValidationError('Bad property name: {!r}'.format(prop))
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import defaultdict
from wtforms import ValidationError
from plenario.database import postgres_session
from plenario.models.SensorNetwork import FeatureMeta, NetworkMeta
and context (classes, functions, sometimes code) from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/models/SensorNetwork.py
# class FeatureMeta(postgres_base):
# __tablename__ = 'sensor__feature_metadata'
#
# name = Column(String, primary_key=True)
# networks = relationship('NetworkMeta', secondary='sensor__feature_to_network')
# observed_properties = Column(JSONB)
#
# def types(self):
# """Return a dictionary with the properties mapped to their types.
# """
# return {e['name']: e['type'] for e in self.observed_properties}
#
# def sensors(self) -> set:
# """Return the set of sensors that report on this feature.
# """
# results = set()
# for network in self.networks:
# for node in network.tree().values():
# for sensor, properties in node.items():
# if self.name in {p.split('.')[0] for p in properties}:
# results.add(sensor)
#
# return results
#
# @staticmethod
# def index(network_name=None):
# features = []
# for node in postgres_session.query(NodeMeta).all():
# if network_name is None or node.sensor_network.lower() == network_name.lower():
# for sensor in node.sensors:
# for prop in sensor.observed_properties.values():
# features.append(prop.split('.')[0].lower())
# return list(set(features))
#
# @staticmethod
# def properties_of(feature):
# query = postgres_session.query(FeatureMeta.observed_properties).filter(
# FeatureMeta.name == feature)
# return [feature + '.' + prop['name'] for prop in query.first().observed_properties]
#
# def mirror(self):
# """Create feature tables in redshift for all the networks associated
# with this feature.
# """
# for network in self.networks:
# self._mirror(network.name)
#
# def _mirror(self, network_name: str):
# """Create a feature table in redshift for the specified network.
# """
# columns = []
# for feature in self.observed_properties:
# column_name = feature['name']
# column_type = database_types[feature['type'].upper()]
# columns.append(Column(column_name, column_type, default=None))
#
# redshift_table = Table(
# '{}__{}'.format(network_name, self.name),
# redshift_base.metadata,
# Column('node_id', String, primary_key=True),
# Column('datetime', DateTime, primary_key=True),
# Column('meta_id', Float, nullable=False),
# Column('sensor', String, nullable=False),
# *columns,
# redshift_distkey='datetime',
# redshift_sortkey='datetime'
# )
#
# redshift_table.create()
#
# def __repr__(self):
# return '<Feature {!r}>'.format(self.name)
#
# class NetworkMeta(postgres_base):
# __tablename__ = 'sensor__network_metadata'
#
# name = Column(String, primary_key=True)
# nodes = relationship('NodeMeta')
# info = Column(JSONB)
#
# @staticmethod
# def index():
# networks = postgres_session.query(NetworkMeta)
# return [network.name.lower() for network in networks]
#
# def __repr__(self):
# return '<Network {!r}>'.format(self.name)
#
# def tree(self):
# sensor_tree_fn = sqla_fn.network_tree(self.name)
# sensor_tree_result_proxy = self.query.session.execute(sensor_tree_fn)
# return sensor_tree_result_proxy.scalar()
#
# def sensors(self) -> set:
#
# keys = []
# for sensor in self.tree().values():
# keys += sensor
#
# return set(keys)
#
# def features(self):
#
# keys = []
# for sensor in self.tree().values():
# for feature in sensor.values():
# keys += feature.values()
#
# return set([k.split('.')[0] for k in keys])
. Output only the next line. | def assert_json_enclosed_in_brackets(json_list): |
Based on the snippet: <|code_start|>
def validate_sensor_properties(observed_properties):
if not observed_properties:
raise ValidationError('No observed properties were provided!')
features = defaultdict(list)
for feature in postgres_session.query(FeatureMeta).all():
for property_dict in feature.observed_properties:
features[feature.name].append(property_dict['name'])
for feature_property in list(observed_properties.values()):
try:
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import defaultdict
from wtforms import ValidationError
from plenario.database import postgres_session
from plenario.models.SensorNetwork import FeatureMeta, NetworkMeta
and context (classes, functions, sometimes code) from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
#
# Path: plenario/models/SensorNetwork.py
# class FeatureMeta(postgres_base):
# __tablename__ = 'sensor__feature_metadata'
#
# name = Column(String, primary_key=True)
# networks = relationship('NetworkMeta', secondary='sensor__feature_to_network')
# observed_properties = Column(JSONB)
#
# def types(self):
# """Return a dictionary with the properties mapped to their types.
# """
# return {e['name']: e['type'] for e in self.observed_properties}
#
# def sensors(self) -> set:
# """Return the set of sensors that report on this feature.
# """
# results = set()
# for network in self.networks:
# for node in network.tree().values():
# for sensor, properties in node.items():
# if self.name in {p.split('.')[0] for p in properties}:
# results.add(sensor)
#
# return results
#
# @staticmethod
# def index(network_name=None):
# features = []
# for node in postgres_session.query(NodeMeta).all():
# if network_name is None or node.sensor_network.lower() == network_name.lower():
# for sensor in node.sensors:
# for prop in sensor.observed_properties.values():
# features.append(prop.split('.')[0].lower())
# return list(set(features))
#
# @staticmethod
# def properties_of(feature):
# query = postgres_session.query(FeatureMeta.observed_properties).filter(
# FeatureMeta.name == feature)
# return [feature + '.' + prop['name'] for prop in query.first().observed_properties]
#
# def mirror(self):
# """Create feature tables in redshift for all the networks associated
# with this feature.
# """
# for network in self.networks:
# self._mirror(network.name)
#
# def _mirror(self, network_name: str):
# """Create a feature table in redshift for the specified network.
# """
# columns = []
# for feature in self.observed_properties:
# column_name = feature['name']
# column_type = database_types[feature['type'].upper()]
# columns.append(Column(column_name, column_type, default=None))
#
# redshift_table = Table(
# '{}__{}'.format(network_name, self.name),
# redshift_base.metadata,
# Column('node_id', String, primary_key=True),
# Column('datetime', DateTime, primary_key=True),
# Column('meta_id', Float, nullable=False),
# Column('sensor', String, nullable=False),
# *columns,
# redshift_distkey='datetime',
# redshift_sortkey='datetime'
# )
#
# redshift_table.create()
#
# def __repr__(self):
# return '<Feature {!r}>'.format(self.name)
#
# class NetworkMeta(postgres_base):
# __tablename__ = 'sensor__network_metadata'
#
# name = Column(String, primary_key=True)
# nodes = relationship('NodeMeta')
# info = Column(JSONB)
#
# @staticmethod
# def index():
# networks = postgres_session.query(NetworkMeta)
# return [network.name.lower() for network in networks]
#
# def __repr__(self):
# return '<Network {!r}>'.format(self.name)
#
# def tree(self):
# sensor_tree_fn = sqla_fn.network_tree(self.name)
# sensor_tree_result_proxy = self.query.session.execute(sensor_tree_fn)
# return sensor_tree_result_proxy.scalar()
#
# def sensors(self) -> set:
#
# keys = []
# for sensor in self.tree().values():
# keys += sensor
#
# return set(keys)
#
# def features(self):
#
# keys = []
# for sensor in self.tree().values():
# for feature in sensor.values():
# keys += feature.values()
#
# return set([k.split('.')[0] for k in keys])
. Output only the next line. | feat, prop = feature_property.split('.') |
Given snippet: <|code_start|> return wind_speed_int, wind_direction_int, wind_direction_cardinal, wind_gust_int
def getPressure(obs):
pressure_in = None
if (obs.press):
pressure_in = obs.press.value(units="IN")
return pressure_in
def getPressureSeaLevel(obs):
pressure_in = None
if (obs.press_sea_level):
pressure_in = obs.press_sea_level.value(units="IN")
return pressure_in
def getPrecip(obs):
precip_1hr = None
precip_3hr = None
precip_6hr = None
precip_24hr = None
if obs.precip_1hr:
precip_1hr = obs.precip_1hr.value()
if obs.precip_3hr:
precip_3hr = obs.precip_3hr.value()
if obs.precip_6hr:
precip_6hr = obs.precip_6hr.value()
if obs.precip_24hr:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import csv
import os
import requests
from lxml import etree, objectify
from lxml.etree import fromstring
from metar.metar import Metar
from plenario.database import postgres_engine as engine
from .weather import degToCardinal
and context:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
which might include code, classes, or functions. Output only the next line. | precip_24hr = obs.precip_24hr.value() |
Predict the next line after this snippet: <|code_start|>
logger = logging.getLogger(__name__)
def create_foi_table(foi_name, properties):
"""Create a new foi table
:param foi_name: name of feature
:param properties: list of {'name': name, 'type': type} dictionaries
"""
template = """
CREATE TABLE {table} (
"node_id" VARCHAR NOT NULL,
"datetime" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
"meta_id" DOUBLE PRECISION NOT NULL,
"sensor" VARCHAR NOT NULL,
<|code_end|>
using the current file's imports:
import logging
from sqlalchemy import text
from sqlalchemy.exc import ProgrammingError
from plenario.database import redshift_engine
and any relevant context from other files:
# Path: plenario/database.py
# def create_database(bind: Engine, database: str) -> None:
# def drop_database(bind: Engine, database: str) -> None:
# def create_extension(bind: Engine, extension: str) -> None:
# def psql(path: str) -> None:
# def postgres_session_context():
# def redshift_session_context():
. Output only the next line. | {props}, |
Continue the code snippet: <|code_start|> assert ub.modpath_to_modname(sub2_main, hide_main=True, hide_init=False) == '_tmproot927.sub1.sub2'
# Non-existent / invalid modules should always be None
for a, b in it.product([True, False], [True, False]):
with pytest.raises(ValueError):
ub.modpath_to_modname(join(sub1, '__main__.py'), hide_main=a, hide_init=b)
assert ub.modpath_to_modname(b0, hide_main=a, hide_init=b) == 'b0'
assert ub.modpath_to_modname(b1, hide_main=a, hide_init=b) == 'b1'
with pytest.raises(ValueError):
ub.modpath_to_modname(bad1, hide_main=a, hide_init=b)
with pytest.raises(ValueError):
ub.modpath_to_modname(bad2, hide_main=a, hide_init=b)
assert '_tmproot927' not in sys.modules
assert '_tmproot927.mod0' not in sys.modules
assert '_tmproot927.sub1' not in sys.modules
assert '_tmproot927.sub1.mod1' not in sys.modules
assert '_tmproot927.sub1.sub2' not in sys.modules
assert '_tmproot927.sub1.mod2.mod2' not in sys.modules
def test_splitmodpath():
with pytest.raises(ValueError):
ub.split_modpath('does/not/exists/module.py')
ub.split_modpath('does/not/exists/module.py', check=False)
if __name__ == '__main__':
r"""
CommandLine:
<|code_end|>
. Use current file imports:
import itertools as it
import ubelt as ub
import sys
import pytest
import ast
import sys
import os
import xdoctest
from os.path import join
from ubelt.util_import import PythonPathContext
from xdoctest import static_analysis as static
and context (classes, functions, or code) from other files:
# Path: ubelt/util_import.py
# class PythonPathContext(object):
# """
# Context for temporarily adding a dir to the PYTHONPATH.
#
# Used in testing, and used as a helper in certain ubelt functions.
#
# Warning:
# Even though this context manager takes precautions, this modifies the
# python path, and things can go wrong when that happens. This is
# generally safe as long as nothing else you do inside of this context
# modifies the path. If the path is modified in this context, we will try
# to detect it and warn.
#
# Args:
# dpath (str): directory to insert into the PYTHONPATH
# index (int): position to add to. Typically either -1 or 0.
#
# Example:
# >>> with PythonPathContext('foo', -1):
# >>> assert sys.path[-1] == 'foo'
# >>> assert sys.path[-1] != 'foo'
# >>> with PythonPathContext('bar', 0):
# >>> assert sys.path[0] == 'bar'
# >>> assert sys.path[0] != 'bar'
#
# Example:
# >>> # xdoctest: +REQUIRES(module:pytest)
# >>> # Mangle the path inside the context
# >>> self = PythonPathContext('foo', 0)
# >>> self.__enter__()
# >>> sys.path.insert(0, 'mangled')
# >>> import pytest
# >>> with pytest.warns(UserWarning):
# >>> self.__exit__(None, None, None)
#
# Example:
# >>> # xdoctest: +REQUIRES(module:pytest)
# >>> self = PythonPathContext('foo', 0)
# >>> self.__enter__()
# >>> sys.path.remove('foo')
# >>> import pytest
# >>> with pytest.raises(RuntimeError):
# >>> self.__exit__(None, None, None)
# """
# def __init__(self, dpath, index=0):
# self.dpath = dpath
# self.index = index
#
# def __enter__(self):
# if self.index < 0:
# self.index = len(sys.path) + self.index + 1
# sys.path.insert(self.index, self.dpath)
#
# def __exit__(self, type, value, trace):
# need_recover = False
# if len(sys.path) <= self.index: # nocover
# msg_parts = [
# 'sys.path changed while in PythonPathContext.',
# 'len(sys.path) = {!r} but index is {!r}'.format(
# len(sys.path), self.index),
# ]
# need_recover = True
#
# if sys.path[self.index] != self.dpath: # nocover
# # The path is not where we put it, the path must have been mangled
# msg_parts = [
# 'sys.path changed while in PythonPathContext',
# 'Expected dpath={!r} at index={!r} in sys.path, but got '
# 'dpath={!r}'.format(
# self.dpath, self.index, sys.path[self.index]
# )
# ]
# need_recover = True
#
# if need_recover:
# # Try and find where the temporary path went
# try:
# real_index = sys.path.index(self.dpath)
# except ValueError:
# msg_parts.append('Expected dpath was not in sys.path')
# raise RuntimeError('\n'.join(msg_parts))
# else:
# # We were able to recover, but warn the user. This method of
# # recovery is a heuristic and does not work in some cases.
# msg_parts.append((
# 'Expected dpath was at index {}. '
# 'This could indicate conflicting module namespaces.'
# ).format(real_index))
# warnings.warn('\n'.join(msg_parts))
# sys.path.pop(real_index)
# else:
# sys.path.pop(self.index)
. Output only the next line. | pytest ubelt/tests/test_import.py |
Using the snippet: <|code_start|>
Example:
>>> from ubelt.util_path import * # NOQA
>>> import ubelt as ub
>>> assert normpath(ub.expandpath('~/foo')) == join(ub.userhome(), 'foo')
>>> assert ub.expandpath('foo') == 'foo'
"""
path = expanduser(path)
path = expandvars(path)
return path
def ensuredir(dpath, mode=0o1777, verbose=0, recreate=False):
r"""
Ensures that directory will exist. Creates new dir with sticky bits by
default
Args:
dpath (str | PathLike | Tuple[str | PathLike]): dir to ensure. Can also
be a tuple to send to join
mode (int): octal mode of directory
verbose (int): verbosity
recreate (bool): if True removes the directory and
all of its contents and creates a fresh new directory.
USE CAREFULLY.
Returns:
str: path - the ensured directory
SeeAlso:
<|code_end|>
, determine the next line of code. You have imports:
from os.path import (
dirname, exists, expanduser, expandvars, join, normpath, split, splitext,
)
from ubelt import util_io
from ubelt import util_platform
import os
import sys
import pathlib
import pwd
import pwd
import ubelt as ub
import tempfile
import shutil
and context (class names, function names, or code) available:
# Path: ubelt/util_io.py
# def writeto(fpath, to_write, aslines=False, verbose=None):
# def _ensure_bytes(text):
# def readfrom(fpath, aslines=False, errors='replace', verbose=None):
# def touch(fpath, mode=0o666, dir_fd=None, verbose=0, **kwargs):
# def delete(path, verbose=False):
. Output only the next line. | :func:`ubelt.Path.ensuredir` |
Here is a snippet: <|code_start|> print(' * exc_info = {!r}'.format(exc_info))
print('[test] Create an authorized overwrite link (back to happy)')
ub.symlink(happy_fpath, happy_flink, verbose=verbose, overwrite=True)
def _force_junction(func):
@wraps(func)
def _wrap(*args):
if not ub.WIN32:
pytest.skip()
_win32_links.__win32_can_symlink__ = False
func(*args)
_win32_links.__win32_can_symlink__ = None
return _wrap
# class TestSymlinksForceJunction(object):
fj_test_delete_symlinks = _force_junction(test_delete_symlinks)
fj_test_modify_directory_symlinks = _force_junction(test_modify_directory_symlinks)
fj_test_modify_file_symlinks = _force_junction(test_modify_file_symlinks)
fj_test_broken_link = _force_junction(test_broken_link)
fj_test_overwrite_symlink = _force_junction(test_overwrite_symlink)
if __name__ == '__main__':
r"""
CommandLine:
set PYTHONPATH=%PYTHONPATH%;C:/Users/erote/code/ubelt/ubelt/tests
pytest ubelt/tests/test_links.py
<|code_end|>
. Write the next line using the current file imports:
from os.path import isdir
from os.path import isfile
from os.path import islink
from os.path import join, exists, relpath, dirname
from ubelt import util_links
from ubelt import _win32_links
from functools import wraps
from ubelt import _win32_links
import ubelt as ub
import pytest
import os
import sys
import xdoctest
and context from other files:
# Path: ubelt/util_links.py
# def symlink(real_path, link_path, overwrite=False, verbose=0):
# def _readlink(link):
# def _can_symlink(verbose=0): # nocover
# def _dirstats(dpath=None): # nocover
# E = os.path.exists(full_path)
# L = os.path.islink(full_path)
# F = os.path.isfile(full_path)
# D = os.path.isdir(full_path)
# J = util_platform.WIN32 and _win32_links._win32_is_junction(full_path)
# ELFDJ = [E, L, F, D, J]
, which may include functions, classes, or code. Output only the next line. | pytest ubelt/tests/test_links.py -s |
Continue the code snippet: <|code_start|>r"""
Wrappers around hashlib functions to generate hash signatures for common data.
The hashes are deterministic across python versions and operating systems.
This is verified by CI testing on Windows, Linux, Python with 2.7, 3.4, and
greater, and on 32 and 64 bit versions.
<|code_end|>
. Use current file imports:
import hashlib
import math
import xxhash # type: ignore
import blake3 # type: ignore
import numpy as np
import uuid
import ubelt as ub
import ubelt as ub
import torch
import json
from collections import OrderedDict
from ubelt.util_const import NoParam
and context (classes, functions, or code) from other files:
# Path: ubelt/util_const.py
# class _NoParamType(object):
# def __new__(cls):
# def __reduce__(self):
# def __copy__(self):
# def __deepcopy__(self, memo):
# def __str__(cls):
# def __repr__(cls):
# def __bool__(self):
. Output only the next line. | Use Case #1: You have data that you want to hash. If we assume the data is in |
Based on the snippet: <|code_start|> def __init__(self, dataset_path, output_path='./working', file_types=['avi'], face_locations_path=None):
super(MaskAttack, self).__init__(dataset_path, output_path)
self.file_types = file_types
self.face_locations_path = face_locations_path
@staticmethod
def get_fold(k_fold):
rstate = np.random.RandomState(42)
k = 0
fold = []
while k < k_fold:
fold = rstate.permutation(17)
k += 1
return fold
@staticmethod
def split_for_cross(all_labels, all_idxs, training_rate):
rstate = np.random.RandomState(7)
pos_idxs = np.where(all_labels[all_idxs] == 1)[0]
neg_idxs = np.where(all_labels[all_idxs] == 0)[0]
# -- cross dataset idxs
n_samples_pos = int(len(all_idxs[pos_idxs]) * training_rate)
n_samples_neg = int(len(all_idxs[neg_idxs]) * training_rate)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import itertools
import numpy as np
from glob import glob
from antispoofing.spectralcubes.datasets.dataset import Dataset
and context (classes, functions, sometimes code) from other files:
# Path: antispoofing/spectralcubes/datasets/dataset.py
# class Dataset(object):
#
# """docstring for Dataset"""
#
# __metaclass__ = ABCMeta
#
# def __init__(self, dataset_path, output_path):
#
# self.__dataset_path = ""
# self.__output_path = ""
#
# self.dataset_path = dataset_path
# self.output_path = output_path
# # self.file_types = file_types
#
# @property
# def dataset_path(self):
# return self.__dataset_path
#
# @dataset_path.setter
# def dataset_path(self, path):
# self.__dataset_path = os.path.abspath(path)
#
# @property
# def output_path(self):
# return self.__output_path
#
# @output_path.setter
# def output_path(self, path):
# self.__output_path = os.path.abspath(path)
#
# # @property
# # def file_types(self):
# # return self.__file_types
# #
# # @file_types.setter
# # def file_types(self, filetypes):
# # self.__file_types = filetypes
#
# # @file_types.setter
# # def file_types(self, filetypes):
# # extensions = []
# # for ext in filetypes:
# # if not "*." in ext:
# # ext = "*.%s" % ext
# # extensions += [ext]
# # self.__file_types = extensions
#
# @abstractmethod
# def _build_meta(self, inpath, filetypes):
# """ docstring """
#
# @staticmethod
# def _list_dirs(rootpath, filetype):
# folders = []
#
# for root, dirs, files in os.walk(rootpath):
# for f in files:
# if filetype in os.path.splitext(f)[1]:
# folders += [os.path.relpath(root, rootpath)]
# break
#
# return folders
#
# # def _list_dirs(self, rootpath, filetypes):
# # folders = []
#
# # for root, dirs, files in os.walk(rootpath):
# # for extension in filetypes:
# # for filename in fnmatch.filter(files, extension):
# # folders += [os.path.join(root, filename)]
#
# # # for f in files:
# # # if filetype in os.path.splitext(f)[1]:
# # # folders += [os.path.relpath(root, inpath)]
# # # break
#
# # return folders
#
# # @property
# # def metainfo(self):
# # try:
# # return self.__metainfo
# # except AttributeError:
# # self.__metainfo = self._build_meta(self.dataset_path, self.file_types)
# # return self.__metainfo
#
# # def metainfo_feats(self, output_path, file_types):
# # return self._build_meta(output_path, file_types)
. Output only the next line. | rand_idxs_pos = rstate.permutation(all_idxs[pos_idxs]) |
Continue the code snippet: <|code_start|> if 'H' in name_video:
video_id = 'H' + name_video.split("_")[1]
else:
if int(name_video) % 2 == 0:
idx = np.where(np.arange(2, 9, 2) == int(name_video))[0]
idx += 1
video_id = 'L%d' % idx[0]
else:
idx = np.where(np.arange(1, 9, 2) == int(name_video))[0]
idx += 1
video_id = 'N%d' % idx[0]
if 'train_release/' in os.path.relpath(fname, inpath):
all_fnames += [fname]
all_labels += [int(reduce(operator.or_, positive_class))]
all_idxs += [img_idx]
train_idxs += [img_idx]
img_idx += 1
else:
if video_id in scenario_1:
test_idxs_1 += [img_idx]
if video_id in scenario_2:
test_idxs_2 += [img_idx]
if video_id in scenario_3:
<|code_end|>
. Use current file imports:
import os
import operator
import itertools
import numpy as np
from glob import glob
from antispoofing.spectralcubes.datasets.dataset import Dataset
and context (classes, functions, or code) from other files:
# Path: antispoofing/spectralcubes/datasets/dataset.py
# class Dataset(object):
#
# """docstring for Dataset"""
#
# __metaclass__ = ABCMeta
#
# def __init__(self, dataset_path, output_path):
#
# self.__dataset_path = ""
# self.__output_path = ""
#
# self.dataset_path = dataset_path
# self.output_path = output_path
# # self.file_types = file_types
#
# @property
# def dataset_path(self):
# return self.__dataset_path
#
# @dataset_path.setter
# def dataset_path(self, path):
# self.__dataset_path = os.path.abspath(path)
#
# @property
# def output_path(self):
# return self.__output_path
#
# @output_path.setter
# def output_path(self, path):
# self.__output_path = os.path.abspath(path)
#
# # @property
# # def file_types(self):
# # return self.__file_types
# #
# # @file_types.setter
# # def file_types(self, filetypes):
# # self.__file_types = filetypes
#
# # @file_types.setter
# # def file_types(self, filetypes):
# # extensions = []
# # for ext in filetypes:
# # if not "*." in ext:
# # ext = "*.%s" % ext
# # extensions += [ext]
# # self.__file_types = extensions
#
# @abstractmethod
# def _build_meta(self, inpath, filetypes):
# """ docstring """
#
# @staticmethod
# def _list_dirs(rootpath, filetype):
# folders = []
#
# for root, dirs, files in os.walk(rootpath):
# for f in files:
# if filetype in os.path.splitext(f)[1]:
# folders += [os.path.relpath(root, rootpath)]
# break
#
# return folders
#
# # def _list_dirs(self, rootpath, filetypes):
# # folders = []
#
# # for root, dirs, files in os.walk(rootpath):
# # for extension in filetypes:
# # for filename in fnmatch.filter(files, extension):
# # folders += [os.path.join(root, filename)]
#
# # # for f in files:
# # # if filetype in os.path.splitext(f)[1]:
# # # folders += [os.path.relpath(root, inpath)]
# # # break
#
# # return folders
#
# # @property
# # def metainfo(self):
# # try:
# # return self.__metainfo
# # except AttributeError:
# # self.__metainfo = self._build_meta(self.dataset_path, self.file_types)
# # return self.__metainfo
#
# # def metainfo_feats(self, output_path, file_types):
# # return self._build_meta(output_path, file_types)
. Output only the next line. | test_idxs_3 += [img_idx] |
Predict the next line after this snippet: <|code_start|>
@property
def output_path(self):
return self.__output_path
@output_path.setter
def output_path(self, path):
path = os.path.abspath(path)
self.__output_path = path
@property
def codebook_selection(self):
return self.__codebook_selection
@codebook_selection.setter
def codebook_selection(self, value):
try:
assert value in self.cs_dict
self.__codebook_selection = self.cs_dict[value]
except AssertionError:
raise AssertionError("Value not found: choose 'random' or 'kmeans'")
@property
def coding_poling(self):
return self.__coding_poling
@coding_poling.setter
def coding_poling(self, value):
try:
assert value in self.cp_dict
<|code_end|>
using the current file's imports:
import os
import sys
import cPickle
import numpy as np
from sklearn.cluster import KMeans
from sklearn.cluster.k_means_ import _init_centroids
from sklearn.metrics.pairwise import chi2_kernel
from sklearn.metrics.pairwise import pairwise_distances
from antispoofing.spectralcubes.utils import N_JOBS
and any relevant context from other files:
# Path: antispoofing/spectralcubes/utils/constants.py
# N_JOBS = (cpu_count()/2) if ((cpu_count()) > 2) else 1
. Output only the next line. | self.__coding_poling = value |
Given the code snippet: <|code_start|> #params = self.params
print 'doing xy slice'
data = self.data
pixels = self.pixel_mask
# zero out any pixels in the sum that have zero in the pixel count:
data[pixels == 0] = 0
normalization_matrix = ones(data.shape)
normalization_matrix[pixels == 0] = 0
x_min = min(x_range)
x_max = max(x_range)
y_min = min(y_range)
y_max = max(y_range)
x_size,y_size = data.shape
global_x_range = (self.x_max - self.x_min)
global_y_range = (self.y_max - self.y_min)
x_pixel_min = round( (x_min - self.x_min) / global_x_range * x_size )
x_pixel_max = round( (x_max - self.x_min) / global_x_range * x_size )
y_pixel_min = round( (y_min - self.y_min) / global_y_range * y_size )
y_pixel_max = round( (y_max - self.y_min) / global_y_range * y_size )
#correct any sign switches:
if (x_pixel_min > x_pixel_max):
new_min = x_pixel_max
x_pixel_max = x_pixel_min
x_pixel_min = new_min
if (y_pixel_min > y_pixel_max):
<|code_end|>
, generate the next line using the imports in this file:
from pylab import imshow,cm,colorbar,hot,show,xlabel,ylabel,connect, plot, figure, draw, axis, gcf,legend
from numpy import ones, sum, arange, transpose, log
from matplotlib.widgets import RectangleSelector
from colormap import change_colormap
from matplotlib.axis import XAxis, YAxis
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg
from matplotlib.font_manager import fontManager, FontProperties
from matplotlib.image import FigureImage
from matplotlib.figure import Figure
from matplotlib.pyplot import figure, get_fignums
from zoom_colorbar import zoom_colorbar
from osrefl.loaders.reduction.cmapmenu import CMapMenu
from matplotlib.cm import get_cmap
from matplotlib.ticker import FormatStrFormatter, ScalarFormatter
import matplotlib.colors as colors
import wx
import matplotlib.cbook as cbook
import matplotlib
and context (functions, classes, or occasionally code) from other files:
# Path: osrefl/loaders/reduction/cmapmenu.py
# class CMapMenu(wx.Menu):
# """
# Menu tree binding to a list of colormaps.
# """
# def __init__(self, window,
# mapper=None, canvas=None, callback=None):
# """
# Define a context menu for selecting colormaps.
#
# Need a window to use as the event handler.
# If mapper is defined, it will be updated with the new colormap.
# If canvas is defined, it will update on idle.
# """
# wx.Menu.__init__(self)
#
# # OS X needs 16x16 icons; Windows and Linux can be longer
# bar_length = 32 if not sys.platform in ['darwin'] else 16
# bar_height = 16
# self.mapper,self.canvas,self.callback = mapper,canvas,callback
# self.selected = None
# self.mapid = {}
# for name in grouped_colormaps():
# if name is None:
# self.AppendSeparator()
# else:
# item = wx.MenuItem(self, wx.ID_ANY, name)
# map = cm.get_cmap(name)
# icon = colorbar_bitmap(map,bar_length,thickness=bar_height)
# item.SetBitmap(icon)
# self.AppendItem(item)
# window.Bind(wx.EVT_MENU,
# event_callback(self._OnSelect, name=name),
# id=item.GetId())
#
# def _OnSelect(self, event, name=None):
# """
# When selected, record the name, update the mapper and invoke the
# callback.
# """
# self.selected = name
# if self.mapper:
# self.mapper.set_cmap(cm.get_cmap(name))
# if self.canvas:
# self.canvas.draw_idle()
# if self.callback:
# self.callback(name)
. Output only the next line. | new_min = y_pixel_max |
Here is a snippet: <|code_start|> self.results = None
self.corrected_results = None
self.anglexvals = None
self.anglezvals = None
return
def BAres(self):
self.results = approximations.BAres(self.feature,self.space,
self.lattice, self.probe)
print self.results
return
def BA(self):
r'''
**Overview:**
This Born Approximation calculation is written entirely in Python
and assumes that the scattered beam is so small that the transmitted
beam is essentially t=1. This makes for a simple calculation,
however, it does not allows for the capturing of the dynamic
effects seen in real scattering.
Because of the simplistic nature of this calculation. Some tricks
can be used to speed up the calculation. This version of the BA
calculation utilizes the chirp-z transform (CZT) to solve the Form Factor.
The chirp-z is essentially an FFT which allows for solving the
transform anywhere on the sphere. With this, we can solve for any Q
range without wasting any resources calculating for areas we don't
<|code_end|>
. Write the next line using the current file imports:
from numpy import *
from pylab import figure, show, subplot, imshow
from . import approximations
from osrefl.viewers.plot_slicer import MultiView
from . import resolution
from DWBAGISANS import DWBA_form
from DWBA_Cuda import DWBA_form
from DWBA import DWBA_form
from numpy import sum
import osrefl.viewers.view as view
import osrefl.loaders.scale
import osrefl.model.sample_prep
import numpy as np
import pickle
and context from other files:
# Path: osrefl/viewers/plot_slicer.py
# def MultiView(data, step, n, extent=None, titles = None,
# axisLabel = None, vlimit = None):
#
# app = wx.PySimpleApp()
#
# infoCol = PlotInfo(data,vlimit,titles,extent,step,n,axisLabel)
# controller = PlotCtrler(app,infoCol)
# app.MainLoop()
# app.Destroy()
#
# return
, which may include functions, classes, or code. Output only the next line. | need. |
Predict the next line for this snippet: <|code_start|># The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Off-specular Modeling Software'
copyright = u'2010, Christopher Metting'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.
version = ".".join(release.split(".")[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
<|code_end|>
with the help of current file imports:
import sys, os
from distutils.util import get_platform
from osrefl import __version__ as release
and context from other files:
# Path: osrefl.py
, which may contain function names, class names, or code. Output only the next line. | exclude_trees = ['_build']
|
Given the following code snippet before the placeholder: <|code_start|> 0.8498 -6.478861916;
0.8741 -6.159517513;
0.8061 -6.835647144;
0.846 -6.53165267;
0.8751 -6.224098421;
0.8856 -5.910094889;
0.8919 -5.598599459;
0.8934 -5.290645224;
0.894 -4.974284616;
0.8957 -4.64454848;
0.9047 -4.290560426;
0.9129 -3.885055584;
0.9209 -3.408378962;
0.9219 -3.13200249;
0.7739 -8.726767166;
0.7681 -8.66695597;
0.7665 -8.511026475;
0.7703 -8.165388579;
0.7702 -7.886056648;
0.7761 -7.588043762;
0.7809 -7.283412422;
0.7961 -6.995678626;
0.8253 -6.691862621;
0.8602 -6.392544977;
0.8809 -6.067374056;
0.8301 -6.684029655;
0.8664 -6.378719832;
0.8834 -6.065855188;
0.8898 -5.752272167;
0.8964 -5.132414673;
<|code_end|>
, predict the next line using imports from the current file:
import sys
import numpy as N
import pylab
from osrefl.loaders.reduction.wsolve import wpolyfit
and context including class names, function names, and sometimes code from other files:
# Path: osrefl/loaders/reduction/wsolve.py
# def wpolyfit(x,y,dy=1,degree=None,origin=False):
# """
# Return the polynomial of degree n that
# minimizes sum( (p(x_i) - y_i)**2/dy_i**2).
#
# if origin is True, the fit should go through the origin.
# """
# assert degree != None, "Missing degree argument to wpolyfit"
#
# A = _poly_matrix(x,degree,origin)
# s = wsolve(A,y,dy)
# return PolynomialModel(s,origin=origin)
. Output only the next line. | 0.8963 -4.811352704; |
Using the snippet: <|code_start|>
ITER = 30
img = loadimg('/home/siqi/ncidata/rivuletpy/tests/data/test-crop.tif')
bimg = (img > 0).astype('int')
dt = skfmm.distance(bimg, dx=1)
sdt = ssm(dt, anisotropic=True, iterations=ITER)
try:
except ImportError:
s_seg = s > filters.threshold_otsu(s)
plt.figure()
plt.title('DT')
plt.imshow(dt.max(-1))
plt.figure()
plt.title('img > 0')
plt.imshow((img > 0).max(-1))
plt.figure()
<|code_end|>
, determine the next line of code. You have imports:
from filtering.morphology import ssm
from rivuletpy.utils.io import *
from skimage import filters
from skimage import filter as filters
import matplotlib.pyplot as plt
import skfmm
and context (class names, function names, or code) available:
# Path: filtering/morphology.py
# def ssm(img, anisotropic=False, iterations=30):
# '''
# Skeleton strength map
# img: the input image
# anisotropic: True if using anisotropic diffusion
# iterations: number of iterations to optimise the GVF
# '''
# # f = gaussian_gradient_magnitude(img, 1)
# # f = 1 - gimg # Inverted version of the smoothed
# # gradient of the distance transform
#
# gvfmap = gvf(img, mu=0.001, iterations=iterations, anisotropic=anisotropic)
#
# shifts = [-1, 1, -1, -1, -1, 1]
# axis = [1, 1, 2, 2, 3, 3]
# shiftmat = np.zeros(gvfmap.shape)
# f = np.zeros(img.shape) # reuse f for saving the SSM
#
# for i, (s, a) in enumerate(zip(shifts, axis)):
# # Only the orthogonal neighbours
# shiftmat.fill(0)
# shiftmat[a - 1, :, :, :] = s
#
# # Dot product gvf and neighbour displacement fields /
# # distance between neighbour
# f += np.sum(np.roll(
# gvfmap, s, axis=a) * shiftmat, axis=0) / np.linalg.norm(
# shiftmat, axis=0)
#
# f[np.isnan(f)] = 0
# f[f < 0] = 0
# return f
. Output only the next line. | plt.title('SSM-DT') |
Here is a snippet: <|code_start|>
class SWC(object):
def __init__(self, soma=None):
self._data = np.zeros((1, 8))
if soma:
self._data[0, :] = np.asarray([0, 1, soma.centroid[0], soma.centroid[
1], soma.centroid[2], soma.radius, -1, 1])
def add(self, swc_nodes):
<|code_end|>
. Write the next line using the current file imports:
import math
import numpy as np
from .utils.io import saveswc
from collections import Counter
from random import gauss
from random import random
from random import randrange
from scipy.spatial.distance import cdist
from rivuletpy.utils.rendering3 import Viewer3, Line3
and context from other files:
# Path: rivuletpy/utils/io.py
# def saveswc(filepath, swc):
# if swc.shape[1] > 7:
# swc = swc[:, :7]
#
# with open(filepath, 'w') as f:
# for i in range(swc.shape[0]):
# print('%d %d %.3f %.3f %.3f %.3f %d' %
# tuple(swc[i, :].tolist()), file=f)
, which may include functions, classes, or code. Output only the next line. | np.vstack((self._data, swc_nodes)) |
Based on the snippet: <|code_start|>
sys.path.append( ".." )
print cvideo.init()
img = np.zeros([720,1280,3], dtype=np.uint8)
missingIFrame = True
filename = sys.argv[1]
pave = PaVE()
pave.append( open( filename, "rb" ).read() )
header,payload = pave.extract()
while len(header) > 0:
w,h = frameEncodedWidth(header), frameEncodedHeight(header)
if img.shape[0] != h or img.shape[1] != w:
print img.shape, (w,h)
img = np.zeros([h,w,3], dtype=np.uint8)
missingIFrame = missingIFrame and not isIFrame(header)
if not missingIFrame:
assert cvideo.frame( img, isIFrame(header) and 1 or 0, payload )
<|code_end|>
, predict the immediate next line with the help of imports:
import cvideo
import cv2
import numpy as np
import sys
from pave import PaVE, isIFrame, frameEncodedWidth, frameEncodedHeight
and context (classes, functions, sometimes code) from other files:
# Path: pave.py
# class PaVE:
# def __init__( self ):
# self.buf = ""
#
# def append( self, packet ):
# self.buf += packet
#
#
# def extract( self ):
# "return single packet (header, payload)"
# if not self.buf.startswith("PaVE"):
# if "PaVE" in self.buf:
# self.buf = self.buf[ self.buf.index("PaVE") : ]
#
# if len(self.buf) < 4+1+1+2+4:
# # at least struct of version and header and payload sizes must be ready
# return "",""
#
# if not self.buf.startswith( "PaVE" ):
# return "",""
#
# version, codec, headerSize, payloadSize = struct.unpack_from("BBHI", self.buf, 4 )
# if len(self.buf) < headerSize + payloadSize:
# return "",""
#
# ret = self.buf[:headerSize], self.buf[headerSize:headerSize+payloadSize]
# self.buf = self.buf[headerSize + payloadSize : ]
# return ret
#
# def isIFrame( header ):
# "return True if I-Frame"
# return struct.unpack_from("B", header, 30)[0] == 1
#
# def frameEncodedWidth( header ):
# return struct.unpack_from("H", header, 12)[0]
#
# def frameEncodedHeight( header ):
# return struct.unpack_from("H", header, 14)[0]
. Output only the next line. | cv2.imshow('image', img) |
Predict the next line after this snippet: <|code_start|>
sys.path.append( ".." )
print cvideo.init()
img = np.zeros([720,1280,3], dtype=np.uint8)
missingIFrame = True
filename = sys.argv[1]
pave = PaVE()
<|code_end|>
using the current file's imports:
import cvideo
import cv2
import numpy as np
import sys
from pave import PaVE, isIFrame, frameEncodedWidth, frameEncodedHeight
and any relevant context from other files:
# Path: pave.py
# class PaVE:
# def __init__( self ):
# self.buf = ""
#
# def append( self, packet ):
# self.buf += packet
#
#
# def extract( self ):
# "return single packet (header, payload)"
# if not self.buf.startswith("PaVE"):
# if "PaVE" in self.buf:
# self.buf = self.buf[ self.buf.index("PaVE") : ]
#
# if len(self.buf) < 4+1+1+2+4:
# # at least struct of version and header and payload sizes must be ready
# return "",""
#
# if not self.buf.startswith( "PaVE" ):
# return "",""
#
# version, codec, headerSize, payloadSize = struct.unpack_from("BBHI", self.buf, 4 )
# if len(self.buf) < headerSize + payloadSize:
# return "",""
#
# ret = self.buf[:headerSize], self.buf[headerSize:headerSize+payloadSize]
# self.buf = self.buf[headerSize + payloadSize : ]
# return ret
#
# def isIFrame( header ):
# "return True if I-Frame"
# return struct.unpack_from("B", header, 30)[0] == 1
#
# def frameEncodedWidth( header ):
# return struct.unpack_from("H", header, 12)[0]
#
# def frameEncodedHeight( header ):
# return struct.unpack_from("H", header, 14)[0]
. Output only the next line. | pave.append( open( filename, "rb" ).read() ) |
Next line prediction: <|code_start|>
class CoreTestCase(TestCase):
fixtures = ['test.json']
def setUp(self):
self.factory = RequestFactory()
<|code_end|>
. Use current file imports:
(from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.test import TestCase
from core.mixins import ChartsUtilityMixin
from core.views import AngularView)
and context including class names, function names, or small code snippets from other files:
# Path: core/mixins.py
# class ChartsUtilityMixin(object):
# num_days = None
#
# @property
# def pageviews(self):
# return PageView.objects.filter(session__website=self.kwargs['website_id'])
#
# @property
# def pageviews_today(self):
# return self.pageviews.filter(view_timestamp__gt=self.today_midnight())
#
# @property
# def pageviews_yesterday(self):
# return self.pageviews.filter(view_timestamp__lte=self.today_midnight(),
# view_timestamp__gte=self.today_midnight() - timezone.timedelta(days=1))
#
# @property
# def sessions(self):
# return Session.objects.filter(website=self.kwargs['website_id'])
#
# @property
# def sessions_today(self):
# return self.sessions.filter(timestamp__gt=self.today_midnight())
#
# @property
# def sessions_yesterday(self):
# return self.sessions.filter(timestamp__lt=self.today_midnight(),
# timestamp__gt=self.today_midnight() - timezone.timedelta(days=1))
#
# def past_timestamp(self, **kwargs):
# return timezone.now().astimezone(self.get_website().timezone) - timezone.timedelta(**kwargs)
#
# def today_midnight(self):
# """
# Returns datetime, today at 00:00 am
# One might use now.date(), but that removes timezone data
# """
# return timezone.now().astimezone(self.get_website().timezone).replace(hour=0, minute=0, second=0)
#
# def seconds_until_midnight(self):
# now = timezone.now().astimezone(self.get_website().timezone)
# midnight = now.replace(hour=23, minute=59, second=59)
# diff = (midnight-now).total_seconds()
# return int(diff) + 60
#
# def yesterday_midnight(self):
# return self.today_midnight() - timezone.timedelta(days=1)
#
# def group_by_date(self, queryset, timestamp_field, aggregation_function):
# """
# Take given queryset, filter by website_id, group by day on timestamp_field
# with aggregation_function for last num_days days
# """
# end_date = self.today_midnight()
# truncate_date = get_date_truncate('day', timestamp_field, self.get_website().timezone)
#
# queryset = queryset.extra({'date': truncate_date})
# queryset = queryset.filter(**{timestamp_field + '__lt': end_date,
# timestamp_field + '__gt': end_date - timezone.timedelta(days=self.num_days)})
# # Need something else than a queryset for serialising
# data = list(queryset.values('date').annotate(aggregated_data=aggregation_function).order_by('date'))
# return self.add_zeros(data, self.num_days)
#
# def add_zeros(self, in_data, num_days):
# """
# Add count=0 on dates that aren't present in in_data list from self.group_by_date()
# """
# data = SortedDict()
# end_date = self.today_midnight().date()
# for date in daterange(end_date - timezone.timedelta(days=num_days), end_date):
# for pair in in_data:
#
# # Dates stored as datetime when using postgresql
# if isinstance(pair['date'], datetime.datetime):
# if pair['date'].date() == date:
# data[str(pair['date'].date())] = pair['aggregated_data']
# break
# # Dates stored as string when using sqlite
# elif isinstance(pair['date'], unicode) or isinstance(pair['date'], str):
# if pair['date'] == str(date):
# data[pair['date']] = pair['aggregated_data']
# break
# else:
# raise StandardError('Can\'t figure out the date format in database. '
# 'Not unicode, str or datetime')
# else:
# data[str(date)] = 0
# return data
#
# Path: core/views.py
# class AngularView(AngularAppMixin, TemplateView):
# template_name = 'core/app.html'
#
# def get_context_data(self, **kwargs):
# context = super(AngularView, self).get_context_data(**kwargs)
# website = self.get_website()
# context.update(static_url=settings.STATIC_URL,
# website=website,
# website_json=self.website_to_json(website),
# series=mark_safe(json.dumps(MonthlyChartView.SERIES)))
# return context
. Output only the next line. | def test_loginrequired_mixin(self): |
Predict the next line for this snippet: <|code_start|>
class CoreTestCase(TestCase):
fixtures = ['test.json']
def setUp(self):
self.factory = RequestFactory()
def test_loginrequired_mixin(self):
request = self.factory.get(reverse('app', args=[2, ]))
request.user = AnonymousUser()
response = AngularView.as_view()(request)
self.assertEqual(response.url, reverse('accounts:login') + '?next=' + reverse('app', args=[2, ]))
def test_charts_mixin(self):
mixin = ChartsUtilityMixin()
<|code_end|>
with the help of current file imports:
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.test import TestCase
from core.mixins import ChartsUtilityMixin
from core.views import AngularView
and context from other files:
# Path: core/mixins.py
# class ChartsUtilityMixin(object):
# num_days = None
#
# @property
# def pageviews(self):
# return PageView.objects.filter(session__website=self.kwargs['website_id'])
#
# @property
# def pageviews_today(self):
# return self.pageviews.filter(view_timestamp__gt=self.today_midnight())
#
# @property
# def pageviews_yesterday(self):
# return self.pageviews.filter(view_timestamp__lte=self.today_midnight(),
# view_timestamp__gte=self.today_midnight() - timezone.timedelta(days=1))
#
# @property
# def sessions(self):
# return Session.objects.filter(website=self.kwargs['website_id'])
#
# @property
# def sessions_today(self):
# return self.sessions.filter(timestamp__gt=self.today_midnight())
#
# @property
# def sessions_yesterday(self):
# return self.sessions.filter(timestamp__lt=self.today_midnight(),
# timestamp__gt=self.today_midnight() - timezone.timedelta(days=1))
#
# def past_timestamp(self, **kwargs):
# return timezone.now().astimezone(self.get_website().timezone) - timezone.timedelta(**kwargs)
#
# def today_midnight(self):
# """
# Returns datetime, today at 00:00 am
# One might use now.date(), but that removes timezone data
# """
# return timezone.now().astimezone(self.get_website().timezone).replace(hour=0, minute=0, second=0)
#
# def seconds_until_midnight(self):
# now = timezone.now().astimezone(self.get_website().timezone)
# midnight = now.replace(hour=23, minute=59, second=59)
# diff = (midnight-now).total_seconds()
# return int(diff) + 60
#
# def yesterday_midnight(self):
# return self.today_midnight() - timezone.timedelta(days=1)
#
# def group_by_date(self, queryset, timestamp_field, aggregation_function):
# """
# Take given queryset, filter by website_id, group by day on timestamp_field
# with aggregation_function for last num_days days
# """
# end_date = self.today_midnight()
# truncate_date = get_date_truncate('day', timestamp_field, self.get_website().timezone)
#
# queryset = queryset.extra({'date': truncate_date})
# queryset = queryset.filter(**{timestamp_field + '__lt': end_date,
# timestamp_field + '__gt': end_date - timezone.timedelta(days=self.num_days)})
# # Need something else than a queryset for serialising
# data = list(queryset.values('date').annotate(aggregated_data=aggregation_function).order_by('date'))
# return self.add_zeros(data, self.num_days)
#
# def add_zeros(self, in_data, num_days):
# """
# Add count=0 on dates that aren't present in in_data list from self.group_by_date()
# """
# data = SortedDict()
# end_date = self.today_midnight().date()
# for date in daterange(end_date - timezone.timedelta(days=num_days), end_date):
# for pair in in_data:
#
# # Dates stored as datetime when using postgresql
# if isinstance(pair['date'], datetime.datetime):
# if pair['date'].date() == date:
# data[str(pair['date'].date())] = pair['aggregated_data']
# break
# # Dates stored as string when using sqlite
# elif isinstance(pair['date'], unicode) or isinstance(pair['date'], str):
# if pair['date'] == str(date):
# data[pair['date']] = pair['aggregated_data']
# break
# else:
# raise StandardError('Can\'t figure out the date format in database. '
# 'Not unicode, str or datetime')
# else:
# data[str(date)] = 0
# return data
#
# Path: core/views.py
# class AngularView(AngularAppMixin, TemplateView):
# template_name = 'core/app.html'
#
# def get_context_data(self, **kwargs):
# context = super(AngularView, self).get_context_data(**kwargs)
# website = self.get_website()
# context.update(static_url=settings.STATIC_URL,
# website=website,
# website_json=self.website_to_json(website),
# series=mark_safe(json.dumps(MonthlyChartView.SERIES)))
# return context
, which may contain function names, class names, or code. Output only the next line. | mixin.kwargs = {'website_id': 1} |
Based on the snippet: <|code_start|>
urlpatterns = patterns('',
url(r'^login/$', LoginView.as_view(), name='login'),
url(r'^logout/$', LogoutView.as_view(), name='logout'),
url(r'^account/$', AccountSettingsView.as_view(), name='settings'),
url(r'^created/$', WebsiteCreatedView.as_view(), name='website_created'),
url(r'^register/$', RegisterView.as_view(), name='register'),
url(r'^websites/$', WebsiteListView.as_view(), name='website_list'),
url(r'^addwebsite/$', AddWebsiteView.as_view(), name='add_website'),
url(r'^editwebsite/(?P<website_id>\d)/$', EditWebsiteView.as_view(), name='edit_website'),
url(r'^signup/$', RegisterView.as_view(), name='signup'),
<|code_end|>
, predict the immediate next line with the help of imports:
from django.conf.urls import patterns, url
from accounts.views import LoginView, RegisterView, WebsiteListView, LogoutView, AddWebsiteView, WebsiteCreatedView, \
AccountSettingsView, EditWebsiteView
and context (classes, functions, sometimes code) from other files:
# Path: accounts/views.py
# class LoginView(FormView):
# template_name = 'accounts/login.html'
# form_class = AuthenticationForm
#
# def get_success_url(self):
# if 'next' in self.request.GET:
# return self.request.GET['next']
# return reverse_lazy('accounts:website_list')
#
# def form_valid(self, form):
# login(self.request, form.get_user())
# return super(LoginView, self).form_valid(form)
#
# class RegisterView(FormView):
# template_name = 'accounts/register.html'
# form_class = EmailUserCreationForm
# success_url = reverse_lazy('accounts:login')
#
# def form_valid(self, form):
# form.save()
# messages.success(self.request, 'Registration successful, you can now sign in.')
# return super(RegisterView, self).form_valid(form)
#
# class WebsiteListView(LoginRequiredMixin, ListView):
# template_name = 'accounts/websites.html'
# context_object_name = 'websites'
#
# def get_queryset(self):
# return self.request.user.website_set.order_by('pk')
#
# class LogoutView(RedirectView):
# url = reverse_lazy('home')
#
# def dispatch(self, *args, **kwargs):
# logout(self.request)
# return super(LogoutView, self).dispatch(*args, **kwargs)
#
# class AddWebsiteView(LoginRequiredMixin, FormView):
# template_name = 'accounts/add_website.html'
# form_class = modelform_factory(Website, exclude=('user', ), widgets={'timezone': django_select2.Select2Widget()})
# success_url = reverse_lazy('accounts:website_list')
#
# def redirect_to_website_created(self, pk):
# return HttpResponseRedirect(reverse_lazy('accounts:website_created') + '?id={}'.format(pk))
#
# def form_valid(self, form):
# website = form.save(commit=False)
# website.user = self.request.user
# website.save()
# return self.redirect_to_website_created(website.pk)
#
# class WebsiteCreatedView(TemplateView):
# template_name = 'accounts/website_created.html'
#
# def get_context_data(self, **kwargs):
# context = super(WebsiteCreatedView, self).get_context_data(**kwargs)
# context['jspath'] = self.request.build_absolute_uri(static('js/insightful.min.js'))
# context['report'] = self.request.build_absolute_uri(reverse('report'))
# if 'id' in self.request.GET:
# context['id'] = self.request.GET['id']
# else:
# context['id'] = '"YOUR WEBSITE ID"'
#
# return context
#
# class AccountSettingsView(FormView):
# form_class = PasswordChangeForm
# template_name = 'accounts/password_change.html'
# success_url = reverse_lazy('accounts:website_list')
#
# def get_form(self, form_class):
# return form_class(self.request.user, **self.get_form_kwargs())
#
# def form_valid(self, form):
# form.save()
# messages.success(self.request, 'Password successfully changed.')
# return super(AccountSettingsView, self).form_valid(form)
#
# class EditWebsiteView(AngularAppMixin, FormView):
# template_name = 'accounts/edit_website.html'
# form_class = modelform_factory(Website, exclude=('user', ), widgets={'timezone': django_select2.Select2Widget()})
# success_url = reverse_lazy('accounts:website_list')
#
# def get_form_kwargs(self):
# kwargs = super(EditWebsiteView, self).get_form_kwargs()
# kwargs['instance'] = self.get_website()
# return kwargs
#
# def form_valid(self, form):
# form.save()
# messages.success(self.request, 'Website successfully edited.')
# return super(EditWebsiteView, self).form_valid(form)
. Output only the next line. | ) |
Predict the next line after this snippet: <|code_start|>
urlpatterns = patterns('',
url(r'^login/$', LoginView.as_view(), name='login'),
url(r'^logout/$', LogoutView.as_view(), name='logout'),
url(r'^account/$', AccountSettingsView.as_view(), name='settings'),
url(r'^created/$', WebsiteCreatedView.as_view(), name='website_created'),
url(r'^register/$', RegisterView.as_view(), name='register'),
url(r'^websites/$', WebsiteListView.as_view(), name='website_list'),
url(r'^addwebsite/$', AddWebsiteView.as_view(), name='add_website'),
url(r'^editwebsite/(?P<website_id>\d)/$', EditWebsiteView.as_view(), name='edit_website'),
url(r'^signup/$', RegisterView.as_view(), name='signup'),
<|code_end|>
using the current file's imports:
from django.conf.urls import patterns, url
from accounts.views import LoginView, RegisterView, WebsiteListView, LogoutView, AddWebsiteView, WebsiteCreatedView, \
AccountSettingsView, EditWebsiteView
and any relevant context from other files:
# Path: accounts/views.py
# class LoginView(FormView):
# template_name = 'accounts/login.html'
# form_class = AuthenticationForm
#
# def get_success_url(self):
# if 'next' in self.request.GET:
# return self.request.GET['next']
# return reverse_lazy('accounts:website_list')
#
# def form_valid(self, form):
# login(self.request, form.get_user())
# return super(LoginView, self).form_valid(form)
#
# class RegisterView(FormView):
# template_name = 'accounts/register.html'
# form_class = EmailUserCreationForm
# success_url = reverse_lazy('accounts:login')
#
# def form_valid(self, form):
# form.save()
# messages.success(self.request, 'Registration successful, you can now sign in.')
# return super(RegisterView, self).form_valid(form)
#
# class WebsiteListView(LoginRequiredMixin, ListView):
# template_name = 'accounts/websites.html'
# context_object_name = 'websites'
#
# def get_queryset(self):
# return self.request.user.website_set.order_by('pk')
#
# class LogoutView(RedirectView):
# url = reverse_lazy('home')
#
# def dispatch(self, *args, **kwargs):
# logout(self.request)
# return super(LogoutView, self).dispatch(*args, **kwargs)
#
# class AddWebsiteView(LoginRequiredMixin, FormView):
# template_name = 'accounts/add_website.html'
# form_class = modelform_factory(Website, exclude=('user', ), widgets={'timezone': django_select2.Select2Widget()})
# success_url = reverse_lazy('accounts:website_list')
#
# def redirect_to_website_created(self, pk):
# return HttpResponseRedirect(reverse_lazy('accounts:website_created') + '?id={}'.format(pk))
#
# def form_valid(self, form):
# website = form.save(commit=False)
# website.user = self.request.user
# website.save()
# return self.redirect_to_website_created(website.pk)
#
# class WebsiteCreatedView(TemplateView):
# template_name = 'accounts/website_created.html'
#
# def get_context_data(self, **kwargs):
# context = super(WebsiteCreatedView, self).get_context_data(**kwargs)
# context['jspath'] = self.request.build_absolute_uri(static('js/insightful.min.js'))
# context['report'] = self.request.build_absolute_uri(reverse('report'))
# if 'id' in self.request.GET:
# context['id'] = self.request.GET['id']
# else:
# context['id'] = '"YOUR WEBSITE ID"'
#
# return context
#
# class AccountSettingsView(FormView):
# form_class = PasswordChangeForm
# template_name = 'accounts/password_change.html'
# success_url = reverse_lazy('accounts:website_list')
#
# def get_form(self, form_class):
# return form_class(self.request.user, **self.get_form_kwargs())
#
# def form_valid(self, form):
# form.save()
# messages.success(self.request, 'Password successfully changed.')
# return super(AccountSettingsView, self).form_valid(form)
#
# class EditWebsiteView(AngularAppMixin, FormView):
# template_name = 'accounts/edit_website.html'
# form_class = modelform_factory(Website, exclude=('user', ), widgets={'timezone': django_select2.Select2Widget()})
# success_url = reverse_lazy('accounts:website_list')
#
# def get_form_kwargs(self):
# kwargs = super(EditWebsiteView, self).get_form_kwargs()
# kwargs['instance'] = self.get_website()
# return kwargs
#
# def form_valid(self, form):
# form.save()
# messages.success(self.request, 'Website successfully edited.')
# return super(EditWebsiteView, self).form_valid(form)
. Output only the next line. | ) |
Using the snippet: <|code_start|>
urlpatterns = patterns('',
url(r'^login/$', LoginView.as_view(), name='login'),
url(r'^logout/$', LogoutView.as_view(), name='logout'),
url(r'^account/$', AccountSettingsView.as_view(), name='settings'),
url(r'^created/$', WebsiteCreatedView.as_view(), name='website_created'),
url(r'^register/$', RegisterView.as_view(), name='register'),
url(r'^websites/$', WebsiteListView.as_view(), name='website_list'),
url(r'^addwebsite/$', AddWebsiteView.as_view(), name='add_website'),
url(r'^editwebsite/(?P<website_id>\d)/$', EditWebsiteView.as_view(), name='edit_website'),
url(r'^signup/$', RegisterView.as_view(), name='signup'),
<|code_end|>
, determine the next line of code. You have imports:
from django.conf.urls import patterns, url
from accounts.views import LoginView, RegisterView, WebsiteListView, LogoutView, AddWebsiteView, WebsiteCreatedView, \
AccountSettingsView, EditWebsiteView
and context (class names, function names, or code) available:
# Path: accounts/views.py
# class LoginView(FormView):
# template_name = 'accounts/login.html'
# form_class = AuthenticationForm
#
# def get_success_url(self):
# if 'next' in self.request.GET:
# return self.request.GET['next']
# return reverse_lazy('accounts:website_list')
#
# def form_valid(self, form):
# login(self.request, form.get_user())
# return super(LoginView, self).form_valid(form)
#
# class RegisterView(FormView):
# template_name = 'accounts/register.html'
# form_class = EmailUserCreationForm
# success_url = reverse_lazy('accounts:login')
#
# def form_valid(self, form):
# form.save()
# messages.success(self.request, 'Registration successful, you can now sign in.')
# return super(RegisterView, self).form_valid(form)
#
# class WebsiteListView(LoginRequiredMixin, ListView):
# template_name = 'accounts/websites.html'
# context_object_name = 'websites'
#
# def get_queryset(self):
# return self.request.user.website_set.order_by('pk')
#
# class LogoutView(RedirectView):
# url = reverse_lazy('home')
#
# def dispatch(self, *args, **kwargs):
# logout(self.request)
# return super(LogoutView, self).dispatch(*args, **kwargs)
#
# class AddWebsiteView(LoginRequiredMixin, FormView):
# template_name = 'accounts/add_website.html'
# form_class = modelform_factory(Website, exclude=('user', ), widgets={'timezone': django_select2.Select2Widget()})
# success_url = reverse_lazy('accounts:website_list')
#
# def redirect_to_website_created(self, pk):
# return HttpResponseRedirect(reverse_lazy('accounts:website_created') + '?id={}'.format(pk))
#
# def form_valid(self, form):
# website = form.save(commit=False)
# website.user = self.request.user
# website.save()
# return self.redirect_to_website_created(website.pk)
#
# class WebsiteCreatedView(TemplateView):
# template_name = 'accounts/website_created.html'
#
# def get_context_data(self, **kwargs):
# context = super(WebsiteCreatedView, self).get_context_data(**kwargs)
# context['jspath'] = self.request.build_absolute_uri(static('js/insightful.min.js'))
# context['report'] = self.request.build_absolute_uri(reverse('report'))
# if 'id' in self.request.GET:
# context['id'] = self.request.GET['id']
# else:
# context['id'] = '"YOUR WEBSITE ID"'
#
# return context
#
# class AccountSettingsView(FormView):
# form_class = PasswordChangeForm
# template_name = 'accounts/password_change.html'
# success_url = reverse_lazy('accounts:website_list')
#
# def get_form(self, form_class):
# return form_class(self.request.user, **self.get_form_kwargs())
#
# def form_valid(self, form):
# form.save()
# messages.success(self.request, 'Password successfully changed.')
# return super(AccountSettingsView, self).form_valid(form)
#
# class EditWebsiteView(AngularAppMixin, FormView):
# template_name = 'accounts/edit_website.html'
# form_class = modelform_factory(Website, exclude=('user', ), widgets={'timezone': django_select2.Select2Widget()})
# success_url = reverse_lazy('accounts:website_list')
#
# def get_form_kwargs(self):
# kwargs = super(EditWebsiteView, self).get_form_kwargs()
# kwargs['instance'] = self.get_website()
# return kwargs
#
# def form_valid(self, form):
# form.save()
# messages.success(self.request, 'Website successfully edited.')
# return super(EditWebsiteView, self).form_valid(form)
. Output only the next line. | ) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.