Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given snippet: <|code_start|>"""
Multi-backend ContentsManager.
"""
from __future__ import unicode_literals
@outside_root_to_404
def _resolve_path(path, manager_dict):
"""
Resolve a path based on a dictionary of manager prefixes.
Returns a triple of (prefix, manager, manager_relative_path).
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from six import iteritems
from tornado.web import HTTPError
from .api_utils import (
base_directory_model,
normalize_api_path,
outside_root_to_404,
)
from .utils.ipycompat import ContentsManager, Dict
and context:
# Path: pgcontents/api_utils.py
# def base_directory_model(path):
# m = base_model(path)
# m.update(
# type='directory',
# last_modified=DUMMY_CREATED_DATE,
# created=DUMMY_CREATED_DATE,
# )
# return m
#
# def normalize_api_path(api_path):
# """
# Resolve paths with '..' to normalized paths, raising an error if the final
# result is outside root.
# """
# normalized = posixpath.normpath(api_path.strip('/'))
# if normalized == '.':
# normalized = ''
# elif normalized.startswith('..'):
# raise PathOutsideRoot(normalized)
# return normalized
#
# def outside_root_to_404(fn):
# """
# Decorator for converting PathOutsideRoot errors to 404s.
# """
# @wraps(fn)
# def wrapped(*args, **kwargs):
# try:
# return fn(*args, **kwargs)
# except PathOutsideRoot as e:
# raise HTTPError(404, "Path outside root: [%s]" % e.args[0])
# return wrapped
#
# Path: pgcontents/utils/ipycompat.py
which might include code, classes, or functions. Output only the next line. | path = normalize_api_path(path) |
Continue the code snippet: <|code_start|> def _wrapper(self, old_path, new_path, *args, **kwargs):
old_prefix, old_mgr, old_mgr_path = _resolve_path(
old_path, self.managers
)
new_prefix, new_mgr, new_mgr_path = _resolve_path(
new_path, self.managers,
)
if old_mgr is not new_mgr:
# TODO: Consider supporting this via get+delete+save.
raise HTTPError(
400,
"Can't move files between backends ({old} -> {new})".format(
old=old_path,
new=new_path,
)
)
assert new_prefix == old_prefix
result = getattr(new_mgr, mname)(
old_mgr_path,
new_mgr_path,
*args,
**kwargs
)
if returns_model and new_prefix:
return _apply_prefix(new_prefix, result)
else:
return result
return _wrapper
<|code_end|>
. Use current file imports:
from six import iteritems
from tornado.web import HTTPError
from .api_utils import (
base_directory_model,
normalize_api_path,
outside_root_to_404,
)
from .utils.ipycompat import ContentsManager, Dict
and context (classes, functions, or code) from other files:
# Path: pgcontents/api_utils.py
# def base_directory_model(path):
# m = base_model(path)
# m.update(
# type='directory',
# last_modified=DUMMY_CREATED_DATE,
# created=DUMMY_CREATED_DATE,
# )
# return m
#
# def normalize_api_path(api_path):
# """
# Resolve paths with '..' to normalized paths, raising an error if the final
# result is outside root.
# """
# normalized = posixpath.normpath(api_path.strip('/'))
# if normalized == '.':
# normalized = ''
# elif normalized.startswith('..'):
# raise PathOutsideRoot(normalized)
# return normalized
#
# def outside_root_to_404(fn):
# """
# Decorator for converting PathOutsideRoot errors to 404s.
# """
# @wraps(fn)
# def wrapped(*args, **kwargs):
# try:
# return fn(*args, **kwargs)
# except PathOutsideRoot as e:
# raise HTTPError(404, "Path outside root: [%s]" % e.args[0])
# return wrapped
#
# Path: pgcontents/utils/ipycompat.py
. Output only the next line. | class HybridContentsManager(ContentsManager): |
Given snippet: <|code_start|> )
if old_mgr is not new_mgr:
# TODO: Consider supporting this via get+delete+save.
raise HTTPError(
400,
"Can't move files between backends ({old} -> {new})".format(
old=old_path,
new=new_path,
)
)
assert new_prefix == old_prefix
result = getattr(new_mgr, mname)(
old_mgr_path,
new_mgr_path,
*args,
**kwargs
)
if returns_model and new_prefix:
return _apply_prefix(new_prefix, result)
else:
return result
return _wrapper
class HybridContentsManager(ContentsManager):
"""
ContentsManager subclass that delegates specific subdirectories to other
ContentsManager/Checkpoints pairs.
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from six import iteritems
from tornado.web import HTTPError
from .api_utils import (
base_directory_model,
normalize_api_path,
outside_root_to_404,
)
from .utils.ipycompat import ContentsManager, Dict
and context:
# Path: pgcontents/api_utils.py
# def base_directory_model(path):
# m = base_model(path)
# m.update(
# type='directory',
# last_modified=DUMMY_CREATED_DATE,
# created=DUMMY_CREATED_DATE,
# )
# return m
#
# def normalize_api_path(api_path):
# """
# Resolve paths with '..' to normalized paths, raising an error if the final
# result is outside root.
# """
# normalized = posixpath.normpath(api_path.strip('/'))
# if normalized == '.':
# normalized = ''
# elif normalized.startswith('..'):
# raise PathOutsideRoot(normalized)
# return normalized
#
# def outside_root_to_404(fn):
# """
# Decorator for converting PathOutsideRoot errors to 404s.
# """
# @wraps(fn)
# def wrapped(*args, **kwargs):
# try:
# return fn(*args, **kwargs)
# except PathOutsideRoot as e:
# raise HTTPError(404, "Path outside root: [%s]" % e.args[0])
# return wrapped
#
# Path: pgcontents/utils/ipycompat.py
which might include code, classes, or functions. Output only the next line. | manager_classes = Dict( |
Predict the next line for this snippet: <|code_start|>from __future__ import with_statement
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
sys.path.append(dirname(dirname(dirname(abspath(__file__)))))
<|code_end|>
with the help of current file imports:
from alembic import context
from logging.config import fileConfig
from sqlalchemy import engine_from_config, pool
from os.path import dirname, abspath
from pgcontents.schema import metadata
import sys
and context from other files:
# Path: pgcontents/schema.py
, which may contain function names, class names, or code. Output only the next line. | target_metadata = metadata |
Given the code snippet: <|code_start|>def test_notebook(name):
"""
Make a test notebook for the given name.
"""
nb = new_notebook()
nb.cells.append(new_code_cell("'code_' + '{}'".format(name)))
nb.cells.append(new_raw_cell("raw_{}".format(name)))
nb.cells.append(new_markdown_cell('markdown_{}'.format(name)))
return nb
def populate(contents_mgr):
"""
Populate a test directory with a ContentsManager.
"""
dirs_nbs = [
('', 'inroot.ipynb'),
('Directory with spaces in', 'inspace.ipynb'),
('unicodé', 'innonascii.ipynb'),
('foo', 'a.ipynb'),
('foo', 'name with spaces.ipynb'),
('foo', 'unicodé.ipynb'),
('foo/bar', 'baz.ipynb'),
('å b', 'ç d.ipynb'),
]
for dirname, nbname in dirs_nbs:
contents_mgr.save({'type': 'directory'}, path=dirname)
contents_mgr.save(
{'type': 'notebook', 'content': test_notebook(nbname)},
<|code_end|>
, generate the next line using the imports in this file:
from contextlib import contextmanager
from cryptography.fernet import Fernet
from getpass import getuser
from itertools import starmap
from unicodedata import normalize
from IPython.utils import py3compat
from nose.tools import nottest
from sqlalchemy import create_engine
from tornado.web import HTTPError
from ..api_utils import api_path_join
from ..crypto import FernetEncryption
from ..schema import metadata
from ..utils.ipycompat import (
new_code_cell,
new_markdown_cell,
new_notebook,
new_raw_cell,
)
from ..utils.migrate import upgrade
import os
import posixpath
and context (functions, classes, or occasionally code) from other files:
# Path: pgcontents/api_utils.py
# def api_path_join(*paths):
# """
# Join API-style paths.
# """
# return posixpath.join(*paths).strip('/')
#
# Path: pgcontents/crypto.py
# class FernetEncryption(object):
# """
# Notebook encryption using cryptography.fernet for symmetric-key encryption.
#
# Parameters
# ----------
# fernet : cryptography.fernet.Fernet
# The Fernet object to use for encryption.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
#
# Notes
# -----
# ``cryptography.fernet.MultiFernet`` can be used instead of a vanilla
# ``Fernet`` to allow zero-downtime key rotation.
#
# See Also
# --------
# :func:`pgcontents.utils.sync.reencrypt_user`
# """
# __slots__ = ('_fernet',)
#
# def __init__(self, fernet):
# self._fernet = fernet
#
# def encrypt(self, s):
# return self._fernet.encrypt(s)
#
# def decrypt(self, s):
# try:
# return self._fernet.decrypt(s)
# except Exception as e:
# raise CorruptedFile(e)
#
# def __copy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# def __deepcopy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# Path: pgcontents/schema.py
#
# Path: pgcontents/utils/ipycompat.py
#
# Path: pgcontents/utils/migrate.py
# def upgrade(db_url, revision):
# """
# Upgrade the given database to revision.
# """
# with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini:
# subprocess.check_call(
# ['alembic', '-c', alembic_ini, 'upgrade', revision]
# )
. Output only the next line. | path=api_path_join(dirname, nbname), |
Predict the next line after this snippet: <|code_start|># encoding: utf-8
"""
Utilities for testing.
"""
from __future__ import unicode_literals
TEST_DB_URL = os.environ.get('PGCONTENTS_TEST_DB_URL')
if TEST_DB_URL is None:
TEST_DB_URL = "postgresql://{user}@/pgcontents_testing".format(
user=getuser(),
)
def make_fernet():
<|code_end|>
using the current file's imports:
from contextlib import contextmanager
from cryptography.fernet import Fernet
from getpass import getuser
from itertools import starmap
from unicodedata import normalize
from IPython.utils import py3compat
from nose.tools import nottest
from sqlalchemy import create_engine
from tornado.web import HTTPError
from ..api_utils import api_path_join
from ..crypto import FernetEncryption
from ..schema import metadata
from ..utils.ipycompat import (
new_code_cell,
new_markdown_cell,
new_notebook,
new_raw_cell,
)
from ..utils.migrate import upgrade
import os
import posixpath
and any relevant context from other files:
# Path: pgcontents/api_utils.py
# def api_path_join(*paths):
# """
# Join API-style paths.
# """
# return posixpath.join(*paths).strip('/')
#
# Path: pgcontents/crypto.py
# class FernetEncryption(object):
# """
# Notebook encryption using cryptography.fernet for symmetric-key encryption.
#
# Parameters
# ----------
# fernet : cryptography.fernet.Fernet
# The Fernet object to use for encryption.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
#
# Notes
# -----
# ``cryptography.fernet.MultiFernet`` can be used instead of a vanilla
# ``Fernet`` to allow zero-downtime key rotation.
#
# See Also
# --------
# :func:`pgcontents.utils.sync.reencrypt_user`
# """
# __slots__ = ('_fernet',)
#
# def __init__(self, fernet):
# self._fernet = fernet
#
# def encrypt(self, s):
# return self._fernet.encrypt(s)
#
# def decrypt(self, s):
# try:
# return self._fernet.decrypt(s)
# except Exception as e:
# raise CorruptedFile(e)
#
# def __copy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# def __deepcopy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# Path: pgcontents/schema.py
#
# Path: pgcontents/utils/ipycompat.py
#
# Path: pgcontents/utils/migrate.py
# def upgrade(db_url, revision):
# """
# Upgrade the given database to revision.
# """
# with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini:
# subprocess.check_call(
# ['alembic', '-c', alembic_ini, 'upgrade', revision]
# )
. Output only the next line. | return FernetEncryption(Fernet(Fernet.generate_key())) |
Given the code snippet: <|code_start|> user=getuser(),
)
def make_fernet():
return FernetEncryption(Fernet(Fernet.generate_key()))
def _norm_unicode(s):
"""Normalize unicode strings"""
return normalize('NFC', py3compat.cast_unicode(s))
@contextmanager
def assertRaisesHTTPError(testcase, status, msg=None):
msg = msg or "Should have raised HTTPError(%i)" % status
try:
yield
except HTTPError as e:
testcase.assertEqual(e.status_code, status)
else:
testcase.fail(msg)
_tables = (
'pgcontents.remote_checkpoints',
'pgcontents.files',
'pgcontents.directories',
'pgcontents.users',
)
<|code_end|>
, generate the next line using the imports in this file:
from contextlib import contextmanager
from cryptography.fernet import Fernet
from getpass import getuser
from itertools import starmap
from unicodedata import normalize
from IPython.utils import py3compat
from nose.tools import nottest
from sqlalchemy import create_engine
from tornado.web import HTTPError
from ..api_utils import api_path_join
from ..crypto import FernetEncryption
from ..schema import metadata
from ..utils.ipycompat import (
new_code_cell,
new_markdown_cell,
new_notebook,
new_raw_cell,
)
from ..utils.migrate import upgrade
import os
import posixpath
and context (functions, classes, or occasionally code) from other files:
# Path: pgcontents/api_utils.py
# def api_path_join(*paths):
# """
# Join API-style paths.
# """
# return posixpath.join(*paths).strip('/')
#
# Path: pgcontents/crypto.py
# class FernetEncryption(object):
# """
# Notebook encryption using cryptography.fernet for symmetric-key encryption.
#
# Parameters
# ----------
# fernet : cryptography.fernet.Fernet
# The Fernet object to use for encryption.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
#
# Notes
# -----
# ``cryptography.fernet.MultiFernet`` can be used instead of a vanilla
# ``Fernet`` to allow zero-downtime key rotation.
#
# See Also
# --------
# :func:`pgcontents.utils.sync.reencrypt_user`
# """
# __slots__ = ('_fernet',)
#
# def __init__(self, fernet):
# self._fernet = fernet
#
# def encrypt(self, s):
# return self._fernet.encrypt(s)
#
# def decrypt(self, s):
# try:
# return self._fernet.decrypt(s)
# except Exception as e:
# raise CorruptedFile(e)
#
# def __copy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# def __deepcopy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# Path: pgcontents/schema.py
#
# Path: pgcontents/utils/ipycompat.py
#
# Path: pgcontents/utils/migrate.py
# def upgrade(db_url, revision):
# """
# Upgrade the given database to revision.
# """
# with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini:
# subprocess.check_call(
# ['alembic', '-c', alembic_ini, 'upgrade', revision]
# )
. Output only the next line. | unexpected_tables = set(metadata.tables) - set(_tables) |
Here is a snippet: <|code_start|>
@nottest
def drop_testing_db_tables():
"""
Drop all tables from the testing db.
"""
engine = create_engine(TEST_DB_URL)
conn = engine.connect()
trans = conn.begin()
conn.execute('DROP SCHEMA IF EXISTS pgcontents CASCADE')
conn.execute('DROP TABLE IF EXISTS alembic_version CASCADE')
trans.commit()
@nottest
def migrate_testing_db(revision='head'):
"""
Migrate the testing db to the latest alembic revision.
"""
upgrade(TEST_DB_URL, revision)
@nottest
def test_notebook(name):
"""
Make a test notebook for the given name.
"""
nb = new_notebook()
<|code_end|>
. Write the next line using the current file imports:
from contextlib import contextmanager
from cryptography.fernet import Fernet
from getpass import getuser
from itertools import starmap
from unicodedata import normalize
from IPython.utils import py3compat
from nose.tools import nottest
from sqlalchemy import create_engine
from tornado.web import HTTPError
from ..api_utils import api_path_join
from ..crypto import FernetEncryption
from ..schema import metadata
from ..utils.ipycompat import (
new_code_cell,
new_markdown_cell,
new_notebook,
new_raw_cell,
)
from ..utils.migrate import upgrade
import os
import posixpath
and context from other files:
# Path: pgcontents/api_utils.py
# def api_path_join(*paths):
# """
# Join API-style paths.
# """
# return posixpath.join(*paths).strip('/')
#
# Path: pgcontents/crypto.py
# class FernetEncryption(object):
# """
# Notebook encryption using cryptography.fernet for symmetric-key encryption.
#
# Parameters
# ----------
# fernet : cryptography.fernet.Fernet
# The Fernet object to use for encryption.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
#
# Notes
# -----
# ``cryptography.fernet.MultiFernet`` can be used instead of a vanilla
# ``Fernet`` to allow zero-downtime key rotation.
#
# See Also
# --------
# :func:`pgcontents.utils.sync.reencrypt_user`
# """
# __slots__ = ('_fernet',)
#
# def __init__(self, fernet):
# self._fernet = fernet
#
# def encrypt(self, s):
# return self._fernet.encrypt(s)
#
# def decrypt(self, s):
# try:
# return self._fernet.decrypt(s)
# except Exception as e:
# raise CorruptedFile(e)
#
# def __copy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# def __deepcopy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# Path: pgcontents/schema.py
#
# Path: pgcontents/utils/ipycompat.py
#
# Path: pgcontents/utils/migrate.py
# def upgrade(db_url, revision):
# """
# Upgrade the given database to revision.
# """
# with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini:
# subprocess.check_call(
# ['alembic', '-c', alembic_ini, 'upgrade', revision]
# )
, which may include functions, classes, or code. Output only the next line. | nb.cells.append(new_code_cell("'code_' + '{}'".format(name))) |
Next line prediction: <|code_start|>@nottest
def drop_testing_db_tables():
"""
Drop all tables from the testing db.
"""
engine = create_engine(TEST_DB_URL)
conn = engine.connect()
trans = conn.begin()
conn.execute('DROP SCHEMA IF EXISTS pgcontents CASCADE')
conn.execute('DROP TABLE IF EXISTS alembic_version CASCADE')
trans.commit()
@nottest
def migrate_testing_db(revision='head'):
"""
Migrate the testing db to the latest alembic revision.
"""
upgrade(TEST_DB_URL, revision)
@nottest
def test_notebook(name):
"""
Make a test notebook for the given name.
"""
nb = new_notebook()
nb.cells.append(new_code_cell("'code_' + '{}'".format(name)))
nb.cells.append(new_raw_cell("raw_{}".format(name)))
<|code_end|>
. Use current file imports:
(from contextlib import contextmanager
from cryptography.fernet import Fernet
from getpass import getuser
from itertools import starmap
from unicodedata import normalize
from IPython.utils import py3compat
from nose.tools import nottest
from sqlalchemy import create_engine
from tornado.web import HTTPError
from ..api_utils import api_path_join
from ..crypto import FernetEncryption
from ..schema import metadata
from ..utils.ipycompat import (
new_code_cell,
new_markdown_cell,
new_notebook,
new_raw_cell,
)
from ..utils.migrate import upgrade
import os
import posixpath)
and context including class names, function names, or small code snippets from other files:
# Path: pgcontents/api_utils.py
# def api_path_join(*paths):
# """
# Join API-style paths.
# """
# return posixpath.join(*paths).strip('/')
#
# Path: pgcontents/crypto.py
# class FernetEncryption(object):
# """
# Notebook encryption using cryptography.fernet for symmetric-key encryption.
#
# Parameters
# ----------
# fernet : cryptography.fernet.Fernet
# The Fernet object to use for encryption.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
#
# Notes
# -----
# ``cryptography.fernet.MultiFernet`` can be used instead of a vanilla
# ``Fernet`` to allow zero-downtime key rotation.
#
# See Also
# --------
# :func:`pgcontents.utils.sync.reencrypt_user`
# """
# __slots__ = ('_fernet',)
#
# def __init__(self, fernet):
# self._fernet = fernet
#
# def encrypt(self, s):
# return self._fernet.encrypt(s)
#
# def decrypt(self, s):
# try:
# return self._fernet.decrypt(s)
# except Exception as e:
# raise CorruptedFile(e)
#
# def __copy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# def __deepcopy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# Path: pgcontents/schema.py
#
# Path: pgcontents/utils/ipycompat.py
#
# Path: pgcontents/utils/migrate.py
# def upgrade(db_url, revision):
# """
# Upgrade the given database to revision.
# """
# with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini:
# subprocess.check_call(
# ['alembic', '-c', alembic_ini, 'upgrade', revision]
# )
. Output only the next line. | nb.cells.append(new_markdown_cell('markdown_{}'.format(name))) |
Given the following code snippet before the placeholder: <|code_start|> migrate_testing_db()
@nottest
def drop_testing_db_tables():
"""
Drop all tables from the testing db.
"""
engine = create_engine(TEST_DB_URL)
conn = engine.connect()
trans = conn.begin()
conn.execute('DROP SCHEMA IF EXISTS pgcontents CASCADE')
conn.execute('DROP TABLE IF EXISTS alembic_version CASCADE')
trans.commit()
@nottest
def migrate_testing_db(revision='head'):
"""
Migrate the testing db to the latest alembic revision.
"""
upgrade(TEST_DB_URL, revision)
@nottest
def test_notebook(name):
"""
Make a test notebook for the given name.
"""
<|code_end|>
, predict the next line using imports from the current file:
from contextlib import contextmanager
from cryptography.fernet import Fernet
from getpass import getuser
from itertools import starmap
from unicodedata import normalize
from IPython.utils import py3compat
from nose.tools import nottest
from sqlalchemy import create_engine
from tornado.web import HTTPError
from ..api_utils import api_path_join
from ..crypto import FernetEncryption
from ..schema import metadata
from ..utils.ipycompat import (
new_code_cell,
new_markdown_cell,
new_notebook,
new_raw_cell,
)
from ..utils.migrate import upgrade
import os
import posixpath
and context including class names, function names, and sometimes code from other files:
# Path: pgcontents/api_utils.py
# def api_path_join(*paths):
# """
# Join API-style paths.
# """
# return posixpath.join(*paths).strip('/')
#
# Path: pgcontents/crypto.py
# class FernetEncryption(object):
# """
# Notebook encryption using cryptography.fernet for symmetric-key encryption.
#
# Parameters
# ----------
# fernet : cryptography.fernet.Fernet
# The Fernet object to use for encryption.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
#
# Notes
# -----
# ``cryptography.fernet.MultiFernet`` can be used instead of a vanilla
# ``Fernet`` to allow zero-downtime key rotation.
#
# See Also
# --------
# :func:`pgcontents.utils.sync.reencrypt_user`
# """
# __slots__ = ('_fernet',)
#
# def __init__(self, fernet):
# self._fernet = fernet
#
# def encrypt(self, s):
# return self._fernet.encrypt(s)
#
# def decrypt(self, s):
# try:
# return self._fernet.decrypt(s)
# except Exception as e:
# raise CorruptedFile(e)
#
# def __copy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# def __deepcopy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# Path: pgcontents/schema.py
#
# Path: pgcontents/utils/ipycompat.py
#
# Path: pgcontents/utils/migrate.py
# def upgrade(db_url, revision):
# """
# Upgrade the given database to revision.
# """
# with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini:
# subprocess.check_call(
# ['alembic', '-c', alembic_ini, 'upgrade', revision]
# )
. Output only the next line. | nb = new_notebook() |
Given the code snippet: <|code_start|>
@nottest
def drop_testing_db_tables():
"""
Drop all tables from the testing db.
"""
engine = create_engine(TEST_DB_URL)
conn = engine.connect()
trans = conn.begin()
conn.execute('DROP SCHEMA IF EXISTS pgcontents CASCADE')
conn.execute('DROP TABLE IF EXISTS alembic_version CASCADE')
trans.commit()
@nottest
def migrate_testing_db(revision='head'):
"""
Migrate the testing db to the latest alembic revision.
"""
upgrade(TEST_DB_URL, revision)
@nottest
def test_notebook(name):
"""
Make a test notebook for the given name.
"""
nb = new_notebook()
nb.cells.append(new_code_cell("'code_' + '{}'".format(name)))
<|code_end|>
, generate the next line using the imports in this file:
from contextlib import contextmanager
from cryptography.fernet import Fernet
from getpass import getuser
from itertools import starmap
from unicodedata import normalize
from IPython.utils import py3compat
from nose.tools import nottest
from sqlalchemy import create_engine
from tornado.web import HTTPError
from ..api_utils import api_path_join
from ..crypto import FernetEncryption
from ..schema import metadata
from ..utils.ipycompat import (
new_code_cell,
new_markdown_cell,
new_notebook,
new_raw_cell,
)
from ..utils.migrate import upgrade
import os
import posixpath
and context (functions, classes, or occasionally code) from other files:
# Path: pgcontents/api_utils.py
# def api_path_join(*paths):
# """
# Join API-style paths.
# """
# return posixpath.join(*paths).strip('/')
#
# Path: pgcontents/crypto.py
# class FernetEncryption(object):
# """
# Notebook encryption using cryptography.fernet for symmetric-key encryption.
#
# Parameters
# ----------
# fernet : cryptography.fernet.Fernet
# The Fernet object to use for encryption.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
#
# Notes
# -----
# ``cryptography.fernet.MultiFernet`` can be used instead of a vanilla
# ``Fernet`` to allow zero-downtime key rotation.
#
# See Also
# --------
# :func:`pgcontents.utils.sync.reencrypt_user`
# """
# __slots__ = ('_fernet',)
#
# def __init__(self, fernet):
# self._fernet = fernet
#
# def encrypt(self, s):
# return self._fernet.encrypt(s)
#
# def decrypt(self, s):
# try:
# return self._fernet.decrypt(s)
# except Exception as e:
# raise CorruptedFile(e)
#
# def __copy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# def __deepcopy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# Path: pgcontents/schema.py
#
# Path: pgcontents/utils/ipycompat.py
#
# Path: pgcontents/utils/migrate.py
# def upgrade(db_url, revision):
# """
# Upgrade the given database to revision.
# """
# with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini:
# subprocess.check_call(
# ['alembic', '-c', alembic_ini, 'upgrade', revision]
# )
. Output only the next line. | nb.cells.append(new_raw_cell("raw_{}".format(name))) |
Given the code snippet: <|code_start|>
@nottest
def remigrate_test_schema():
"""
Drop recreate the test db schema.
"""
drop_testing_db_tables()
migrate_testing_db()
@nottest
def drop_testing_db_tables():
"""
Drop all tables from the testing db.
"""
engine = create_engine(TEST_DB_URL)
conn = engine.connect()
trans = conn.begin()
conn.execute('DROP SCHEMA IF EXISTS pgcontents CASCADE')
conn.execute('DROP TABLE IF EXISTS alembic_version CASCADE')
trans.commit()
@nottest
def migrate_testing_db(revision='head'):
"""
Migrate the testing db to the latest alembic revision.
"""
<|code_end|>
, generate the next line using the imports in this file:
from contextlib import contextmanager
from cryptography.fernet import Fernet
from getpass import getuser
from itertools import starmap
from unicodedata import normalize
from IPython.utils import py3compat
from nose.tools import nottest
from sqlalchemy import create_engine
from tornado.web import HTTPError
from ..api_utils import api_path_join
from ..crypto import FernetEncryption
from ..schema import metadata
from ..utils.ipycompat import (
new_code_cell,
new_markdown_cell,
new_notebook,
new_raw_cell,
)
from ..utils.migrate import upgrade
import os
import posixpath
and context (functions, classes, or occasionally code) from other files:
# Path: pgcontents/api_utils.py
# def api_path_join(*paths):
# """
# Join API-style paths.
# """
# return posixpath.join(*paths).strip('/')
#
# Path: pgcontents/crypto.py
# class FernetEncryption(object):
# """
# Notebook encryption using cryptography.fernet for symmetric-key encryption.
#
# Parameters
# ----------
# fernet : cryptography.fernet.Fernet
# The Fernet object to use for encryption.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
#
# Notes
# -----
# ``cryptography.fernet.MultiFernet`` can be used instead of a vanilla
# ``Fernet`` to allow zero-downtime key rotation.
#
# See Also
# --------
# :func:`pgcontents.utils.sync.reencrypt_user`
# """
# __slots__ = ('_fernet',)
#
# def __init__(self, fernet):
# self._fernet = fernet
#
# def encrypt(self, s):
# return self._fernet.encrypt(s)
#
# def decrypt(self, s):
# try:
# return self._fernet.decrypt(s)
# except Exception as e:
# raise CorruptedFile(e)
#
# def __copy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# def __deepcopy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# Path: pgcontents/schema.py
#
# Path: pgcontents/utils/ipycompat.py
#
# Path: pgcontents/utils/migrate.py
# def upgrade(db_url, revision):
# """
# Upgrade the given database to revision.
# """
# with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini:
# subprocess.check_call(
# ['alembic', '-c', alembic_ini, 'upgrade', revision]
# )
. Output only the next line. | upgrade(TEST_DB_URL, revision) |
Next line prediction: <|code_start|> ----------
fernet : cryptography.fernet.Fernet
The Fernet object to use for encryption.
Methods
-------
encrypt : callable[bytes -> bytes]
decrypt : callable[bytes -> bytes]
Notes
-----
``cryptography.fernet.MultiFernet`` can be used instead of a vanilla
``Fernet`` to allow zero-downtime key rotation.
See Also
--------
:func:`pgcontents.utils.sync.reencrypt_user`
"""
__slots__ = ('_fernet',)
def __init__(self, fernet):
self._fernet = fernet
def encrypt(self, s):
return self._fernet.encrypt(s)
def decrypt(self, s):
try:
return self._fernet.decrypt(s)
except Exception as e:
<|code_end|>
. Use current file imports:
(import sys
import base64
from functools import wraps
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from .error import CorruptedFile)
and context including class names, function names, or small code snippets from other files:
# Path: pgcontents/error.py
# class CorruptedFile(Exception):
# pass
. Output only the next line. | raise CorruptedFile(e) |
Predict the next line for this snippet: <|code_start|>"""
Utilities for running migrations.
"""
@contextmanager
def temp_alembic_ini(alembic_dir_location, sqlalchemy_url):
"""
Temporarily write an alembic.ini file for use with alembic migration
scripts.
"""
with TemporaryDirectory() as tempdir:
alembic_ini_filename = join(tempdir, 'temp_alembic.ini')
with open(alembic_ini_filename, 'w') as f:
f.write(
<|code_end|>
with the help of current file imports:
from contextlib import contextmanager
from os.path import join
from IPython.utils.tempdir import TemporaryDirectory
from pgcontents.constants import (
ALEMBIC_INI_TEMPLATE,
ALEMBIC_DIR_LOCATION,
)
import subprocess
and context from other files:
# Path: pgcontents/constants.py
# ALEMBIC_INI_TEMPLATE = f.read()
#
# ALEMBIC_DIR_LOCATION = join(dirname(__file__), 'alembic')
, which may contain function names, class names, or code. Output only the next line. | ALEMBIC_INI_TEMPLATE.format( |
Based on the snippet: <|code_start|>"""
Utilities for running migrations.
"""
@contextmanager
def temp_alembic_ini(alembic_dir_location, sqlalchemy_url):
"""
Temporarily write an alembic.ini file for use with alembic migration
scripts.
"""
with TemporaryDirectory() as tempdir:
alembic_ini_filename = join(tempdir, 'temp_alembic.ini')
with open(alembic_ini_filename, 'w') as f:
f.write(
ALEMBIC_INI_TEMPLATE.format(
alembic_dir_location=alembic_dir_location,
sqlalchemy_url=sqlalchemy_url,
)
)
yield alembic_ini_filename
def upgrade(db_url, revision):
"""
Upgrade the given database to revision.
"""
<|code_end|>
, predict the immediate next line with the help of imports:
from contextlib import contextmanager
from os.path import join
from IPython.utils.tempdir import TemporaryDirectory
from pgcontents.constants import (
ALEMBIC_INI_TEMPLATE,
ALEMBIC_DIR_LOCATION,
)
import subprocess
and context (classes, functions, sometimes code) from other files:
# Path: pgcontents/constants.py
# ALEMBIC_INI_TEMPLATE = f.read()
#
# ALEMBIC_DIR_LOCATION = join(dirname(__file__), 'alembic')
. Output only the next line. | with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini: |
Based on the snippet: <|code_start|>"""
Tests for notebook encryption utilities.
"""
class TestEncryption(TestCase):
def test_fernet_derivation(self):
pws = [u'currentpassword', u'oldpassword', None]
# This must be Unicode, so we use the `u` prefix to support py2.
user_id = u'4e322fa200fffd0001000001'
<|code_end|>
, predict the immediate next line with the help of imports:
from unittest import TestCase
from cryptography.fernet import Fernet
from ..crypto import (
derive_fallback_fernet_keys,
FallbackCrypto,
FernetEncryption,
memoize_single_arg,
NoEncryption,
single_password_crypto_factory,
)
and context (classes, functions, sometimes code) from other files:
# Path: pgcontents/crypto.py
# def derive_fallback_fernet_keys(passwords, user_id):
# """
# Derive a list of per-user Fernet keys from a list of master keys and a
# username.
#
# If a None is encountered in ``passwords``, it is forwarded.
#
# Parameters
# ----------
# passwords : list[unicode]
# List of ascii-encodable keys to derive.
# user_id : unicode or None
# ascii-encodable user_id to use as salt
# """
# # Normally I wouldn't advocate for these kinds of assertions, but we really
# # really really don't want to mess up deriving encryption keys.
# assert isinstance(passwords, (list, tuple)), \
# "Expected list or tuple of keys, got %s." % type(passwords)
#
# def derive_single_allow_none(k):
# if k is None:
# return None
# return derive_single_fernet_key(k, user_id).decode('ascii')
#
# return list(map(derive_single_allow_none, passwords))
#
# class FallbackCrypto(object):
# """
# Notebook encryption that accepts a list of crypto instances and decrypts by
# trying them in order.
#
# Sub-cryptos should raise ``CorruptedFile`` if they're unable to decrypt an
# input.
#
# This is conceptually similar to the technique used by
# ``cryptography.fernet.MultiFernet`` for implementing key rotation.
#
# Parameters
# ----------
# cryptos : list[object]
# A sequence of cryptos to use for decryption. cryptos[0] will always be
# used for encryption.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
#
# Notes
# -----
# Since NoEncryption will always succeed, it is only supported as the last
# entry in ``cryptos``. Passing a list with a NoEncryption not in the last
# location will raise a ValueError.
# """
# __slots__ = ('_cryptos',)
#
# def __init__(self, cryptos):
# # Only the last crypto can be a ``NoEncryption``.
# for c in cryptos[:-1]:
# if isinstance(c, NoEncryption):
# raise ValueError(
# "NoEncryption is only supported as the last fallback."
# )
#
# self._cryptos = cryptos
#
# def encrypt(self, s):
# return self._cryptos[0].encrypt(s)
#
# def decrypt(self, s):
# errors = []
# for c in self._cryptos:
# try:
# return c.decrypt(s)
# except CorruptedFile as e:
# errors.append(e)
# raise CorruptedFile(errors)
#
# class FernetEncryption(object):
# """
# Notebook encryption using cryptography.fernet for symmetric-key encryption.
#
# Parameters
# ----------
# fernet : cryptography.fernet.Fernet
# The Fernet object to use for encryption.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
#
# Notes
# -----
# ``cryptography.fernet.MultiFernet`` can be used instead of a vanilla
# ``Fernet`` to allow zero-downtime key rotation.
#
# See Also
# --------
# :func:`pgcontents.utils.sync.reencrypt_user`
# """
# __slots__ = ('_fernet',)
#
# def __init__(self, fernet):
# self._fernet = fernet
#
# def encrypt(self, s):
# return self._fernet.encrypt(s)
#
# def decrypt(self, s):
# try:
# return self._fernet.decrypt(s)
# except Exception as e:
# raise CorruptedFile(e)
#
# def __copy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# def __deepcopy__(self, memo):
# # Any value that appears in an IPython/Jupyter Config object needs to
# # be deepcopy-able. Cryptography's Fernet objects aren't deepcopy-able,
# # so we copy our underlying state to a new FernetEncryption object.
# return FernetEncryption(self._fernet)
#
# def memoize_single_arg(f):
# """
# Decorator memoizing a single-argument function
# """
# memo = {}
#
# @wraps(f)
# def memoized_f(arg):
# try:
# return memo[arg]
# except KeyError:
# result = memo[arg] = f(arg)
# return result
# return memoized_f
#
# class NoEncryption(object):
# """
# No-op encryption backend.
#
# encrypt() and decrypt() simply return their inputs.
#
# Methods
# -------
# encrypt : callable[bytes -> bytes]
# decrypt : callable[bytes -> bytes]
# """
# def encrypt(self, b):
# return b
#
# def decrypt(self, b):
# return b
#
# def single_password_crypto_factory(password):
# """
# Create and return a function suitable for passing as a crypto_factory to
# ``pgcontents.utils.sync.reencrypt_all_users``
#
# The factory here returns a ``FernetEncryption`` that uses a key derived
# from ``password`` and salted with the supplied user_id.
# """
# @memoize_single_arg
# def factory(user_id):
# return FernetEncryption(
# Fernet(derive_single_fernet_key(password, user_id))
# )
# return factory
. Output only the next line. | current_crypto = single_password_crypto_factory(pws[0])(user_id) |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
"""
flask_via.examples.small.routes
===============================
A small ``Flask-Via`` example Flask application.
"""
routes = [
<|code_end|>
, determine the next line of code. You have imports:
from flask_via.examples.small import views
from flask.ext.via.routers import default, Include
and context (class names, function names, or code) available:
# Path: flask_via/examples/small/views.py
# def home():
# def contact():
# def about():
. Output only the next line. | default.Functional('/', views.home), |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""
tests.test_routers.test_restful
================================
Unit tests for the Flask-Restful resource router.
"""
class TestRestfulRouter(unittest.TestCase):
def setUp(self):
self.app = mock.MagicMock()
def test_add_to_app_raises_not_implemented(self):
<|code_end|>
, generate the next line using the imports in this file:
import mock
import unittest
from flask_via.routers import restful
and context (functions, classes, or occasionally code) from other files:
# Path: flask_via/routers/restful.py
# class Resource(BaseRouter):
# def __init__(self, url, resource, endpoint=None):
# def add_to_app(self, app, **kwargs):
. Output only the next line. | resource = restful.Resource('/', mock.MagicMock()) |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
"""
flask_via.examples.blueprints.baz.routes
========================================
A blueprint ``Flask-Via`` example Flask application.
"""
routes = [
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from flask_via.examples.blueprints.baz import views
from flask.ext.via.routers import default
and context:
# Path: flask_via/examples/blueprints/baz/views.py
# def baz():
which might include code, classes, or functions. Output only the next line. | default.Functional('/', views.baz), |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""
flask_via.examples.small.foo.routes
===================================
A small ``Flask-Via`` example Flask application.
"""
routes = [
<|code_end|>
, generate the next line using the imports in this file:
from flask_via.examples.small.foo import views
from flask.ext.via.routers import default, Include
and context (functions, classes, or occasionally code) from other files:
# Path: flask_via/examples/small/foo/views.py
# def foo():
. Output only the next line. | default.Functional('/foo', views.foo), |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
"""
flask_via.examples.include.foo.routes
=====================================
A include ``Flask-Via`` example Flask application.
"""
routes = [
<|code_end|>
using the current file's imports:
from flask_via.examples.include.foo.views import BarView, BazView
from flask.ext.via.routers import Include
from flask.ext.via.routers.default import Pluggable
and any relevant context from other files:
# Path: flask_via/examples/include/foo/views.py
# class BarView(MethodView):
#
# def get(self):
# return '{0.path} - {0.endpoint}'.format(request)
#
# class BazView(MethodView):
#
# def get(self):
# return '{0.path} - {0.endpoint}'.format(request)
. Output only the next line. | Pluggable('/bar', BarView, 'bar'), |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
"""
flask_via.examples.include.foo.routes
=====================================
A include ``Flask-Via`` example Flask application.
"""
routes = [
Pluggable('/bar', BarView, 'bar'),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from flask_via.examples.include.foo.views import BarView, BazView
from flask.ext.via.routers import Include
from flask.ext.via.routers.default import Pluggable
and context:
# Path: flask_via/examples/include/foo/views.py
# class BarView(MethodView):
#
# def get(self):
# return '{0.path} - {0.endpoint}'.format(request)
#
# class BazView(MethodView):
#
# def get(self):
# return '{0.path} - {0.endpoint}'.format(request)
which might include code, classes, or functions. Output only the next line. | Pluggable('/baz', BazView, 'baz'), |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
"""
flask_via.examples.include.foo.routes
=====================================
A include ``Flask-Via`` example Flask application.
"""
routes = [
<|code_end|>
, predict the next line using imports from the current file:
from flask_via.examples.include.bar.views import FooView, FazView, flop
from flask.ext.via.routers.default import Functional, Pluggable
and context including class names, function names, and sometimes code from other files:
# Path: flask_via/examples/include/bar/views.py
# class FooView(MethodView):
#
# def get(self):
# return '{0.path} - {0.endpoint}'.format(request)
#
# class FazView(MethodView):
#
# def get(self):
# return '{0.path} - {0.endpoint}'.format(request)
#
# def flop():
# return '{0.path} - {0.endpoint}'.format(request)
. Output only the next line. | Pluggable('/foo', FooView, 'foo'), |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
"""
flask_via.examples.include.foo.routes
=====================================
A include ``Flask-Via`` example Flask application.
"""
routes = [
Pluggable('/foo', FooView, 'foo'),
<|code_end|>
, predict the next line using imports from the current file:
from flask_via.examples.include.bar.views import FooView, FazView, flop
from flask.ext.via.routers.default import Functional, Pluggable
and context including class names, function names, and sometimes code from other files:
# Path: flask_via/examples/include/bar/views.py
# class FooView(MethodView):
#
# def get(self):
# return '{0.path} - {0.endpoint}'.format(request)
#
# class FazView(MethodView):
#
# def get(self):
# return '{0.path} - {0.endpoint}'.format(request)
#
# def flop():
# return '{0.path} - {0.endpoint}'.format(request)
. Output only the next line. | Pluggable('/faz', FazView, 'faz'), |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
"""
flask_via.examples.include.foo.routes
=====================================
A include ``Flask-Via`` example Flask application.
"""
routes = [
Pluggable('/foo', FooView, 'foo'),
Pluggable('/faz', FazView, 'faz'),
<|code_end|>
, predict the immediate next line with the help of imports:
from flask_via.examples.include.bar.views import FooView, FazView, flop
from flask.ext.via.routers.default import Functional, Pluggable
and context (classes, functions, sometimes code) from other files:
# Path: flask_via/examples/include/bar/views.py
# class FooView(MethodView):
#
# def get(self):
# return '{0.path} - {0.endpoint}'.format(request)
#
# class FazView(MethodView):
#
# def get(self):
# return '{0.path} - {0.endpoint}'.format(request)
#
# def flop():
# return '{0.path} - {0.endpoint}'.format(request)
. Output only the next line. | Functional('/flop', flop), |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
"""
tests.test_routers.test_admin
=============================
Unit tests for the Flask-Admin admin router.
"""
class TestRestfulRouter(unittest.TestCase):
def setUp(self):
self.app = mock.MagicMock()
def test_add_to_app_raises_not_implemented(self):
<|code_end|>
, determine the next line of code. You have imports:
import mock
import unittest
from flask_via.routers import admin
and context (class names, function names, or code) available:
# Path: flask_via/routers/admin.py
# class AdminRoute(BaseRouter):
# def __init__(self, view):
# def add_to_app(self, app, **kwargs):
. Output only the next line. | resource = admin.AdminRoute(mock.MagicMock()) |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
"""
flask_via.examples.blueprints.foo.routes
========================================
A blueprint ``Flask-Via`` example Flask application.
"""
routes = [
<|code_end|>
using the current file's imports:
from flask_via.examples.blueprints.foo import views
from flask.ext.via.routers import default
and any relevant context from other files:
# Path: flask_via/examples/blueprints/foo/views.py
# def baz():
. Output only the next line. | default.Functional('/baz', views.baz), |
Next line prediction: <|code_start|>
service_api = Blueprint('service_api', __name__)
@service_api.route('', methods=['GET'])
@login_required
def get_services():
"""
Gets the list of services.
"""
# TODO: pagination
return jsonify(Service.all())
@service_api.route('/<int:service_id>', methods=['GET'])
@required_access('admin')
def get_service(service_id):
"""
Gets a service.
"""
return jsonify(Service.get(service_id))
@service_api.route('', methods=['POST'])
@required_access('admin')
def create_service():
"""
Create a service. Must be an admin.
"""
<|code_end|>
. Use current file imports:
(from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import ServiceForm
from _15thnight.models import Service, Category
from _15thnight.util import required_access, jsonify, api_error)
and context including class names, function names, or small code snippets from other files:
# Path: _15thnight/forms.py
# class ServiceForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
# category = SelectField('Category', coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ServiceForm, self).__init__(*args, **kwargs)
# self.category.choices = [
# (category.id, category.name) for category in Category.all()
# ]
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Service.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
. Output only the next line. | form = ServiceForm() |
Predict the next line for this snippet: <|code_start|>
service_api = Blueprint('service_api', __name__)
@service_api.route('', methods=['GET'])
@login_required
def get_services():
"""
Gets the list of services.
"""
# TODO: pagination
<|code_end|>
with the help of current file imports:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import ServiceForm
from _15thnight.models import Service, Category
from _15thnight.util import required_access, jsonify, api_error
and context from other files:
# Path: _15thnight/forms.py
# class ServiceForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
# category = SelectField('Category', coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ServiceForm, self).__init__(*args, **kwargs)
# self.category.choices = [
# (category.id, category.name) for category in Category.all()
# ]
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Service.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
, which may contain function names, class names, or code. Output only the next line. | return jsonify(Service.all()) |
Continue the code snippet: <|code_start|>def get_services():
"""
Gets the list of services.
"""
# TODO: pagination
return jsonify(Service.all())
@service_api.route('/<int:service_id>', methods=['GET'])
@required_access('admin')
def get_service(service_id):
"""
Gets a service.
"""
return jsonify(Service.get(service_id))
@service_api.route('', methods=['POST'])
@required_access('admin')
def create_service():
"""
Create a service. Must be an admin.
"""
form = ServiceForm()
if not form.validate_on_submit():
return api_error(form.errors)
service = Service(
name=form.name.data,
description=form.description.data,
<|code_end|>
. Use current file imports:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import ServiceForm
from _15thnight.models import Service, Category
from _15thnight.util import required_access, jsonify, api_error
and context (classes, functions, or code) from other files:
# Path: _15thnight/forms.py
# class ServiceForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
# category = SelectField('Category', coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ServiceForm, self).__init__(*args, **kwargs)
# self.category.choices = [
# (category.id, category.name) for category in Category.all()
# ]
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Service.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
. Output only the next line. | category=Category.get(form.category.data) |
Using the snippet: <|code_start|>
service_api = Blueprint('service_api', __name__)
@service_api.route('', methods=['GET'])
@login_required
def get_services():
"""
Gets the list of services.
"""
# TODO: pagination
return jsonify(Service.all())
@service_api.route('/<int:service_id>', methods=['GET'])
<|code_end|>
, determine the next line of code. You have imports:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import ServiceForm
from _15thnight.models import Service, Category
from _15thnight.util import required_access, jsonify, api_error
and context (class names, function names, or code) available:
# Path: _15thnight/forms.py
# class ServiceForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
# category = SelectField('Category', coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ServiceForm, self).__init__(*args, **kwargs)
# self.category.choices = [
# (category.id, category.name) for category in Category.all()
# ]
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Service.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
. Output only the next line. | @required_access('admin') |
Predict the next line after this snippet: <|code_start|>
service_api = Blueprint('service_api', __name__)
@service_api.route('', methods=['GET'])
@login_required
def get_services():
"""
Gets the list of services.
"""
# TODO: pagination
<|code_end|>
using the current file's imports:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import ServiceForm
from _15thnight.models import Service, Category
from _15thnight.util import required_access, jsonify, api_error
and any relevant context from other files:
# Path: _15thnight/forms.py
# class ServiceForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
# category = SelectField('Category', coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ServiceForm, self).__init__(*args, **kwargs)
# self.category.choices = [
# (category.id, category.name) for category in Category.all()
# ]
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Service.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
. Output only the next line. | return jsonify(Service.all()) |
Given snippet: <|code_start|>service_api = Blueprint('service_api', __name__)
@service_api.route('', methods=['GET'])
@login_required
def get_services():
"""
Gets the list of services.
"""
# TODO: pagination
return jsonify(Service.all())
@service_api.route('/<int:service_id>', methods=['GET'])
@required_access('admin')
def get_service(service_id):
"""
Gets a service.
"""
return jsonify(Service.get(service_id))
@service_api.route('', methods=['POST'])
@required_access('admin')
def create_service():
"""
Create a service. Must be an admin.
"""
form = ServiceForm()
if not form.validate_on_submit():
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import ServiceForm
from _15thnight.models import Service, Category
from _15thnight.util import required_access, jsonify, api_error
and context:
# Path: _15thnight/forms.py
# class ServiceForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
# category = SelectField('Category', coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ServiceForm, self).__init__(*args, **kwargs)
# self.category.choices = [
# (category.id, category.name) for category in Category.all()
# ]
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Service.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
which might include code, classes, or functions. Output only the next line. | return api_error(form.errors) |
Predict the next line for this snippet: <|code_start|>"""user name organization
Revision ID: 7ee0d022e8d6
Revises: 3e7e65c44fc2
Create Date: 2016-08-22 22:27:02.630331
"""
# revision identifiers, used by Alembic.
revision = '7ee0d022e8d6'
down_revision = '3e7e65c44fc2'
branch_labels = None
depends_on = None
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column(
'users', sa.Column('name', sa.String(length=255), nullable=True))
op.add_column(
'users',
sa.Column('organization', sa.String(length=255), nullable=True))
op.execute(
<|code_end|>
with the help of current file imports:
from alembic import op
from _15thnight.models import User
import sqlalchemy as sa
and context from other files:
# Path: _15thnight/models.py
# class User(Model):
# """
# User Model.
#
# Required parameters:
# - email, password, phone_number
# """
# __tablename__ = "users"
# id = Column(Integer, primary_key=True)
# created_at = Column(DateTime, default=datetime.utcnow)
# updated = Column(DateTime, default=datetime.utcnow)
# name = Column(String(255), nullable=False)
# organization = Column(String(255), nullable=False)
# email = Column(String(255), nullable=False, unique=True)
# password = Column(Text, nullable=False)
# phone_number = Column(String(20), nullable=False)
# role = Column(Enum('admin', 'advocate', 'provider'), default='advocate')
# # Services only apply to providers
# services = relationship(
# "Service", secondary="user_services", backref="providers")
# reset_token = Column(String(255))
# reset_created_at = Column(DateTime)
#
# def __init__(self, name, organization, email, password, phone_number,
# services, role):
# self.name = name
# self.organization = organization
# self.email = email.lower()
# self.set_password(password)
# self.phone_number = phone_number
# self.role = role
# self.services = services
#
# def check_password(self, password):
# """Check a user's password (includes salt)."""
# return check_password_hash(self.password, password)
#
# def generate_reset_token(self):
# self.reset_token = str(uuid.uuid4())
# self.reset_created_at = datetime.utcnow()
#
# def get_alerts(self):
# return Alert.query.filter(Alert.user == self) \
# .order_by(desc(Alert.created_at)).all()
#
# def get_id(self):
# """Get the User id in unicode or ascii."""
# try:
# return unicode(self.id)
# except NameError:
# return str(self.id)
#
# def set_password(self, password):
# """Using pbkdf2:sha512, hash 'password'."""
# self.password = generate_password_hash(
# password=password,
# method='pbkdf2:sha512',
# salt_length=128
# )
#
# @property
# def is_admin(self):
# return self.role == 'admin'
#
# @property
# def is_advocate(self):
# return self.role == 'advocate'
#
# @property
# def is_provider(self):
# return self.role == 'provider'
#
# @property
# def is_authenticated(self):
# """Authenticaition check."""
# return True
#
# @property
# def is_active(self):
# """Active check."""
# return True
#
# @property
# def is_anonymous(self):
# """Anonimity check."""
# return False
#
# @classmethod
# def get_users(cls):
# return cls.query.order_by(desc(cls.created_at)).all()
#
# @classmethod
# def providers_with_services(cls, services):
# """Return a list of users in the passed in services."""
# return cls.query \
# .filter(cls.role == 'provider') \
# .join(cls.services) \
# .filter(Service.id.in_(services)) \
# .distinct() \
# .all()
#
# @classmethod
# def get_by_email(cls, email):
# """Return user based on email."""
# return cls.query.filter(cls.email == email).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# organization=self.organization,
# email=self.email,
# role=self.role,
# phone_number=self.phone_number,
# created_at=self.created_at,
# services=[dict(name=s.name, id=s.id) for s in self.services]
# )
#
# def __repr__(self):
# """Return <User: %(email)."""
# return '<User %s>' % (self.email)
, which may contain function names, class names, or code. Output only the next line. | User.__table__.update() |
Predict the next line for this snippet: <|code_start|> .join(cls.responses) \
.filter(Response.user_id == provider.id) \
.order_by(desc(cls.created_at)) \
.distinct().all()
return [alert.to_provider_json(provider) for alert in alerts]
def provider_has_permission(self, provider):
"""Checks if a provider was notified for this alert"""
provider_ids = map(
lambda notified: notified.provider_id, self.providers_notified)
return provider.id in provider_ids
def get_user_response(self, user):
response = Response.get_by_user_and_alert(user, self)
if response:
return response
return False
def to_json(self):
return dict(
id=self.id,
user=self.user,
created_at=to_local_datetime(self.created_at),
description=self.description,
gender=self.gender,
age=self.age,
needs=[need for need in self.needs]
)
def to_advocate_json(self):
<|code_end|>
with the help of current file imports:
import uuid
from datetime import datetime, timedelta
from flask import url_for
from sqlalchemy import (
Column, DateTime, Enum, ForeignKey, Integer, String, Table, Text, desc,
Boolean
)
from sqlalchemy.orm import backref, relationship
from werkzeug.security import check_password_hash, generate_password_hash
from _15thnight.database import Model
from _15thnight.util import extend, to_local_datetime
and context from other files:
# Path: _15thnight/util.py
# def extend(dict1, dict2):
# dict1.update(dict2)
# return dict1
#
# def to_local_datetime(dt):
# """
# datetime.isoformat does not append +0000 when using UTC, javascript
# needs it, or the date is parsed as if it were in the local timezone
# """
# if not dt:
# return None
# ldt = dt.isoformat()
# return ldt if ldt[-6] == "+" else "%s+0000" % ldt
, which may contain function names, class names, or code. Output only the next line. | return extend(self.to_json(), dict( |
Predict the next line for this snippet: <|code_start|> .filter(ProviderNotified.provider_id == provider.id) \
.order_by(desc(cls.created_at)) \
.distinct().all()
return [alert.to_provider_json(provider) for alert in alerts]
@classmethod
def get_responded_alerts_for_provider(cls, provider):
alerts = Alert.query \
.join(cls.responses) \
.filter(Response.user_id == provider.id) \
.order_by(desc(cls.created_at)) \
.distinct().all()
return [alert.to_provider_json(provider) for alert in alerts]
def provider_has_permission(self, provider):
"""Checks if a provider was notified for this alert"""
provider_ids = map(
lambda notified: notified.provider_id, self.providers_notified)
return provider.id in provider_ids
def get_user_response(self, user):
response = Response.get_by_user_and_alert(user, self)
if response:
return response
return False
def to_json(self):
return dict(
id=self.id,
user=self.user,
<|code_end|>
with the help of current file imports:
import uuid
from datetime import datetime, timedelta
from flask import url_for
from sqlalchemy import (
Column, DateTime, Enum, ForeignKey, Integer, String, Table, Text, desc,
Boolean
)
from sqlalchemy.orm import backref, relationship
from werkzeug.security import check_password_hash, generate_password_hash
from _15thnight.database import Model
from _15thnight.util import extend, to_local_datetime
and context from other files:
# Path: _15thnight/util.py
# def extend(dict1, dict2):
# dict1.update(dict2)
# return dict1
#
# def to_local_datetime(dt):
# """
# datetime.isoformat does not append +0000 when using UTC, javascript
# needs it, or the date is parsed as if it were in the local timezone
# """
# if not dt:
# return None
# ldt = dt.isoformat()
# return ldt if ldt[-6] == "+" else "%s+0000" % ldt
, which may contain function names, class names, or code. Output only the next line. | created_at=to_local_datetime(self.created_at), |
Using the snippet: <|code_start|>
category_api = Blueprint('category_api', __name__)
@category_api.route('', methods=['GET'])
@login_required
def get_categories():
"""
Gets the list of categories.
"""
# TODO: pagination
return jsonify(Category.all())
@category_api.route('/<int:category_id>', methods=['GET'])
@required_access('admin')
def get_category(category_id):
"""
Gets a category.
"""
return jsonify(Category.get(category_id))
@category_api.route('', methods=['POST'])
@required_access('admin')
def create_category():
"""
Create a category. Must be an admin.
"""
<|code_end|>
, determine the next line of code. You have imports:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import CategoryForm
from _15thnight.models import Category, Service
from _15thnight.util import required_access, jsonify, api_error
and context (class names, function names, or code) available:
# Path: _15thnight/forms.py
# class CategoryForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
#
# def __init__(self, *args, **kwargs):
# super(CategoryForm, self).__init__(*args, **kwargs)
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Category.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
. Output only the next line. | form = CategoryForm() |
Continue the code snippet: <|code_start|>
category_api = Blueprint('category_api', __name__)
@category_api.route('', methods=['GET'])
@login_required
def get_categories():
"""
Gets the list of categories.
"""
# TODO: pagination
<|code_end|>
. Use current file imports:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import CategoryForm
from _15thnight.models import Category, Service
from _15thnight.util import required_access, jsonify, api_error
and context (classes, functions, or code) from other files:
# Path: _15thnight/forms.py
# class CategoryForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
#
# def __init__(self, *args, **kwargs):
# super(CategoryForm, self).__init__(*args, **kwargs)
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Category.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
. Output only the next line. | return jsonify(Category.all()) |
Given the following code snippet before the placeholder: <|code_start|>
name = form.name.data
description = form.description.data
category = Category(name=name, description=description)
category.save()
return '', 201
@category_api.route('/<int:category_id>', methods=['PUT'])
@required_access('admin')
def update_category(category_id):
"""
Update an category.
"""
category = Category.get(category_id)
if not category:
return api_error('Category not found', 404)
form = CategoryForm(
validate_unique_name=category.name != request.json.get('name')
)
if not form.validate_on_submit():
return api_error(form.errors)
category.name = form.name.data
category.description = form.description.data
if 'services' in request.json:
services = request.json['services']
for data in services:
<|code_end|>
, predict the next line using imports from the current file:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import CategoryForm
from _15thnight.models import Category, Service
from _15thnight.util import required_access, jsonify, api_error
and context including class names, function names, and sometimes code from other files:
# Path: _15thnight/forms.py
# class CategoryForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
#
# def __init__(self, *args, **kwargs):
# super(CategoryForm, self).__init__(*args, **kwargs)
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Category.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
. Output only the next line. | service = Service.get(data['id']) |
Given the code snippet: <|code_start|>
category_api = Blueprint('category_api', __name__)
@category_api.route('', methods=['GET'])
@login_required
def get_categories():
"""
Gets the list of categories.
"""
# TODO: pagination
return jsonify(Category.all())
@category_api.route('/<int:category_id>', methods=['GET'])
<|code_end|>
, generate the next line using the imports in this file:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import CategoryForm
from _15thnight.models import Category, Service
from _15thnight.util import required_access, jsonify, api_error
and context (functions, classes, or occasionally code) from other files:
# Path: _15thnight/forms.py
# class CategoryForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
#
# def __init__(self, *args, **kwargs):
# super(CategoryForm, self).__init__(*args, **kwargs)
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Category.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
. Output only the next line. | @required_access('admin') |
Predict the next line after this snippet: <|code_start|>
category_api = Blueprint('category_api', __name__)
@category_api.route('', methods=['GET'])
@login_required
def get_categories():
"""
Gets the list of categories.
"""
# TODO: pagination
<|code_end|>
using the current file's imports:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import CategoryForm
from _15thnight.models import Category, Service
from _15thnight.util import required_access, jsonify, api_error
and any relevant context from other files:
# Path: _15thnight/forms.py
# class CategoryForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
#
# def __init__(self, *args, **kwargs):
# super(CategoryForm, self).__init__(*args, **kwargs)
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Category.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
. Output only the next line. | return jsonify(Category.all()) |
Predict the next line after this snippet: <|code_start|>category_api = Blueprint('category_api', __name__)
@category_api.route('', methods=['GET'])
@login_required
def get_categories():
"""
Gets the list of categories.
"""
# TODO: pagination
return jsonify(Category.all())
@category_api.route('/<int:category_id>', methods=['GET'])
@required_access('admin')
def get_category(category_id):
"""
Gets a category.
"""
return jsonify(Category.get(category_id))
@category_api.route('', methods=['POST'])
@required_access('admin')
def create_category():
"""
Create a category. Must be an admin.
"""
form = CategoryForm()
if not form.validate_on_submit():
<|code_end|>
using the current file's imports:
from flask import Blueprint, request
from flask.ext.login import login_required
from _15thnight.forms import CategoryForm
from _15thnight.models import Category, Service
from _15thnight.util import required_access, jsonify, api_error
and any relevant context from other files:
# Path: _15thnight/forms.py
# class CategoryForm(Form):
# name = TextField("Name", validators=[DataRequired()])
# description = TextAreaField("Description")
#
# def __init__(self, *args, **kwargs):
# super(CategoryForm, self).__init__(*args, **kwargs)
# self.validate_unique_name = kwargs.get('validate_unique_name', True)
#
# def validate_name(self, field):
# if self.validate_unique_name and Category.get_by_name(field.data):
# raise ValidationError('This service name is already in use.')
#
# Path: _15thnight/models.py
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
#
# Path: _15thnight/util.py
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
. Output only the next line. | return api_error(form.errors) |
Here is a snippet: <|code_start|>def mark_need_resolved(need_id):
"""
Resolve a need and close an alert if necessary.
Send out a message stating the alert was closed as well.
"""
need = Need.get(need_id)
# Check validity of need_id
if not need:
return api_error('Need not found')
if not current_user.is_admin and current_user.id != need.alert.user_id:
return api_error('Permission denied')
if need.resolved:
return api_error('Need already resolved!')
# validate the form
form = ResolveNeedForm(need=need)
if not form.validate_on_submit():
return api_error(form.errors)
# Update Need with form data, including setting resolved to True.
need.resolved = True
need.resolved_at = datetime.utcnow()
need.resolve_notes = form.notes.data
need.resolve_message = form.message.data
for provision in need.provisions:
provision.selected = provision.id in form.provisions.data
need.save()
# Check if alert is resolved, notify those involved
<|code_end|>
. Write the next line using the current file imports:
from datetime import datetime
from flask import Blueprint
from flask.ext.login import current_user
from _15thnight.core import resolve_need
from _15thnight.forms import ResolveNeedForm
from _15thnight.models import Need
from _15thnight.util import api_error, jsonify, required_access
and context from other files:
# Path: _15thnight/core.py
# def resolve_need(need):
# """
# Resolve a need and trigger an alert closed if necessary.
# """
# alert = need.alert
# advocate = alert.user
# message = ''
# if need.resolve_message != '':
# message = '\nMsg: ' + need.resolve_message
# gender = alert.get_gender()
# args = (advocate.name, advocate.organization,
# need.service.name, alert.age, gender)
# accepted = ('15th Night help accepted!\n'
# '%s with %s selected you to provide %s for a '
# '%d y/o%s%s') % (args + (message,))
# denied = ('15th Night help not needed\n'
# '%s with %s does not need your help to provide %s for a '
# '%d y/o%s') % args
#
# selected = set()
# users = set(map(
# lambda provision: provision.response.user, need.provisions))
# for provision in need.provisions:
# if provision.selected:
# selected.add(provision.response.user_id)
#
# for provider in users:
# body = accepted if provider.id in selected else denied
# queue_send_message.apply_async(
# kwargs=dict(
# email=advocate.email,
# number=advocate.phone_number,
# subject='15th Night Need Resolution',
# body=body
# )
# )
# # Check if alert is closed, if so, send out resolution notices
# _send_alert_resolution_notice(need)
#
# Path: _15thnight/forms.py
# class ResolveNeedForm(Form):
# notes = TextField('Notes')
# message = TextField('Message')
# provisions = SelectMultipleField('Provisions', choices=[], coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ResolveNeedForm, self).__init__(*args, **kwargs)
# need = kwargs.get('need')
# self.provisions.choices = [
# (provision.id, provision.id) for provision in need.provisions
# ]
#
# Path: _15thnight/models.py
# class Need(Model):
# __tablename__ = 'need'
#
# id = Column(Integer, primary_key=True)
# alert_id = Column(ForeignKey('alerts.id'), nullable=False)
# alert = relationship('Alert', backref='needs')
# service_id = Column(ForeignKey('service.id'), nullable=False)
# service = relationship('Service')
# resolved = Column(Boolean, default=False, nullable=False)
# resolved_at = Column(DateTime)
# resolve_notes = Column(Text)
# resolve_message = Column(Text)
#
# @classmethod
# def get_by_id_and_alert(cls, need_id, alert):
# return cls.query.filter(
# (cls.alert == alert) & (cls.id == need_id)).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# alert_id=self.alert_id,
# service=self.service,
# resolved=self.resolved,
# resolved_at=to_local_datetime(self.resolved_at)
# )
#
# def to_advocate_json(self):
# return extend(self.to_json(), dict(
# provisions=[
# provision.to_advocate_json() for provision in self.provisions
# ]
# ))
#
# def to_provider_json(self, provider):
# return extend(self.to_json(), dict(
# provisions=NeedProvided.get_by_need_and_provider(self, provider)
# ))
#
# Path: _15thnight/util.py
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
, which may include functions, classes, or code. Output only the next line. | resolve_need(need) |
Given the following code snippet before the placeholder: <|code_start|>
@need_api.route('/<int:need_id>')
@required_access('advocate', 'admin')
def get_need(need_id):
need = Need.get(need_id)
if not need:
return api_error('Need not found')
if not current_user.is_admin and current_user.id != need.alert.user_id:
return api_error('Permission denied')
return jsonify(need.to_advocate_json())
@need_api.route('/<int:need_id>/resolve', methods=['POST'])
@required_access('advocate', 'admin')
def mark_need_resolved(need_id):
"""
Resolve a need and close an alert if necessary.
Send out a message stating the alert was closed as well.
"""
need = Need.get(need_id)
# Check validity of need_id
if not need:
return api_error('Need not found')
if not current_user.is_admin and current_user.id != need.alert.user_id:
return api_error('Permission denied')
if need.resolved:
return api_error('Need already resolved!')
# validate the form
<|code_end|>
, predict the next line using imports from the current file:
from datetime import datetime
from flask import Blueprint
from flask.ext.login import current_user
from _15thnight.core import resolve_need
from _15thnight.forms import ResolveNeedForm
from _15thnight.models import Need
from _15thnight.util import api_error, jsonify, required_access
and context including class names, function names, and sometimes code from other files:
# Path: _15thnight/core.py
# def resolve_need(need):
# """
# Resolve a need and trigger an alert closed if necessary.
# """
# alert = need.alert
# advocate = alert.user
# message = ''
# if need.resolve_message != '':
# message = '\nMsg: ' + need.resolve_message
# gender = alert.get_gender()
# args = (advocate.name, advocate.organization,
# need.service.name, alert.age, gender)
# accepted = ('15th Night help accepted!\n'
# '%s with %s selected you to provide %s for a '
# '%d y/o%s%s') % (args + (message,))
# denied = ('15th Night help not needed\n'
# '%s with %s does not need your help to provide %s for a '
# '%d y/o%s') % args
#
# selected = set()
# users = set(map(
# lambda provision: provision.response.user, need.provisions))
# for provision in need.provisions:
# if provision.selected:
# selected.add(provision.response.user_id)
#
# for provider in users:
# body = accepted if provider.id in selected else denied
# queue_send_message.apply_async(
# kwargs=dict(
# email=advocate.email,
# number=advocate.phone_number,
# subject='15th Night Need Resolution',
# body=body
# )
# )
# # Check if alert is closed, if so, send out resolution notices
# _send_alert_resolution_notice(need)
#
# Path: _15thnight/forms.py
# class ResolveNeedForm(Form):
# notes = TextField('Notes')
# message = TextField('Message')
# provisions = SelectMultipleField('Provisions', choices=[], coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ResolveNeedForm, self).__init__(*args, **kwargs)
# need = kwargs.get('need')
# self.provisions.choices = [
# (provision.id, provision.id) for provision in need.provisions
# ]
#
# Path: _15thnight/models.py
# class Need(Model):
# __tablename__ = 'need'
#
# id = Column(Integer, primary_key=True)
# alert_id = Column(ForeignKey('alerts.id'), nullable=False)
# alert = relationship('Alert', backref='needs')
# service_id = Column(ForeignKey('service.id'), nullable=False)
# service = relationship('Service')
# resolved = Column(Boolean, default=False, nullable=False)
# resolved_at = Column(DateTime)
# resolve_notes = Column(Text)
# resolve_message = Column(Text)
#
# @classmethod
# def get_by_id_and_alert(cls, need_id, alert):
# return cls.query.filter(
# (cls.alert == alert) & (cls.id == need_id)).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# alert_id=self.alert_id,
# service=self.service,
# resolved=self.resolved,
# resolved_at=to_local_datetime(self.resolved_at)
# )
#
# def to_advocate_json(self):
# return extend(self.to_json(), dict(
# provisions=[
# provision.to_advocate_json() for provision in self.provisions
# ]
# ))
#
# def to_provider_json(self, provider):
# return extend(self.to_json(), dict(
# provisions=NeedProvided.get_by_need_and_provider(self, provider)
# ))
#
# Path: _15thnight/util.py
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
. Output only the next line. | form = ResolveNeedForm(need=need) |
Next line prediction: <|code_start|>
need_api = Blueprint('need_api', __name__)
@need_api.route('/<int:need_id>')
@required_access('advocate', 'admin')
def get_need(need_id):
<|code_end|>
. Use current file imports:
(from datetime import datetime
from flask import Blueprint
from flask.ext.login import current_user
from _15thnight.core import resolve_need
from _15thnight.forms import ResolveNeedForm
from _15thnight.models import Need
from _15thnight.util import api_error, jsonify, required_access)
and context including class names, function names, or small code snippets from other files:
# Path: _15thnight/core.py
# def resolve_need(need):
# """
# Resolve a need and trigger an alert closed if necessary.
# """
# alert = need.alert
# advocate = alert.user
# message = ''
# if need.resolve_message != '':
# message = '\nMsg: ' + need.resolve_message
# gender = alert.get_gender()
# args = (advocate.name, advocate.organization,
# need.service.name, alert.age, gender)
# accepted = ('15th Night help accepted!\n'
# '%s with %s selected you to provide %s for a '
# '%d y/o%s%s') % (args + (message,))
# denied = ('15th Night help not needed\n'
# '%s with %s does not need your help to provide %s for a '
# '%d y/o%s') % args
#
# selected = set()
# users = set(map(
# lambda provision: provision.response.user, need.provisions))
# for provision in need.provisions:
# if provision.selected:
# selected.add(provision.response.user_id)
#
# for provider in users:
# body = accepted if provider.id in selected else denied
# queue_send_message.apply_async(
# kwargs=dict(
# email=advocate.email,
# number=advocate.phone_number,
# subject='15th Night Need Resolution',
# body=body
# )
# )
# # Check if alert is closed, if so, send out resolution notices
# _send_alert_resolution_notice(need)
#
# Path: _15thnight/forms.py
# class ResolveNeedForm(Form):
# notes = TextField('Notes')
# message = TextField('Message')
# provisions = SelectMultipleField('Provisions', choices=[], coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ResolveNeedForm, self).__init__(*args, **kwargs)
# need = kwargs.get('need')
# self.provisions.choices = [
# (provision.id, provision.id) for provision in need.provisions
# ]
#
# Path: _15thnight/models.py
# class Need(Model):
# __tablename__ = 'need'
#
# id = Column(Integer, primary_key=True)
# alert_id = Column(ForeignKey('alerts.id'), nullable=False)
# alert = relationship('Alert', backref='needs')
# service_id = Column(ForeignKey('service.id'), nullable=False)
# service = relationship('Service')
# resolved = Column(Boolean, default=False, nullable=False)
# resolved_at = Column(DateTime)
# resolve_notes = Column(Text)
# resolve_message = Column(Text)
#
# @classmethod
# def get_by_id_and_alert(cls, need_id, alert):
# return cls.query.filter(
# (cls.alert == alert) & (cls.id == need_id)).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# alert_id=self.alert_id,
# service=self.service,
# resolved=self.resolved,
# resolved_at=to_local_datetime(self.resolved_at)
# )
#
# def to_advocate_json(self):
# return extend(self.to_json(), dict(
# provisions=[
# provision.to_advocate_json() for provision in self.provisions
# ]
# ))
#
# def to_provider_json(self, provider):
# return extend(self.to_json(), dict(
# provisions=NeedProvided.get_by_need_and_provider(self, provider)
# ))
#
# Path: _15thnight/util.py
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
. Output only the next line. | need = Need.get(need_id) |
Predict the next line after this snippet: <|code_start|>
need_api = Blueprint('need_api', __name__)
@need_api.route('/<int:need_id>')
@required_access('advocate', 'admin')
def get_need(need_id):
need = Need.get(need_id)
if not need:
<|code_end|>
using the current file's imports:
from datetime import datetime
from flask import Blueprint
from flask.ext.login import current_user
from _15thnight.core import resolve_need
from _15thnight.forms import ResolveNeedForm
from _15thnight.models import Need
from _15thnight.util import api_error, jsonify, required_access
and any relevant context from other files:
# Path: _15thnight/core.py
# def resolve_need(need):
# """
# Resolve a need and trigger an alert closed if necessary.
# """
# alert = need.alert
# advocate = alert.user
# message = ''
# if need.resolve_message != '':
# message = '\nMsg: ' + need.resolve_message
# gender = alert.get_gender()
# args = (advocate.name, advocate.organization,
# need.service.name, alert.age, gender)
# accepted = ('15th Night help accepted!\n'
# '%s with %s selected you to provide %s for a '
# '%d y/o%s%s') % (args + (message,))
# denied = ('15th Night help not needed\n'
# '%s with %s does not need your help to provide %s for a '
# '%d y/o%s') % args
#
# selected = set()
# users = set(map(
# lambda provision: provision.response.user, need.provisions))
# for provision in need.provisions:
# if provision.selected:
# selected.add(provision.response.user_id)
#
# for provider in users:
# body = accepted if provider.id in selected else denied
# queue_send_message.apply_async(
# kwargs=dict(
# email=advocate.email,
# number=advocate.phone_number,
# subject='15th Night Need Resolution',
# body=body
# )
# )
# # Check if alert is closed, if so, send out resolution notices
# _send_alert_resolution_notice(need)
#
# Path: _15thnight/forms.py
# class ResolveNeedForm(Form):
# notes = TextField('Notes')
# message = TextField('Message')
# provisions = SelectMultipleField('Provisions', choices=[], coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ResolveNeedForm, self).__init__(*args, **kwargs)
# need = kwargs.get('need')
# self.provisions.choices = [
# (provision.id, provision.id) for provision in need.provisions
# ]
#
# Path: _15thnight/models.py
# class Need(Model):
# __tablename__ = 'need'
#
# id = Column(Integer, primary_key=True)
# alert_id = Column(ForeignKey('alerts.id'), nullable=False)
# alert = relationship('Alert', backref='needs')
# service_id = Column(ForeignKey('service.id'), nullable=False)
# service = relationship('Service')
# resolved = Column(Boolean, default=False, nullable=False)
# resolved_at = Column(DateTime)
# resolve_notes = Column(Text)
# resolve_message = Column(Text)
#
# @classmethod
# def get_by_id_and_alert(cls, need_id, alert):
# return cls.query.filter(
# (cls.alert == alert) & (cls.id == need_id)).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# alert_id=self.alert_id,
# service=self.service,
# resolved=self.resolved,
# resolved_at=to_local_datetime(self.resolved_at)
# )
#
# def to_advocate_json(self):
# return extend(self.to_json(), dict(
# provisions=[
# provision.to_advocate_json() for provision in self.provisions
# ]
# ))
#
# def to_provider_json(self, provider):
# return extend(self.to_json(), dict(
# provisions=NeedProvided.get_by_need_and_provider(self, provider)
# ))
#
# Path: _15thnight/util.py
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
. Output only the next line. | return api_error('Need not found') |
Given the code snippet: <|code_start|>
need_api = Blueprint('need_api', __name__)
@need_api.route('/<int:need_id>')
@required_access('advocate', 'admin')
def get_need(need_id):
need = Need.get(need_id)
if not need:
return api_error('Need not found')
if not current_user.is_admin and current_user.id != need.alert.user_id:
return api_error('Permission denied')
<|code_end|>
, generate the next line using the imports in this file:
from datetime import datetime
from flask import Blueprint
from flask.ext.login import current_user
from _15thnight.core import resolve_need
from _15thnight.forms import ResolveNeedForm
from _15thnight.models import Need
from _15thnight.util import api_error, jsonify, required_access
and context (functions, classes, or occasionally code) from other files:
# Path: _15thnight/core.py
# def resolve_need(need):
# """
# Resolve a need and trigger an alert closed if necessary.
# """
# alert = need.alert
# advocate = alert.user
# message = ''
# if need.resolve_message != '':
# message = '\nMsg: ' + need.resolve_message
# gender = alert.get_gender()
# args = (advocate.name, advocate.organization,
# need.service.name, alert.age, gender)
# accepted = ('15th Night help accepted!\n'
# '%s with %s selected you to provide %s for a '
# '%d y/o%s%s') % (args + (message,))
# denied = ('15th Night help not needed\n'
# '%s with %s does not need your help to provide %s for a '
# '%d y/o%s') % args
#
# selected = set()
# users = set(map(
# lambda provision: provision.response.user, need.provisions))
# for provision in need.provisions:
# if provision.selected:
# selected.add(provision.response.user_id)
#
# for provider in users:
# body = accepted if provider.id in selected else denied
# queue_send_message.apply_async(
# kwargs=dict(
# email=advocate.email,
# number=advocate.phone_number,
# subject='15th Night Need Resolution',
# body=body
# )
# )
# # Check if alert is closed, if so, send out resolution notices
# _send_alert_resolution_notice(need)
#
# Path: _15thnight/forms.py
# class ResolveNeedForm(Form):
# notes = TextField('Notes')
# message = TextField('Message')
# provisions = SelectMultipleField('Provisions', choices=[], coerce=int)
#
# def __init__(self, *args, **kwargs):
# super(ResolveNeedForm, self).__init__(*args, **kwargs)
# need = kwargs.get('need')
# self.provisions.choices = [
# (provision.id, provision.id) for provision in need.provisions
# ]
#
# Path: _15thnight/models.py
# class Need(Model):
# __tablename__ = 'need'
#
# id = Column(Integer, primary_key=True)
# alert_id = Column(ForeignKey('alerts.id'), nullable=False)
# alert = relationship('Alert', backref='needs')
# service_id = Column(ForeignKey('service.id'), nullable=False)
# service = relationship('Service')
# resolved = Column(Boolean, default=False, nullable=False)
# resolved_at = Column(DateTime)
# resolve_notes = Column(Text)
# resolve_message = Column(Text)
#
# @classmethod
# def get_by_id_and_alert(cls, need_id, alert):
# return cls.query.filter(
# (cls.alert == alert) & (cls.id == need_id)).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# alert_id=self.alert_id,
# service=self.service,
# resolved=self.resolved,
# resolved_at=to_local_datetime(self.resolved_at)
# )
#
# def to_advocate_json(self):
# return extend(self.to_json(), dict(
# provisions=[
# provision.to_advocate_json() for provision in self.provisions
# ]
# ))
#
# def to_provider_json(self, provider):
# return extend(self.to_json(), dict(
# provisions=NeedProvided.get_by_need_and_provider(self, provider)
# ))
#
# Path: _15thnight/util.py
# def api_error(message='Bad Request', code=400):
# return jsonify(error=message, _status_code=code)
#
# def jsonify(*args, **kwargs):
# """Returns a json response"""
# data = None
# indent = not request.is_xhr
# status = kwargs.pop('_status_code', 200)
# if args:
# data = args[0] if len(args) == 1 else args
# if kwargs:
# if data:
# if type(data) != list:
# data = [data]
# data.append(dict(**kwargs))
# else:
# data = dict(**kwargs)
# return current_app.response_class(
# dumps(data, indent=indent), status=status, mimetype='application/json'
# )
#
# def required_access(*roles):
# def templated(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# if current_user.is_anonymous or current_user.role not in roles:
# return 'Access Denied.', 403
# return f(*args, **kwargs)
# return decorated
# return templated
. Output only the next line. | return jsonify(need.to_advocate_json()) |
Next line prediction: <|code_start|>
try:
except:
celery = Celery('15thnight', broker=CELERY_BROKER)
def init_app(app):
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
@celery.task
def queue_send_message(email, number, subject, body):
"""
Celery task to send messages out in sms and email.
"""
if number:
send_sms(to_number=number, body=body)
message = Message(body=body, subject=subject, recipients=[email])
<|code_end|>
. Use current file imports:
(from celery import Celery
from flask_mail import Message
from _15thnight.email import mailer
from _15thnight.twilio_client import send_sms
from config import CELERY_BROKER
from configdist import CELERY_BROKER)
and context including class names, function names, or small code snippets from other files:
# Path: _15thnight/email.py
. Output only the next line. | mailer.send(message) |
Next line prediction: <|code_start|>"""sort order not null
Revision ID: 3e7e65c44fc2
Revises: 368b199625d8
Create Date: 2016-08-21 03:05:06.778204
"""
# revision identifiers, used by Alembic.
revision = '3e7e65c44fc2'
down_revision = '368b199625d8'
branch_labels = None
depends_on = None
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute(
<|code_end|>
. Use current file imports:
(from alembic import op
from _15thnight.models import Category, Service
import sqlalchemy as sa)
and context including class names, function names, or small code snippets from other files:
# Path: _15thnight/models.py
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
. Output only the next line. | Category.__table__.update() |
Predict the next line after this snippet: <|code_start|>"""sort order not null
Revision ID: 3e7e65c44fc2
Revises: 368b199625d8
Create Date: 2016-08-21 03:05:06.778204
"""
# revision identifiers, used by Alembic.
revision = '3e7e65c44fc2'
down_revision = '368b199625d8'
branch_labels = None
depends_on = None
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute(
Category.__table__.update()
.where(Category.__table__.c.sort_order == None)
.values(**{'sort_order': 0})
)
op.alter_column(
'categories', 'sort_order', existing_type=sa.INTEGER(),
nullable=False)
op.execute(
<|code_end|>
using the current file's imports:
from alembic import op
from _15thnight.models import Category, Service
import sqlalchemy as sa
and any relevant context from other files:
# Path: _15thnight/models.py
# class Category(Model):
# """Category/type of provided help representation."""
# __tablename__ = "categories"
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def all(cls):
# return cls.query.order_by(cls.sort_order).all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# services=Service.get_by_category(self.id),
# sort_order=self.sort_order
# )
#
# class Service(Model):
# """Service of provider."""
# __tablename__ = 'service'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), nullable=False, unique=True)
# description = Column(Text)
# category_id = Column(ForeignKey('categories.id'), nullable=False)
# category = relationship(
# 'Category', backref=backref('services', cascade="all, delete-orphan"))
# sort_order = Column(Integer, nullable=False, default=0)
#
# @classmethod
# def get_by_category(cls, category_id):
# return cls.query.filter(cls.category_id == category_id) \
# .order_by(cls.sort_order) \
# .all()
#
# @classmethod
# def get_by_name(cls, name):
# return cls.query.filter(cls.name == name).first()
#
# def to_json(self):
# return dict(
# id=self.id,
# name=self.name,
# description=self.description,
# category=dict(
# id=self.category.id,
# name=self.category.name,
# description=self.category.description
# ),
# sort_order=self.sort_order
# )
. Output only the next line. | Service.__table__.update() |
Next line prediction: <|code_start|># useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE-SCHEMAS; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
def test_city():
schema = load_schema('conferences')
subschema = schema['properties']['addresses']
record = {
'addresses': [
{
'cities': [
'Tokyo',
],
},
],
}
<|code_end|>
. Use current file imports:
(from inspire_schemas.api import load_schema, validate
from inspire_schemas.readers.conference import ConferenceReader)
and context including class names, function names, or small code snippets from other files:
# Path: inspire_schemas/api.py
#
# Path: inspire_schemas/readers/conference.py
# class ConferenceReader(object):
# """Conference record reader."""
#
# def __init__(self, record):
# self.record = record
#
# @property
# def city(self):
# """Return the first city of a Conference record.
#
# Returns:
# string: the first city of the Conference record.
#
# Examples:
# >>> record = {'addresses': [{'cities': ['Tokyo']}]}
# >>> ConferenceReader(record).city
# 'Tokyo'
#
# """
# return get_value(self.record, 'addresses.cities[0][0]', default='')
#
# @property
# def country(self):
# """Return the first country of a Conference record.
#
# Returns:
# string: the first country of the Conference record.
#
# Examples:
# >>> record = {'address': [{'country_code': 'JP'}]}
# >>> ConferenceReader(record).country
# 'jp'
#
# """
# return get_value(
# self.record,
# 'addresses.country_code[0]',
# default=''
# ).lower()
#
# @property
# def end_date(self):
# """Return the closing date of a conference record.
#
# Returns:
# string: the closing date of the Conference record.
#
# Examples:
# >>> record = {'closing_date': '1999-11-19'}
# >>> ConferenceReader(record).end_date
# '1999-11-19'
#
# """
# return self.record.get('closing_date', '')
#
# @property
# def start_date(self):
# """Return the opening date of a conference record.
#
# Returns:
# string: the opening date of the Conference record.
#
# Examples:
# >>> record = {'opening_date': '1999-11-16'}
# >>> ConferenceReader(record).start_date
# '1999-11-16'
#
# """
# return self.record.get('opening_date', '')
. Output only the next line. | assert validate(record['addresses'], subschema) is None |
Predict the next line after this snippet: <|code_start|>#
# You should have received a copy of the GNU General Public License
# along with INSPIRE-SCHEMAS; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
def test_city():
schema = load_schema('conferences')
subschema = schema['properties']['addresses']
record = {
'addresses': [
{
'cities': [
'Tokyo',
],
},
],
}
assert validate(record['addresses'], subschema) is None
expected = 'Tokyo'
<|code_end|>
using the current file's imports:
from inspire_schemas.api import load_schema, validate
from inspire_schemas.readers.conference import ConferenceReader
and any relevant context from other files:
# Path: inspire_schemas/api.py
#
# Path: inspire_schemas/readers/conference.py
# class ConferenceReader(object):
# """Conference record reader."""
#
# def __init__(self, record):
# self.record = record
#
# @property
# def city(self):
# """Return the first city of a Conference record.
#
# Returns:
# string: the first city of the Conference record.
#
# Examples:
# >>> record = {'addresses': [{'cities': ['Tokyo']}]}
# >>> ConferenceReader(record).city
# 'Tokyo'
#
# """
# return get_value(self.record, 'addresses.cities[0][0]', default='')
#
# @property
# def country(self):
# """Return the first country of a Conference record.
#
# Returns:
# string: the first country of the Conference record.
#
# Examples:
# >>> record = {'address': [{'country_code': 'JP'}]}
# >>> ConferenceReader(record).country
# 'jp'
#
# """
# return get_value(
# self.record,
# 'addresses.country_code[0]',
# default=''
# ).lower()
#
# @property
# def end_date(self):
# """Return the closing date of a conference record.
#
# Returns:
# string: the closing date of the Conference record.
#
# Examples:
# >>> record = {'closing_date': '1999-11-19'}
# >>> ConferenceReader(record).end_date
# '1999-11-19'
#
# """
# return self.record.get('closing_date', '')
#
# @property
# def start_date(self):
# """Return the opening date of a conference record.
#
# Returns:
# string: the opening date of the Conference record.
#
# Examples:
# >>> record = {'opening_date': '1999-11-16'}
# >>> ConferenceReader(record).start_date
# '1999-11-16'
#
# """
# return self.record.get('opening_date', '')
. Output only the next line. | result = ConferenceReader(record).city |
Based on the snippet: <|code_start|>
from __future__ import absolute_import, division, print_function
class SeminarBuilder(RecordBuilder):
"""Seminar record builder."""
_collections = ['Seminars']
@staticmethod
def _prepare_url(value, description=None):
"""Build url dict satysfying url.yml requirements
Args:
value (str): URL itself
description (str): URL description
"""
entry = {
'value': value
}
if description:
entry['description'] = description
return entry
def validate_record(self):
"""Validate the record in according to the hep schema."""
validate(self.record, 'seminars')
<|code_end|>
, predict the immediate next line with the help of imports:
import six
from inspire_utils.name import normalize_name
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import (
filter_empty_parameters,
sanitize_html,
validate,
)
from inspire_utils.date import normalize_date
and context (classes, functions, sometimes code) from other files:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def sanitize_html(text):
# """Sanitize HTML for use inside records fields.
#
# This strips most of the tags and attributes, only allowing a safe whitelisted subset."""
# return _bleach_cleaner.clean(text)
#
# def validate(data, schema=None):
# """Validate the given dictionary against the given schema.
#
# Args:
# data (dict): record to validate.
# schema (Union[dict, str]): schema to validate against. If it is a
# string, it is intepreted as the name of the schema to load (e.g.
# ``authors`` or ``jobs``). If it is ``None``, the schema is taken
# from ``data['$schema']``. If it is a dictionary, it is used
# directly.
#
# Raises:
# SchemaNotFound: if the given schema was not found.
# SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
# found in ``data``.
# jsonschema.SchemaError: if the schema is invalid.
# jsonschema.ValidationError: if the data is invalid.
# """
# schema = _load_schema_for_record(data, schema)
#
# return jsonschema_validate(
# instance=data,
# schema=schema,
# resolver=LocalRefResolver.from_schema(schema),
# format_checker=inspire_format_checker,
# )
. Output only the next line. | @filter_empty_parameters |
Given the code snippet: <|code_start|> Args:
captioned (boolean)
"""
if captioned is not None:
self.record['captioned'] = captioned
def set_end_datetime(self, date=None):
"""
Args:
date (str)
"""
if date is not None:
self.record['end_datetime'] = date
def set_start_datetime(self, date=None):
"""
Args:
date (str)
"""
if date is not None:
self.record['start_datetime'] = date
def set_abstract(self, value, source=None):
"""
Args:
value (str): the description to set.
source (str): source of the description.
"""
self.record['abstract'] = self._sourced_dict(
source=source,
<|code_end|>
, generate the next line using the imports in this file:
import six
from inspire_utils.name import normalize_name
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import (
filter_empty_parameters,
sanitize_html,
validate,
)
from inspire_utils.date import normalize_date
and context (functions, classes, or occasionally code) from other files:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def sanitize_html(text):
# """Sanitize HTML for use inside records fields.
#
# This strips most of the tags and attributes, only allowing a safe whitelisted subset."""
# return _bleach_cleaner.clean(text)
#
# def validate(data, schema=None):
# """Validate the given dictionary against the given schema.
#
# Args:
# data (dict): record to validate.
# schema (Union[dict, str]): schema to validate against. If it is a
# string, it is intepreted as the name of the schema to load (e.g.
# ``authors`` or ``jobs``). If it is ``None``, the schema is taken
# from ``data['$schema']``. If it is a dictionary, it is used
# directly.
#
# Raises:
# SchemaNotFound: if the given schema was not found.
# SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
# found in ``data``.
# jsonschema.SchemaError: if the schema is invalid.
# jsonschema.ValidationError: if the data is invalid.
# """
# schema = _load_schema_for_record(data, schema)
#
# return jsonschema_validate(
# instance=data,
# schema=schema,
# resolver=LocalRefResolver.from_schema(schema),
# format_checker=inspire_format_checker,
# )
. Output only the next line. | value=sanitize_html(value) |
Predict the next line for this snippet: <|code_start|>
"""Conferences builder class and related code."""
from __future__ import absolute_import, division, print_function
class SeminarBuilder(RecordBuilder):
"""Seminar record builder."""
_collections = ['Seminars']
@staticmethod
def _prepare_url(value, description=None):
"""Build url dict satysfying url.yml requirements
Args:
value (str): URL itself
description (str): URL description
"""
entry = {
'value': value
}
if description:
entry['description'] = description
return entry
def validate_record(self):
"""Validate the record in according to the hep schema."""
<|code_end|>
with the help of current file imports:
import six
from inspire_utils.name import normalize_name
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import (
filter_empty_parameters,
sanitize_html,
validate,
)
from inspire_utils.date import normalize_date
and context from other files:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def sanitize_html(text):
# """Sanitize HTML for use inside records fields.
#
# This strips most of the tags and attributes, only allowing a safe whitelisted subset."""
# return _bleach_cleaner.clean(text)
#
# def validate(data, schema=None):
# """Validate the given dictionary against the given schema.
#
# Args:
# data (dict): record to validate.
# schema (Union[dict, str]): schema to validate against. If it is a
# string, it is intepreted as the name of the schema to load (e.g.
# ``authors`` or ``jobs``). If it is ``None``, the schema is taken
# from ``data['$schema']``. If it is a dictionary, it is used
# directly.
#
# Raises:
# SchemaNotFound: if the given schema was not found.
# SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
# found in ``data``.
# jsonschema.SchemaError: if the schema is invalid.
# jsonschema.ValidationError: if the data is invalid.
# """
# schema = _load_schema_for_record(data, schema)
#
# return jsonschema_validate(
# instance=data,
# schema=schema,
# resolver=LocalRefResolver.from_schema(schema),
# format_checker=inspire_format_checker,
# )
, which may contain function names, class names, or code. Output only the next line. | validate(self.record, 'seminars') |
Continue the code snippet: <|code_start|>
from __future__ import absolute_import, division, print_function
class ConferenceBuilder(RecordBuilder):
"""Conference record builder."""
_collections = ['Conferences']
@staticmethod
def _prepare_url(value, description=None):
"""Build url dict satysfying url.yml requirements
Args:
value (str): URL itself
description (str): URL description
"""
entry = {
'value': value
}
if description:
entry['description'] = description
return entry
def validate_record(self):
"""Validate the record in according to the hep schema."""
validate(self.record, 'conferences')
<|code_end|>
. Use current file imports:
import six
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import (
filter_empty_parameters,
sanitize_html,
validate,
)
from inspire_utils.date import normalize_date
and context (classes, functions, or code) from other files:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def sanitize_html(text):
# """Sanitize HTML for use inside records fields.
#
# This strips most of the tags and attributes, only allowing a safe whitelisted subset."""
# return _bleach_cleaner.clean(text)
#
# def validate(data, schema=None):
# """Validate the given dictionary against the given schema.
#
# Args:
# data (dict): record to validate.
# schema (Union[dict, str]): schema to validate against. If it is a
# string, it is intepreted as the name of the schema to load (e.g.
# ``authors`` or ``jobs``). If it is ``None``, the schema is taken
# from ``data['$schema']``. If it is a dictionary, it is used
# directly.
#
# Raises:
# SchemaNotFound: if the given schema was not found.
# SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
# found in ``data``.
# jsonschema.SchemaError: if the schema is invalid.
# jsonschema.ValidationError: if the data is invalid.
# """
# schema = _load_schema_for_record(data, schema)
#
# return jsonschema_validate(
# instance=data,
# schema=schema,
# resolver=LocalRefResolver.from_schema(schema),
# format_checker=inspire_format_checker,
# )
. Output only the next line. | @filter_empty_parameters |
Predict the next line after this snippet: <|code_start|> """
if date is not None:
self.record['closing_date'] = normalize_date(date=date)
def set_core(self, core=True):
"""Set core flag.
Args:
core (bool): define a core article
"""
self.record['core'] = core
def set_opening_date(self, date=None):
"""Add conference opening date.
Args:
date (str): conference opening date.
"""
if date is not None:
self.record['opening_date'] = normalize_date(date=date)
def set_short_description(self, value, source=None):
"""Set a short descritpion
Args:
value (str): the description to set.
source (str): source of the description.
"""
self.record['short_description'] = self._sourced_dict(
source=source,
<|code_end|>
using the current file's imports:
import six
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import (
filter_empty_parameters,
sanitize_html,
validate,
)
from inspire_utils.date import normalize_date
and any relevant context from other files:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def sanitize_html(text):
# """Sanitize HTML for use inside records fields.
#
# This strips most of the tags and attributes, only allowing a safe whitelisted subset."""
# return _bleach_cleaner.clean(text)
#
# def validate(data, schema=None):
# """Validate the given dictionary against the given schema.
#
# Args:
# data (dict): record to validate.
# schema (Union[dict, str]): schema to validate against. If it is a
# string, it is intepreted as the name of the schema to load (e.g.
# ``authors`` or ``jobs``). If it is ``None``, the schema is taken
# from ``data['$schema']``. If it is a dictionary, it is used
# directly.
#
# Raises:
# SchemaNotFound: if the given schema was not found.
# SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
# found in ``data``.
# jsonschema.SchemaError: if the schema is invalid.
# jsonschema.ValidationError: if the data is invalid.
# """
# schema = _load_schema_for_record(data, schema)
#
# return jsonschema_validate(
# instance=data,
# schema=schema,
# resolver=LocalRefResolver.from_schema(schema),
# format_checker=inspire_format_checker,
# )
. Output only the next line. | value=sanitize_html(value) |
Given snippet: <|code_start|>
"""Conferences builder class and related code."""
from __future__ import absolute_import, division, print_function
class ConferenceBuilder(RecordBuilder):
"""Conference record builder."""
_collections = ['Conferences']
@staticmethod
def _prepare_url(value, description=None):
"""Build url dict satysfying url.yml requirements
Args:
value (str): URL itself
description (str): URL description
"""
entry = {
'value': value
}
if description:
entry['description'] = description
return entry
def validate_record(self):
"""Validate the record in according to the hep schema."""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import six
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import (
filter_empty_parameters,
sanitize_html,
validate,
)
from inspire_utils.date import normalize_date
and context:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def sanitize_html(text):
# """Sanitize HTML for use inside records fields.
#
# This strips most of the tags and attributes, only allowing a safe whitelisted subset."""
# return _bleach_cleaner.clean(text)
#
# def validate(data, schema=None):
# """Validate the given dictionary against the given schema.
#
# Args:
# data (dict): record to validate.
# schema (Union[dict, str]): schema to validate against. If it is a
# string, it is intepreted as the name of the schema to load (e.g.
# ``authors`` or ``jobs``). If it is ``None``, the schema is taken
# from ``data['$schema']``. If it is a dictionary, it is used
# directly.
#
# Raises:
# SchemaNotFound: if the given schema was not found.
# SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
# found in ``data``.
# jsonschema.SchemaError: if the schema is invalid.
# jsonschema.ValidationError: if the data is invalid.
# """
# schema = _load_schema_for_record(data, schema)
#
# return jsonschema_validate(
# instance=data,
# schema=schema,
# resolver=LocalRefResolver.from_schema(schema),
# format_checker=inspire_format_checker,
# )
which might include code, classes, or functions. Output only the next line. | validate(self.record, 'conferences') |
Given the code snippet: <|code_start|># as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
class RecordBuilder(object):
"""Base record builder."""
_collections = []
def __init__(self, record=None, source=None):
if record is None:
record = {'_collections': [_ for _ in self.__class__._collections]}
self.record = record
self.source = source
def __repr__(self):
"""Printable representation of the builder."""
return u'{}(source={!r}, record={})'.format(
type(self).__name__,
self.source,
self.record
)
@filter_empty_parameters
def _append_to(self, field, element=None, default_list=None, **kwargs):
if default_list is None:
default_list = []
<|code_end|>
, generate the next line using the imports in this file:
import six
from ..utils import EMPTIES, filter_empty_parameters
and context (functions, classes, or occasionally code) from other files:
# Path: inspire_schemas/utils.py
# EMPTIES = [None, '', [], {}]
#
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
. Output only the next line. | if element not in EMPTIES: |
Given the code snippet: <|code_start|># MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
class RecordBuilder(object):
"""Base record builder."""
_collections = []
def __init__(self, record=None, source=None):
if record is None:
record = {'_collections': [_ for _ in self.__class__._collections]}
self.record = record
self.source = source
def __repr__(self):
"""Printable representation of the builder."""
return u'{}(source={!r}, record={})'.format(
type(self).__name__,
self.source,
self.record
)
<|code_end|>
, generate the next line using the imports in this file:
import six
from ..utils import EMPTIES, filter_empty_parameters
and context (functions, classes, or occasionally code) from other files:
# Path: inspire_schemas/utils.py
# EMPTIES = [None, '', [], {}]
#
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
. Output only the next line. | @filter_empty_parameters |
Given the code snippet: <|code_start|> return schema_data
inspire_format_checker = draft4_format_checker
inspire_format_checker.checks('date', raises=ValueError)(PartialDate.loads)
inspire_format_checker.checks('uri-reference', raises=ValueError)(
partial(rfc3987.parse, rule='URI_reference')
)
inspire_format_checker.checks('orcid')(is_orcid)
inspire_format_checker.checks('timezone', raises=UnknownTimeZoneError)(timezone)
def _load_schema_for_record(data, schema=None):
"""Load the schema from a given record.
Args:
data (dict): record data.
schema (Union[dict, str]): schema to validate against.
Returns:
dict: the loaded schema.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid.
"""
if schema is None:
if '$schema' not in data:
<|code_end|>
, generate the next line using the imports in this file:
import copy
import json
import os
import re
import idutils
import rfc3987
import six
from collections import defaultdict
from functools import partial, wraps
from bleach.linkifier import LinkifyFilter
from bleach.sanitizer import Cleaner
from idutils import is_orcid
from inspire_utils.date import PartialDate
from isbn import ISBN
from jsonschema import Draft4Validator, RefResolver, draft4_format_checker
from jsonschema import validate as jsonschema_validate
from pkg_resources import resource_filename
from pytz import UnknownTimeZoneError, timezone
from six.moves.urllib.parse import urlsplit
from unidecode import unidecode
from .errors import (SchemaKeyNotFound, SchemaNotFound, SchemaUIDConflict,
UnknownUIDSchema)
and context (functions, classes, or occasionally code) from other files:
# Path: inspire_schemas/errors.py
# class SchemaKeyNotFound(InspireSchemasException):
# """Exception raised on missing schema key."""
#
# def __init__(self, data):
# """Exception raised on missing schema key.
#
# :param data: data dict that was checked.
# """
# message = 'Unable to find "$schema" key in "{}".'.format(data)
# super(SchemaKeyNotFound, self).__init__(message)
#
# class SchemaNotFound(InspireSchemasException):
# """Exception raised on missing schema."""
#
# def __init__(self, schema):
# """Exception raised on missing schema.
#
# :param schema: the schema that was requested.
# """
# message = 'Unable to find schema "{}"'.format(
# schema)
# super(SchemaNotFound, self).__init__(message)
#
# class SchemaUIDConflict(InspireSchemasException):
# """Exception raised when a UID is not matching provided schema."""
#
# def __init__(self, schema, uid):
# """Exception raised when a UID is not matching provided schema.
#
# Args:
# schema (string): given schema
# uid (string): UID which conflicts the schema
# """
# message = 'UID "{}" is not of the schema "{}".'.format(uid, schema)
# super(SchemaUIDConflict, self).__init__(message)
#
# class UnknownUIDSchema(InspireSchemasException):
# """Exception raised when a schema of a UID is unknown."""
#
# def __init__(self, uid):
# """Exception raised when a schema of a UID is unknown.
#
# Args:
# uid (string): given UID
# """
# message = 'Schema of UID "{}" is unrecognized.'.format(uid)
# super(UnknownUIDSchema, self).__init__(message)
. Output only the next line. | raise SchemaKeyNotFound(data=data) |
Next line prediction: <|code_start|> """
stripped_path = path.split(os.path.sep, 1)[1:]
return ''.join(stripped_path)
def _schema_to_normalized_path(schema):
"""Pass doctests.
Extracts the path from the url, makes sure to get rid of any '..' in
the path and adds the json extension if not there.
"""
path = os.path.normpath(os.path.sep + urlsplit(schema).path)
if path.startswith(os.path.sep):
path = path[1:]
if not path.endswith('.json'):
path += '.json'
return path
path = _schema_to_normalized_path(schema)
while path:
if resolved:
schema_path = os.path.abspath(os.path.join(_schema_root_path, path))
else:
schema_path = os.path.abspath(os.path.join(_schema_root_path, path))
if os.path.exists(schema_path):
return os.path.abspath(schema_path)
path = _strip_first_path_elem(path)
<|code_end|>
. Use current file imports:
(import copy
import json
import os
import re
import idutils
import rfc3987
import six
from collections import defaultdict
from functools import partial, wraps
from bleach.linkifier import LinkifyFilter
from bleach.sanitizer import Cleaner
from idutils import is_orcid
from inspire_utils.date import PartialDate
from isbn import ISBN
from jsonschema import Draft4Validator, RefResolver, draft4_format_checker
from jsonschema import validate as jsonschema_validate
from pkg_resources import resource_filename
from pytz import UnknownTimeZoneError, timezone
from six.moves.urllib.parse import urlsplit
from unidecode import unidecode
from .errors import (SchemaKeyNotFound, SchemaNotFound, SchemaUIDConflict,
UnknownUIDSchema))
and context including class names, function names, or small code snippets from other files:
# Path: inspire_schemas/errors.py
# class SchemaKeyNotFound(InspireSchemasException):
# """Exception raised on missing schema key."""
#
# def __init__(self, data):
# """Exception raised on missing schema key.
#
# :param data: data dict that was checked.
# """
# message = 'Unable to find "$schema" key in "{}".'.format(data)
# super(SchemaKeyNotFound, self).__init__(message)
#
# class SchemaNotFound(InspireSchemasException):
# """Exception raised on missing schema."""
#
# def __init__(self, schema):
# """Exception raised on missing schema.
#
# :param schema: the schema that was requested.
# """
# message = 'Unable to find schema "{}"'.format(
# schema)
# super(SchemaNotFound, self).__init__(message)
#
# class SchemaUIDConflict(InspireSchemasException):
# """Exception raised when a UID is not matching provided schema."""
#
# def __init__(self, schema, uid):
# """Exception raised when a UID is not matching provided schema.
#
# Args:
# schema (string): given schema
# uid (string): UID which conflicts the schema
# """
# message = 'UID "{}" is not of the schema "{}".'.format(uid, schema)
# super(SchemaUIDConflict, self).__init__(message)
#
# class UnknownUIDSchema(InspireSchemasException):
# """Exception raised when a schema of a UID is unknown."""
#
# def __init__(self, uid):
# """Exception raised when a schema of a UID is unknown.
#
# Args:
# uid (string): given UID
# """
# message = 'Schema of UID "{}" is unrecognized.'.format(uid)
# super(UnknownUIDSchema, self).__init__(message)
. Output only the next line. | raise SchemaNotFound(schema=schema) |
Given the following code snippet before the placeholder: <|code_start|> uid (string): a UID string
schema (string): try to resolve to schema
Returns:
Tuple[string, string]: a tuple (uid, schema) where:
- uid: the UID normalized to comply with the id.json schema
- schema: a schema of the UID or *None* if not recognised
Raise:
UnknownUIDSchema: if UID is too little to definitively guess the schema
SchemaUIDConflict: if specified schema is not matching the given UID
"""
def _get_uid_normalized_in_schema(_uid, _schema):
regex, template = _RE_AUTHORS_UID[_schema]
match = regex.match(_uid)
if match:
return template.format(match.group('uid'))
if idutils.is_orcid(uid) and schema in (None, 'ORCID'):
return idutils.normalize_orcid(uid), 'ORCID'
if schema and schema not in _RE_AUTHORS_UID:
# Schema explicitly specified, but this function can't handle it
raise UnknownUIDSchema(uid)
if schema:
normalized_uid = _get_uid_normalized_in_schema(uid, schema)
if normalized_uid:
return normalized_uid, schema
else:
<|code_end|>
, predict the next line using imports from the current file:
import copy
import json
import os
import re
import idutils
import rfc3987
import six
from collections import defaultdict
from functools import partial, wraps
from bleach.linkifier import LinkifyFilter
from bleach.sanitizer import Cleaner
from idutils import is_orcid
from inspire_utils.date import PartialDate
from isbn import ISBN
from jsonschema import Draft4Validator, RefResolver, draft4_format_checker
from jsonschema import validate as jsonschema_validate
from pkg_resources import resource_filename
from pytz import UnknownTimeZoneError, timezone
from six.moves.urllib.parse import urlsplit
from unidecode import unidecode
from .errors import (SchemaKeyNotFound, SchemaNotFound, SchemaUIDConflict,
UnknownUIDSchema)
and context including class names, function names, and sometimes code from other files:
# Path: inspire_schemas/errors.py
# class SchemaKeyNotFound(InspireSchemasException):
# """Exception raised on missing schema key."""
#
# def __init__(self, data):
# """Exception raised on missing schema key.
#
# :param data: data dict that was checked.
# """
# message = 'Unable to find "$schema" key in "{}".'.format(data)
# super(SchemaKeyNotFound, self).__init__(message)
#
# class SchemaNotFound(InspireSchemasException):
# """Exception raised on missing schema."""
#
# def __init__(self, schema):
# """Exception raised on missing schema.
#
# :param schema: the schema that was requested.
# """
# message = 'Unable to find schema "{}"'.format(
# schema)
# super(SchemaNotFound, self).__init__(message)
#
# class SchemaUIDConflict(InspireSchemasException):
# """Exception raised when a UID is not matching provided schema."""
#
# def __init__(self, schema, uid):
# """Exception raised when a UID is not matching provided schema.
#
# Args:
# schema (string): given schema
# uid (string): UID which conflicts the schema
# """
# message = 'UID "{}" is not of the schema "{}".'.format(uid, schema)
# super(SchemaUIDConflict, self).__init__(message)
#
# class UnknownUIDSchema(InspireSchemasException):
# """Exception raised when a schema of a UID is unknown."""
#
# def __init__(self, uid):
# """Exception raised when a schema of a UID is unknown.
#
# Args:
# uid (string): given UID
# """
# message = 'Schema of UID "{}" is unrecognized.'.format(uid)
# super(UnknownUIDSchema, self).__init__(message)
. Output only the next line. | raise SchemaUIDConflict(schema, uid) |
Given the code snippet: <|code_start|> return func_wrapper
def author_id_normalize_and_schema(uid, schema=None):
"""Detect and normalize an author UID schema.
Args:
uid (string): a UID string
schema (string): try to resolve to schema
Returns:
Tuple[string, string]: a tuple (uid, schema) where:
- uid: the UID normalized to comply with the id.json schema
- schema: a schema of the UID or *None* if not recognised
Raise:
UnknownUIDSchema: if UID is too little to definitively guess the schema
SchemaUIDConflict: if specified schema is not matching the given UID
"""
def _get_uid_normalized_in_schema(_uid, _schema):
regex, template = _RE_AUTHORS_UID[_schema]
match = regex.match(_uid)
if match:
return template.format(match.group('uid'))
if idutils.is_orcid(uid) and schema in (None, 'ORCID'):
return idutils.normalize_orcid(uid), 'ORCID'
if schema and schema not in _RE_AUTHORS_UID:
# Schema explicitly specified, but this function can't handle it
<|code_end|>
, generate the next line using the imports in this file:
import copy
import json
import os
import re
import idutils
import rfc3987
import six
from collections import defaultdict
from functools import partial, wraps
from bleach.linkifier import LinkifyFilter
from bleach.sanitizer import Cleaner
from idutils import is_orcid
from inspire_utils.date import PartialDate
from isbn import ISBN
from jsonschema import Draft4Validator, RefResolver, draft4_format_checker
from jsonschema import validate as jsonschema_validate
from pkg_resources import resource_filename
from pytz import UnknownTimeZoneError, timezone
from six.moves.urllib.parse import urlsplit
from unidecode import unidecode
from .errors import (SchemaKeyNotFound, SchemaNotFound, SchemaUIDConflict,
UnknownUIDSchema)
and context (functions, classes, or occasionally code) from other files:
# Path: inspire_schemas/errors.py
# class SchemaKeyNotFound(InspireSchemasException):
# """Exception raised on missing schema key."""
#
# def __init__(self, data):
# """Exception raised on missing schema key.
#
# :param data: data dict that was checked.
# """
# message = 'Unable to find "$schema" key in "{}".'.format(data)
# super(SchemaKeyNotFound, self).__init__(message)
#
# class SchemaNotFound(InspireSchemasException):
# """Exception raised on missing schema."""
#
# def __init__(self, schema):
# """Exception raised on missing schema.
#
# :param schema: the schema that was requested.
# """
# message = 'Unable to find schema "{}"'.format(
# schema)
# super(SchemaNotFound, self).__init__(message)
#
# class SchemaUIDConflict(InspireSchemasException):
# """Exception raised when a UID is not matching provided schema."""
#
# def __init__(self, schema, uid):
# """Exception raised when a UID is not matching provided schema.
#
# Args:
# schema (string): given schema
# uid (string): UID which conflicts the schema
# """
# message = 'UID "{}" is not of the schema "{}".'.format(uid, schema)
# super(SchemaUIDConflict, self).__init__(message)
#
# class UnknownUIDSchema(InspireSchemasException):
# """Exception raised when a schema of a UID is unknown."""
#
# def __init__(self, uid):
# """Exception raised when a schema of a UID is unknown.
#
# Args:
# uid (string): given UID
# """
# message = 'Schema of UID "{}" is unrecognized.'.format(uid)
# super(UnknownUIDSchema, self).__init__(message)
. Output only the next line. | raise UnknownUIDSchema(uid) |
Based on the snippet: <|code_start|># Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Author builder class and related code."""
from __future__ import absolute_import, division, print_function
RANKS = load_schema('elements/rank')['enum']
RANKS.append(None)
INSTITUTION_RANK_TO_PRIORITY = {rank: -idx for (idx, rank) in enumerate(RANKS)}
EARLIEST_DATE = PartialDate.loads('1000')
<|code_end|>
, predict the immediate next line with the help of imports:
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import filter_empty_parameters, load_schema
from inspire_utils.date import normalize_date, PartialDate
from inspire_utils.helpers import force_list
from inspire_utils.name import normalize_name
from inspire_utils.record import get_value
and context (classes, functions, sometimes code) from other files:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def load_schema(schema_name, resolved=False, _cache={}):
# """Load the given schema from wherever it's installed.
#
# Args:
# schema_name(str): Name of the schema to load, for example 'authors'.
# resolved(bool): If True will return the resolved schema, that is with
# all the $refs replaced by their targets.
# _cache(dict): Private argument used for memoization.
#
# Returns:
# dict: the schema with the given name.
# """
# if schema_name in _cache:
# return _cache[schema_name]
#
# schema_path = get_schema_path(schema_name, resolved)
# if schema_path in _cache:
# schema_data = _cache[schema_path]
# _cache[schema_name] = schema_data
# return schema_data
#
# with open(schema_path) as schema_fd:
# schema_data = json.load(schema_fd)
#
# _cache[schema_name] = schema_data
# _cache[schema_path] = schema_data
#
# return schema_data
. Output only the next line. | class AuthorBuilder(RecordBuilder): |
Given snippet: <|code_start|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Author builder class and related code."""
from __future__ import absolute_import, division, print_function
RANKS = load_schema('elements/rank')['enum']
RANKS.append(None)
INSTITUTION_RANK_TO_PRIORITY = {rank: -idx for (idx, rank) in enumerate(RANKS)}
EARLIEST_DATE = PartialDate.loads('1000')
class AuthorBuilder(RecordBuilder):
"""Author record builder."""
_collections = ['Authors']
def __init__(self, author=None, source=None):
super(AuthorBuilder, self).__init__(author, source)
self.obj = self.record
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import filter_empty_parameters, load_schema
from inspire_utils.date import normalize_date, PartialDate
from inspire_utils.helpers import force_list
from inspire_utils.name import normalize_name
from inspire_utils.record import get_value
and context:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def load_schema(schema_name, resolved=False, _cache={}):
# """Load the given schema from wherever it's installed.
#
# Args:
# schema_name(str): Name of the schema to load, for example 'authors'.
# resolved(bool): If True will return the resolved schema, that is with
# all the $refs replaced by their targets.
# _cache(dict): Private argument used for memoization.
#
# Returns:
# dict: the schema with the given name.
# """
# if schema_name in _cache:
# return _cache[schema_name]
#
# schema_path = get_schema_path(schema_name, resolved)
# if schema_path in _cache:
# schema_data = _cache[schema_path]
# _cache[schema_name] = schema_data
# return schema_data
#
# with open(schema_path) as schema_fd:
# schema_data = json.load(schema_fd)
#
# _cache[schema_name] = schema_data
# _cache[schema_path] = schema_data
#
# return schema_data
which might include code, classes, or functions. Output only the next line. | @filter_empty_parameters |
Given snippet: <|code_start|> self.record,
'publication_info.year[0]',
default=''
))
@property
def is_published(self):
"""Return True if a record is published.
We say that a record is published if it is citeable, which means that
it has enough information in a ``publication_info``, or if we know its
DOI and a ``journal_title``, which means it is in press.
Returns:
bool: whether the record is published.
Examples:
>>> record = {
... 'dois': [
... {'value': '10.1016/0029-5582(61)90469-2'},
... ],
... 'publication_info': [
... {'journal_title': 'Nucl.Phys.'},
... ],
... }
>>> LiteratureReader(record).is_published
True
"""
citeable = 'publication_info' in self.record and \
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from itertools import chain
from six import text_type
from inspire_schemas.builders.literature import is_citeable
from inspire_utils.helpers import force_list
from inspire_utils.record import get_value
and context:
# Path: inspire_schemas/builders/literature.py
# def is_citeable(publication_info):
# """Check some fields in order to define if the article is citeable.
#
# :param publication_info: publication_info field
# already populated
# :type publication_info: list
# """
#
# def _item_has_pub_info(item):
# return all(
# key in item for key in (
# 'journal_title', 'journal_volume'
# )
# )
#
# def _item_has_page_or_artid(item):
# return any(
# key in item for key in (
# 'page_start', 'artid'
# )
# )
#
# has_pub_info = any(
# _item_has_pub_info(item) for item in publication_info
# )
# has_page_or_artid = any(
# _item_has_page_or_artid(item) for item in publication_info
# )
#
# return has_pub_info and has_page_or_artid
which might include code, classes, or functions. Output only the next line. | is_citeable(self.record['publication_info']) |
Predict the next line for this snippet: <|code_start|> example_path = os.path.join(FIXTURES_PATH, schema_name + '_example.json')
with open(example_path) as example_fd:
data = json.loads(example_fd.read())
return data
def change_something(data):
for key, elem in data.items():
if isinstance(elem, int):
data[key] = (
"Look, I'm a knight, I'm supposed to get as much peril as I"
" can."
)
else:
data[key] = 42
break
else:
raise Exception('Unable to change anythng on data "%s"' % data)
return data
@pytest.mark.parametrize(
'schema_name',
get_schema_names(FIXTURES_PATH),
ids=get_schema_names(FIXTURES_PATH),
)
def test_schemas_validate(schema_name):
example_data = load_example(schema_name)
<|code_end|>
with the help of current file imports:
import json
import os
import jsonschema
import pytest
import six
from inspire_schemas import api
and context from other files:
# Path: inspire_schemas/api.py
, which may contain function names, class names, or code. Output only the next line. | api.validate(data=example_data, schema=schema_name) |
Predict the next line for this snippet: <|code_start|># MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Jobs builder class and related code."""
from __future__ import absolute_import, division, print_function
LOG = logging.getLogger(__name__)
class JobBuilder(RecordBuilder):
"""Job record builder."""
_collections = ['Jobs']
def __init__(self, record=None, source=None,):
super(JobBuilder, self).__init__(record, source)
if record is None:
self.record['status'] = 'pending'
def validate_record(self):
"""Validate the record in according to the hep schema."""
validate(self.record, 'jobs')
<|code_end|>
with the help of current file imports:
import logging
import six
from jsonschema._format import is_email
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import (
filter_empty_parameters,
sanitize_html,
validate,
)
from inspire_utils.date import normalize_date
and context from other files:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def sanitize_html(text):
# """Sanitize HTML for use inside records fields.
#
# This strips most of the tags and attributes, only allowing a safe whitelisted subset."""
# return _bleach_cleaner.clean(text)
#
# def validate(data, schema=None):
# """Validate the given dictionary against the given schema.
#
# Args:
# data (dict): record to validate.
# schema (Union[dict, str]): schema to validate against. If it is a
# string, it is interpreted as the name of the schema to load (e.g.
# ``authors`` or ``jobs``). If it is ``None``, the schema is taken
# from ``data['$schema']``. If it is a dictionary, it is used
# directly.
#
# Raises:
# SchemaNotFound: if the given schema was not found.
# SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
# found in ``data``.
# jsonschema.SchemaError: if the schema is invalid.
# jsonschema.ValidationError: if the data is invalid.
# """
# schema = _load_schema_for_record(data, schema)
#
# return jsonschema_validate(
# instance=data,
# schema=schema,
# resolver=LocalRefResolver.from_schema(schema),
# format_checker=inspire_format_checker,
# )
, which may contain function names, class names, or code. Output only the next line. | @filter_empty_parameters |
Next line prediction: <|code_start|> Args:
value (str): Url itself.
description (str): Description of the url.
"""
entry = self._prepare_url(value, description)
self._append_to('urls', entry)
@filter_empty_parameters
def set_deadline(self, deadline):
"""Save normalized date of the deadline to ``deadline_date`` field
deadline (str): Date in format recognized by ``normalize_date``
"""
self.record['deadline_date'] = normalize_date(deadline)
@filter_empty_parameters
def set_external_job_identifier(self, identifier):
"""Set external job identifier in ``external_job_identifier`` field
Args:
identifier (str)
"""
self.record['external_job_identifier'] = identifier
@filter_empty_parameters
def set_description(self, description):
"""Set description of job
Args:
description (str): Job description
"""
<|code_end|>
. Use current file imports:
(import logging
import six
from jsonschema._format import is_email
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import (
filter_empty_parameters,
sanitize_html,
validate,
)
from inspire_utils.date import normalize_date)
and context including class names, function names, or small code snippets from other files:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def sanitize_html(text):
# """Sanitize HTML for use inside records fields.
#
# This strips most of the tags and attributes, only allowing a safe whitelisted subset."""
# return _bleach_cleaner.clean(text)
#
# def validate(data, schema=None):
# """Validate the given dictionary against the given schema.
#
# Args:
# data (dict): record to validate.
# schema (Union[dict, str]): schema to validate against. If it is a
# string, it is interpreted as the name of the schema to load (e.g.
# ``authors`` or ``jobs``). If it is ``None``, the schema is taken
# from ``data['$schema']``. If it is a dictionary, it is used
# directly.
#
# Raises:
# SchemaNotFound: if the given schema was not found.
# SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
# found in ``data``.
# jsonschema.SchemaError: if the schema is invalid.
# jsonschema.ValidationError: if the data is invalid.
# """
# schema = _load_schema_for_record(data, schema)
#
# return jsonschema_validate(
# instance=data,
# schema=schema,
# resolver=LocalRefResolver.from_schema(schema),
# format_checker=inspire_format_checker,
# )
. Output only the next line. | self.record['description'] = sanitize_html(description) |
Using the snippet: <|code_start|># along with INSPIRE-SCHEMAS; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Jobs builder class and related code."""
from __future__ import absolute_import, division, print_function
LOG = logging.getLogger(__name__)
class JobBuilder(RecordBuilder):
"""Job record builder."""
_collections = ['Jobs']
def __init__(self, record=None, source=None,):
super(JobBuilder, self).__init__(record, source)
if record is None:
self.record['status'] = 'pending'
def validate_record(self):
"""Validate the record in according to the hep schema."""
<|code_end|>
, determine the next line of code. You have imports:
import logging
import six
from jsonschema._format import is_email
from inspire_schemas.builders.builder import RecordBuilder
from inspire_schemas.utils import (
filter_empty_parameters,
sanitize_html,
validate,
)
from inspire_utils.date import normalize_date
and context (class names, function names, or code) available:
# Path: inspire_schemas/builders/builder.py
# class RecordBuilder(object):
# """Base record builder."""
#
# _collections = []
#
# def __init__(self, record=None, source=None):
# if record is None:
# record = {'_collections': [_ for _ in self.__class__._collections]}
# self.record = record
# self.source = source
#
# def __repr__(self):
# """Printable representation of the builder."""
# return u'{}(source={!r}, record={})'.format(
# type(self).__name__,
# self.source,
# self.record
# )
#
# @filter_empty_parameters
# def _append_to(self, field, element=None, default_list=None, **kwargs):
# if default_list is None:
# default_list = []
# if element not in EMPTIES:
# self._ensure_list_field(field, default_list)
# if element not in self.record[field]:
# self.record[field].append(element)
# elif kwargs:
# if 'record' in kwargs and isinstance(kwargs['record'], six.string_types):
# kwargs['record'] = {'$ref': kwargs['record']}
# self._ensure_list_field(field, default_list)
# if kwargs not in self.record[field]:
# self.record[field].append(kwargs)
#
# def _ensure_field(self, field_name, default_value, obj=None):
# if obj is None:
# obj = self.record
# if field_name not in obj:
# obj[field_name] = default_value
#
# def _ensure_list_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = []
# self._ensure_field(field_name, default_value, obj)
#
# def _ensure_dict_field(self, field_name, default_value=None, obj=None):
# if default_value is None:
# default_value = {}
# self._ensure_field(field_name, default_value, obj)
#
# def _sourced_dict(self, source=None, **kwargs):
# if source:
# kwargs['source'] = source
# elif self.source:
# kwargs['source'] = self.source
#
# return {key: value for key, value in kwargs.items() if value not in EMPTIES}
#
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def sanitize_html(text):
# """Sanitize HTML for use inside records fields.
#
# This strips most of the tags and attributes, only allowing a safe whitelisted subset."""
# return _bleach_cleaner.clean(text)
#
# def validate(data, schema=None):
# """Validate the given dictionary against the given schema.
#
# Args:
# data (dict): record to validate.
# schema (Union[dict, str]): schema to validate against. If it is a
# string, it is intepreted as the name of the schema to load (e.g.
# ``authors`` or ``jobs``). If it is ``None``, the schema is taken
# from ``data['$schema']``. If it is a dictionary, it is used
# directly.
#
# Raises:
# SchemaNotFound: if the given schema was not found.
# SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
# found in ``data``.
# jsonschema.SchemaError: if the schema is invalid.
# jsonschema.ValidationError: if the data is invalid.
# """
# schema = _load_schema_for_record(data, schema)
#
# return jsonschema_validate(
# instance=data,
# schema=schema,
# resolver=LocalRefResolver.from_schema(schema),
# format_checker=inspire_format_checker,
# )
. Output only the next line. | validate(self.record, 'jobs') |
Predict the next line after this snippet: <|code_start|> self.obj = signature
def _ensure_field(self, field_name, value):
if field_name not in self.obj:
self.obj[field_name] = value
def _ensure_list_field(self, field_name, value):
if value:
self._ensure_field(field_name, [])
if value not in self.obj[field_name]:
self.obj[field_name].append(value)
def add_affiliation(self, value, curated_relation=None, record=None):
"""Add an affiliation.
Args:
value (string): affiliation value
curated_relation (bool): is relation curated
record (dict): affiliation JSON reference
"""
if value:
affiliation = {
'value': value
}
if record:
affiliation['record'] = record
if curated_relation is not None:
affiliation['curated_relation'] = curated_relation
self._ensure_list_field('affiliations', affiliation)
<|code_end|>
using the current file's imports:
from inspire_utils.name import normalize_name
from ..utils import (
filter_empty_parameters,
author_id_normalize_and_schema,
)
from ..errors import UnknownUIDSchema
and any relevant context from other files:
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def author_id_normalize_and_schema(uid, schema=None):
# """Detect and normalize an author UID schema.
#
# Args:
# uid (string): a UID string
# schema (string): try to resolve to schema
#
# Returns:
# Tuple[string, string]: a tuple (uid, schema) where:
# - uid: the UID normalized to comply with the id.json schema
# - schema: a schema of the UID or *None* if not recognised
#
# Raise:
# UnknownUIDSchema: if UID is too little to definitively guess the schema
# SchemaUIDConflict: if specified schema is not matching the given UID
# """
# def _get_uid_normalized_in_schema(_uid, _schema):
# regex, template = _RE_AUTHORS_UID[_schema]
# match = regex.match(_uid)
# if match:
# return template.format(match.group('uid'))
#
# if idutils.is_orcid(uid) and schema in (None, 'ORCID'):
# return idutils.normalize_orcid(uid), 'ORCID'
#
# if schema and schema not in _RE_AUTHORS_UID:
# # Schema explicitly specified, but this function can't handle it
# raise UnknownUIDSchema(uid)
#
# if schema:
# normalized_uid = _get_uid_normalized_in_schema(uid, schema)
# if normalized_uid:
# return normalized_uid, schema
# else:
# raise SchemaUIDConflict(schema, uid)
#
# match_schema, normalized_uid = None, None
# for candidate_schema in _RE_AUTHORS_UID:
# candidate_uid = _get_uid_normalized_in_schema(uid, candidate_schema)
# if candidate_uid:
# if match_schema:
# # Valid against more than one candidate schema, ambiguous
# raise UnknownUIDSchema(uid)
# match_schema = candidate_schema
# normalized_uid = candidate_uid
#
# if match_schema:
# return normalized_uid, match_schema
#
# # No guesses have been found
# raise UnknownUIDSchema(uid)
#
# Path: inspire_schemas/errors.py
# class UnknownUIDSchema(InspireSchemasException):
# """Exception raised when a schema of a UID is unknown."""
#
# def __init__(self, uid):
# """Exception raised when a schema of a UID is unknown.
#
# Args:
# uid (string): given UID
# """
# message = 'Schema of UID "{}" is unrecognized.'.format(uid)
# super(UnknownUIDSchema, self).__init__(message)
. Output only the next line. | @filter_empty_parameters |
Given snippet: <|code_start|> self._ensure_list_field('emails', email)
@filter_empty_parameters
def set_full_name(self, full_name):
self._ensure_field('full_name', normalize_name(full_name))
@filter_empty_parameters
def _add_uid(self, uid, schema):
self._ensure_list_field('ids', {
'value': uid,
'schema': schema
})
@filter_empty_parameters
def set_uid(self, uid, schema=None):
"""Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: if UID and schema are not matching
"""
try:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from inspire_utils.name import normalize_name
from ..utils import (
filter_empty_parameters,
author_id_normalize_and_schema,
)
from ..errors import UnknownUIDSchema
and context:
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def author_id_normalize_and_schema(uid, schema=None):
# """Detect and normalize an author UID schema.
#
# Args:
# uid (string): a UID string
# schema (string): try to resolve to schema
#
# Returns:
# Tuple[string, string]: a tuple (uid, schema) where:
# - uid: the UID normalized to comply with the id.json schema
# - schema: a schema of the UID or *None* if not recognised
#
# Raise:
# UnknownUIDSchema: if UID is too little to definitively guess the schema
# SchemaUIDConflict: if specified schema is not matching the given UID
# """
# def _get_uid_normalized_in_schema(_uid, _schema):
# regex, template = _RE_AUTHORS_UID[_schema]
# match = regex.match(_uid)
# if match:
# return template.format(match.group('uid'))
#
# if idutils.is_orcid(uid) and schema in (None, 'ORCID'):
# return idutils.normalize_orcid(uid), 'ORCID'
#
# if schema and schema not in _RE_AUTHORS_UID:
# # Schema explicitly specified, but this function can't handle it
# raise UnknownUIDSchema(uid)
#
# if schema:
# normalized_uid = _get_uid_normalized_in_schema(uid, schema)
# if normalized_uid:
# return normalized_uid, schema
# else:
# raise SchemaUIDConflict(schema, uid)
#
# match_schema, normalized_uid = None, None
# for candidate_schema in _RE_AUTHORS_UID:
# candidate_uid = _get_uid_normalized_in_schema(uid, candidate_schema)
# if candidate_uid:
# if match_schema:
# # Valid against more than one candidate schema, ambiguous
# raise UnknownUIDSchema(uid)
# match_schema = candidate_schema
# normalized_uid = candidate_uid
#
# if match_schema:
# return normalized_uid, match_schema
#
# # No guessess have been found
# raise UnknownUIDSchema(uid)
#
# Path: inspire_schemas/errors.py
# class UnknownUIDSchema(InspireSchemasException):
# """Exception raised when a schema of a UID is unknown."""
#
# def __init__(self, uid):
# """Exception raised when a schema of a UID is unknown.
#
# Args:
# uid (string): given UID
# """
# message = 'Schema of UID "{}" is unrecognized.'.format(uid)
# super(UnknownUIDSchema, self).__init__(message)
which might include code, classes, or functions. Output only the next line. | uid, schema = author_id_normalize_and_schema(uid, schema) |
Given snippet: <|code_start|>
@filter_empty_parameters
def set_full_name(self, full_name):
self._ensure_field('full_name', normalize_name(full_name))
@filter_empty_parameters
def _add_uid(self, uid, schema):
self._ensure_list_field('ids', {
'value': uid,
'schema': schema
})
@filter_empty_parameters
def set_uid(self, uid, schema=None):
"""Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: if UID and schema are not matching
"""
try:
uid, schema = author_id_normalize_and_schema(uid, schema)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from inspire_utils.name import normalize_name
from ..utils import (
filter_empty_parameters,
author_id_normalize_and_schema,
)
from ..errors import UnknownUIDSchema
and context:
# Path: inspire_schemas/utils.py
# def filter_empty_parameters(func):
# """Decorator that is filtering empty parameters.
#
# :param func: function that you want wrapping
# :type func: function
# """
# @wraps(func)
# def func_wrapper(self, *args, **kwargs):
# my_kwargs = {key: value for key, value in kwargs.items()
# if value not in EMPTIES}
# args_is_empty = all(arg in EMPTIES for arg in args)
#
# if (
# {'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
# ) and args_is_empty:
# return
# return func(self, *args, **my_kwargs)
#
# return func_wrapper
#
# def author_id_normalize_and_schema(uid, schema=None):
# """Detect and normalize an author UID schema.
#
# Args:
# uid (string): a UID string
# schema (string): try to resolve to schema
#
# Returns:
# Tuple[string, string]: a tuple (uid, schema) where:
# - uid: the UID normalized to comply with the id.json schema
# - schema: a schema of the UID or *None* if not recognised
#
# Raise:
# UnknownUIDSchema: if UID is too little to definitively guess the schema
# SchemaUIDConflict: if specified schema is not matching the given UID
# """
# def _get_uid_normalized_in_schema(_uid, _schema):
# regex, template = _RE_AUTHORS_UID[_schema]
# match = regex.match(_uid)
# if match:
# return template.format(match.group('uid'))
#
# if idutils.is_orcid(uid) and schema in (None, 'ORCID'):
# return idutils.normalize_orcid(uid), 'ORCID'
#
# if schema and schema not in _RE_AUTHORS_UID:
# # Schema explicitly specified, but this function can't handle it
# raise UnknownUIDSchema(uid)
#
# if schema:
# normalized_uid = _get_uid_normalized_in_schema(uid, schema)
# if normalized_uid:
# return normalized_uid, schema
# else:
# raise SchemaUIDConflict(schema, uid)
#
# match_schema, normalized_uid = None, None
# for candidate_schema in _RE_AUTHORS_UID:
# candidate_uid = _get_uid_normalized_in_schema(uid, candidate_schema)
# if candidate_uid:
# if match_schema:
# # Valid against more than one candidate schema, ambiguous
# raise UnknownUIDSchema(uid)
# match_schema = candidate_schema
# normalized_uid = candidate_uid
#
# if match_schema:
# return normalized_uid, match_schema
#
# # No guessess have been found
# raise UnknownUIDSchema(uid)
#
# Path: inspire_schemas/errors.py
# class UnknownUIDSchema(InspireSchemasException):
# """Exception raised when a schema of a UID is unknown."""
#
# def __init__(self, uid):
# """Exception raised when a schema of a UID is unknown.
#
# Args:
# uid (string): given UID
# """
# message = 'Schema of UID "{}" is unrecognized.'.format(uid)
# super(UnknownUIDSchema, self).__init__(message)
which might include code, classes, or functions. Output only the next line. | except UnknownUIDSchema: |
Given snippet: <|code_start|>"""Generate a restructured text document that describes built-in magsystems
and save it to this module's docstring for the purpose of including in
sphinx documentation via the automodule directive."""
lines = ['',
' '.join([10*'=', 60*'=', 35*'=', 15*'=']),
'{0:10} {1:60} {2:35} {3:15}'
.format('Name', 'Description', 'Subclass', 'Spectrum Source')]
lines.append(lines[1])
urlnums = {}
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import string
from sncosmo.magsystems import _MAGSYSTEMS
and context:
# Path: sncosmo/magsystems.py
# _MAGSYSTEMS = Registry()
which might include code, classes, or functions. Output only the next line. | for m in _MAGSYSTEMS.get_loaders_metadata(): |
Based on the snippet: <|code_start|>
# Read header line
for item in line.split(delim):
colnames.append(item.strip())
cols.append([])
readingdata = True
continue
# Now we're reading data
items = line.split(delim)
for col, item in zip(cols, items):
col.append(_cast_str(item))
data = OrderedDict(zip(colnames, cols))
return meta, data
# -----------------------------------------------------------------------------
# Reader: salt2
def _expand_bands(band_list, meta):
"""Given a list containing band names, return a list of Bandpass objects"""
# Treat dependent bandpasses based on metadata contents
# TODO: need a way to figure out which bands are position dependent!
# for now, we assume *all* or none are.
if "X_FOCAL_PLANE" in meta and "Y_FOCAL_PLANE" in meta:
r = math.sqrt(meta["X_FOCAL_PLANE"]**2 + meta["Y_FOCAL_PLANE"]**2)
# map name to object for unique bands
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import math
import os
import numpy as np
from collections import OrderedDict
from astropy import wcs
from astropy.io import fits
from astropy.table import Table
from .bandpasses import get_bandpass
from .utils import dict_to_array
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.data import get_pkg_data_filename
and context (classes, functions, sometimes code) from other files:
# Path: sncosmo/bandpasses.py
# def get_bandpass(name, *args):
# """Get a Bandpass from the registry by name."""
# if isinstance(name, Bandpass):
# return name
# if len(args) == 0:
# return _BANDPASSES.retrieve(name)
# else:
# interp = _BANDPASS_INTERPOLATORS.retrieve(name)
# return interp.at(*args)
#
# Path: sncosmo/utils.py
# def dict_to_array(d):
# """Convert a dictionary of lists (or single values) to a structured
# numpy.ndarray."""
#
# # Convert all lists/values to 1-d arrays, in order to let numpy
# # figure out the necessary size of the string arrays.
# new_d = OrderedDict()
# for key in d:
# new_d[key] = np.atleast_1d(d[key])
#
# # Determine dtype of output array.
# dtype = [(key, arr.dtype)
# for key, arr in new_d.items()]
#
# # Initialize ndarray and then fill it.
# col_len = max([len(v) for v in new_d.values()])
# result = np.empty(col_len, dtype=dtype)
# for key in new_d:
# result[key] = new_d[key]
#
# return result
. Output only the next line. | name_to_band = {name: get_bandpass(name, r) |
Using the snippet: <|code_start|> Filename.
format : {'ascii', 'salt2', 'snana', 'json'}, optional
Format of file. Default is 'ascii'. 'salt2' is the new format available
in snfit version >= 2.3.0.
delim : str, optional
**[ascii only]** Character used to separate entries on a line.
Default is ' '.
metachar : str, optional
**[ascii only]** Metadata designator. Default is '@'.
raw : bool, optional
**[salt2, snana]** By default, the SALT2 and SNANA writers rename
some metadata keys and column names in order to comply with what
snfit and SNANA expect. Set to True to override this.
Default is False.
pedantic : bool, optional
**[salt2, snana]** If True, check that output column names and header
keys comply with expected formatting, and raise a ValueError if not.
It is probably a good idea to set to False when raw is True.
Default is True.
"""
if format not in WRITERS:
raise ValueError("Writer not defined for format {0!r}. Options: "
.format(format) + ", ".join(WRITERS.keys()))
if isinstance(data, Table):
meta = data.meta
data = np.asarray(data)
else:
meta = OrderedDict()
if not isinstance(data, np.ndarray):
<|code_end|>
, determine the next line of code. You have imports:
import json
import math
import os
import numpy as np
from collections import OrderedDict
from astropy import wcs
from astropy.io import fits
from astropy.table import Table
from .bandpasses import get_bandpass
from .utils import dict_to_array
from astropy.utils.data import get_pkg_data_filename
and context (class names, function names, or code) available:
# Path: sncosmo/bandpasses.py
# def get_bandpass(name, *args):
# """Get a Bandpass from the registry by name."""
# if isinstance(name, Bandpass):
# return name
# if len(args) == 0:
# return _BANDPASSES.retrieve(name)
# else:
# interp = _BANDPASS_INTERPOLATORS.retrieve(name)
# return interp.at(*args)
#
# Path: sncosmo/utils.py
# def dict_to_array(d):
# """Convert a dictionary of lists (or single values) to a structured
# numpy.ndarray."""
#
# # Convert all lists/values to 1-d arrays, in order to let numpy
# # figure out the necessary size of the string arrays.
# new_d = OrderedDict()
# for key in d:
# new_d[key] = np.atleast_1d(d[key])
#
# # Determine dtype of output array.
# dtype = [(key, arr.dtype)
# for key, arr in new_d.items()]
#
# # Initialize ndarray and then fill it.
# col_len = max([len(v) for v in new_d.values()])
# result = np.empty(col_len, dtype=dtype)
# for key in new_d:
# result[key] = new_d[key]
#
# return result
. Output only the next line. | data = dict_to_array(data) |
Here is a snippet: <|code_start|>"""Generate a restructured text document that describes built-in bandpasses
and save it to this module's docstring for the purpose of including in
sphinx documentation via the automodule directive."""
__all__ = [] # so that bandpass_table is not documented.
# string.ascii_letters in py3
ASCII_LETTERS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
<|code_end|>
. Write the next line using the current file imports:
from sncosmo.bandpasses import _BANDPASSES, _BANDPASS_INTERPOLATORS
and context from other files:
# Path: sncosmo/bandpasses.py
# _BANDPASSES = Registry()
#
# _BANDPASS_INTERPOLATORS = Registry()
, which may include functions, classes, or code. Output only the next line. | bandpass_meta = _BANDPASSES.get_loaders_metadata() |
Next line prediction: <|code_start|> lines.append(".. [{0}] {1}".format(refkey, ref))
return "\n".join(lines)
# -----------------------------------------------------------------------------
# Build the module docstring
# Get the names of all filtersets
setnames = []
for m in bandpass_meta:
setname = m['filterset']
if setname not in setnames:
setnames.append(setname)
# For each set of bandpasses, write a heading, the table, and a plot.
lines = []
for setname in setnames:
lines.append("")
lines.append(setname)
lines.append(len(setname) * "-")
lines.append("")
lines.append(bandpass_table(setname))
lines.append("""
.. plot::
from bandpass_plot import plot_bandpass_set
plot_bandpass_set({0!r})
""".format(setname))
# Bandpass interpolators
<|code_end|>
. Use current file imports:
(from sncosmo.bandpasses import _BANDPASSES, _BANDPASS_INTERPOLATORS)
and context including class names, function names, or small code snippets from other files:
# Path: sncosmo/bandpasses.py
# _BANDPASSES = Registry()
#
# _BANDPASS_INTERPOLATORS = Registry()
. Output only the next line. | bandpass_interpolator_meta = _BANDPASS_INTERPOLATORS.get_loaders_metadata() |
Using the snippet: <|code_start|> self._phase = phase
self._wave = wave
# ensure that fluxes are on the same scale
flux2 = flux1.max() / flux2.max() * flux2
self._model_flux1 = RectBivariateSpline(phase, wave, flux1, kx=3, ky=3)
self._model_flux2 = RectBivariateSpline(phase, wave, flux2, kx=3, ky=3)
self._parameters = np.array([1., 0.5]) # initial parameters
def _flux(self, phase, wave):
amplitude, w = self._parameters
return amplitude * ((1.0 - w) * self._model_flux1(phase, wave) +
w * self._model_flux2(phase, wave))
########################################################################
# ... and that's all that we need to define!: A couple class attributes
# (``_param_names`` and ``param_names_latex``, an ``__init__`` method,
# and a ``_flux`` method. The ``_flux`` method is guaranteed to be passed
# numpy arrays for phase and wavelength.
#
# We can now initialize an instance of this source from two spectral time
# series:
#Just as an example, we'll use some undocumented functionality in
# sncosmo to download the Nugent Ia and 2p templates. Don't rely on this
# the `DATADIR` object, or these paths in your code though, as these are
# subject to change between version of sncosmo!
phase1, wave1, flux1 = sncosmo.read_griddata_ascii(
<|code_end|>
, determine the next line of code. You have imports:
import numpy as np
import sncosmo
from scipy.interpolate import RectBivariateSpline
from sncosmo.builtins import DATADIR
from matplotlib import pyplot as plt
and context (class names, function names, or code) available:
# Path: sncosmo/builtins.py
# DATADIR = DataMirror(get_rootdir, "http://sncosmo.github.io/data")
. Output only the next line. | DATADIR.abspath('models/nugent/sn1a_flux.v1.2.dat')) |
Given snippet: <|code_start|> self._wave_unit = u.AA
# internally, flux is in F_lambda:
if unit != FLAMBDA_UNIT:
self.flux = unit.to(FLAMBDA_UNIT, self.flux,
u.spectral_density(u.AA, self.wave))
self._unit = FLAMBDA_UNIT
# Set up interpolation.
# This appears to be the fastest-evaluating interpolant in
# scipy.interpolate.
self._tck = splrep(self.wave, self.flux, k=1)
def bandflux(self, band):
"""Perform synthentic photometry in a given bandpass.
The bandpass transmission is interpolated onto the wavelength grid
of the spectrum. The result is a weighted sum of the spectral flux
density values (weighted by transmission values).
Parameters
----------
band : Bandpass or str
Bandpass object or name of registered bandpass.
Returns
-------
float
Total flux in ph/s/cm^2.
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import astropy.units as u
import numpy as np
from scipy.interpolate import splev, splrep
from .bandpasses import get_bandpass
from .constants import HC_ERG_AA, SPECTRUM_BANDFLUX_SPACING, FLAMBDA_UNIT
from .utils import integration_grid
and context:
# Path: sncosmo/bandpasses.py
# def get_bandpass(name, *args):
# """Get a Bandpass from the registry by name."""
# if isinstance(name, Bandpass):
# return name
# if len(args) == 0:
# return _BANDPASSES.retrieve(name)
# else:
# interp = _BANDPASS_INTERPOLATORS.retrieve(name)
# return interp.at(*args)
#
# Path: sncosmo/constants.py
# HC_ERG_AA = H_ERG_S * C_AA_PER_S
#
# SPECTRUM_BANDFLUX_SPACING = 1.0
#
# FLAMBDA_UNIT = u.erg / u.s / u.cm**2 / u.AA
#
# Path: sncosmo/utils.py
# def integration_grid(low, high, target_spacing):
# """Divide the range between `start` and `stop` into uniform bins
# with spacing less than or equal to `target_spacing` and return the
# bin midpoints and the actual spacing."""
#
# range_diff = high - low
# spacing = range_diff / int(math.ceil(range_diff / target_spacing))
# grid = np.arange(low + 0.5 * spacing, high, spacing)
#
# return grid, spacing
which might include code, classes, or functions. Output only the next line. | band = get_bandpass(band) |
Predict the next line after this snippet: <|code_start|> of the spectrum. The result is a weighted sum of the spectral flux
density values (weighted by transmission values).
Parameters
----------
band : Bandpass or str
Bandpass object or name of registered bandpass.
Returns
-------
float
Total flux in ph/s/cm^2.
"""
band = get_bandpass(band)
# Check that bandpass wavelength range is fully contained in spectrum
# wavelength range.
if (band.minwave() < self.wave[0] or band.maxwave() > self.wave[-1]):
raise ValueError('bandpass {0!r:s} [{1:.6g}, .., {2:.6g}] '
'outside spectral range [{3:.6g}, .., {4:.6g}]'
.format(band.name, band.minwave(), band.maxwave(),
self.wave[0], self.wave[-1]))
# Set up wavelength grid. Spacing (dwave) evenly divides the bandpass,
# closest to 5 angstroms without going over.
wave, dwave = integration_grid(band.minwave(), band.maxwave(),
SPECTRUM_BANDFLUX_SPACING)
trans = band(wave)
f = splev(wave, self._tck, ext=1)
<|code_end|>
using the current file's imports:
import astropy.units as u
import numpy as np
from scipy.interpolate import splev, splrep
from .bandpasses import get_bandpass
from .constants import HC_ERG_AA, SPECTRUM_BANDFLUX_SPACING, FLAMBDA_UNIT
from .utils import integration_grid
and any relevant context from other files:
# Path: sncosmo/bandpasses.py
# def get_bandpass(name, *args):
# """Get a Bandpass from the registry by name."""
# if isinstance(name, Bandpass):
# return name
# if len(args) == 0:
# return _BANDPASSES.retrieve(name)
# else:
# interp = _BANDPASS_INTERPOLATORS.retrieve(name)
# return interp.at(*args)
#
# Path: sncosmo/constants.py
# HC_ERG_AA = H_ERG_S * C_AA_PER_S
#
# SPECTRUM_BANDFLUX_SPACING = 1.0
#
# FLAMBDA_UNIT = u.erg / u.s / u.cm**2 / u.AA
#
# Path: sncosmo/utils.py
# def integration_grid(low, high, target_spacing):
# """Divide the range between `start` and `stop` into uniform bins
# with spacing less than or equal to `target_spacing` and return the
# bin midpoints and the actual spacing."""
#
# range_diff = high - low
# spacing = range_diff / int(math.ceil(range_diff / target_spacing))
# grid = np.arange(low + 0.5 * spacing, high, spacing)
#
# return grid, spacing
. Output only the next line. | return np.sum(wave * trans * f) * dwave / HC_ERG_AA |
Continue the code snippet: <|code_start|> def bandflux(self, band):
"""Perform synthentic photometry in a given bandpass.
The bandpass transmission is interpolated onto the wavelength grid
of the spectrum. The result is a weighted sum of the spectral flux
density values (weighted by transmission values).
Parameters
----------
band : Bandpass or str
Bandpass object or name of registered bandpass.
Returns
-------
float
Total flux in ph/s/cm^2.
"""
band = get_bandpass(band)
# Check that bandpass wavelength range is fully contained in spectrum
# wavelength range.
if (band.minwave() < self.wave[0] or band.maxwave() > self.wave[-1]):
raise ValueError('bandpass {0!r:s} [{1:.6g}, .., {2:.6g}] '
'outside spectral range [{3:.6g}, .., {4:.6g}]'
.format(band.name, band.minwave(), band.maxwave(),
self.wave[0], self.wave[-1]))
# Set up wavelength grid. Spacing (dwave) evenly divides the bandpass,
# closest to 5 angstroms without going over.
wave, dwave = integration_grid(band.minwave(), band.maxwave(),
<|code_end|>
. Use current file imports:
import astropy.units as u
import numpy as np
from scipy.interpolate import splev, splrep
from .bandpasses import get_bandpass
from .constants import HC_ERG_AA, SPECTRUM_BANDFLUX_SPACING, FLAMBDA_UNIT
from .utils import integration_grid
and context (classes, functions, or code) from other files:
# Path: sncosmo/bandpasses.py
# def get_bandpass(name, *args):
# """Get a Bandpass from the registry by name."""
# if isinstance(name, Bandpass):
# return name
# if len(args) == 0:
# return _BANDPASSES.retrieve(name)
# else:
# interp = _BANDPASS_INTERPOLATORS.retrieve(name)
# return interp.at(*args)
#
# Path: sncosmo/constants.py
# HC_ERG_AA = H_ERG_S * C_AA_PER_S
#
# SPECTRUM_BANDFLUX_SPACING = 1.0
#
# FLAMBDA_UNIT = u.erg / u.s / u.cm**2 / u.AA
#
# Path: sncosmo/utils.py
# def integration_grid(low, high, target_spacing):
# """Divide the range between `start` and `stop` into uniform bins
# with spacing less than or equal to `target_spacing` and return the
# bin midpoints and the actual spacing."""
#
# range_diff = high - low
# spacing = range_diff / int(math.ceil(range_diff / target_spacing))
# grid = np.arange(low + 0.5 * spacing, high, spacing)
#
# return grid, spacing
. Output only the next line. | SPECTRUM_BANDFLUX_SPACING) |
Based on the snippet: <|code_start|>class SpectrumModel(object):
"""A model spectrum, representing wavelength and spectral density values.
Parameters
----------
wave : list_like
Wavelength values.
flux : list_like
Spectral flux density values.
wave_unit : `~astropy.units.Unit`
Unit on wavelength.
unit : `~astropy.units.BaseUnit`
For now, only units with flux density in energy (not photon counts).
"""
def __init__(self, wave, flux, wave_unit=u.AA,
unit=(u.erg / u.s / u.cm**2 / u.AA)):
self.wave = np.asarray(wave, dtype=np.float64)
self.flux = np.asarray(flux, dtype=np.float64)
if self.wave.shape != self.flux.shape:
raise ValueError('shape of wavelength and flux must match')
if self.wave.ndim != 1:
raise ValueError('only 1-d arrays supported')
# internally, wavelength is in Angstroms:
if wave_unit != u.AA:
self.wave = wave_unit.to(u.AA, self.wave, u.spectral())
self._wave_unit = u.AA
# internally, flux is in F_lambda:
<|code_end|>
, predict the immediate next line with the help of imports:
import astropy.units as u
import numpy as np
from scipy.interpolate import splev, splrep
from .bandpasses import get_bandpass
from .constants import HC_ERG_AA, SPECTRUM_BANDFLUX_SPACING, FLAMBDA_UNIT
from .utils import integration_grid
and context (classes, functions, sometimes code) from other files:
# Path: sncosmo/bandpasses.py
# def get_bandpass(name, *args):
# """Get a Bandpass from the registry by name."""
# if isinstance(name, Bandpass):
# return name
# if len(args) == 0:
# return _BANDPASSES.retrieve(name)
# else:
# interp = _BANDPASS_INTERPOLATORS.retrieve(name)
# return interp.at(*args)
#
# Path: sncosmo/constants.py
# HC_ERG_AA = H_ERG_S * C_AA_PER_S
#
# SPECTRUM_BANDFLUX_SPACING = 1.0
#
# FLAMBDA_UNIT = u.erg / u.s / u.cm**2 / u.AA
#
# Path: sncosmo/utils.py
# def integration_grid(low, high, target_spacing):
# """Divide the range between `start` and `stop` into uniform bins
# with spacing less than or equal to `target_spacing` and return the
# bin midpoints and the actual spacing."""
#
# range_diff = high - low
# spacing = range_diff / int(math.ceil(range_diff / target_spacing))
# grid = np.arange(low + 0.5 * spacing, high, spacing)
#
# return grid, spacing
. Output only the next line. | if unit != FLAMBDA_UNIT: |
Next line prediction: <|code_start|>
def bandflux(self, band):
"""Perform synthentic photometry in a given bandpass.
The bandpass transmission is interpolated onto the wavelength grid
of the spectrum. The result is a weighted sum of the spectral flux
density values (weighted by transmission values).
Parameters
----------
band : Bandpass or str
Bandpass object or name of registered bandpass.
Returns
-------
float
Total flux in ph/s/cm^2.
"""
band = get_bandpass(band)
# Check that bandpass wavelength range is fully contained in spectrum
# wavelength range.
if (band.minwave() < self.wave[0] or band.maxwave() > self.wave[-1]):
raise ValueError('bandpass {0!r:s} [{1:.6g}, .., {2:.6g}] '
'outside spectral range [{3:.6g}, .., {4:.6g}]'
.format(band.name, band.minwave(), band.maxwave(),
self.wave[0], self.wave[-1]))
# Set up wavelength grid. Spacing (dwave) evenly divides the bandpass,
# closest to 5 angstroms without going over.
<|code_end|>
. Use current file imports:
(import astropy.units as u
import numpy as np
from scipy.interpolate import splev, splrep
from .bandpasses import get_bandpass
from .constants import HC_ERG_AA, SPECTRUM_BANDFLUX_SPACING, FLAMBDA_UNIT
from .utils import integration_grid)
and context including class names, function names, or small code snippets from other files:
# Path: sncosmo/bandpasses.py
# def get_bandpass(name, *args):
# """Get a Bandpass from the registry by name."""
# if isinstance(name, Bandpass):
# return name
# if len(args) == 0:
# return _BANDPASSES.retrieve(name)
# else:
# interp = _BANDPASS_INTERPOLATORS.retrieve(name)
# return interp.at(*args)
#
# Path: sncosmo/constants.py
# HC_ERG_AA = H_ERG_S * C_AA_PER_S
#
# SPECTRUM_BANDFLUX_SPACING = 1.0
#
# FLAMBDA_UNIT = u.erg / u.s / u.cm**2 / u.AA
#
# Path: sncosmo/utils.py
# def integration_grid(low, high, target_spacing):
# """Divide the range between `start` and `stop` into uniform bins
# with spacing less than or equal to `target_spacing` and return the
# bin midpoints and the actual spacing."""
#
# range_diff = high - low
# spacing = range_diff / int(math.ceil(range_diff / target_spacing))
# grid = np.arange(low + 0.5 * spacing, high, spacing)
#
# return grid, spacing
. Output only the next line. | wave, dwave = integration_grid(band.minwave(), band.maxwave(), |
Using the snippet: <|code_start|>
"""Test downloading all of the builtins
These tests download lots of files (~1.2 GB as of Oct. 12, 2021) so they
aren't included by default with the regular tests. They can be run with
`tox -e builtins`. This will make sure that the downloads happen in a clean
environment without any caching.
"""
<|code_end|>
, determine the next line of code. You have imports:
import pytest
import sncosmo
from sncosmo.bandpasses import _BANDPASSES, _BANDPASS_INTERPOLATORS
from sncosmo.magsystems import _MAGSYSTEMS
from sncosmo.models import _SOURCES
and context (class names, function names, or code) available:
# Path: sncosmo/bandpasses.py
# _BANDPASSES = Registry()
#
# _BANDPASS_INTERPOLATORS = Registry()
#
# Path: sncosmo/magsystems.py
# _MAGSYSTEMS = Registry()
#
# Path: sncosmo/models.py
# _SOURCES = Registry()
. Output only the next line. | bandpasses = [i['name'] for i in _BANDPASSES.get_loaders_metadata()] |
Given the code snippet: <|code_start|>
"""Test downloading all of the builtins
These tests download lots of files (~1.2 GB as of Oct. 12, 2021) so they
aren't included by default with the regular tests. They can be run with
`tox -e builtins`. This will make sure that the downloads happen in a clean
environment without any caching.
"""
bandpasses = [i['name'] for i in _BANDPASSES.get_loaders_metadata()]
bandpass_interpolators = [i['name'] for i in
<|code_end|>
, generate the next line using the imports in this file:
import pytest
import sncosmo
from sncosmo.bandpasses import _BANDPASSES, _BANDPASS_INTERPOLATORS
from sncosmo.magsystems import _MAGSYSTEMS
from sncosmo.models import _SOURCES
and context (functions, classes, or occasionally code) from other files:
# Path: sncosmo/bandpasses.py
# _BANDPASSES = Registry()
#
# _BANDPASS_INTERPOLATORS = Registry()
#
# Path: sncosmo/magsystems.py
# _MAGSYSTEMS = Registry()
#
# Path: sncosmo/models.py
# _SOURCES = Registry()
. Output only the next line. | _BANDPASS_INTERPOLATORS.get_loaders_metadata()] |
Given the following code snippet before the placeholder: <|code_start|>
"""Test downloading all of the builtins
These tests download lots of files (~1.2 GB as of Oct. 12, 2021) so they
aren't included by default with the regular tests. They can be run with
`tox -e builtins`. This will make sure that the downloads happen in a clean
environment without any caching.
"""
bandpasses = [i['name'] for i in _BANDPASSES.get_loaders_metadata()]
bandpass_interpolators = [i['name'] for i in
_BANDPASS_INTERPOLATORS.get_loaders_metadata()]
<|code_end|>
, predict the next line using imports from the current file:
import pytest
import sncosmo
from sncosmo.bandpasses import _BANDPASSES, _BANDPASS_INTERPOLATORS
from sncosmo.magsystems import _MAGSYSTEMS
from sncosmo.models import _SOURCES
and context including class names, function names, and sometimes code from other files:
# Path: sncosmo/bandpasses.py
# _BANDPASSES = Registry()
#
# _BANDPASS_INTERPOLATORS = Registry()
#
# Path: sncosmo/magsystems.py
# _MAGSYSTEMS = Registry()
#
# Path: sncosmo/models.py
# _SOURCES = Registry()
. Output only the next line. | magsystems = [i['name'] for i in _MAGSYSTEMS.get_loaders_metadata()] |
Continue the code snippet: <|code_start|>
"""Test downloading all of the builtins
These tests download lots of files (~1.2 GB as of Oct. 12, 2021) so they
aren't included by default with the regular tests. They can be run with
`tox -e builtins`. This will make sure that the downloads happen in a clean
environment without any caching.
"""
bandpasses = [i['name'] for i in _BANDPASSES.get_loaders_metadata()]
bandpass_interpolators = [i['name'] for i in
_BANDPASS_INTERPOLATORS.get_loaders_metadata()]
magsystems = [i['name'] for i in _MAGSYSTEMS.get_loaders_metadata()]
<|code_end|>
. Use current file imports:
import pytest
import sncosmo
from sncosmo.bandpasses import _BANDPASSES, _BANDPASS_INTERPOLATORS
from sncosmo.magsystems import _MAGSYSTEMS
from sncosmo.models import _SOURCES
and context (classes, functions, or code) from other files:
# Path: sncosmo/bandpasses.py
# _BANDPASSES = Registry()
#
# _BANDPASS_INTERPOLATORS = Registry()
#
# Path: sncosmo/magsystems.py
# _MAGSYSTEMS = Registry()
#
# Path: sncosmo/models.py
# _SOURCES = Registry()
. Output only the next line. | sources = [(i['name'], i['version']) for i in _SOURCES.get_loaders_metadata()] |
Predict the next line after this snippet: <|code_start|> Notes
-----
``skynoise`` is the image background contribution to the flux measurement
error (in units corresponding to the specified zeropoint and zeropoint
system). To get the error on a given measurement, ``skynoise`` is added
in quadrature to the photon noise from the source.
It is left up to the user to calculate ``skynoise`` as they see fit as the
details depend on how photometry is done and possibly how the PSF is
is modeled. As a simple example, assuming a Gaussian PSF, and perfect
PSF photometry, ``skynoise`` would be ``4 * pi * sigma_PSF * sigma_pixel``
where ``sigma_PSF`` is the standard deviation of the PSF in pixels and
``sigma_pixel`` is the background noise in a single pixel in counts.
"""
RESULT_COLNAMES = ('time', 'band', 'flux', 'fluxerr', 'zp', 'zpsys')
lcs = []
# Copy model so we don't mess up the user's model.
model = copy.copy(model)
# get observations as a Table
if not isinstance(observations, Table):
if isinstance(observations, np.ndarray):
observations = Table(observations)
else:
raise ValueError("observations not understood")
# map column name aliases
<|code_end|>
using the current file's imports:
import copy
import numpy as np
from collections import OrderedDict
from astropy.cosmology import FlatLambdaCDM
from astropy.table import Table
from numpy import random
from scipy.interpolate import InterpolatedUnivariateSpline as Spline1d
from .utils import alias_map
and any relevant context from other files:
# Path: sncosmo/utils.py
# def alias_map(aliased, aliases, required=()):
# """For each key in ``aliases``, find the item in ``aliased`` matching
# exactly one of the corresponding items in ``aliases``.
#
# Parameters
# ----------
# aliased : list of str
# Input keys, will be values in output map.
# aliases : dict of sets
# Dictionary where keys are "canonical name" and values are sets of
# possible aliases.
# required : list_like
# Keys in ``aliases`` that are considered required. An error is raised
# if no alias is found in ``aliased``.
#
#
# Returns
# -------
#
# Example::
#
# >>> aliases = {'a':set(['a', 'a_']), 'b':set(['b', 'b_'])}
# >>> alias_map(['A', 'B_', 'foo'], aliases)
# {'a': 'A', 'b': 'B_'}
#
#
#
# """
# lowered_to_orig = {key.lower(): key for key in aliased}
# lowered = set(lowered_to_orig.keys())
# mapping = {}
# for key, key_aliases in aliases.items():
# common = lowered & key_aliases
# if len(common) == 1:
# mapping[key] = lowered_to_orig[common.pop()]
#
# elif len(common) == 0 and key in required:
# raise ValueError('no alias found for {!r} (possible '
# 'case-independent aliases: {})'.format(
# key,
# ', '.join(repr(ka) for ka in key_aliases)))
# elif len(common) > 1:
# raise ValueError('multiple aliases found for {!r}: {}'
# .format(key, ', '.join(repr(a) for a in common)))
#
# return mapping
. Output only the next line. | colname = alias_map(observations.colnames, OBSERVATIONS_ALIASES, |
Predict the next line after this snippet: <|code_start|> # when this test was written (does not test whether the number is
# correct)
assert len(z) == 14
# check that all values are indeed between the input limits.
zarr = np.array(z)
assert np.all((zarr > 0.) & (zarr < 0.25))
def test_realize_lcs():
# here's some completely made-up data:
obs1 = Table({'time': [10., 60., 110.],
'band': ['bessellb', 'bessellr', 'besselli'],
'gain': [1., 1., 1.],
'skynoise': [100., 100., 100.],
'zp': [30., 30., 30.],
'zpsys': ['ab', 'ab', 'ab']})
# same made up data with aliased column names:
obs2 = Table({'MJD': [10., 60., 110.],
'filter': ['bessellb', 'bessellr', 'besselli'],
'GAIN': [1., 1., 1.],
'skynoise': [100., 100., 100.],
'ZPT': [30., 30., 30.],
'zpmagsys': ['ab', 'ab', 'ab']})
for obs in (obs1, obs2):
# A model with a flat spectrum between 0 and 100 days.
<|code_end|>
using the current file's imports:
import numpy as np
import sncosmo
from astropy.table import Table
from .test_models import flatsource
and any relevant context from other files:
# Path: sncosmo/tests/test_models.py
# def flatsource():
# """Create and return a TimeSeriesSource with a flat spectrum == 1.0 at
# all times."""
# phase = np.linspace(0., 100., 10)
# wave = np.linspace(800., 20000., 100)
# flux = np.ones((len(phase), len(wave)), dtype=float)
# return sncosmo.TimeSeriesSource(phase, wave, flux)
. Output only the next line. | model = sncosmo.Model(source=flatsource()) |
Predict the next line for this snippet: <|code_start|> """Effect with transmission 0 below cutoff wavelength, 1 above.
Useful for testing behavior with redshift."""
_param_names = ['stepwave']
param_names_latex = [r'\lambda_{s}']
def __init__(self, minwave, maxwave, stepwave=10000.):
self._minwave = minwave
self._maxwave = maxwave
self._parameters = np.array([stepwave], dtype=np.float64)
def propagate(self, wave, flux):
return flux * (wave >= self._parameters[0])
class TestTimeSeriesSource:
def setup_class(self):
self.source = flatsource()
def test_flux(self):
for a in [1., 2.]:
self.source.set(amplitude=a)
assert_allclose(self.source.flux(1., 2000.), a)
assert_allclose(self.source.flux(1., [2000.]), np.array([a]))
assert_allclose(self.source.flux([1.], 2000.), np.array([[a]]))
assert_allclose(self.source.flux([1.], [2000.]), np.array([[a]]))
def test_getsource(self):
# register the source & retrieve it from the registry
<|code_end|>
with the help of current file imports:
from io import StringIO
from numpy.testing import assert_allclose, assert_approx_equal
from sncosmo import registry
import numpy as np
import sncosmo
and context from other files:
# Path: sncosmo/registry.py
# def _get_registry(data_class):
# def register_loader(data_class, name, func, args=None,
# version=None, meta=None, force=False):
# def register(instance, name=None, data_class=None, force=False):
# def retrieve(data_class, name, version=None):
, which may contain function names, class names, or code. Output only the next line. | registry.register(self.source, name="testsource", |
Next line prediction: <|code_start|>"""Generate a restructured text document that describes built-in sources
and save it to this module's docstring for the purpose of including in
sphinx documentation via the automodule directive."""
lines = [
'',
' '.join([30*'=', 7*'=', 10*'=', 27*'=', 30*'=', 7*'=', 20*'=']),
'{0:30} {1:7} {2:10} {3:27} {4:30} {5:7} {6:50}'.format(
'Name', 'Version', 'Type', 'Subclass', 'Reference', 'Website', 'Notes')
]
lines.append(lines[1])
urlnums = {}
allnotes = []
allrefs = []
<|code_end|>
. Use current file imports:
(import string
from sncosmo.models import _SOURCES)
and context including class names, function names, or small code snippets from other files:
# Path: sncosmo/models.py
# _SOURCES = Registry()
. Output only the next line. | for m in _SOURCES.get_loaders_metadata(): |
Continue the code snippet: <|code_start|>
# Descriptions for docstring only.
_photdata_descriptions = {
'time': 'Time of observation in days',
'band': 'Bandpass of observation',
'flux': 'Flux of observation',
'fluxerr': 'Gaussian uncertainty on flux',
'zp': 'Zeropoint corresponding to flux',
'zpsys': 'Magnitude system for zeropoint',
'fluxcov': 'Covariance between observations (array; optional)'
}
_photdata_types = {
'time': 'float',
'band': 'str',
'flux': 'float',
'fluxerr': 'float',
'zp': 'float',
'zpsys': 'str',
'fluxcov': 'ndarray'
}
lines = [
'',
' '.join([10 * '=', 60 * '=', 50 * '=', 50 * '=']),
'{0:10} {1:60} {2:50} {3:50}'
.format('Column', 'Acceptable aliases (case-independent)',
'Description', 'Type')
]
lines.append(lines[1])
<|code_end|>
. Use current file imports:
from sncosmo.photdata import PHOTDATA_ALIASES
and context (classes, functions, or code) from other files:
# Path: sncosmo/photdata.py
# PHOTDATA_ALIASES = OrderedDict([
# ('time', {'time', 'date', 'jd', 'mjd', 'mjdobs', 'mjd_obs'}),
# ('band', {'band', 'bandpass', 'filter', 'flt'}),
# ('flux', {'flux', 'f'}),
# ('fluxerr', {'fluxerr', 'fe', 'fluxerror', 'flux_error', 'flux_err'}),
# ('zp', {'zp', 'zpt', 'zeropoint', 'zero_point', 'zeropt'}),
# ('zpsys', {'zpsys', 'zpmagsys', 'magsys'}),
# ('fluxcov', {'cov', 'covar', 'covariance', 'covmat', 'fluxcov'})
# ])
. Output only the next line. | for colname in PHOTDATA_ALIASES: |
Based on the snippet: <|code_start|> Parameters
----------
data : `~astropy.table.Table`, dict, `~numpy.ndarray`
Astropy Table, dictionary of arrays or structured numpy array
containing the "correct" column names.
"""
def __init__(self, data):
# get column names in input data
if isinstance(data, Table):
colnames = data.colnames
elif isinstance(data, np.ndarray):
colnames = data.dtype.names
elif isinstance(data, dict):
colnames = data.keys()
else:
raise ValueError('unrecognized data type')
mapping = alias_map(colnames, PHOTDATA_ALIASES,
required=PHOTDATA_REQUIRED_ALIASES)
self.time = np.asarray(data[mapping['time']])
# ensure self.band contains Bandpass objects. (We could check
# if the original array already contains all bandpass objects,
# but constructing a new array is simpler.)
band_orig = data[mapping['band']]
self.band = np.empty(len(band_orig), dtype=object)
for i in range(len(band_orig)):
<|code_end|>
, predict the immediate next line with the help of imports:
import copy
import numpy as np
from collections import OrderedDict
from astropy.table import Table
from .bandpasses import get_bandpass
from .magsystems import get_magsystem
from .utils import alias_map
and context (classes, functions, sometimes code) from other files:
# Path: sncosmo/bandpasses.py
# def get_bandpass(name, *args):
# """Get a Bandpass from the registry by name."""
# if isinstance(name, Bandpass):
# return name
# if len(args) == 0:
# return _BANDPASSES.retrieve(name)
# else:
# interp = _BANDPASS_INTERPOLATORS.retrieve(name)
# return interp.at(*args)
#
# Path: sncosmo/magsystems.py
# def get_magsystem(name):
# """Get a MagSystem from the registry by name."""
# if isinstance(name, MagSystem):
# return name
# return _MAGSYSTEMS.retrieve(name)
#
# Path: sncosmo/utils.py
# def alias_map(aliased, aliases, required=()):
# """For each key in ``aliases``, find the item in ``aliased`` matching
# exactly one of the corresponding items in ``aliases``.
#
# Parameters
# ----------
# aliased : list of str
# Input keys, will be values in output map.
# aliases : dict of sets
# Dictionary where keys are "canonical name" and values are sets of
# possible aliases.
# required : list_like
# Keys in ``aliases`` that are considered required. An error is raised
# if no alias is found in ``aliased``.
#
#
# Returns
# -------
#
# Example::
#
# >>> aliases = {'a':set(['a', 'a_']), 'b':set(['b', 'b_'])}
# >>> alias_map(['A', 'B_', 'foo'], aliases)
# {'a': 'A', 'b': 'B_'}
#
#
#
# """
# lowered_to_orig = {key.lower(): key for key in aliased}
# lowered = set(lowered_to_orig.keys())
# mapping = {}
# for key, key_aliases in aliases.items():
# common = lowered & key_aliases
# if len(common) == 1:
# mapping[key] = lowered_to_orig[common.pop()]
#
# elif len(common) == 0 and key in required:
# raise ValueError('no alias found for {!r} (possible '
# 'case-independent aliases: {})'.format(
# key,
# ', '.join(repr(ka) for ka in key_aliases)))
# elif len(common) > 1:
# raise ValueError('multiple aliases found for {!r}: {}'
# .format(key, ', '.join(repr(a) for a in common)))
#
# return mapping
. Output only the next line. | self.band[i] = get_bandpass(band_orig[i]) |
Given the following code snippet before the placeholder: <|code_start|> newdata.zpsys = self.zpsys[key]
newdata.fluxcov = (None if self.fluxcov is None else
self.fluxcov[np.ix_(key, key)])
return newdata
def normalized(self, zp=25., zpsys='ab'):
"""Return a copy of the data with all flux and fluxerr values
normalized to the given zeropoint.
"""
factor = self._normalization_factor(zp, zpsys)
newdata = copy.copy(self)
newdata.flux = factor * self.flux
newdata.fluxerr = factor * self.fluxerr
newdata.zp = np.full(len(self), zp, dtype=np.float64)
newdata.zpsys = np.full(len(self), zpsys, dtype=np.array(zpsys).dtype)
if newdata.fluxcov is not None:
newdata.fluxcov = factor * factor[:, None] * self.fluxcov
return newdata
def normalized_flux(self, zp=25., zpsys='ab'):
return self._normalization_factor(zp, zpsys) * self.flux
def _normalization_factor(self, zp, zpsys):
"""Factor such that multiplying by this amount brings all fluxes onto
the given zeropoint and zeropoint system."""
<|code_end|>
, predict the next line using imports from the current file:
import copy
import numpy as np
from collections import OrderedDict
from astropy.table import Table
from .bandpasses import get_bandpass
from .magsystems import get_magsystem
from .utils import alias_map
and context including class names, function names, and sometimes code from other files:
# Path: sncosmo/bandpasses.py
# def get_bandpass(name, *args):
# """Get a Bandpass from the registry by name."""
# if isinstance(name, Bandpass):
# return name
# if len(args) == 0:
# return _BANDPASSES.retrieve(name)
# else:
# interp = _BANDPASS_INTERPOLATORS.retrieve(name)
# return interp.at(*args)
#
# Path: sncosmo/magsystems.py
# def get_magsystem(name):
# """Get a MagSystem from the registry by name."""
# if isinstance(name, MagSystem):
# return name
# return _MAGSYSTEMS.retrieve(name)
#
# Path: sncosmo/utils.py
# def alias_map(aliased, aliases, required=()):
# """For each key in ``aliases``, find the item in ``aliased`` matching
# exactly one of the corresponding items in ``aliases``.
#
# Parameters
# ----------
# aliased : list of str
# Input keys, will be values in output map.
# aliases : dict of sets
# Dictionary where keys are "canonical name" and values are sets of
# possible aliases.
# required : list_like
# Keys in ``aliases`` that are considered required. An error is raised
# if no alias is found in ``aliased``.
#
#
# Returns
# -------
#
# Example::
#
# >>> aliases = {'a':set(['a', 'a_']), 'b':set(['b', 'b_'])}
# >>> alias_map(['A', 'B_', 'foo'], aliases)
# {'a': 'A', 'b': 'B_'}
#
#
#
# """
# lowered_to_orig = {key.lower(): key for key in aliased}
# lowered = set(lowered_to_orig.keys())
# mapping = {}
# for key, key_aliases in aliases.items():
# common = lowered & key_aliases
# if len(common) == 1:
# mapping[key] = lowered_to_orig[common.pop()]
#
# elif len(common) == 0 and key in required:
# raise ValueError('no alias found for {!r} (possible '
# 'case-independent aliases: {})'.format(
# key,
# ', '.join(repr(ka) for ka in key_aliases)))
# elif len(common) > 1:
# raise ValueError('multiple aliases found for {!r}: {}'
# .format(key, ', '.join(repr(a) for a in common)))
#
# return mapping
. Output only the next line. | normmagsys = get_magsystem(zpsys) |
Here is a snippet: <|code_start|>class PhotometricData(object):
"""Internal standardized representation of photometric data table.
Has attributes ``time``, ``band``, ``flux``, ``fluxerr``, ``zp``
and ``zpsys``, which are all numpy arrays of the same length
sorted by ``time``. ``band`` is an array of Bandpass objects. This
is intended for use within sncosmo; its implementation may change
without warning in future versions.
Has attribute ``fluxcov`` which may be ``None``.
Parameters
----------
data : `~astropy.table.Table`, dict, `~numpy.ndarray`
Astropy Table, dictionary of arrays or structured numpy array
containing the "correct" column names.
"""
def __init__(self, data):
# get column names in input data
if isinstance(data, Table):
colnames = data.colnames
elif isinstance(data, np.ndarray):
colnames = data.dtype.names
elif isinstance(data, dict):
colnames = data.keys()
else:
raise ValueError('unrecognized data type')
<|code_end|>
. Write the next line using the current file imports:
import copy
import numpy as np
from collections import OrderedDict
from astropy.table import Table
from .bandpasses import get_bandpass
from .magsystems import get_magsystem
from .utils import alias_map
and context from other files:
# Path: sncosmo/bandpasses.py
# def get_bandpass(name, *args):
# """Get a Bandpass from the registry by name."""
# if isinstance(name, Bandpass):
# return name
# if len(args) == 0:
# return _BANDPASSES.retrieve(name)
# else:
# interp = _BANDPASS_INTERPOLATORS.retrieve(name)
# return interp.at(*args)
#
# Path: sncosmo/magsystems.py
# def get_magsystem(name):
# """Get a MagSystem from the registry by name."""
# if isinstance(name, MagSystem):
# return name
# return _MAGSYSTEMS.retrieve(name)
#
# Path: sncosmo/utils.py
# def alias_map(aliased, aliases, required=()):
# """For each key in ``aliases``, find the item in ``aliased`` matching
# exactly one of the corresponding items in ``aliases``.
#
# Parameters
# ----------
# aliased : list of str
# Input keys, will be values in output map.
# aliases : dict of sets
# Dictionary where keys are "canonical name" and values are sets of
# possible aliases.
# required : list_like
# Keys in ``aliases`` that are considered required. An error is raised
# if no alias is found in ``aliased``.
#
#
# Returns
# -------
#
# Example::
#
# >>> aliases = {'a':set(['a', 'a_']), 'b':set(['b', 'b_'])}
# >>> alias_map(['A', 'B_', 'foo'], aliases)
# {'a': 'A', 'b': 'B_'}
#
#
#
# """
# lowered_to_orig = {key.lower(): key for key in aliased}
# lowered = set(lowered_to_orig.keys())
# mapping = {}
# for key, key_aliases in aliases.items():
# common = lowered & key_aliases
# if len(common) == 1:
# mapping[key] = lowered_to_orig[common.pop()]
#
# elif len(common) == 0 and key in required:
# raise ValueError('no alias found for {!r} (possible '
# 'case-independent aliases: {})'.format(
# key,
# ', '.join(repr(ka) for ka in key_aliases)))
# elif len(common) > 1:
# raise ValueError('multiple aliases found for {!r}: {}'
# .format(key, ', '.join(repr(a) for a in common)))
#
# return mapping
, which may include functions, classes, or code. Output only the next line. | mapping = alias_map(colnames, PHOTDATA_ALIASES, |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
# ==============================================================================
# Stores an io-implied do list
class IODoList(BasicRepr):
# Stores '(obj1, obj2, ..., objn, i=1, n)
def __init__(self,sParOpen, obj1, sComma1):
self.sParOpen = sParOpen
self.sParClose = None
<|code_end|>
with the help of current file imports:
import string
from types import StringType, IntType
from AOR.DoubleList import DoubleList
from AOR.BasicStatement import BasicStatement, BasicRepr
from AOR.Test.AllocateTest import RunTest
and context from other files:
# Path: AOR/DoubleList.py
# class DoubleList:
#
# # Constructor.
# def __init__(self, l1=None, l2=None):
# if l1:
# if type(l1) is ListType:
# self.l1 = l1
# else:
# self.l1 = [l1]
# else:
# self.l1 = []
# if l2:
# if type(l2) is ListType:
# self.l2 = l2
# else:
# self.l2 = [l2]
# else:
# self.l2 = []
# # --------------------------------------------------------------------------
# # This must distinguish the two above described cases
# def append(self, e1, e2=None):
# if not e2:
# # This simplifies creating statements manually, since the
# # , does not have to be added, (e.g. addVar(a), addVar(b)
# # --> the , between say call(a,b) will be added automatically)
# if len(self.l1)-1==len(self.l2):
# self.l1.append(e1)
# self.l2.append(",")
# else:
# self.l1.append(e1)
# return
# if len(self.l1)==len(self.l2):
# self.l1.append(e1)
# self.l2.append(e2)
# else:
# self.l2.append(e1)
# self.l1.append(e2)
# # --------------------------------------------------------------------------
# # Returns the 'main' list: l1
# def GetMainList(self): return self.l1
# # --------------------------------------------------------------------------
# def lGetSecondaryList(self): return self.l2
# # --------------------------------------------------------------------------
# # Returns the number of elements in the main list, i.e. l1
# def __len__(self): return len(self.l1)
# # --------------------------------------------------------------------------
# def __getitem__(self, n): return self.l1[n]
# # --------------------------------------------------------------------------
# def lGetAsSimpleList(self):
# l = []
# n = len(self.l1)
# if n==0: return []
# # Handle everything except the last element
# for i in range(n-1):
# l.append(self.l1[i])
# l.append(self.l2[i])
#
# # Add the last element
# l.append(self.l1[n-1])
# # Add the corresponding element from the 2nd list if it exists
# if len(self.l2)==n:
# l.append(self.l2[n-1])
# return l
# # --------------------------------------------------------------------------
# def ToList(self, stylesheet, l, bAddSpace=None):
# n = len(self.l1)
# if n==0:
# return
# if self.l1[0]:
# stylesheet.ToList(self.l1[0], l)
#
# #if type(lTemp) is StringType:
# # l=[lTemp]
# #else:
# # l = lTemp
# #print "doublelist",self.l1,self.l2
# if bAddSpace:
# nIndent = 1
# else:
# nIndent = 0
# for i in range(1,len(self.l1)):
# l.append(self.l2[i-1])
# if self.l1[i]:
# l.indent(nIndent)
# stylesheet.ToList(self.l1[i], l)
# if len(self.l1)==len(self.l2):
# l.indent(nIndent)
# stylesheet.ToList(self.l2[-1], l)
# # --------------------------------------------------------------------------
# def __repr__(self):
# return "%sx%s"%(`self.l1`, `self.l2`)
#
# Path: AOR/BasicStatement.py
# class BasicStatement(Label, Location):
#
# def __init__(self, sLabel=None, loc=None, nIndent=0, isDeclaration=1):
# Label.__init__(self, sLabel)
# Location.__init__(self, loc)
# self._isDeclaration=isDeclaration
# self.nIndent=nIndent
# # --------------------------------------------------------------------------
# def isA(self, c):
# return self.__class__ == c
# # --------------------------------------------------------------------------
# # Returns true if the statement is a 'real' fortran statement (as opposed to
# # a compiler directive, comment line, preprocessor directive). This method
# # is overwritten in those classes.
# def isStatement(self): return 1
# # --------------------------------------------------------------------------
# # Returns if this statement is a declaration statement or not
# def isDeclaration(self): return self._isDeclaration
# # --------------------------------------------------------------------------
# def ToString(self, stylesheet): return stylesheet.ToString(self)
# # --------------------------------------------------------------------------
# def GetIndentation(self): return self.nIndent
# # --------------------------------------------------------------------------
# def GetVarUsage(self, varUsage, sType="", obj=None, loc=None): return
# # --------------------------------------------------------------------------
# def ToList(self, stylesheet, l):
# Label.ToList(self, stylesheet, l)
# l.append(stylesheet.GetIndentation(self))
# # --------------------------------------------------------------------------
# def __str__(self): return self.__repr__()
# # --------------------------------------------------------------------------
# def __repr__(self):
# stylesheet = DefaultStylesheet()
# return "%s%s"%(Label.__repr__(self),
# "".join(stylesheet.ToString(self)))
#
# class BasicRepr:
# def __repr__(self):
# ssheet = DefaultStylesheet()
# return self.ToString(ssheet)
# # --------------------------------------------------------------------------
# def ToString(self, stylesheet): return stylesheet.ToString(self)
# # --------------------------------------------------------------------------
# def __str__(self): return self.__repr__()
, which may contain function names, class names, or code. Output only the next line. | self.lObj = DoubleList(l1=[obj1], l2=[sComma1]) |
Predict the next line after this snippet: <|code_start|> line_type=self.CONT_STATEMENT
# check for macro-token types (string literals, continuation
# markers,statement separators (;) and trailing comments)
col_num = self.Tokenise(line_num,col_num,everything[j])
# if token was an unfinished quote we will need to set line_type
if self.tokens[-1][2]==self.tok_QUOTE_START \
and line_type!=self.CONT_SQUOTE and line_type!=self.CONT_DQUOTE:
if everything[j][0]=='\'':
line_type = self.CONT_SQUOTE
else:
line_type = self.CONT_DQUOTE
if remainder!='':
self.AppendAttribute(Comment(remainder, (line_num, col_num)))
if line_type==self.NEW_STATEMENT:
self.AppendToken(line_num, col_num, self.tok_SEPARATOR, "")
if self.lPrefix:
if len(self.tokens)==0:
# This can happen if we have a (usually include) file which doesn't
# have any real tokens, e.g. only comments and another include.
# Any include file in the current file IS handled correctly, just
# the current file can not be correctly represented, since there is
# no way to store the attributes.
return
prevtok = self.tokens[-1]
sOld = prevtok[3]
if sOld.__class__==str:
<|code_end|>
using the current file's imports:
import re
from string import expandtabs,rstrip,lstrip,strip, upper
from Token import Token, Token2Name
from AOR.AttributeString import AttributeString
from AOR.SpecialStatements import Comment, ContMarker, CommentLine
from Tools.Project import Project
from Tools.Project import Project
from Test.ScannerTest import RunAllTests
and any relevant context from other files:
# Path: AOR/AttributeString.py
# class AttributeString(str):
# def __init__(self, s):
# str.__init__(self, s)
# # If this object is initialised with an AttributeString,
# # copy the attributes as well
# if isinstance(s, AttributeString):
# self.lPrefix = s.GetPrefixAttributes()[:]
# self.lPostfix = s.GetPostfixAttributes()[:]
# else:
# self.lPrefix = []
# self.lPostfix = []
# # --------------------------------------------------------------------------
# # Creates a new attribute string which has the same pre- and postfix
# # attributes, but a new string.
# def sCreateCopy(self, s):
# new = AttributeString(s)
# new.SetPrefixAttributes(self.GetPrefixAttributes())
# new.SetPostfixAttributes(self.GetPostfixAttributes())
# return new
# # --------------------------------------------------------------------------
# # Splits an AttributeString into two strings, the first one containing
# # all prefix attributes, the second one all postfix ones. The parameter
# # n specified the number of character to for the first string.
# def tSplitString(self, n):
# s1 = AttributeString(self[0:n])
# s2 = AttributeString(self[n: ])
# s1.SetPrefixAttributes(self.GetPrefixAttributes())
# s2.SetPostfixAttributes(self.GetPostfixAttributes())
# return s1,s2
# # --------------------------------------------------------------------------
# def SetPrefixAttributes(self, l) : self.lPrefix = l[:]
# # --------------------------------------------------------------------------
# def SetPostfixAttributes(self, l) : self.lPostfix = l[:]
# # --------------------------------------------------------------------------
# def AppendPrefix(self, o):
# if type(o)==ListType:
# self.lPrefix.extend(o)
# else:
# self.lPrefix.append(o)
# # --------------------------------------------------------------------------
# def AppendPostfix(self, o):
# if type(o)==ListType:
# self.lPostfix.extend(o)
# else:
# self.lPostfix.append(o)
# # --------------------------------------------------------------------------
# def GetPrefixAttributes(self ): return self.lPrefix
# # --------------------------------------------------------------------------
# def GetPostfixAttributes(self): return self.lPostfix
# # --------------------------------------------------------------------------
# def GetString(self): return str.__str__(self)
# # --------------------------------------------------------------------------
# # We have to overwrite this method so that a new AttributeString is
# # returned!
# def upper(self):
# return self.sCreateCopy(str.__str__(self).upper())
# # --------------------------------------------------------------------------
# # We have to overwrite this method so that a new AttributeString is
# # returned!
# def lower(self):
# return self.sCreateCopy(str.__str__(self).lower())
# # --------------------------------------------------------------------------
# # We have to overwrite this method so that a new AttributeString is
# # returned!
# def capitalize(self):
# return self.sCreateCopy(str.__str__(self).capitalize())
# # --------------------------------------------------------------------------
# #def ToList(self, stylesheet, l): return l.append(str.__str__(self))
# def ToList(self, stylesheet, l): return l.append(self)
# # --------------------------------------------------------------------------
# #def __repr__(self): return str.__str__(self)
# # Debug: output an attribute string with all attributes
# def __repr__(self):
# return "%s-%s-%s"%(`self.lPrefix`,str.__str__(self),`self.lPostfix`)
#
# Path: AOR/SpecialStatements.py
# class Comment(BasicNonStatement):
# def __init__(self, sComment, loc):
# BasicNonStatement.__init__(self, sComment, loc)
#
# class ContMarker(BasicNonStatement):
# def __init__(self, sContMarker, loc):
# BasicNonStatement.__init__(self, sContMarker, loc)
#
# class CommentLine(BasicNonStatement):
# def __init__(self, sComment, loc=None):
# BasicNonStatement.__init__(self, sComment, loc)
. Output only the next line. | sOld=AttributeString(sOld) |
Predict the next line for this snippet: <|code_start|> everything = self.re_all_tokens.findall(line)
spacing = self.re_all_tokens.split(line)
for j in range(len(everything)):
space_len = len(spacing[j])
space_text = lstrip(spacing[j])
if space_text!='':
# we have found something we don't understand!
col_num = col_num+(space_len-len(space_text))
self.AppendToken(line_num, col_num, self.tok_UNKNOWN, space_text)
col_num = col_num+len(space_text)
else:
col_num = col_num+space_len
if everything[j]=='&' and line_type!=self.CONT_SQUOTE \
and line_type!=self.CONT_DQUOTE:
line_type=self.CONT_STATEMENT
# check for macro-token types (string literals, continuation
# markers,statement separators (;) and trailing comments)
col_num = self.Tokenise(line_num,col_num,everything[j])
# if token was an unfinished quote we will need to set line_type
if self.tokens[-1][2]==self.tok_QUOTE_START \
and line_type!=self.CONT_SQUOTE and line_type!=self.CONT_DQUOTE:
if everything[j][0]=='\'':
line_type = self.CONT_SQUOTE
else:
line_type = self.CONT_DQUOTE
if remainder!='':
<|code_end|>
with the help of current file imports:
import re
from string import expandtabs,rstrip,lstrip,strip, upper
from Token import Token, Token2Name
from AOR.AttributeString import AttributeString
from AOR.SpecialStatements import Comment, ContMarker, CommentLine
from Tools.Project import Project
from Tools.Project import Project
from Test.ScannerTest import RunAllTests
and context from other files:
# Path: AOR/AttributeString.py
# class AttributeString(str):
# def __init__(self, s):
# str.__init__(self, s)
# # If this object is initialised with an AttributeString,
# # copy the attributes as well
# if isinstance(s, AttributeString):
# self.lPrefix = s.GetPrefixAttributes()[:]
# self.lPostfix = s.GetPostfixAttributes()[:]
# else:
# self.lPrefix = []
# self.lPostfix = []
# # --------------------------------------------------------------------------
# # Creates a new attribute string which has the same pre- and postfix
# # attributes, but a new string.
# def sCreateCopy(self, s):
# new = AttributeString(s)
# new.SetPrefixAttributes(self.GetPrefixAttributes())
# new.SetPostfixAttributes(self.GetPostfixAttributes())
# return new
# # --------------------------------------------------------------------------
# # Splits an AttributeString into two strings, the first one containing
# # all prefix attributes, the second one all postfix ones. The parameter
# # n specified the number of character to for the first string.
# def tSplitString(self, n):
# s1 = AttributeString(self[0:n])
# s2 = AttributeString(self[n: ])
# s1.SetPrefixAttributes(self.GetPrefixAttributes())
# s2.SetPostfixAttributes(self.GetPostfixAttributes())
# return s1,s2
# # --------------------------------------------------------------------------
# def SetPrefixAttributes(self, l) : self.lPrefix = l[:]
# # --------------------------------------------------------------------------
# def SetPostfixAttributes(self, l) : self.lPostfix = l[:]
# # --------------------------------------------------------------------------
# def AppendPrefix(self, o):
# if type(o)==ListType:
# self.lPrefix.extend(o)
# else:
# self.lPrefix.append(o)
# # --------------------------------------------------------------------------
# def AppendPostfix(self, o):
# if type(o)==ListType:
# self.lPostfix.extend(o)
# else:
# self.lPostfix.append(o)
# # --------------------------------------------------------------------------
# def GetPrefixAttributes(self ): return self.lPrefix
# # --------------------------------------------------------------------------
# def GetPostfixAttributes(self): return self.lPostfix
# # --------------------------------------------------------------------------
# def GetString(self): return str.__str__(self)
# # --------------------------------------------------------------------------
# # We have to overwrite this method so that a new AttributeString is
# # returned!
# def upper(self):
# return self.sCreateCopy(str.__str__(self).upper())
# # --------------------------------------------------------------------------
# # We have to overwrite this method so that a new AttributeString is
# # returned!
# def lower(self):
# return self.sCreateCopy(str.__str__(self).lower())
# # --------------------------------------------------------------------------
# # We have to overwrite this method so that a new AttributeString is
# # returned!
# def capitalize(self):
# return self.sCreateCopy(str.__str__(self).capitalize())
# # --------------------------------------------------------------------------
# #def ToList(self, stylesheet, l): return l.append(str.__str__(self))
# def ToList(self, stylesheet, l): return l.append(self)
# # --------------------------------------------------------------------------
# #def __repr__(self): return str.__str__(self)
# # Debug: output an attribute string with all attributes
# def __repr__(self):
# return "%s-%s-%s"%(`self.lPrefix`,str.__str__(self),`self.lPostfix`)
#
# Path: AOR/SpecialStatements.py
# class Comment(BasicNonStatement):
# def __init__(self, sComment, loc):
# BasicNonStatement.__init__(self, sComment, loc)
#
# class ContMarker(BasicNonStatement):
# def __init__(self, sContMarker, loc):
# BasicNonStatement.__init__(self, sContMarker, loc)
#
# class CommentLine(BasicNonStatement):
# def __init__(self, sComment, loc=None):
# BasicNonStatement.__init__(self, sComment, loc)
, which may contain function names, class names, or code. Output only the next line. | self.AppendAttribute(Comment(remainder, (line_num, col_num))) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.