Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given snippet: <|code_start|># (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Tests the stdtelem sensor functions
"""
class TestStdtelem:
def test_valid_times(self):
times = [
("12:00:00", "12:00:00"),
("11:15:10", "11:15:10"),
("00:00:00", "00:00:00"),
("23:59:59", "23:59:59"),
("12:00", "12:00:00"),
("01:24", "01:24:00"),
("123456", "12:34:56"),
("0124", "01:24:00")
]
for i in times:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from nose.tools import raises, assert_raises
from ...sensors import stdtelem
and context:
# Path: habitat/sensors/stdtelem.py
# def time(data):
# def coordinate(config, data):
# def binary_timestamp(data):
# def binary_bcd_time(data):
which might include code, classes, or functions. Output only the next line. | assert stdtelem.time(i[0]) == i[1] |
Given the following code snippet before the placeholder: <|code_start|>Tests for habitat.utils.immortal_changes
"""
class DummyConsumer(object):
# couchdbkit.Consumer, which Consumer is a subclass of, proxies
# all methods to a backend object (e.g., "sync"). This is the
# easiest way to get hold of those calls in order to test them,
# but it's a bit dependent on couchdbkit's internals, which sucks.
def __init__(self, *args, **kwargs):
pass
def wait(func, **kwargs):
raise NotImplementedError
class DummyTimeModule(object):
# replacing the 'time' item in immortal_changes' namespace is probably
# nicer than modifying the real time module.
def sleep(self, length):
raise NotImplementedError
class TestParser(object):
def setup(self):
self.m = mox.Mox()
<|code_end|>
, predict the next line using imports from the current file:
import mox
import couchdbkit
import time
from nose.tools import assert_raises
from ...utils import immortal_changes
and context including class names, function names, and sometimes code from other files:
# Path: habitat/utils/immortal_changes.py
# class Consumer(couchdbkit.Consumer):
# def wait(self, callback, **kwargs):
# def wrapped_callback(changes):
. Output only the next line. | self.consumer = immortal_changes.Consumer(None, |
Continue the code snippet: <|code_start|># Copyright 2011 (C) Adam Greig
#
# This file is part of habitat.
#
# habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Tests the base sensor functions
"""
class TestBaseSensors:
def test_ascii_ints(self):
<|code_end|>
. Use current file imports:
from nose.tools import assert_raises, raises
from ...sensors import base
and context (classes, functions, or code) from other files:
# Path: habitat/sensors/base.py
# def ascii_int(config, data):
# def ascii_float(config, data):
# def string(data):
# def constant(config, data):
# def binary_b64(data):
. Output only the next line. | assert base.ascii_int({}, "12") == 12 |
Based on the snippet: <|code_start|> for k in required_keys[f['type']]:
if k not in f:
raise ForbiddenError(
"{0} filters must include '{1}'.".format(f['type'], k))
@version(1)
@only_validates("payload_configuration")
def validate(new, old, userctx, secobj):
"""
Validate payload_configuration documents against the schema and then
against specific validation requirements.
* Must match schema
* If editing, must be an administrator
* If there are any sentences with protocol=UKHAS:
* Checksum must be a valid type if provided
* Must have at least one field
* Coordinate fields must have a format
* If any sentences have filters:
* Normal filters must specify a filter path
* Hotfix filters must specify code, a signature and a certificate
* If any transmissions have modulation=RTTY:
* Must also specify shift, encoding, baud, parity and stop.
"""
if old:
must_be_admin(userctx)
global schema
if not schema:
<|code_end|>
, predict the immediate next line with the help of imports:
from couch_named_python import ForbiddenError, version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import read_json_schema, validate_doc, must_be_admin
from .utils import only_validates
and context (classes, functions, sometimes code) from other files:
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# Path: habitat/views/utils.py
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | schema = read_json_schema("payload_configuration.json") |
Next line prediction: <|code_start|> if k not in f:
raise ForbiddenError(
"{0} filters must include '{1}'.".format(f['type'], k))
@version(1)
@only_validates("payload_configuration")
def validate(new, old, userctx, secobj):
"""
Validate payload_configuration documents against the schema and then
against specific validation requirements.
* Must match schema
* If editing, must be an administrator
* If there are any sentences with protocol=UKHAS:
* Checksum must be a valid type if provided
* Must have at least one field
* Coordinate fields must have a format
* If any sentences have filters:
* Normal filters must specify a filter path
* Hotfix filters must specify code, a signature and a certificate
* If any transmissions have modulation=RTTY:
* Must also specify shift, encoding, baud, parity and stop.
"""
if old:
must_be_admin(userctx)
global schema
if not schema:
schema = read_json_schema("payload_configuration.json")
<|code_end|>
. Use current file imports:
(from couch_named_python import ForbiddenError, version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import read_json_schema, validate_doc, must_be_admin
from .utils import only_validates)
and context including class names, function names, or small code snippets from other files:
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# Path: habitat/views/utils.py
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | validate_doc(new, schema) |
Given snippet: <|code_start|> """
required_keys = {
'normal': ['filter'],
'hotfix': ['code', 'signature', 'certificate']}
for k in required_keys[f['type']]:
if k not in f:
raise ForbiddenError(
"{0} filters must include '{1}'.".format(f['type'], k))
@version(1)
@only_validates("payload_configuration")
def validate(new, old, userctx, secobj):
"""
Validate payload_configuration documents against the schema and then
against specific validation requirements.
* Must match schema
* If editing, must be an administrator
* If there are any sentences with protocol=UKHAS:
* Checksum must be a valid type if provided
* Must have at least one field
* Coordinate fields must have a format
* If any sentences have filters:
* Normal filters must specify a filter path
* Hotfix filters must specify code, a signature and a certificate
* If any transmissions have modulation=RTTY:
* Must also specify shift, encoding, baud, parity and stop.
"""
if old:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from couch_named_python import ForbiddenError, version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import read_json_schema, validate_doc, must_be_admin
from .utils import only_validates
and context:
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# Path: habitat/views/utils.py
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
which might include code, classes, or functions. Output only the next line. | must_be_admin(userctx) |
Given the following code snippet before the placeholder: <|code_start|>def _validate_modulation_settings(transmission):
"""
Check that required keys for each modulation type are present.
"""
required_keys = {'RTTY': ['shift', 'encoding', 'baud', 'parity', 'stop'],
'DominoEX': ['speed'],
'Hellschreiber': ['variant']}
modulation = transmission['modulation']
if modulation not in required_keys:
return
for k in required_keys[modulation]:
if k not in transmission:
raise ForbiddenError(
"{0} transmissions must include '{1}'.".format(modulation, k))
def _validate_filter(f):
"""
Check that filters have the required keys according to their type.
"""
required_keys = {
'normal': ['filter'],
'hotfix': ['code', 'signature', 'certificate']}
for k in required_keys[f['type']]:
if k not in f:
raise ForbiddenError(
"{0} filters must include '{1}'.".format(f['type'], k))
@version(1)
<|code_end|>
, predict the next line using imports from the current file:
from couch_named_python import ForbiddenError, version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import read_json_schema, validate_doc, must_be_admin
from .utils import only_validates
and context including class names, function names, and sometimes code from other files:
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# Path: habitat/views/utils.py
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | @only_validates("payload_configuration") |
Predict the next line after this snippet: <|code_start|> """dynamicloader.load():"""
def setup(self):
"""setup a tempdir for messing about in (e.g. reloadablemodulewriter);
also use it to copy example_module.py into two other names, then put
this folder on the path so it can be imported from."""
self.tempdir = tempfile.mkdtemp()
unimp_dir = os.path.join(self.tempdir, 'unimp')
os.mkdir(unimp_dir)
unimp_a = os.path.join(unimp_dir, 'dynamicloadunimp_a.py')
unimp_b = os.path.join(unimp_dir, 'dynamicloadunimp_b.py')
this_path = os.path.abspath(os.path.dirname(__file__))
file_path = os.path.join(this_path, 'example_module.py')
shutil.copyfile(file_path, unimp_a)
shutil.copyfile(file_path, unimp_b)
open(os.path.join(unimp_dir, '__init__.py'), 'w').close()
sys.path.insert(0, self.tempdir)
def teardown(self):
"""clean up the temp folder and path entries"""
sys.path.remove(self.tempdir)
if 'unimp' in sys.modules:
del sys.modules['unimp']
shutil.rmtree(self.tempdir)
def test_load_gets_correct_object(self):
# This tests calls of the format load(MyClass) and
# load("packagea.packageb.aclass")
for i in [example_module.AClass, example_module.BClass,
example_module.AFunction, example_module.BFunction]:
<|code_end|>
using the current file's imports:
import sys
import os
import time
import datetime
import tempfile
import shutil
from nose.tools import raises
from ....utils import dynamicloader
from . import example_module
from .example_module import AClass
and any relevant context from other files:
# Path: habitat/utils/dynamicloader.py
# def load(loadable, force_reload=False):
# def fullname(loadable):
# def isclass(thing):
# def isfunction(thing):
# def isgeneratorfunction(thing):
# def issubclass(thing, the_other_thing):
# def hasattr(thing, attr):
# def isstandardfunction(thing):
# def hasnumargs(thing, num):
# def hasmethod(loadable, name):
# def iscallable(loadable):
# def _expectify(error):
# def decorator(function):
# def new_function(*args, **kwargs):
#
# Path: habitat/tests/test_utils/test_dynamicloader/example_module.py
# class AClass(Parent, Parent2):
# anattr = "asdf"
# afunc = afunction
. Output only the next line. | assert dynamicloader.load(i) == i |
Based on the snippet: <|code_start|>
class TestLoad:
"""dynamicloader.load():"""
def setup(self):
"""setup a tempdir for messing about in (e.g. reloadablemodulewriter);
also use it to copy example_module.py into two other names, then put
this folder on the path so it can be imported from."""
self.tempdir = tempfile.mkdtemp()
unimp_dir = os.path.join(self.tempdir, 'unimp')
os.mkdir(unimp_dir)
unimp_a = os.path.join(unimp_dir, 'dynamicloadunimp_a.py')
unimp_b = os.path.join(unimp_dir, 'dynamicloadunimp_b.py')
this_path = os.path.abspath(os.path.dirname(__file__))
file_path = os.path.join(this_path, 'example_module.py')
shutil.copyfile(file_path, unimp_a)
shutil.copyfile(file_path, unimp_b)
open(os.path.join(unimp_dir, '__init__.py'), 'w').close()
sys.path.insert(0, self.tempdir)
def teardown(self):
"""clean up the temp folder and path entries"""
sys.path.remove(self.tempdir)
if 'unimp' in sys.modules:
del sys.modules['unimp']
shutil.rmtree(self.tempdir)
def test_load_gets_correct_object(self):
# This tests calls of the format load(MyClass) and
# load("packagea.packageb.aclass")
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import os
import time
import datetime
import tempfile
import shutil
from nose.tools import raises
from ....utils import dynamicloader
from . import example_module
from .example_module import AClass
and context (classes, functions, sometimes code) from other files:
# Path: habitat/utils/dynamicloader.py
# def load(loadable, force_reload=False):
# def fullname(loadable):
# def isclass(thing):
# def isfunction(thing):
# def isgeneratorfunction(thing):
# def issubclass(thing, the_other_thing):
# def hasattr(thing, attr):
# def isstandardfunction(thing):
# def hasnumargs(thing, num):
# def hasmethod(loadable, name):
# def iscallable(loadable):
# def _expectify(error):
# def decorator(function):
# def new_function(*args, **kwargs):
#
# Path: habitat/tests/test_utils/test_dynamicloader/example_module.py
# class AClass(Parent, Parent2):
# anattr = "asdf"
# afunc = afunction
. Output only the next line. | for i in [example_module.AClass, example_module.BClass, |
Using the snippet: <|code_start|> {
"protocol": "UKHAS",
"checksum": "crc16-ccitt",
"callsign": "HABITAT",
"fields": [{'sensor': 'bla', 'name': 'whatever'}],
"filters": {
"intermediate": [
{
"type": "normal",
"filter": "a.b.c"
}
],
"post": [
{
"type": "hotfix",
"code": "test",
"signature": "test",
"certificate": "test"
}
]
}
}
]
}
schema = read_json_schema("payload_configuration.json")
class TestPayloadConfiguration(object):
def setup(self):
self.m = mox.Mox()
<|code_end|>
, determine the next line of code. You have imports:
from ...views import payload_configuration
from ...views.utils import read_json_schema
from couch_named_python import ForbiddenError, UnauthorizedError
from copy import deepcopy
from nose.tools import assert_raises
import mox
and context (class names, function names, or code) available:
# Path: habitat/views/payload_configuration.py
# def _validate_ukhas(sentence):
# def _validate_modulation_settings(transmission):
# def _validate_filter(f):
# def validate(new, old, userctx, secobj):
# def name_time_created_map(doc):
# def callsign_time_created_index_map(doc):
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
. Output only the next line. | self.m.StubOutWithMock(payload_configuration, 'validate_doc') |
Here is a snippet: <|code_start|> "parity": "none",
"stop": 2
}
],
"sentences": [
{
"protocol": "UKHAS",
"checksum": "crc16-ccitt",
"callsign": "HABITAT",
"fields": [{'sensor': 'bla', 'name': 'whatever'}],
"filters": {
"intermediate": [
{
"type": "normal",
"filter": "a.b.c"
}
],
"post": [
{
"type": "hotfix",
"code": "test",
"signature": "test",
"certificate": "test"
}
]
}
}
]
}
<|code_end|>
. Write the next line using the current file imports:
from ...views import payload_configuration
from ...views.utils import read_json_schema
from couch_named_python import ForbiddenError, UnauthorizedError
from copy import deepcopy
from nose.tools import assert_raises
import mox
and context from other files:
# Path: habitat/views/payload_configuration.py
# def _validate_ukhas(sentence):
# def _validate_modulation_settings(transmission):
# def _validate_filter(f):
# def validate(new, old, userctx, secobj):
# def name_time_created_map(doc):
# def callsign_time_created_index_map(doc):
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
, which may include functions, classes, or code. Output only the next line. | schema = read_json_schema("payload_configuration.json") |
Based on the snippet: <|code_start|># Copyright 2010 (C) Adam Greig
#
# This file is part of habitat.
#
# habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
class TestChecksums:
def setUp(self):
self.data = "hello, world"
def test_calculates_crc16_ccitt_checksum(self):
<|code_end|>
, predict the immediate next line with the help of imports:
from ...utils import checksums
and context (classes, functions, sometimes code) from other files:
# Path: habitat/utils/checksums.py
# def crc16_ccitt(data):
# def xor(data):
# def fletcher_16(data, modulus=255):
. Output only the next line. | assert checksums.crc16_ccitt(self.data) == "D4C0" |
Given the following code snippet before the placeholder: <|code_start|>"""
Tests Flight document views and functions
"""
doc = {
"_id": "fid",
"type": "flight",
"approved": False,
"start": "2012-07-14T22:54:23+01:00",
"end": "2012-07-15T00:00:00+01:00",
"name": "Test Launch",
"launch": {
"time": "2012-07-14T23:30:00+01:00",
"timezone": "Europe/London",
"location": {
"latitude": 12.345,
"longitude": 54.321
}
}
}
schema = read_json_schema("flight.json")
class TestFlight(object):
def setup(self):
self.m = mox.Mox()
<|code_end|>
, predict the next line using imports from the current file:
from ...views import flight
from ...views.utils import read_json_schema
from couch_named_python import ForbiddenError, UnauthorizedError
from copy import deepcopy
from nose.tools import assert_raises
import mox
and context including class names, function names, and sometimes code from other files:
# Path: habitat/views/flight.py
# def validate(new, old, userctx, secobj):
# def end_start_including_payloads_map(doc):
# def launch_time_including_payloads_map(doc):
# def unapproved_name_including_payloads_map(doc):
# def all_name_map(doc):
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
. Output only the next line. | self.m.StubOutWithMock(flight, 'validate_doc') |
Given the following code snippet before the placeholder: <|code_start|># GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Tests Flight document views and functions
"""
doc = {
"_id": "fid",
"type": "flight",
"approved": False,
"start": "2012-07-14T22:54:23+01:00",
"end": "2012-07-15T00:00:00+01:00",
"name": "Test Launch",
"launch": {
"time": "2012-07-14T23:30:00+01:00",
"timezone": "Europe/London",
"location": {
"latitude": 12.345,
"longitude": 54.321
}
}
}
<|code_end|>
, predict the next line using imports from the current file:
from ...views import flight
from ...views.utils import read_json_schema
from couch_named_python import ForbiddenError, UnauthorizedError
from copy import deepcopy
from nose.tools import assert_raises
import mox
and context including class names, function names, and sometimes code from other files:
# Path: habitat/views/flight.py
# def validate(new, old, userctx, secobj):
# def end_start_including_payloads_map(doc):
# def launch_time_including_payloads_map(doc):
# def unapproved_name_including_payloads_map(doc):
# def all_name_map(doc):
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
. Output only the next line. | schema = read_json_schema("flight.json") |
Next line prediction: <|code_start|>
Filters are small functions that can be run against incoming payload telemetry
during the parse phase, either before attempts at callsign extraction, before
the actual parse (but after the callsign has been identified) or after parsing
is complete.
This module contains commonly used filters which are supplied with habitat, but
end users are free to write their own and have :mod:`habitat.loadable_manager`
load them for use.
"""
__all__ = ["semicolons_to_commas", "numeric_scale", "simple_map",
"invalid_always", "invalid_location_zero", "invalid_gps_lock",
"zero_pad_coordinates", "zero_pad_times"]
def semicolons_to_commas(config, data):
"""
Intermediate filter that converts semicolons to commas.
All semicolons in the string are replaced with colons and the checksum is
updated; ``crc16-ccitt`` is assumed by default but can be overwritten with
``config["checksum"]``.
>>> semicolons_to_commas({}, '$$testpayload,1,2,3;4;5;6*8A24')
'$$testpayload,1,2,3,4,5,6*888F'
"""
data = {"data": data}
checksum = config['checksum'] if 'checksum' in config else 'crc16-ccitt'
<|code_end|>
. Use current file imports:
(from .utils import filtertools
import math)
and context including class names, function names, or small code snippets from other files:
# Path: habitat/utils/filtertools.py
# class UKHASChecksumFixer(object):
# def __init__(self, protocol, data):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def fix(cls, protocol, old_data, new_data):
# def _sum(cls, protocol, data):
# def _sum_length(cls, protocol):
# def _split_str(cls, protocol, data):
. Output only the next line. | with filtertools.UKHASChecksumFixer(checksum, data) as c: |
Predict the next line for this snippet: <|code_start|>#
# This file is part of habitat.
#
# habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Tests Parser document functions
"""
doc = {
"type": "payload_telemetry",
"data": {
"_parsed": "tset tset"
}
}
def test_unparsed_filter():
<|code_end|>
with the help of current file imports:
from ...views import parser
from copy import deepcopy
and context from other files:
# Path: habitat/views/parser.py
# def unparsed_filter(doc, req):
, which may contain function names, class names, or code. Output only the next line. | fil = parser.unparsed_filter |
Given the following code snippet before the placeholder: <|code_start|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Tests listener_telemetry document views and functions.
"""
doc = {
"type": "listener_telemetry",
"time_created": "2012-07-17T21:03:26+01:00",
"time_uploaded": "2012-07-17T21:03:29+01:00",
"data": {
"callsign": "M0RND",
"latitude": 52.2135,
"longitude": 0.0964
}
}
schema = read_json_schema("listener_telemetry.json")
class TestListenerInformation(object):
def setup(self):
self.m = mox.Mox()
<|code_end|>
, predict the next line using imports from the current file:
from ...views import listener_telemetry
from ...views.utils import read_json_schema
from couch_named_python import ForbiddenError, UnauthorizedError
from copy import deepcopy
from nose.tools import assert_raises
import mox
and context including class names, function names, and sometimes code from other files:
# Path: habitat/views/listener_telemetry.py
# def validate(new, old, userctx, secobj):
# def time_created_callsign_map(doc):
# def callsign_time_created_map(doc):
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
. Output only the next line. | self.m.StubOutWithMock(listener_telemetry, 'validate_doc') |
Next line prediction: <|code_start|># the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Tests listener_telemetry document views and functions.
"""
doc = {
"type": "listener_telemetry",
"time_created": "2012-07-17T21:03:26+01:00",
"time_uploaded": "2012-07-17T21:03:29+01:00",
"data": {
"callsign": "M0RND",
"latitude": 52.2135,
"longitude": 0.0964
}
}
<|code_end|>
. Use current file imports:
(from ...views import listener_telemetry
from ...views.utils import read_json_schema
from couch_named_python import ForbiddenError, UnauthorizedError
from copy import deepcopy
from nose.tools import assert_raises
import mox)
and context including class names, function names, or small code snippets from other files:
# Path: habitat/views/listener_telemetry.py
# def validate(new, old, userctx, secobj):
# def time_created_callsign_map(doc):
# def callsign_time_created_map(doc):
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
. Output only the next line. | schema = read_json_schema("listener_telemetry.json") |
Given the following code snippet before the placeholder: <|code_start|> "name": "temperature"
}
],
"filters": {
"post": [
{
"type": "normal",
"filter": "common.numeric_scale",
"source": "latitude",
"scale": 1E-7
},
{
"type": "normal",
"filter": "common.numeric_scale",
"source": "longitude",
"scale": 1E-7
}
]
}
}
For the list of format string specifiers, please see:
`<http://docs.python.org/2/library/struct.html>`_.
The filter ``common.numeric_scale`` may be useful for fixed-point data rather
than sending floats, and the various ``std_telem.binary*`` sensors may be
applicable.
"""
<|code_end|>
, predict the next line using imports from the current file:
import struct
from ..parser import ParserModule, CantExtractCallsign
and context including class names, function names, and sometimes code from other files:
# Path: habitat/parser.py
# class ParserModule(object):
# """
# Base class for real ParserModules to inherit from.
#
# **ParserModules** are classes which turn radio strings into useful data.
# They do not have to inherit from :class:`ParserModule`, but can if they
# want. They must implement :meth:`pre_parse` and :meth:`parse` as described
# below.
# """
# def __init__(self, parser):
# self.parser = parser
# self.loadable_manager = parser.loadable_manager
#
# def pre_parse(self, string):
# """
# Go through *string* and attempt to extract a callsign, returning
# it as a string. If *string* is not parseable by this module, raise
# :py:class:`CantParse`. If *string* might be parseable but no callsign
# could be extracted, raise :py:class:`CantExtractCallsign`.
# """
# raise ValueError()
#
# def parse(self, string, config):
# """
# Go through *string* which has been identified as the format this
# parser module should be able to parse, extracting the data as per
# the information in *config*, which is the ``sentence`` dictionary
# extracted from the payload's configuration document.
# """
# raise ValueError()
#
# class CantExtractCallsign(Exception):
# """
# Parser submodule cannot find a callsign, though in theory might be able
# to parse the sentence if one were provided.
# """
# pass
. Output only the next line. | class SimpleBinaryParser(ParserModule): |
Predict the next line after this snippet: <|code_start|> "source": "latitude",
"scale": 1E-7
},
{
"type": "normal",
"filter": "common.numeric_scale",
"source": "longitude",
"scale": 1E-7
}
]
}
}
For the list of format string specifiers, please see:
`<http://docs.python.org/2/library/struct.html>`_.
The filter ``common.numeric_scale`` may be useful for fixed-point data rather
than sending floats, and the various ``std_telem.binary*`` sensors may be
applicable.
"""
class SimpleBinaryParser(ParserModule):
"""The Simple Binary Parser Module"""
def pre_parse(self, string):
"""
As no callsign is provided by the protocol, assume any string we are
given is potentially parseable binary data.
"""
<|code_end|>
using the current file's imports:
import struct
from ..parser import ParserModule, CantExtractCallsign
and any relevant context from other files:
# Path: habitat/parser.py
# class ParserModule(object):
# """
# Base class for real ParserModules to inherit from.
#
# **ParserModules** are classes which turn radio strings into useful data.
# They do not have to inherit from :class:`ParserModule`, but can if they
# want. They must implement :meth:`pre_parse` and :meth:`parse` as described
# below.
# """
# def __init__(self, parser):
# self.parser = parser
# self.loadable_manager = parser.loadable_manager
#
# def pre_parse(self, string):
# """
# Go through *string* and attempt to extract a callsign, returning
# it as a string. If *string* is not parseable by this module, raise
# :py:class:`CantParse`. If *string* might be parseable but no callsign
# could be extracted, raise :py:class:`CantExtractCallsign`.
# """
# raise ValueError()
#
# def parse(self, string, config):
# """
# Go through *string* which has been identified as the format this
# parser module should be able to parse, extracting the data as per
# the information in *config*, which is the ``sentence`` dictionary
# extracted from the payload's configuration document.
# """
# raise ValueError()
#
# class CantExtractCallsign(Exception):
# """
# Parser submodule cannot find a callsign, though in theory might be able
# to parse the sentence if one were provided.
# """
# pass
. Output only the next line. | raise CantExtractCallsign() |
Next line prediction: <|code_start|>loadable function name, as with sensors.
See :py:mod:`habitat.filters` for some filters included with habitat.
Filters can take one or two arguments, *config*, *data* or just *data*. They
should return a suitably modified form of data, optionally using anything from
*config* which was specified by the user in the flight document.
"""
class LoadableManager:
"""
The main Loadable Manager class.
"""
def __init__(self, config):
"""
On construction, all modules listed in config["loadables"] will be
loaded using :py:meth:`load`.
"""
self.libraries = {}
for loadable in config["loadables"]:
self.load(loadable["class"], loadable["name"])
def load(self, module, shorthand):
"""Loads *module* as a library and assigns it to *shorthand*."""
<|code_end|>
. Use current file imports:
(from .utils import dynamicloader)
and context including class names, function names, or small code snippets from other files:
# Path: habitat/utils/dynamicloader.py
# def load(loadable, force_reload=False):
# def fullname(loadable):
# def isclass(thing):
# def isfunction(thing):
# def isgeneratorfunction(thing):
# def issubclass(thing, the_other_thing):
# def hasattr(thing, attr):
# def isstandardfunction(thing):
# def hasnumargs(thing, num):
# def hasmethod(loadable, name):
# def iscallable(loadable):
# def _expectify(error):
# def decorator(function):
# def new_function(*args, **kwargs):
. Output only the next line. | module = dynamicloader.load(module) |
Next line prediction: <|code_start|># Copyright 2013 (C) Daniel Richman
#
# This file is part of habitat.
#
# habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
class TestOneline:
def test_exception_without_value(self):
try:
raise ValueError
except Exception as e:
<|code_end|>
. Use current file imports:
(from ...utils.quick_traceback import oneline)
and context including class names, function names, or small code snippets from other files:
# Path: habitat/utils/quick_traceback.py
# def oneline(exc_value=None):
# """
# Return a single line describing 'exc_value'
#
# *exc_value* should be either an Exception instance, for example, acquired
# via 'except ValueError as e:'; or None, in which case the exception
# currently being handled is used.
#
# The string returned is the last line of Python's normal traceback;
# something like 'ValueError: some message', with no newline.
# """
# if exc_value is None:
# (exc_type, exc_value, discard_tb) = sys.exc_info()
# else:
# exc_type = type(exc_value)
#
# exc_tb = traceback.format_exception_only(exc_type, exc_value)
# info = exc_tb[-1].strip()
# return info
. Output only the next line. | assert oneline(e) == oneline() == "ValueError" |
Based on the snippet: <|code_start|>#
# This file is part of habitat.
#
# habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for the view function utilities
"""
def test_must_be_admin():
nonadmin = {'roles': ['not an admin']}
noroles = {'noroles': True}
oddroles = {'roles': 12}
admin = {'roles': ['_admin']}
alsoadmin = {'roles': ['lowly', '_admin']}
<|code_end|>
, predict the immediate next line with the help of imports:
import copy
from nose.tools import assert_raises
from couch_named_python import UnauthorizedError, ForbiddenError
from ...views import utils
and context (classes, functions, sometimes code) from other files:
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# def _validate_timestr(data):
# def _validate_base64(data):
# def _validate_timezone(data):
# def _validate_formats(data, schema):
# def validate_doc(data, schema):
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
. Output only the next line. | assert_raises(UnauthorizedError, utils.must_be_admin, nonadmin) |
Using the snippet: <|code_start|> return True
# relaxed float comparison.
# Doubles provide 15-17 bits of precision. Converting to decimal and
# back should not introduce an error larger than 1e-15, really.
tolerance = max(a, b) * 1e-14
return abs(a - b) < tolerance
else:
# string, int, bool, None, ...
return a == b
@version(2)
@only_validates("payload_telemetry")
def validate(new, old, userctx, secobj):
"""
Validate this payload_telemetry document against the schema, then perform
some specific checks:
* Admins may perform any further editing
* If edited
* Only the parser may add new fields to data
* The receivers list may only get new receivers
* If created
* Must have one receiver
* Must have _raw and nothing but _raw in data
"""
global schema
if not schema:
schema = read_json_schema("payload_telemetry.json")
<|code_end|>
, determine the next line of code. You have imports:
import math
import json
import base64
import hashlib
import datetime
import calendar
from couch_named_python import ForbiddenError, UnauthorizedError, version
from strict_rfc3339 import rfc3339_to_timestamp, now_to_rfc3339_utcoffset
from strict_rfc3339 import timestamp_to_rfc3339_utcoffset
from .utils import validate_doc, read_json_schema
from .utils import only_validates
and context (class names, function names, or code) available:
# Path: habitat/views/utils.py
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# Path: habitat/views/utils.py
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | validate_doc(new, schema) |
Here is a snippet: <|code_start|> if a == b:
return True
# relaxed float comparison.
# Doubles provide 15-17 bits of precision. Converting to decimal and
# back should not introduce an error larger than 1e-15, really.
tolerance = max(a, b) * 1e-14
return abs(a - b) < tolerance
else:
# string, int, bool, None, ...
return a == b
@version(2)
@only_validates("payload_telemetry")
def validate(new, old, userctx, secobj):
"""
Validate this payload_telemetry document against the schema, then perform
some specific checks:
* Admins may perform any further editing
* If edited
* Only the parser may add new fields to data
* The receivers list may only get new receivers
* If created
* Must have one receiver
* Must have _raw and nothing but _raw in data
"""
global schema
if not schema:
<|code_end|>
. Write the next line using the current file imports:
import math
import json
import base64
import hashlib
import datetime
import calendar
from couch_named_python import ForbiddenError, UnauthorizedError, version
from strict_rfc3339 import rfc3339_to_timestamp, now_to_rfc3339_utcoffset
from strict_rfc3339 import timestamp_to_rfc3339_utcoffset
from .utils import validate_doc, read_json_schema
from .utils import only_validates
and context from other files:
# Path: habitat/views/utils.py
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# Path: habitat/views/utils.py
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
, which may include functions, classes, or code. Output only the next line. | schema = read_json_schema("payload_telemetry.json") |
Predict the next line for this snippet: <|code_start|> else:
if not isinstance(b, dict):
return False
keys_iter = a
if len(a) != len(b):
return False
return all(_is_equal_relaxed_floats(a[i], b[i]) for i in keys_iter)
elif isinstance(a, float) or isinstance(b, float):
if not (isinstance(a, float) or isinstance(a, int)) or \
not (isinstance(b, float) or isinstance(b, int)):
return False
# fast path
if a == b:
return True
# relaxed float comparison.
# Doubles provide 15-17 bits of precision. Converting to decimal and
# back should not introduce an error larger than 1e-15, really.
tolerance = max(a, b) * 1e-14
return abs(a - b) < tolerance
else:
# string, int, bool, None, ...
return a == b
@version(2)
<|code_end|>
with the help of current file imports:
import math
import json
import base64
import hashlib
import datetime
import calendar
from couch_named_python import ForbiddenError, UnauthorizedError, version
from strict_rfc3339 import rfc3339_to_timestamp, now_to_rfc3339_utcoffset
from strict_rfc3339 import timestamp_to_rfc3339_utcoffset
from .utils import validate_doc, read_json_schema
from .utils import only_validates
and context from other files:
# Path: habitat/views/utils.py
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# Path: habitat/views/utils.py
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
, which may contain function names, class names, or code. Output only the next line. | @only_validates("payload_telemetry") |
Given the code snippet: <|code_start|># habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for the listener_telemetry design document.
Contains schema validation and a view by creation time and callsign.
"""
schema = None
@version(1)
@only_validates("listener_telemetry")
def validate(new, old, userctx, secobj):
"""
Only allow admins to edit/delete and validate the document against the
schema for listener_telemetry documents.
"""
if old:
<|code_end|>
, generate the next line using the imports in this file:
from couch_named_python import version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import must_be_admin, validate_doc
from .utils import read_json_schema, only_validates
and context (functions, classes, or occasionally code) from other files:
# Path: habitat/views/utils.py
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | must_be_admin(userctx) |
Given snippet: <|code_start|># habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for the listener_telemetry design document.
Contains schema validation and a view by creation time and callsign.
"""
schema = None
@version(1)
@only_validates("listener_telemetry")
def validate(new, old, userctx, secobj):
"""
Only allow admins to edit/delete and validate the document against the
schema for listener_telemetry documents.
"""
if old:
must_be_admin(userctx)
global schema
if not schema:
schema = read_json_schema("listener_telemetry.json")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from couch_named_python import version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import must_be_admin, validate_doc
from .utils import read_json_schema, only_validates
and context:
# Path: habitat/views/utils.py
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
which might include code, classes, or functions. Output only the next line. | validate_doc(new, schema) |
Given the code snippet: <|code_start|>#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for the listener_telemetry design document.
Contains schema validation and a view by creation time and callsign.
"""
schema = None
@version(1)
@only_validates("listener_telemetry")
def validate(new, old, userctx, secobj):
"""
Only allow admins to edit/delete and validate the document against the
schema for listener_telemetry documents.
"""
if old:
must_be_admin(userctx)
global schema
if not schema:
<|code_end|>
, generate the next line using the imports in this file:
from couch_named_python import version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import must_be_admin, validate_doc
from .utils import read_json_schema, only_validates
and context (functions, classes, or occasionally code) from other files:
# Path: habitat/views/utils.py
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | schema = read_json_schema("listener_telemetry.json") |
Predict the next line for this snippet: <|code_start|># Copyright 2011, 2012 (C) Adam Greig
#
# This file is part of habitat.
#
# habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for the listener_telemetry design document.
Contains schema validation and a view by creation time and callsign.
"""
schema = None
@version(1)
<|code_end|>
with the help of current file imports:
from couch_named_python import version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import must_be_admin, validate_doc
from .utils import read_json_schema, only_validates
and context from other files:
# Path: habitat/views/utils.py
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
, which may contain function names, class names, or code. Output only the next line. | @only_validates("listener_telemetry") |
Predict the next line for this snippet: <|code_start|># habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
class TestUKHASChecksumFixer:
"""UKHAS Checksum Fixer"""
def test_leaves_bad_data(self):
self.check_fixer("crc16-ccitt", "$$habitat,bad*ABCD\n",
"$$habitat,good*ABCD\n", "$$habitat,bad*ABCD\n")
def test_updates_checksum(self):
self.check_fixer("crc16-ccitt", "$$habitat,good*4918\n",
"$$habitat,other*4918\n", "$$habitat,other*2E0C\n")
def test_updates_xor_checksum(self):
self.check_fixer("xor", "$$habitat,good*4c\n",
"$$habitat,other*4c\n", "$$habitat,other*2B\n")
def test_leaves_when_protocol_is_none(self):
self.check_fixer("none", "$$habitat,boring\n",
"$$habitat,sucky\n", "$$habitat,sucky\n")
def check_fixer(self, protocol, old, new, expect):
data = {"data": old}
<|code_end|>
with the help of current file imports:
from ...utils import filtertools
and context from other files:
# Path: habitat/utils/filtertools.py
# class UKHASChecksumFixer(object):
# def __init__(self, protocol, data):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def fix(cls, protocol, old_data, new_data):
# def _sum(cls, protocol, data):
# def _sum_length(cls, protocol):
# def _split_str(cls, protocol, data):
, which may contain function names, class names, or code. Output only the next line. | with filtertools.UKHASChecksumFixer(protocol, data) as c: |
Using the snippet: <|code_start|> Flight data is passed to :meth:`got_payloads`.
"""
self._do_queue(("payloads", [], {}))
def debug(self, msg):
"""Log a debug message"""
logger.debug(msg)
def log(self, msg):
"""Log a generic string message"""
logger.info(msg)
def warning(self, msg):
"""Alike log, but more important"""
logger.warn(msg)
def saved_id(self, doc_type, doc_id):
"""Called when a document is succesfully saved to couch"""
self.log("Saved {0} doc: {1}".format(doc_type, doc_id))
def initialised(self):
"""Called immiediately after successful Uploader initialisation"""
self.debug("Initialised Uploader")
def reset_done(self):
"""Called immediately after resetting the Uploader object"""
self.debug("Settings reset")
def caught_exception(self):
"""Called when the Uploader throws an exception"""
<|code_end|>
, determine the next line of code. You have imports:
import sys
import copy
import base64
import hashlib
import couchdbkit
import couchdbkit.exceptions
import restkit
import restkit.errors
import threading
import Queue
import time
import json
import logging
import strict_rfc3339
from .utils import quick_traceback
and context (class names, function names, or code) available:
# Path: habitat/utils/quick_traceback.py
# def oneline(exc_value=None):
. Output only the next line. | self.warning("Caught " + quick_traceback.oneline()) |
Based on the snippet: <|code_start|># (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
"""
Functions for the flight design document.
Contains schema validation and views by flight launch time, window end time and
payload name and window end time.
"""
schema = None
@version(1)
@only_validates("flight")
def validate(new, old, userctx, secobj):
"""
Validate this flight document against the schema, then check that
only managers are approving documents and approved documents are only
edited by managers.
"""
global schema
if not schema:
schema = read_json_schema("flight.json")
<|code_end|>
, predict the immediate next line with the help of imports:
from couch_named_python import ForbiddenError, UnauthorizedError, version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import validate_doc, read_json_schema
from .utils import only_validates
and context (classes, functions, sometimes code) from other files:
# Path: habitat/views/utils.py
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# Path: habitat/views/utils.py
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | validate_doc(new, schema) |
Predict the next line after this snippet: <|code_start|># the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
"""
Functions for the flight design document.
Contains schema validation and views by flight launch time, window end time and
payload name and window end time.
"""
schema = None
@version(1)
@only_validates("flight")
def validate(new, old, userctx, secobj):
"""
Validate this flight document against the schema, then check that
only managers are approving documents and approved documents are only
edited by managers.
"""
global schema
if not schema:
<|code_end|>
using the current file's imports:
from couch_named_python import ForbiddenError, UnauthorizedError, version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import validate_doc, read_json_schema
from .utils import only_validates
and any relevant context from other files:
# Path: habitat/views/utils.py
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# Path: habitat/views/utils.py
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | schema = read_json_schema("flight.json") |
Continue the code snippet: <|code_start|># Copyright 2011, 2012 (C) Adam Greig
#
# This file is part of habitat.
#
# habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
"""
Functions for the flight design document.
Contains schema validation and views by flight launch time, window end time and
payload name and window end time.
"""
schema = None
@version(1)
<|code_end|>
. Use current file imports:
from couch_named_python import ForbiddenError, UnauthorizedError, version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import validate_doc, read_json_schema
from .utils import only_validates
and context (classes, functions, or code) from other files:
# Path: habitat/views/utils.py
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# Path: habitat/views/utils.py
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | @only_validates("flight") |
Given the code snippet: <|code_start|># habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for the listener_information design document.
Contains schema validation and a view by creation time and callsign.
"""
schema = None
@version(1)
@only_validates("listener_information")
def validate(new, old, userctx, secobj):
"""
Only allow admins to edit/delete and validate the document against the
schema for listener_information documents.
"""
if old:
<|code_end|>
, generate the next line using the imports in this file:
from couch_named_python import version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import must_be_admin, validate_doc
from .utils import read_json_schema, only_validates
and context (functions, classes, or occasionally code) from other files:
# Path: habitat/views/utils.py
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | must_be_admin(userctx) |
Continue the code snippet: <|code_start|># habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for the listener_information design document.
Contains schema validation and a view by creation time and callsign.
"""
schema = None
@version(1)
@only_validates("listener_information")
def validate(new, old, userctx, secobj):
"""
Only allow admins to edit/delete and validate the document against the
schema for listener_information documents.
"""
if old:
must_be_admin(userctx)
global schema
if not schema:
schema = read_json_schema("listener_information.json")
<|code_end|>
. Use current file imports:
from couch_named_python import version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import must_be_admin, validate_doc
from .utils import read_json_schema, only_validates
and context (classes, functions, or code) from other files:
# Path: habitat/views/utils.py
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | validate_doc(new, schema) |
Given the following code snippet before the placeholder: <|code_start|>#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for the listener_information design document.
Contains schema validation and a view by creation time and callsign.
"""
schema = None
@version(1)
@only_validates("listener_information")
def validate(new, old, userctx, secobj):
"""
Only allow admins to edit/delete and validate the document against the
schema for listener_information documents.
"""
if old:
must_be_admin(userctx)
global schema
if not schema:
<|code_end|>
, predict the next line using imports from the current file:
from couch_named_python import version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import must_be_admin, validate_doc
from .utils import read_json_schema, only_validates
and context including class names, function names, and sometimes code from other files:
# Path: habitat/views/utils.py
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | schema = read_json_schema("listener_information.json") |
Using the snippet: <|code_start|># Copyright 2011, 2012 (C) Adam Greig
#
# This file is part of habitat.
#
# habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for the listener_information design document.
Contains schema validation and a view by creation time and callsign.
"""
schema = None
@version(1)
<|code_end|>
, determine the next line of code. You have imports:
from couch_named_python import version
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import must_be_admin, validate_doc
from .utils import read_json_schema, only_validates
and context (class names, function names, or code) available:
# Path: habitat/views/utils.py
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
#
# def validate_doc(data, schema):
# """Validate *data* against *schema*, raising descriptive errors"""
# v = Validator()
# errors = list(v.iter_errors(data, schema))
# if errors:
# errors = ', '.join((str(error) for error in errors))
# raise ForbiddenError("Validation errors: {0}".format(errors))
# _validate_formats(data, schema)
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
#
# def only_validates(doc_type):
# def decorator(func):
# def wrapped(new, old, userctx, secobj):
# new_type = new.get("type", None)
# new_deleted = new.get("_deleted", False)
# if old:
# old_type = old.get("type", None)
# else:
# old_type = None
#
# # sanity checks
# if old_type is None:
# assert old == {} or old is None
# if new_deleted:
# assert new_type is None
#
# if new_type == doc_type and old_type in [None, doc_type]:
# # new doc, or modified doc of correct type. validate:
# return func(new, old, userctx, secobj)
#
# elif new_deleted and old_type == doc_type:
# # deletion is managed by habitat.validate
# return
#
# elif new_type == doc_type or old_type == doc_type:
# # one or the other types match but not both, and not a new or deleted doc.
# raise ForbiddenError("You cannot change the type of a doc")
#
# else:
# # other type: not our business
# return
#
# # Be a well behaved decorator!
# wrapped.__name__ = func.__name__
# wrapped.__doc__ = func.__doc__
# wrapped.__dict__.update(func.__dict__)
#
# return wrapped
# return decorator
. Output only the next line. | @only_validates("listener_information") |
Predict the next line for this snippet: <|code_start|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Tests listener_information document views and functions.
"""
doc = {
"type": "listener_information",
"time_created": "2012-07-17T21:03:26+01:00",
"time_uploaded": "2012-07-17T21:03:29+01:00",
"data": {
"callsign": "M0RND",
"antenna": "2m/70cm Colinear",
"radio": "ICOM IC-7000"
}
}
schema = read_json_schema("listener_information.json")
class TestListenerInformation(object):
def setup(self):
self.m = mox.Mox()
<|code_end|>
with the help of current file imports:
from ...views import listener_information
from ...views.utils import read_json_schema
from couch_named_python import ForbiddenError, UnauthorizedError
from copy import deepcopy
from nose.tools import assert_raises
import mox
and context from other files:
# Path: habitat/views/listener_information.py
# def validate(new, old, userctx, secobj):
# def time_created_callsign_map(doc):
# def callsign_time_created_map(doc):
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
, which may contain function names, class names, or code. Output only the next line. | self.m.StubOutWithMock(listener_information, 'validate_doc') |
Here is a snippet: <|code_start|># the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Tests listener_information document views and functions.
"""
doc = {
"type": "listener_information",
"time_created": "2012-07-17T21:03:26+01:00",
"time_uploaded": "2012-07-17T21:03:29+01:00",
"data": {
"callsign": "M0RND",
"antenna": "2m/70cm Colinear",
"radio": "ICOM IC-7000"
}
}
<|code_end|>
. Write the next line using the current file imports:
from ...views import listener_information
from ...views.utils import read_json_schema
from couch_named_python import ForbiddenError, UnauthorizedError
from copy import deepcopy
from nose.tools import assert_raises
import mox
and context from other files:
# Path: habitat/views/listener_information.py
# def validate(new, old, userctx, secobj):
# def time_created_callsign_map(doc):
# def callsign_time_created_map(doc):
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
, which may include functions, classes, or code. Output only the next line. | schema = read_json_schema("listener_information.json") |
Given the code snippet: <|code_start|># but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Functions for the core habitat design document.
Contains a validation function that applies to every document.
"""
allowed_types = set(
("flight", "listener_information", "listener_telemetry",
"payload_telemetry", "payload_configuration"))
@version(1)
def validate(new, old, userctx, secobj):
"""
Core habitat validation function.
* Prevent deletion by anyone except administrators.
* Prevent documents without a type.
* Prevent documents whose type is invalid.
* Prevent changing document type.
"""
if '_deleted' in new:
<|code_end|>
, generate the next line using the imports in this file:
from couch_named_python import ForbiddenError, version
from .utils import must_be_admin
and context (functions, classes, or occasionally code) from other files:
# Path: habitat/views/utils.py
# def must_be_admin(user,
# msg="Only server administrators may edit this document."):
# """Raise UnauthorizedError if the user is not an admin"""
# try:
# if '_admin' not in user['roles']:
# raise UnauthorizedError(msg)
# except (KeyError, TypeError):
# raise UnauthorizedError(msg)
. Output only the next line. | must_be_admin(userctx, "Only administrators may delete documents.") |
Next line prediction: <|code_start|># You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Tests payload_telemetry document views and functions.
"""
doc = {
"_id": "54e9ac9ec19a57d6828a737525e3cc792743a16344b1c69dfe1562620b0fac9b",
"type": "payload_telemetry",
"data": {
"_raw": "ABCDEF=="
},
"receivers": {
"M0RND": {
"time_created": "2012-07-17T21:03:26+01:00",
"time_uploaded": "2012-07-17T21:03:29+01:00"
}
}
}
schema = read_json_schema("payload_telemetry.json")
class TestPayloadTelemetry(object):
def setup(self):
self.m = mox.Mox()
<|code_end|>
. Use current file imports:
(from ...views import payload_telemetry
from ...views.utils import read_json_schema
from couch_named_python import ForbiddenError, UnauthorizedError
from copy import deepcopy
from nose.tools import assert_raises
import json
import mox)
and context including class names, function names, or small code snippets from other files:
# Path: habitat/views/payload_telemetry.py
# def _check_only_new(new, old):
# def _is_equal_relaxed_floats(a, b):
# def validate(new, old, userctx, secobj):
# def _estimate_time_received(receivers):
# def flight_payload_time_map(doc):
# def payload_time_map(doc):
# def time_map(doc):
# def add_listener_update(doc, req):
# def http_post_update(doc, req):
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
. Output only the next line. | self.m.StubOutWithMock(payload_telemetry, 'validate_doc') |
Given the code snippet: <|code_start|># habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Tests payload_telemetry document views and functions.
"""
doc = {
"_id": "54e9ac9ec19a57d6828a737525e3cc792743a16344b1c69dfe1562620b0fac9b",
"type": "payload_telemetry",
"data": {
"_raw": "ABCDEF=="
},
"receivers": {
"M0RND": {
"time_created": "2012-07-17T21:03:26+01:00",
"time_uploaded": "2012-07-17T21:03:29+01:00"
}
}
}
<|code_end|>
, generate the next line using the imports in this file:
from ...views import payload_telemetry
from ...views.utils import read_json_schema
from couch_named_python import ForbiddenError, UnauthorizedError
from copy import deepcopy
from nose.tools import assert_raises
import json
import mox
and context (functions, classes, or occasionally code) from other files:
# Path: habitat/views/payload_telemetry.py
# def _check_only_new(new, old):
# def _is_equal_relaxed_floats(a, b):
# def validate(new, old, userctx, secobj):
# def _estimate_time_received(receivers):
# def flight_payload_time_map(doc):
# def payload_time_map(doc):
# def time_map(doc):
# def add_listener_update(doc, req):
# def http_post_update(doc, req):
#
# Path: habitat/views/utils.py
# def read_json_schema(schemaname):
# mypath = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.join(mypath, "..", "..", "couchdb", "schemas", schemaname)
# with open(path) as f:
# schema = json.load(f)
# return schema
. Output only the next line. | schema = read_json_schema("payload_telemetry.json") |
Given snippet: <|code_start|>
"""
Tests for habitat.utils.startup
"""
_example_yaml = \
"""d: 4
blah: moo
o:
listy:
- 2
- cow"""
_example_parsed = {"d": 4, "blah": "moo", "o": {"listy": [2, "cow"]}}
class TestLoadConfig(object):
def setup(self):
self.config = tempfile.NamedTemporaryFile()
self.old_argv = sys.argv
sys.argv = ["bin/something", self.config.name]
def teardown(self):
sys.argv = self.old_argv
self.config.close()
def test_works(self):
self.config.write(_example_yaml)
self.config.flush()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import tempfile
import mox
import smtplib
import copy
import logging
import os
import os.path
from ...utils import startup
and context:
# Path: habitat/utils/startup.py
# def load_config():
# def _get_logging_level(value):
# def emit(self, record):
# def setup_logging(config, daemon_name):
# def main(main_class):
# class null_logger(logging.Handler):
which might include code, classes, or functions. Output only the next line. | assert startup.load_config() == _example_parsed |
Continue the code snippet: <|code_start|><data>,...,<data>*<checksum>
The number of custom data fields and their types are configurable.
Data fields are typically human readable (or at the least ASCII) readings
of sensors or other system information. See the :py:mod:`habitat.sensors`
module for more information on supported formats.
Checksums work on the message content between the ``$$`` and the ``*``,
non-inclusive, and are given as hexadecimal (upper or lower case) after
the ``*`` in the message.
Supported checksums are CRC16-CCITT with polynomial 0x1021 and start 0xFFFF,
Fletcher-16 and an 8bit XOR over the characters. The corresponding values
for configuration are ``crc16-ccitt``, ``fletcher-16`` and ``xor``.
For compatibility, a varient of Fletcher16 using modulus 256 is also provided,
as ``fletcher-16-256``. Don't use it for new payloads.
``none`` may also be specified as a checksum type if no checksum is used; in
this case the message should not include a terminating ``*``.
.. seealso:: :ref:`ukhas-parser-config`
"""
checksum_algorithms = [
"crc16-ccitt", "xor", "fletcher-16", "fletcher-16-256", "none"]
<|code_end|>
. Use current file imports:
import re
from ..parser import ParserModule, CantParse
from ..utils import checksums
and context (classes, functions, or code) from other files:
# Path: habitat/parser.py
# class ParserModule(object):
# """
# Base class for real ParserModules to inherit from.
#
# **ParserModules** are classes which turn radio strings into useful data.
# They do not have to inherit from :class:`ParserModule`, but can if they
# want. They must implement :meth:`pre_parse` and :meth:`parse` as described
# below.
# """
# def __init__(self, parser):
# self.parser = parser
# self.loadable_manager = parser.loadable_manager
#
# def pre_parse(self, string):
# """
# Go though *string* and attempt to extract a callsign, returning
# it as a string. If *string* is not parseable by this module, raise
# :py:class:`CantParse`. If *string* might be parseable but no callsign
# could be extracted, raise :py:class:`CantExtractCallsign`.
# """
# raise ValueError()
#
# def parse(self, string, config):
# """
# Go through *string* which has been identified as the format this
# parser module should be able to parse, extracting the data as per
# the information in *config*, which is the ``sentence`` dictionary
# extracted from the payload's configuration document.
# """
# raise ValueError()
#
# class CantParse(Exception):
# """Parser module cannot parse the given sentence."""
# pass
#
# Path: habitat/utils/checksums.py
# def crc16_ccitt(data):
# def xor(data):
# def fletcher_16(data, modulus=255):
. Output only the next line. | class UKHASParser(ParserModule): |
Continue the code snippet: <|code_start|> :py:exc:`ValueError <exceptions.ValueError>` is raised in invalid
inputs.
"""
name = config["name"]
sensor = 'sensors.' + config["sensor"]
try:
data = self.loadable_manager.run(sensor, config, field)
except (ValueError, KeyError) as e:
# Annotate error with the field name.
error_type = type(e)
raise error_type("(field {f}): {e!s}".format(f=name, e=e))
return name, data
def pre_parse(self, string):
"""
Check if *string* is parsable by this module.
If it is, :meth:`pre_parse` extracts the payload
name and return it. Otherwise, a
:exc:`ValueError <exceptions.ValueError>` is raised.
"""
try:
string, checksum = self._split_basic_format(string)
fields = self._extract_fields(string)
self._verify_callsign(fields[0])
except (ValueError, KeyError):
<|code_end|>
. Use current file imports:
import re
from ..parser import ParserModule, CantParse
from ..utils import checksums
and context (classes, functions, or code) from other files:
# Path: habitat/parser.py
# class ParserModule(object):
# """
# Base class for real ParserModules to inherit from.
#
# **ParserModules** are classes which turn radio strings into useful data.
# They do not have to inherit from :class:`ParserModule`, but can if they
# want. They must implement :meth:`pre_parse` and :meth:`parse` as described
# below.
# """
# def __init__(self, parser):
# self.parser = parser
# self.loadable_manager = parser.loadable_manager
#
# def pre_parse(self, string):
# """
# Go though *string* and attempt to extract a callsign, returning
# it as a string. If *string* is not parseable by this module, raise
# :py:class:`CantParse`. If *string* might be parseable but no callsign
# could be extracted, raise :py:class:`CantExtractCallsign`.
# """
# raise ValueError()
#
# def parse(self, string, config):
# """
# Go through *string* which has been identified as the format this
# parser module should be able to parse, extracting the data as per
# the information in *config*, which is the ``sentence`` dictionary
# extracted from the payload's configuration document.
# """
# raise ValueError()
#
# class CantParse(Exception):
# """Parser module cannot parse the given sentence."""
# pass
#
# Path: habitat/utils/checksums.py
# def crc16_ccitt(data):
# def xor(data):
# def fletcher_16(data, modulus=255):
. Output only the next line. | raise CantParse |
Given snippet: <|code_start|> raise ValueError(
"Configuration document is not for UKHAS parser.")
if config["checksum"] not in checksum_algorithms:
raise ValueError("Specified checksum algorithm is invalid.")
if len(config["fields"]) < 1:
raise ValueError("No fields are defined.")
for field in config["fields"]:
field["name"]
field["sensor"]
if field["name"][0] == "_":
raise ValueError("Field name starts with an underscore.")
field_names.append(field["name"])
if len(field_names) != len(set(field_names)):
raise ValueError("Duplicate field name.")
except (KeyError, TypeError):
raise ValueError("Invalid configuration document.")
def _verify_checksum(self, string, checksum, algorithm):
"""
Verifies *string*'s checksum.
Computes the checksum defined by *algorithm* over *string*
and compares it to that given in *checksum*.
Raises :py:exc:`ValueError <exceptions.ValueError>`
on discrepancy.
"""
if checksum == None and algorithm != "none":
raise ValueError("No checksum found but config specifies one.")
elif algorithm == "crc16-ccitt":
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from ..parser import ParserModule, CantParse
from ..utils import checksums
and context:
# Path: habitat/parser.py
# class ParserModule(object):
# """
# Base class for real ParserModules to inherit from.
#
# **ParserModules** are classes which turn radio strings into useful data.
# They do not have to inherit from :class:`ParserModule`, but can if they
# want. They must implement :meth:`pre_parse` and :meth:`parse` as described
# below.
# """
# def __init__(self, parser):
# self.parser = parser
# self.loadable_manager = parser.loadable_manager
#
# def pre_parse(self, string):
# """
# Go though *string* and attempt to extract a callsign, returning
# it as a string. If *string* is not parseable by this module, raise
# :py:class:`CantParse`. If *string* might be parseable but no callsign
# could be extracted, raise :py:class:`CantExtractCallsign`.
# """
# raise ValueError()
#
# def parse(self, string, config):
# """
# Go through *string* which has been identified as the format this
# parser module should be able to parse, extracting the data as per
# the information in *config*, which is the ``sentence`` dictionary
# extracted from the payload's configuration document.
# """
# raise ValueError()
#
# class CantParse(Exception):
# """Parser module cannot parse the given sentence."""
# pass
#
# Path: habitat/utils/checksums.py
# def crc16_ccitt(data):
# def xor(data):
# def fletcher_16(data, modulus=255):
which might include code, classes, or functions. Output only the next line. | if checksums.crc16_ccitt(string) != checksum.upper(): |
Continue the code snippet: <|code_start|># it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Test the simple binary protocol parser.
"""
# Mocking the LoadableManager is a heck of a lot of effort. Not worth it.
# Provide the sensor functions to the parser
fake_sensors_config = {
"loadables": [
{"name": "sensors.base", "class": "habitat.sensors.base"},
{"name": "sensors.stdtelem", "class": "habitat.sensors.stdtelem"}
]
}
class FakeParser:
def __init__(self):
<|code_end|>
. Use current file imports:
import struct
from nose.tools import assert_raises
from copy import deepcopy
from ...loadable_manager import LoadableManager
from ...parser_modules.simple_binary_parser import SimpleBinaryParser
from ...parser import CantExtractCallsign
and context (classes, functions, or code) from other files:
# Path: habitat/loadable_manager.py
# class LoadableManager:
# """
# The main Loadable Manager class.
# """
#
# def __init__(self, config):
# """
# On construction, all modules listed in config["loadables"] will be
# loaded using :py:meth:`load`.
# """
#
# self.libraries = {}
#
# for loadable in config["loadables"]:
# self.load(loadable["class"], loadable["name"])
#
# def load(self, module, shorthand):
# """Loads *module* as a library and assigns it to *shorthand*."""
#
# module = dynamicloader.load(module)
# self.libraries[shorthand] = module
#
# def run(self, name, config, data):
# """
# Run the loadable specified by *name*, giving it *config* and *data*.
#
# If the loadable only takes one argument, it will only be given *data*.
# *config* is ignored in this case.
#
# Returns the result of running the loadable.
# """
#
# name_parts = name.split('.')
# library_name = '.'.join(name_parts[0:-1])
# function_name = name_parts[-1]
#
# if library_name not in self.libraries:
# raise ValueError("Invalid library name: " + library_name)
#
# library = self.libraries[library_name]
#
# if function_name not in library.__all__:
# raise ValueError("Invalid function name: " + function_name)
#
# func = getattr(library, function_name)
#
# if dynamicloader.hasnumargs(func, 1):
# return func(data)
# else:
# return func(config, data)
#
# _repr_format = "<habitat.LoadableManager: {l} libraries loaded>"
#
# def __repr__(self):
# return self._repr_format.format(l=len(self.libraries))
#
# Path: habitat/parser_modules/simple_binary_parser.py
# class SimpleBinaryParser(ParserModule):
# """The Simple Binary Parser Module"""
#
# def pre_parse(self, string):
# """
# As no callsign is provided by the protocol, assume any string we are
# given is potentially parseable binary data.
# """
# raise CantExtractCallsign()
#
# def _verify_config(self, config):
# """
# Checks that the provided *config* dict is appropriate for this parser.
# """
# if config["protocol"] != "simple_binary":
# raise ValueError("Configuration document has wrong protocol.")
# if "fields" not in config:
# raise ValueError("Config document missing required key `fields'")
# field_names = []
# for idx, field in enumerate(config["fields"]):
# if "name" not in field or "format" not in field:
# raise ValueError("Field {0} config missing name or format."
# .format(idx))
# if field["name"][0] == "_":
# raise ValueError("Field {0} name starts with an underscore."
# .format(idx))
# field_names.append(field["name"])
# if len(field_names) != len(set(field_names)):
# raise ValueError("Duplicate field name.")
#
# def _parse_field(self, field, config):
# """
# Pass off the data from unpacking the binary to the sensor given in
# the configuration for actual parsing.
# """
# name = config["name"]
# if 'sensor' not in config:
# return name, field
# sensor = 'sensors.' + config["sensor"]
# try:
# data = self.loadable_manager.run(sensor, config, field)
# except (ValueError, KeyError) as e:
# error_type = type(e)
# raise error_type("(field {f}): {e!s}".format(f=name, e=e))
#
# return name, data
#
# def parse(self, data, config):
# """
# Parse *string*, extracting processed field data.
#
# *config* is the relevant sentence dictionary from the payload's
# configuration document, containing the required binary format and field
# details.
#
# Returns a dictionary of the parsed data.
#
# ValueError is raised on invalid messages.
# """
# self._verify_config(config)
# prefix = [config["format_prefix"]] if "format_prefix" in config else []
# fmtstring = ''.join(prefix + [f["format"] for f in config["fields"]])
#
# try:
# data = struct.unpack(str(fmtstring), data)
# except struct.error as exp:
# raise ValueError("Could not unpack binary data: {0}".format(exp))
#
# if len(data) != len(config["fields"]):
# raise ValueError(
# "Number of extracted fields does not match config"
# " (got {0}, expected {1}).".format(
# len(data), len(config["fields"])))
#
# output = {}
# for field, field_config in zip(data, config["fields"]):
# name, data = self._parse_field(field, field_config)
# output[name] = data
#
# return output
#
# Path: habitat/parser.py
# class CantExtractCallsign(Exception):
# """
# Parser submodule cannot find a callsign, though in theory might be able
# to parse the sentence if one were provided.
# """
# pass
. Output only the next line. | self.loadable_manager = LoadableManager(fake_sensors_config) |
Here is a snippet: <|code_start|>}
class FakeParser:
def __init__(self):
self.loadable_manager = LoadableManager(fake_sensors_config)
# A 'standard' config. Other configs can copy this and change parts.
base_config = {
"protocol": "simple_binary",
"fields": [
{
"format": "i",
"name": "sentence_id"
}, {
"format": "d",
"name": "latitude"
}, {
"format": "d",
"name": "longitude"
}, {
"format": "I",
"name": "altitude"
}
]
}
class TestSimpleBinaryParser:
def setup(self):
<|code_end|>
. Write the next line using the current file imports:
import struct
from nose.tools import assert_raises
from copy import deepcopy
from ...loadable_manager import LoadableManager
from ...parser_modules.simple_binary_parser import SimpleBinaryParser
from ...parser import CantExtractCallsign
and context from other files:
# Path: habitat/loadable_manager.py
# class LoadableManager:
# """
# The main Loadable Manager class.
# """
#
# def __init__(self, config):
# """
# On construction, all modules listed in config["loadables"] will be
# loaded using :py:meth:`load`.
# """
#
# self.libraries = {}
#
# for loadable in config["loadables"]:
# self.load(loadable["class"], loadable["name"])
#
# def load(self, module, shorthand):
# """Loads *module* as a library and assigns it to *shorthand*."""
#
# module = dynamicloader.load(module)
# self.libraries[shorthand] = module
#
# def run(self, name, config, data):
# """
# Run the loadable specified by *name*, giving it *config* and *data*.
#
# If the loadable only takes one argument, it will only be given *data*.
# *config* is ignored in this case.
#
# Returns the result of running the loadable.
# """
#
# name_parts = name.split('.')
# library_name = '.'.join(name_parts[0:-1])
# function_name = name_parts[-1]
#
# if library_name not in self.libraries:
# raise ValueError("Invalid library name: " + library_name)
#
# library = self.libraries[library_name]
#
# if function_name not in library.__all__:
# raise ValueError("Invalid function name: " + function_name)
#
# func = getattr(library, function_name)
#
# if dynamicloader.hasnumargs(func, 1):
# return func(data)
# else:
# return func(config, data)
#
# _repr_format = "<habitat.LoadableManager: {l} libraries loaded>"
#
# def __repr__(self):
# return self._repr_format.format(l=len(self.libraries))
#
# Path: habitat/parser_modules/simple_binary_parser.py
# class SimpleBinaryParser(ParserModule):
# """The Simple Binary Parser Module"""
#
# def pre_parse(self, string):
# """
# As no callsign is provided by the protocol, assume any string we are
# given is potentially parseable binary data.
# """
# raise CantExtractCallsign()
#
# def _verify_config(self, config):
# """
# Checks that the provided *config* dict is appropriate for this parser.
# """
# if config["protocol"] != "simple_binary":
# raise ValueError("Configuration document has wrong protocol.")
# if "fields" not in config:
# raise ValueError("Config document missing required key `fields'")
# field_names = []
# for idx, field in enumerate(config["fields"]):
# if "name" not in field or "format" not in field:
# raise ValueError("Field {0} config missing name or format."
# .format(idx))
# if field["name"][0] == "_":
# raise ValueError("Field {0} name starts with an underscore."
# .format(idx))
# field_names.append(field["name"])
# if len(field_names) != len(set(field_names)):
# raise ValueError("Duplicate field name.")
#
# def _parse_field(self, field, config):
# """
# Pass off the data from unpacking the binary to the sensor given in
# the configuration for actual parsing.
# """
# name = config["name"]
# if 'sensor' not in config:
# return name, field
# sensor = 'sensors.' + config["sensor"]
# try:
# data = self.loadable_manager.run(sensor, config, field)
# except (ValueError, KeyError) as e:
# error_type = type(e)
# raise error_type("(field {f}): {e!s}".format(f=name, e=e))
#
# return name, data
#
# def parse(self, data, config):
# """
# Parse *string*, extracting processed field data.
#
# *config* is the relevant sentence dictionary from the payload's
# configuration document, containing the required binary format and field
# details.
#
# Returns a dictionary of the parsed data.
#
# ValueError is raised on invalid messages.
# """
# self._verify_config(config)
# prefix = [config["format_prefix"]] if "format_prefix" in config else []
# fmtstring = ''.join(prefix + [f["format"] for f in config["fields"]])
#
# try:
# data = struct.unpack(str(fmtstring), data)
# except struct.error as exp:
# raise ValueError("Could not unpack binary data: {0}".format(exp))
#
# if len(data) != len(config["fields"]):
# raise ValueError(
# "Number of extracted fields does not match config"
# " (got {0}, expected {1}).".format(
# len(data), len(config["fields"])))
#
# output = {}
# for field, field_config in zip(data, config["fields"]):
# name, data = self._parse_field(field, field_config)
# output[name] = data
#
# return output
#
# Path: habitat/parser.py
# class CantExtractCallsign(Exception):
# """
# Parser submodule cannot find a callsign, though in theory might be able
# to parse the sentence if one were provided.
# """
# pass
, which may include functions, classes, or code. Output only the next line. | self.p = SimpleBinaryParser(FakeParser()) |
Using the snippet: <|code_start|>class FakeParser:
def __init__(self):
self.loadable_manager = LoadableManager(fake_sensors_config)
# A 'standard' config. Other configs can copy this and change parts.
base_config = {
"protocol": "simple_binary",
"fields": [
{
"format": "i",
"name": "sentence_id"
}, {
"format": "d",
"name": "latitude"
}, {
"format": "d",
"name": "longitude"
}, {
"format": "I",
"name": "altitude"
}
]
}
class TestSimpleBinaryParser:
def setup(self):
self.p = SimpleBinaryParser(FakeParser())
def test_pre_parse_just_raises_cantextractcallsign(self):
<|code_end|>
, determine the next line of code. You have imports:
import struct
from nose.tools import assert_raises
from copy import deepcopy
from ...loadable_manager import LoadableManager
from ...parser_modules.simple_binary_parser import SimpleBinaryParser
from ...parser import CantExtractCallsign
and context (class names, function names, or code) available:
# Path: habitat/loadable_manager.py
# class LoadableManager:
# """
# The main Loadable Manager class.
# """
#
# def __init__(self, config):
# """
# On construction, all modules listed in config["loadables"] will be
# loaded using :py:meth:`load`.
# """
#
# self.libraries = {}
#
# for loadable in config["loadables"]:
# self.load(loadable["class"], loadable["name"])
#
# def load(self, module, shorthand):
# """Loads *module* as a library and assigns it to *shorthand*."""
#
# module = dynamicloader.load(module)
# self.libraries[shorthand] = module
#
# def run(self, name, config, data):
# """
# Run the loadable specified by *name*, giving it *config* and *data*.
#
# If the loadable only takes one argument, it will only be given *data*.
# *config* is ignored in this case.
#
# Returns the result of running the loadable.
# """
#
# name_parts = name.split('.')
# library_name = '.'.join(name_parts[0:-1])
# function_name = name_parts[-1]
#
# if library_name not in self.libraries:
# raise ValueError("Invalid library name: " + library_name)
#
# library = self.libraries[library_name]
#
# if function_name not in library.__all__:
# raise ValueError("Invalid function name: " + function_name)
#
# func = getattr(library, function_name)
#
# if dynamicloader.hasnumargs(func, 1):
# return func(data)
# else:
# return func(config, data)
#
# _repr_format = "<habitat.LoadableManager: {l} libraries loaded>"
#
# def __repr__(self):
# return self._repr_format.format(l=len(self.libraries))
#
# Path: habitat/parser_modules/simple_binary_parser.py
# class SimpleBinaryParser(ParserModule):
# """The Simple Binary Parser Module"""
#
# def pre_parse(self, string):
# """
# As no callsign is provided by the protocol, assume any string we are
# given is potentially parseable binary data.
# """
# raise CantExtractCallsign()
#
# def _verify_config(self, config):
# """
# Checks that the provided *config* dict is appropriate for this parser.
# """
# if config["protocol"] != "simple_binary":
# raise ValueError("Configuration document has wrong protocol.")
# if "fields" not in config:
# raise ValueError("Config document missing required key `fields'")
# field_names = []
# for idx, field in enumerate(config["fields"]):
# if "name" not in field or "format" not in field:
# raise ValueError("Field {0} config missing name or format."
# .format(idx))
# if field["name"][0] == "_":
# raise ValueError("Field {0} name starts with an underscore."
# .format(idx))
# field_names.append(field["name"])
# if len(field_names) != len(set(field_names)):
# raise ValueError("Duplicate field name.")
#
# def _parse_field(self, field, config):
# """
# Pass off the data from unpacking the binary to the sensor given in
# the configuration for actual parsing.
# """
# name = config["name"]
# if 'sensor' not in config:
# return name, field
# sensor = 'sensors.' + config["sensor"]
# try:
# data = self.loadable_manager.run(sensor, config, field)
# except (ValueError, KeyError) as e:
# error_type = type(e)
# raise error_type("(field {f}): {e!s}".format(f=name, e=e))
#
# return name, data
#
# def parse(self, data, config):
# """
# Parse *string*, extracting processed field data.
#
# *config* is the relevant sentence dictionary from the payload's
# configuration document, containing the required binary format and field
# details.
#
# Returns a dictionary of the parsed data.
#
# ValueError is raised on invalid messages.
# """
# self._verify_config(config)
# prefix = [config["format_prefix"]] if "format_prefix" in config else []
# fmtstring = ''.join(prefix + [f["format"] for f in config["fields"]])
#
# try:
# data = struct.unpack(str(fmtstring), data)
# except struct.error as exp:
# raise ValueError("Could not unpack binary data: {0}".format(exp))
#
# if len(data) != len(config["fields"]):
# raise ValueError(
# "Number of extracted fields does not match config"
# " (got {0}, expected {1}).".format(
# len(data), len(config["fields"])))
#
# output = {}
# for field, field_config in zip(data, config["fields"]):
# name, data = self._parse_field(field, field_config)
# output[name] = data
#
# return output
#
# Path: habitat/parser.py
# class CantExtractCallsign(Exception):
# """
# Parser submodule cannot find a callsign, though in theory might be able
# to parse the sentence if one were provided.
# """
# pass
. Output only the next line. | assert_raises(CantExtractCallsign, self.p.pre_parse, "test") |
Here is a snippet: <|code_start|> synth_r_mag = np.array(tab["GAIA_PHOT_G_MEAN_MAG"])
for order,c in enumerate(coeffs):
synth_r_mag += c * color**order
synth_r_flux = np.power(10, (22.5-synth_r_mag)/2.5)
# -99 is the FLUX_R placeholder that GFA_TARGETS uses
synth_r_flux[badcolor] = -99.0
return synth_r_flux
def get_gfa_targets(tiles, gfafile, faintlim=99):
"""Returns a list of tables of GFA targets on each tile
Args:
tiles: table with columns TILEID, RA, DEC; or Tiles object
targets: table of targets with columsn RA, DEC
Returns:
list of tables (one row per input tile) with the subset of targets
that are covered by GFAs on each tile. Each table has additional
`GFA_LOC` column indicating 0-9 which GFA was covered.
Note that a given target could be covered by GFAs on more than one tile.
Output is a list of astropy Tables; inputs can be numpy structured arrays
or astropy Tables
"""
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import fitsio
import desimodel.focalplane.gfa
import astropy.units as u
from astropy.table import Table
from desitarget.gaiamatch import gaia_psflike
from ._internal import Tiles
from .utils import Logger, Timer
from astropy.coordinates import SkyCoord
and context from other files:
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
, which may include functions, classes, or code. Output only the next line. | log = Logger.get() |
Given the code snippet: <|code_start|>
for order,c in enumerate(coeffs):
synth_r_mag += c * color**order
synth_r_flux = np.power(10, (22.5-synth_r_mag)/2.5)
# -99 is the FLUX_R placeholder that GFA_TARGETS uses
synth_r_flux[badcolor] = -99.0
return synth_r_flux
def get_gfa_targets(tiles, gfafile, faintlim=99):
"""Returns a list of tables of GFA targets on each tile
Args:
tiles: table with columns TILEID, RA, DEC; or Tiles object
targets: table of targets with columsn RA, DEC
Returns:
list of tables (one row per input tile) with the subset of targets
that are covered by GFAs on each tile. Each table has additional
`GFA_LOC` column indicating 0-9 which GFA was covered.
Note that a given target could be covered by GFAs on more than one tile.
Output is a list of astropy Tables; inputs can be numpy structured arrays
or astropy Tables
"""
log = Logger.get()
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
import fitsio
import desimodel.focalplane.gfa
import astropy.units as u
from astropy.table import Table
from desitarget.gaiamatch import gaia_psflike
from ._internal import Tiles
from .utils import Logger, Timer
from astropy.coordinates import SkyCoord
and context (functions, classes, or occasionally code) from other files:
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
. Output only the next line. | tm = Timer() |
Continue the code snippet: <|code_start|> 4 types used internally in assignment (science, standard, sky, safe).
This classification is controlled by applying bitmasks to the specified
data column. Alternatively, all targets in the file can be forced to one
type.
Args:
tgs (Targets): The targets object on which to append this data.
tgdata (Table): A table or recarray with the target properties.
survey (str): The survey type. If None, query from columns.
typeforce (int): If specified, it must equal one of the TARGET_TYPE_*
values. All targets read from the file will be assigned this type.
typecol (str): Optional column to use for bitmask matching (default
uses the result of main_cmx_or_sv from desitarget).
sciencemask (int): Bitmask for classifying targets as science.
stdmask (int): Bitmask for classifying targets as a standard.
skymask (int): Bitmask for classifying targets as sky.
suppskymask (int): Bitmask for classifying targets as suppsky.
safemask (int): Bitmask for classifying targets as a safe location.
excludemask (int): Bitmask for excluding targets.
gaia_stdmask (int): Bitmask for classifying targets as a Gaia standard.
rundate (optional, defaults to None): yyyy-mm-ddThh:mm:ss+00:00 rundate
for focalplane with UTC timezone formatting (string)
Returns:
None
Notes:
20210930 : include rundate argument, for default_main_stdmask().
"""
<|code_end|>
. Use current file imports:
import numpy as np
import fitsio
from desitarget.targetmask import desi_mask, mws_mask
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask
from desitarget.sv1.sv1_targetmask import scnd_mask as sv1_scnd_mask
from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask
from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask
from desitarget.targets import main_cmx_or_sv
from .utils import Logger, Timer
from .hardware import radec2xy, cs52xy
from ._internal import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
TARGET_TYPE_SUPPSKY,
Target, Targets, TargetsAvailable,
LocationsAvailable)
from fiberassign.utils import assert_isoformat_utc, get_date_cutoff
from datetime import datetime
from astropy.time import Time
from scipy.spatial import KDTree
and context (classes, functions, or code) from other files:
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
#
# Path: py/fiberassign/hardware.py
# def radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# ra, dec, use_cs5, threads=0):
# '''
# For the tile pointed at (tilera, tiledec), project the (ra, dec)
# value into X/Y mm.
#
# Args:
# hw: Hardware object
# tile_ra (float): Tile RA
# tile_dec (float): Tile Dec
# tile_obstime (string): Tile observation time, YYYY-MM-DDTHH:MM:SS / Astropy "isot" format.
# tile_obstheta (float): Tile "fieldrot" rotation angle.
# tile_obsha (float): Tile designed Hour Angle, in degrees.
# ra (numpy array): RA to project, in degrees
# dec (numpy array): Dec to project, in degrees
# use_CS5 (bool): If True, return CS5 coordinates, else curved.
# threads=0 (int): currently unused; for backward compatibility.
#
# Returns:
# x, y: numpy arrays: the (X, Y) projected locations.
# '''
# #xy = hw.radec2xy_multi(
# # tile_ra, tile_dec, tile_obstheta, ra, dec, use_cs5, threads=0
# #)
# #x = np.array([x for x,y in xy])
# #y = np.array([y for x,y in xy])
# from astropy.time import Time
# from desimeter.fiberassign import fiberassign_radec2xy_cs5, fiberassign_radec2xy_flat
# # Note that MJD is only used for precession, so no need for
# # high precision.
# t = Time(tile_obstime, format='isot')
# mjd = t.mjd
#
# # Don't pass adc[12]: Let desimeter use its pm-alike routines
# if use_cs5:
# x, y = fiberassign_radec2xy_cs5(ra, dec, tile_ra, tile_dec, mjd,
# tile_obsha, tile_obstheta)
# else:
# x, y = fiberassign_radec2xy_flat(ra, dec, tile_ra, tile_dec, mjd,
# tile_obsha, tile_obstheta)
# return x,y
#
# def cs52xy(x, y):
# '''
# Converts from CS5 coordinates (mm) into curved focal-plane X,Y coordinates in mm.
#
# Args:
# cs5x (numpy array): CS5 X coord (mm)
# cs5y (numpy array): CS5 Y coord (mm)
#
# Returns:
# x (numpy array): X coord (mm)
# y (numpy array): Y coord (mm)
# '''
# # There's a change in terminology between the focal-plane team and
# # the outside world here...
# from desimeter.transform.pos2ptl import ptl2flat
# return ptl2flat(x, y)
. Output only the next line. | log = Logger.get() |
Given the following code snippet before the placeholder: <|code_start|> data column. Alternatively, all targets in the file can be forced to one
type.
Args:
tgs (Targets): The targets object on which to append this data.
tfile (str): The path to the target catalog.
survey (str): The survey type. If None, query from columns and
the FITS header.
typeforce (int): If specified, it must equal one of the TARGET_TYPE_*
values. All targets read from the file will be assigned this type.
typecol (str): Optional column to use for bitmask matching (default
uses the result of main_cmx_or_sv from desitarget).
sciencemask (int): Bitmask for classifying targets as science.
stdmask (int): Bitmask for classifying targets as a standard.
skymask (int): Bitmask for classifying targets as sky.
suppskymask (int): Bitmask for classifying targets as suppsky.
safemask (int): Bitmask for classifying targets as a safe location.
excludemask (int): Bitmask for excluding targets.
rowbuffer (int): Optional number of rows to read at once when loading
very large files.
gaia_stdmask (int): Bitmask for classifying targets as a Gaia standard.
rundate (optional, defaults to None): yyyy-mm-ddThh:mm:ss+00:00 rundate
for focalplane with UTC timezone formatting (string)
Returns:
(str): The survey type.
Notes:
20210930 : include rundate argument, for default_main_stdmask().
"""
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import fitsio
from desitarget.targetmask import desi_mask, mws_mask
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask
from desitarget.sv1.sv1_targetmask import scnd_mask as sv1_scnd_mask
from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask
from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask
from desitarget.targets import main_cmx_or_sv
from .utils import Logger, Timer
from .hardware import radec2xy, cs52xy
from ._internal import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
TARGET_TYPE_SUPPSKY,
Target, Targets, TargetsAvailable,
LocationsAvailable)
from fiberassign.utils import assert_isoformat_utc, get_date_cutoff
from datetime import datetime
from astropy.time import Time
from scipy.spatial import KDTree
and context including class names, function names, and sometimes code from other files:
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
#
# Path: py/fiberassign/hardware.py
# def radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# ra, dec, use_cs5, threads=0):
# '''
# For the tile pointed at (tilera, tiledec), project the (ra, dec)
# value into X/Y mm.
#
# Args:
# hw: Hardware object
# tile_ra (float): Tile RA
# tile_dec (float): Tile Dec
# tile_obstime (string): Tile observation time, YYYY-MM-DDTHH:MM:SS / Astropy "isot" format.
# tile_obstheta (float): Tile "fieldrot" rotation angle.
# tile_obsha (float): Tile designed Hour Angle, in degrees.
# ra (numpy array): RA to project, in degrees
# dec (numpy array): Dec to project, in degrees
# use_CS5 (bool): If True, return CS5 coordinates, else curved.
# threads=0 (int): currently unused; for backward compatibility.
#
# Returns:
# x, y: numpy arrays: the (X, Y) projected locations.
# '''
# #xy = hw.radec2xy_multi(
# # tile_ra, tile_dec, tile_obstheta, ra, dec, use_cs5, threads=0
# #)
# #x = np.array([x for x,y in xy])
# #y = np.array([y for x,y in xy])
# from astropy.time import Time
# from desimeter.fiberassign import fiberassign_radec2xy_cs5, fiberassign_radec2xy_flat
# # Note that MJD is only used for precession, so no need for
# # high precision.
# t = Time(tile_obstime, format='isot')
# mjd = t.mjd
#
# # Don't pass adc[12]: Let desimeter use its pm-alike routines
# if use_cs5:
# x, y = fiberassign_radec2xy_cs5(ra, dec, tile_ra, tile_dec, mjd,
# tile_obsha, tile_obstheta)
# else:
# x, y = fiberassign_radec2xy_flat(ra, dec, tile_ra, tile_dec, mjd,
# tile_obsha, tile_obstheta)
# return x,y
#
# def cs52xy(x, y):
# '''
# Converts from CS5 coordinates (mm) into curved focal-plane X,Y coordinates in mm.
#
# Args:
# cs5x (numpy array): CS5 X coord (mm)
# cs5y (numpy array): CS5 Y coord (mm)
#
# Returns:
# x (numpy array): X coord (mm)
# y (numpy array): Y coord (mm)
# '''
# # There's a change in terminology between the focal-plane team and
# # the outside world here...
# from desimeter.transform.pos2ptl import ptl2flat
# return ptl2flat(x, y)
. Output only the next line. | tm = Timer() |
Given the following code snippet before the placeholder: <|code_start|> which are maps from tileid to numpy arrays.
'''
tile_targetids = {}
tile_x = {}
tile_y = {}
tile_xy_cs5 = {}
target_ids = tgs.ids()
target_ra, target_dec, target_obscond = tagalong.get_for_ids(
target_ids, ['RA', 'DEC', 'OBSCOND'])
kd = _radec2kd(target_ra, target_dec)
for (tile_id, tile_ra, tile_dec, tile_obscond, tile_ha, tile_obstheta,
tile_obstime) in zip(
tiles.id, tiles.ra, tiles.dec, tiles.obscond, tiles.obshourang,
tiles.obstheta, tiles.obstime):
print('Tile', tile_id, 'at RA,Dec', tile_ra, tile_dec, 'obscond:', tile_obscond, 'HA', tile_ha, 'obstime', tile_obstime)
inds = _kd_query_radec(kd, tile_ra, tile_dec, hw.focalplane_radius_deg)
match = np.flatnonzero(target_obscond[inds] & tile_obscond)
inds = inds[match]
del match
ras = target_ra [inds]
decs = target_dec[inds]
tids = target_ids[inds]
del inds
print('Found', len(tids), 'targets near tile and matching obscond')
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import fitsio
from desitarget.targetmask import desi_mask, mws_mask
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask
from desitarget.sv1.sv1_targetmask import scnd_mask as sv1_scnd_mask
from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask
from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask
from desitarget.targets import main_cmx_or_sv
from .utils import Logger, Timer
from .hardware import radec2xy, cs52xy
from ._internal import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
TARGET_TYPE_SUPPSKY,
Target, Targets, TargetsAvailable,
LocationsAvailable)
from fiberassign.utils import assert_isoformat_utc, get_date_cutoff
from datetime import datetime
from astropy.time import Time
from scipy.spatial import KDTree
and context including class names, function names, and sometimes code from other files:
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
#
# Path: py/fiberassign/hardware.py
# def radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# ra, dec, use_cs5, threads=0):
# '''
# For the tile pointed at (tilera, tiledec), project the (ra, dec)
# value into X/Y mm.
#
# Args:
# hw: Hardware object
# tile_ra (float): Tile RA
# tile_dec (float): Tile Dec
# tile_obstime (string): Tile observation time, YYYY-MM-DDTHH:MM:SS / Astropy "isot" format.
# tile_obstheta (float): Tile "fieldrot" rotation angle.
# tile_obsha (float): Tile designed Hour Angle, in degrees.
# ra (numpy array): RA to project, in degrees
# dec (numpy array): Dec to project, in degrees
# use_CS5 (bool): If True, return CS5 coordinates, else curved.
# threads=0 (int): currently unused; for backward compatibility.
#
# Returns:
# x, y: numpy arrays: the (X, Y) projected locations.
# '''
# #xy = hw.radec2xy_multi(
# # tile_ra, tile_dec, tile_obstheta, ra, dec, use_cs5, threads=0
# #)
# #x = np.array([x for x,y in xy])
# #y = np.array([y for x,y in xy])
# from astropy.time import Time
# from desimeter.fiberassign import fiberassign_radec2xy_cs5, fiberassign_radec2xy_flat
# # Note that MJD is only used for precession, so no need for
# # high precision.
# t = Time(tile_obstime, format='isot')
# mjd = t.mjd
#
# # Don't pass adc[12]: Let desimeter use its pm-alike routines
# if use_cs5:
# x, y = fiberassign_radec2xy_cs5(ra, dec, tile_ra, tile_dec, mjd,
# tile_obsha, tile_obstheta)
# else:
# x, y = fiberassign_radec2xy_flat(ra, dec, tile_ra, tile_dec, mjd,
# tile_obsha, tile_obstheta)
# return x,y
#
# def cs52xy(x, y):
# '''
# Converts from CS5 coordinates (mm) into curved focal-plane X,Y coordinates in mm.
#
# Args:
# cs5x (numpy array): CS5 X coord (mm)
# cs5y (numpy array): CS5 Y coord (mm)
#
# Returns:
# x (numpy array): X coord (mm)
# y (numpy array): Y coord (mm)
# '''
# # There's a change in terminology between the focal-plane team and
# # the outside world here...
# from desimeter.transform.pos2ptl import ptl2flat
# return ptl2flat(x, y)
. Output only the next line. | x, y = radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, |
Predict the next line for this snippet: <|code_start|> tile_y = {}
tile_xy_cs5 = {}
target_ids = tgs.ids()
target_ra, target_dec, target_obscond = tagalong.get_for_ids(
target_ids, ['RA', 'DEC', 'OBSCOND'])
kd = _radec2kd(target_ra, target_dec)
for (tile_id, tile_ra, tile_dec, tile_obscond, tile_ha, tile_obstheta,
tile_obstime) in zip(
tiles.id, tiles.ra, tiles.dec, tiles.obscond, tiles.obshourang,
tiles.obstheta, tiles.obstime):
print('Tile', tile_id, 'at RA,Dec', tile_ra, tile_dec, 'obscond:', tile_obscond, 'HA', tile_ha, 'obstime', tile_obstime)
inds = _kd_query_radec(kd, tile_ra, tile_dec, hw.focalplane_radius_deg)
match = np.flatnonzero(target_obscond[inds] & tile_obscond)
inds = inds[match]
del match
ras = target_ra [inds]
decs = target_dec[inds]
tids = target_ids[inds]
del inds
print('Found', len(tids), 'targets near tile and matching obscond')
x, y = radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta,
tile_ha, ras, decs, True)
# Save CS5 mapping
tile_xy_cs5[tile_id] = dict((tid,(xi,yi)) for tid,xi,yi in zip(tids, x, y))
<|code_end|>
with the help of current file imports:
import numpy as np
import fitsio
from desitarget.targetmask import desi_mask, mws_mask
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask
from desitarget.sv1.sv1_targetmask import scnd_mask as sv1_scnd_mask
from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask
from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask
from desitarget.targets import main_cmx_or_sv
from .utils import Logger, Timer
from .hardware import radec2xy, cs52xy
from ._internal import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
TARGET_TYPE_SUPPSKY,
Target, Targets, TargetsAvailable,
LocationsAvailable)
from fiberassign.utils import assert_isoformat_utc, get_date_cutoff
from datetime import datetime
from astropy.time import Time
from scipy.spatial import KDTree
and context from other files:
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
#
# Path: py/fiberassign/hardware.py
# def radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# ra, dec, use_cs5, threads=0):
# '''
# For the tile pointed at (tilera, tiledec), project the (ra, dec)
# value into X/Y mm.
#
# Args:
# hw: Hardware object
# tile_ra (float): Tile RA
# tile_dec (float): Tile Dec
# tile_obstime (string): Tile observation time, YYYY-MM-DDTHH:MM:SS / Astropy "isot" format.
# tile_obstheta (float): Tile "fieldrot" rotation angle.
# tile_obsha (float): Tile designed Hour Angle, in degrees.
# ra (numpy array): RA to project, in degrees
# dec (numpy array): Dec to project, in degrees
# use_CS5 (bool): If True, return CS5 coordinates, else curved.
# threads=0 (int): currently unused; for backward compatibility.
#
# Returns:
# x, y: numpy arrays: the (X, Y) projected locations.
# '''
# #xy = hw.radec2xy_multi(
# # tile_ra, tile_dec, tile_obstheta, ra, dec, use_cs5, threads=0
# #)
# #x = np.array([x for x,y in xy])
# #y = np.array([y for x,y in xy])
# from astropy.time import Time
# from desimeter.fiberassign import fiberassign_radec2xy_cs5, fiberassign_radec2xy_flat
# # Note that MJD is only used for precession, so no need for
# # high precision.
# t = Time(tile_obstime, format='isot')
# mjd = t.mjd
#
# # Don't pass adc[12]: Let desimeter use its pm-alike routines
# if use_cs5:
# x, y = fiberassign_radec2xy_cs5(ra, dec, tile_ra, tile_dec, mjd,
# tile_obsha, tile_obstheta)
# else:
# x, y = fiberassign_radec2xy_flat(ra, dec, tile_ra, tile_dec, mjd,
# tile_obsha, tile_obstheta)
# return x,y
#
# def cs52xy(x, y):
# '''
# Converts from CS5 coordinates (mm) into curved focal-plane X,Y coordinates in mm.
#
# Args:
# cs5x (numpy array): CS5 X coord (mm)
# cs5y (numpy array): CS5 Y coord (mm)
#
# Returns:
# x (numpy array): X coord (mm)
# y (numpy array): Y coord (mm)
# '''
# # There's a change in terminology between the focal-plane team and
# # the outside world here...
# from desimeter.transform.pos2ptl import ptl2flat
# return ptl2flat(x, y)
, which may contain function names, class names, or code. Output only the next line. | x, y = cs52xy(x, y) |
Based on the snippet: <|code_start|> cls.binDir = os.path.join(cls.topDir, "bin")
if not os.path.isdir(cls.binDir):
# We are running from some other directory from an installed package
cls.topDir = os.path.dirname( # top-level
os.path.dirname( # lib/
os.path.dirname( # python3.x/
os.path.dirname( # site-packages/
os.path.dirname( # egg/
os.path.dirname( # fiberassign/
os.path.dirname(os.path.abspath(__file__)) # test/
)
)
)
)
)
)
cls.binDir = os.path.join(cls.topDir, "bin")
def setUp(self):
self.density_science = 5000
self.density_standards = 5000
self.density_sky = 10
self.density_suppsky = 5000
pass
def tearDown(self):
pass
def test_science(self):
set_matplotlib_pdf_backend()
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import subprocess
import re
import shutil
import unittest
import json
import glob
import numpy as np
import fitsio
import desimodel
import fiberassign
import matplotlib.pyplot as plt
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable,
LocationsAvailable, load_target_file, targets_in_tiles, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results)
from fiberassign.qa import qa_tiles, qa_targets
from fiberassign.vis import plot_tiles, plot_qa, set_matplotlib_pdf_backend
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date)
and context (classes, functions, sometimes code) from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | test_dir = test_subdir_create("qa_test_science") |
Given the code snippet: <|code_start|> set_matplotlib_pdf_backend()
test_dir = test_subdir_create("qa_test_science")
log_file = os.path.join(test_dir, "log.txt")
np.random.seed(123456789)
input_mtl = os.path.join(test_dir, "mtl.fits")
# For this test, we will use just 2 science target classes, in order to verify
# we get approximately the correct distribution
sdist = [
(3000, 1, 0.25, "QSO"),
(2000, 1, 0.75, "ELG")
]
nscience = sim_targets(
input_mtl,
TARGET_TYPE_SCIENCE,
0,
density=self.density_science,
science_frac=sdist
)
log_msg = "Simulated {} science targets\n".format(nscience)
tgs = Targets()
tagalong = create_tagalong(plate_radec=False)
load_target_file(tgs, tagalong, input_mtl)
# Read hardware properties
fp, exclude, state = sim_focalplane(rundate=test_assign_date)
hw = load_hardware(focalplane=(fp, exclude, state))
tfile = os.path.join(test_dir, "footprint.fits")
<|code_end|>
, generate the next line using the imports in this file:
import os
import subprocess
import re
import shutil
import unittest
import json
import glob
import numpy as np
import fitsio
import desimodel
import fiberassign
import matplotlib.pyplot as plt
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable,
LocationsAvailable, load_target_file, targets_in_tiles, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results)
from fiberassign.qa import qa_tiles, qa_targets
from fiberassign.vis import plot_tiles, plot_qa, set_matplotlib_pdf_backend
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date)
and context (functions, classes, or occasionally code) from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | sim_tiles(tfile) |
Given snippet: <|code_start|> )
)
)
)
)
cls.binDir = os.path.join(cls.topDir, "bin")
def setUp(self):
self.density_science = 5000
self.density_standards = 5000
self.density_sky = 10
self.density_suppsky = 5000
pass
def tearDown(self):
pass
def test_science(self):
set_matplotlib_pdf_backend()
test_dir = test_subdir_create("qa_test_science")
log_file = os.path.join(test_dir, "log.txt")
np.random.seed(123456789)
input_mtl = os.path.join(test_dir, "mtl.fits")
# For this test, we will use just 2 science target classes, in order to verify
# we get approximately the correct distribution
sdist = [
(3000, 1, 0.25, "QSO"),
(2000, 1, 0.75, "ELG")
]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import subprocess
import re
import shutil
import unittest
import json
import glob
import numpy as np
import fitsio
import desimodel
import fiberassign
import matplotlib.pyplot as plt
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable,
LocationsAvailable, load_target_file, targets_in_tiles, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results)
from fiberassign.qa import qa_tiles, qa_targets
from fiberassign.vis import plot_tiles, plot_qa, set_matplotlib_pdf_backend
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date)
and context:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
which might include code, classes, or functions. Output only the next line. | nscience = sim_targets( |
Predict the next line after this snippet: <|code_start|> pass
def test_science(self):
set_matplotlib_pdf_backend()
test_dir = test_subdir_create("qa_test_science")
log_file = os.path.join(test_dir, "log.txt")
np.random.seed(123456789)
input_mtl = os.path.join(test_dir, "mtl.fits")
# For this test, we will use just 2 science target classes, in order to verify
# we get approximately the correct distribution
sdist = [
(3000, 1, 0.25, "QSO"),
(2000, 1, 0.75, "ELG")
]
nscience = sim_targets(
input_mtl,
TARGET_TYPE_SCIENCE,
0,
density=self.density_science,
science_frac=sdist
)
log_msg = "Simulated {} science targets\n".format(nscience)
tgs = Targets()
tagalong = create_tagalong(plate_radec=False)
load_target_file(tgs, tagalong, input_mtl)
# Read hardware properties
<|code_end|>
using the current file's imports:
import os
import subprocess
import re
import shutil
import unittest
import json
import glob
import numpy as np
import fitsio
import desimodel
import fiberassign
import matplotlib.pyplot as plt
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable,
LocationsAvailable, load_target_file, targets_in_tiles, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results)
from fiberassign.qa import qa_tiles, qa_targets
from fiberassign.vis import plot_tiles, plot_qa, set_matplotlib_pdf_backend
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date)
and any relevant context from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | fp, exclude, state = sim_focalplane(rundate=test_assign_date) |
Using the snippet: <|code_start|> pass
def test_science(self):
set_matplotlib_pdf_backend()
test_dir = test_subdir_create("qa_test_science")
log_file = os.path.join(test_dir, "log.txt")
np.random.seed(123456789)
input_mtl = os.path.join(test_dir, "mtl.fits")
# For this test, we will use just 2 science target classes, in order to verify
# we get approximately the correct distribution
sdist = [
(3000, 1, 0.25, "QSO"),
(2000, 1, 0.75, "ELG")
]
nscience = sim_targets(
input_mtl,
TARGET_TYPE_SCIENCE,
0,
density=self.density_science,
science_frac=sdist
)
log_msg = "Simulated {} science targets\n".format(nscience)
tgs = Targets()
tagalong = create_tagalong(plate_radec=False)
load_target_file(tgs, tagalong, input_mtl)
# Read hardware properties
<|code_end|>
, determine the next line of code. You have imports:
import os
import subprocess
import re
import shutil
import unittest
import json
import glob
import numpy as np
import fitsio
import desimodel
import fiberassign
import matplotlib.pyplot as plt
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable,
LocationsAvailable, load_target_file, targets_in_tiles, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results)
from fiberassign.qa import qa_tiles, qa_targets
from fiberassign.vis import plot_tiles, plot_qa, set_matplotlib_pdf_backend
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date)
and context (class names, function names, or code) available:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | fp, exclude, state = sim_focalplane(rundate=test_assign_date) |
Given the following code snippet before the placeholder: <|code_start|> # we use curved focal surface internally.
tg_x,tg_y = radec2xy(
hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
tg_ra, tg_dec, True)
del tg_ra, tg_dec
# tm.stop()
# tm.report(" extract target props for {}".format(tile_id))
# tm.clear()
# tm.start()
# Write a header-only HDU 0 with some basic keywords
header = dict()
header["TILEID"] = tile_id
header["TILERA"] = tile_ra
header["TILEDEC"] = tile_dec
header["FIELDROT"] = tile_obstheta
header["FA_PLAN"] = tile_obstime
header["FA_HA"] = tile_obsha
header["FA_RUN"] = hw.time()
margins = hw.added_margins
keys = list(margins.keys())
keys.sort()
for k in keys:
header["FA_M_%s" % k[:3]] = margins[k]
header["REQRA"] = tile_ra
header["REQDEC"] = tile_dec
header["FIELDNUM"] = 0
<|code_end|>
, predict the next line using imports from the current file:
import os
import re
import numpy as np
import multiprocessing as mp
import fitsio
import desimodel.focalplane
from astropy import units
from astropy.coordinates import SkyCoord
from multiprocessing.sharedctypes import RawArray
from functools import partial
from collections import OrderedDict
from types import SimpleNamespace
from desiutil.depend import add_dependencies, setdep
from desiutil.dust import SFDMap
from desitarget.targetmask import desi_mask
from ._version import __version__
from .utils import Logger, Timer, default_mp_proc, GlobalTimers
from .targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY, TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE, desi_target_type,
default_target_masks, default_survey_target_masks)
from .hardware import (FIBER_STATE_UNASSIGNED, FIBER_STATE_STUCK,
FIBER_STATE_BROKEN, FIBER_STATE_RESTRICT,
radec2xy, xy2radec, xy2cs5)
from ._internal import Assignment
and context including class names, function names, and sometimes code from other files:
# Path: py/fiberassign/_version.py
#
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
#
# Path: py/fiberassign/targets.py
# class TargetTagalong(object):
# def __init__(self, columns, outnames={}, aliases={}):
# def get_default(self, column):
# def get_output_name(self, column):
# def add_data(self, targetids, tabledata, fake={}):
# def set_data(self, targetids, tabledata):
# def get_for_ids(self, targetids, names):
# def create_tagalong(plate_radec=True):
# def str_to_target_type(input):
# def default_main_sciencemask():
# def default_main_stdmask(rundate=None):
# def default_main_gaia_stdmask():
# def default_main_skymask():
# def default_main_suppskymask():
# def default_main_safemask():
# def default_main_excludemask():
# def default_sv3_sciencemask():
# def default_sv3_stdmask():
# def default_sv3_skymask():
# def default_sv3_suppskymask():
# def default_sv3_safemask():
# def default_sv3_excludemask():
# def default_sv2_sciencemask():
# def default_sv2_stdmask():
# def default_sv2_skymask():
# def default_sv2_suppskymask():
# def default_sv2_safemask():
# def default_sv2_excludemask():
# def default_sv1_sciencemask():
# def default_sv1_stdmask():
# def default_sv1_skymask():
# def default_sv1_suppskymask():
# def default_sv1_safemask():
# def default_sv1_excludemask():
# def default_cmx_sciencemask():
# def default_cmx_stdmask():
# def default_cmx_skymask():
# def default_cmx_suppskymask():
# def default_cmx_safemask():
# def default_cmx_excludemask():
# def desi_target_type(desi_target, mws_target, sciencemask, stdmask,
# skymask, suppskymask, safemask, excludemask, gaia_stdmask):
# def default_survey_target_masks(survey, rundate=None):
# def default_target_masks(data, rundate=None):
# def append_target_table(tgs, tagalong, tgdata, survey, typeforce, typecol,
# sciencemask,
# stdmask, skymask, suppskymask, safemask, excludemask, gaia_stdmask):
# def load_target_table(tgs, tagalong, tgdata, survey=None, typeforce=None, typecol=None,
# sciencemask=None, stdmask=None, skymask=None,
# suppskymask=None, safemask=None, excludemask=None, gaia_stdmask=None,
# rundate=None):
# def load_target_file(tgs, tagalong, tfile, survey=None, typeforce=None, typecol=None,
# sciencemask=None, stdmask=None, skymask=None,
# suppskymask=None, safemask=None, excludemask=None,
# rowbuffer=1000000, gaia_stdmask=None, rundate=None):
# def targets_in_tiles(hw, tgs, tiles, tagalong):
# def _radec2kd(ra, dec):
# def _radec2xyz(ra, dec):
# def _kd_query_radec(kd, ra, dec, radius_deg):
#
# Path: py/fiberassign/hardware.py
# def expand_closed_curve(xx, yy, margin):
# def load_hardware(focalplane=None, rundate=None,
# add_margins={}):
# def get_exclusions(exclname):
# def radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# ra, dec, use_cs5, threads=0):
# def xy2radec(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# x, y, use_cs5, threads=0):
# def xy2cs5(x, y):
# def cs52xy(x, y):
# N = len(xx)
. Output only the next line. | header["FA_VER"] = __version__ |
Predict the next line after this snippet: <|code_start|> if split:
tilegroup = tile_id // 1000
tiledir = os.path.join(dir, "{:02d}".format(tilegroup))
if create:
os.makedirs(tiledir, exist_ok=True)
path = os.path.join(tiledir,
"{}{:06d}.{}".format(prefix, tile_id, ext))
return path
def write_assignment_fits_tile(asgn, tagalong, fulltarget, overwrite, params):
"""Write a single tile assignment to a FITS file.
Args:
outroot (str): full path of the output root file name.
asgn (Assignment): the assignment class instance.
fulltarget (bool): if True, dump the target information for all
available targets, not just the ones that are assigned.
overwrite (bool): overwrite output files or not
params (tuple): tuple containing the tile ID, RA, DEC, rotation,
output path, and GFA targets
Returns:
None
"""
tm = Timer()
tm.start()
tile_id, tile_ra, tile_dec, tile_obstheta, tile_obstime, tile_obsha, \
tile_file, gfa_targets, stuck_sky_tile, tile_xy_cs5 = params
<|code_end|>
using the current file's imports:
import os
import re
import numpy as np
import multiprocessing as mp
import fitsio
import desimodel.focalplane
from astropy import units
from astropy.coordinates import SkyCoord
from multiprocessing.sharedctypes import RawArray
from functools import partial
from collections import OrderedDict
from types import SimpleNamespace
from desiutil.depend import add_dependencies, setdep
from desiutil.dust import SFDMap
from desitarget.targetmask import desi_mask
from ._version import __version__
from .utils import Logger, Timer, default_mp_proc, GlobalTimers
from .targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY, TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE, desi_target_type,
default_target_masks, default_survey_target_masks)
from .hardware import (FIBER_STATE_UNASSIGNED, FIBER_STATE_STUCK,
FIBER_STATE_BROKEN, FIBER_STATE_RESTRICT,
radec2xy, xy2radec, xy2cs5)
from ._internal import Assignment
and any relevant context from other files:
# Path: py/fiberassign/_version.py
#
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
#
# Path: py/fiberassign/targets.py
# class TargetTagalong(object):
# def __init__(self, columns, outnames={}, aliases={}):
# def get_default(self, column):
# def get_output_name(self, column):
# def add_data(self, targetids, tabledata, fake={}):
# def set_data(self, targetids, tabledata):
# def get_for_ids(self, targetids, names):
# def create_tagalong(plate_radec=True):
# def str_to_target_type(input):
# def default_main_sciencemask():
# def default_main_stdmask(rundate=None):
# def default_main_gaia_stdmask():
# def default_main_skymask():
# def default_main_suppskymask():
# def default_main_safemask():
# def default_main_excludemask():
# def default_sv3_sciencemask():
# def default_sv3_stdmask():
# def default_sv3_skymask():
# def default_sv3_suppskymask():
# def default_sv3_safemask():
# def default_sv3_excludemask():
# def default_sv2_sciencemask():
# def default_sv2_stdmask():
# def default_sv2_skymask():
# def default_sv2_suppskymask():
# def default_sv2_safemask():
# def default_sv2_excludemask():
# def default_sv1_sciencemask():
# def default_sv1_stdmask():
# def default_sv1_skymask():
# def default_sv1_suppskymask():
# def default_sv1_safemask():
# def default_sv1_excludemask():
# def default_cmx_sciencemask():
# def default_cmx_stdmask():
# def default_cmx_skymask():
# def default_cmx_suppskymask():
# def default_cmx_safemask():
# def default_cmx_excludemask():
# def desi_target_type(desi_target, mws_target, sciencemask, stdmask,
# skymask, suppskymask, safemask, excludemask, gaia_stdmask):
# def default_survey_target_masks(survey, rundate=None):
# def default_target_masks(data, rundate=None):
# def append_target_table(tgs, tagalong, tgdata, survey, typeforce, typecol,
# sciencemask,
# stdmask, skymask, suppskymask, safemask, excludemask, gaia_stdmask):
# def load_target_table(tgs, tagalong, tgdata, survey=None, typeforce=None, typecol=None,
# sciencemask=None, stdmask=None, skymask=None,
# suppskymask=None, safemask=None, excludemask=None, gaia_stdmask=None,
# rundate=None):
# def load_target_file(tgs, tagalong, tfile, survey=None, typeforce=None, typecol=None,
# sciencemask=None, stdmask=None, skymask=None,
# suppskymask=None, safemask=None, excludemask=None,
# rowbuffer=1000000, gaia_stdmask=None, rundate=None):
# def targets_in_tiles(hw, tgs, tiles, tagalong):
# def _radec2kd(ra, dec):
# def _radec2xyz(ra, dec):
# def _kd_query_radec(kd, ra, dec, radius_deg):
#
# Path: py/fiberassign/hardware.py
# def expand_closed_curve(xx, yy, margin):
# def load_hardware(focalplane=None, rundate=None,
# add_margins={}):
# def get_exclusions(exclname):
# def radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# ra, dec, use_cs5, threads=0):
# def xy2radec(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# x, y, use_cs5, threads=0):
# def xy2cs5(x, y):
# def cs52xy(x, y):
# N = len(xx)
. Output only the next line. | log = Logger.get() |
Given the code snippet: <|code_start|>
def result_path(tile_id, dir=".", prefix="fba-",
ext="fits", create=False, split=False):
tiledir = dir
if split:
tilegroup = tile_id // 1000
tiledir = os.path.join(dir, "{:02d}".format(tilegroup))
if create:
os.makedirs(tiledir, exist_ok=True)
path = os.path.join(tiledir,
"{}{:06d}.{}".format(prefix, tile_id, ext))
return path
def write_assignment_fits_tile(asgn, tagalong, fulltarget, overwrite, params):
"""Write a single tile assignment to a FITS file.
Args:
outroot (str): full path of the output root file name.
asgn (Assignment): the assignment class instance.
fulltarget (bool): if True, dump the target information for all
available targets, not just the ones that are assigned.
overwrite (bool): overwrite output files or not
params (tuple): tuple containing the tile ID, RA, DEC, rotation,
output path, and GFA targets
Returns:
None
"""
<|code_end|>
, generate the next line using the imports in this file:
import os
import re
import numpy as np
import multiprocessing as mp
import fitsio
import desimodel.focalplane
from astropy import units
from astropy.coordinates import SkyCoord
from multiprocessing.sharedctypes import RawArray
from functools import partial
from collections import OrderedDict
from types import SimpleNamespace
from desiutil.depend import add_dependencies, setdep
from desiutil.dust import SFDMap
from desitarget.targetmask import desi_mask
from ._version import __version__
from .utils import Logger, Timer, default_mp_proc, GlobalTimers
from .targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY, TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE, desi_target_type,
default_target_masks, default_survey_target_masks)
from .hardware import (FIBER_STATE_UNASSIGNED, FIBER_STATE_STUCK,
FIBER_STATE_BROKEN, FIBER_STATE_RESTRICT,
radec2xy, xy2radec, xy2cs5)
from ._internal import Assignment
and context (functions, classes, or occasionally code) from other files:
# Path: py/fiberassign/_version.py
#
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
#
# Path: py/fiberassign/targets.py
# class TargetTagalong(object):
# def __init__(self, columns, outnames={}, aliases={}):
# def get_default(self, column):
# def get_output_name(self, column):
# def add_data(self, targetids, tabledata, fake={}):
# def set_data(self, targetids, tabledata):
# def get_for_ids(self, targetids, names):
# def create_tagalong(plate_radec=True):
# def str_to_target_type(input):
# def default_main_sciencemask():
# def default_main_stdmask(rundate=None):
# def default_main_gaia_stdmask():
# def default_main_skymask():
# def default_main_suppskymask():
# def default_main_safemask():
# def default_main_excludemask():
# def default_sv3_sciencemask():
# def default_sv3_stdmask():
# def default_sv3_skymask():
# def default_sv3_suppskymask():
# def default_sv3_safemask():
# def default_sv3_excludemask():
# def default_sv2_sciencemask():
# def default_sv2_stdmask():
# def default_sv2_skymask():
# def default_sv2_suppskymask():
# def default_sv2_safemask():
# def default_sv2_excludemask():
# def default_sv1_sciencemask():
# def default_sv1_stdmask():
# def default_sv1_skymask():
# def default_sv1_suppskymask():
# def default_sv1_safemask():
# def default_sv1_excludemask():
# def default_cmx_sciencemask():
# def default_cmx_stdmask():
# def default_cmx_skymask():
# def default_cmx_suppskymask():
# def default_cmx_safemask():
# def default_cmx_excludemask():
# def desi_target_type(desi_target, mws_target, sciencemask, stdmask,
# skymask, suppskymask, safemask, excludemask, gaia_stdmask):
# def default_survey_target_masks(survey, rundate=None):
# def default_target_masks(data, rundate=None):
# def append_target_table(tgs, tagalong, tgdata, survey, typeforce, typecol,
# sciencemask,
# stdmask, skymask, suppskymask, safemask, excludemask, gaia_stdmask):
# def load_target_table(tgs, tagalong, tgdata, survey=None, typeforce=None, typecol=None,
# sciencemask=None, stdmask=None, skymask=None,
# suppskymask=None, safemask=None, excludemask=None, gaia_stdmask=None,
# rundate=None):
# def load_target_file(tgs, tagalong, tfile, survey=None, typeforce=None, typecol=None,
# sciencemask=None, stdmask=None, skymask=None,
# suppskymask=None, safemask=None, excludemask=None,
# rowbuffer=1000000, gaia_stdmask=None, rundate=None):
# def targets_in_tiles(hw, tgs, tiles, tagalong):
# def _radec2kd(ra, dec):
# def _radec2xyz(ra, dec):
# def _kd_query_radec(kd, ra, dec, radius_deg):
#
# Path: py/fiberassign/hardware.py
# def expand_closed_curve(xx, yy, margin):
# def load_hardware(focalplane=None, rundate=None,
# add_margins={}):
# def get_exclusions(exclname):
# def radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# ra, dec, use_cs5, threads=0):
# def xy2radec(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# x, y, use_cs5, threads=0):
# def xy2cs5(x, y):
# def cs52xy(x, y):
# N = len(xx)
. Output only the next line. | tm = Timer() |
Given snippet: <|code_start|> sky_per_petal=40,
sky_per_slitblock=0,
start_tile=-1,
stop_tile=-1,
redistribute=True,
use_zero_obsremain=True
):
"""Run fiber assignment.
Given an already-constructed Assignment class instance, run the assignment in a
standard way.
This is designed to be the main "driver" function used by higher-level code or
commandline tools. The purpose of this function is to ensure that all of those
tools are assigning targets in the same way.
Args:
asgn (Assignment): The assignment class
std_per_petal (int): The number of standards to assign per petal
sky_per_petal (int): The number of sky to assign per petal
sky_per_slitblock (int): The number of sky to assign per slitblock
start_tile (int): If specified, the first tile ID to assign.
stop_tile (int): If specified, the last tile ID to assign.
redistribute (bool): If True, attempt to shift science targets to unassigned
fibers on later tiles in order to balance the number per petal.
Returns:
None
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import re
import numpy as np
import multiprocessing as mp
import fitsio
import desimodel.focalplane
from astropy import units
from astropy.coordinates import SkyCoord
from multiprocessing.sharedctypes import RawArray
from functools import partial
from collections import OrderedDict
from types import SimpleNamespace
from desiutil.depend import add_dependencies, setdep
from desiutil.dust import SFDMap
from desitarget.targetmask import desi_mask
from ._version import __version__
from .utils import Logger, Timer, default_mp_proc, GlobalTimers
from .targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY, TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE, desi_target_type,
default_target_masks, default_survey_target_masks)
from .hardware import (FIBER_STATE_UNASSIGNED, FIBER_STATE_STUCK,
FIBER_STATE_BROKEN, FIBER_STATE_RESTRICT,
radec2xy, xy2radec, xy2cs5)
from ._internal import Assignment
and context:
# Path: py/fiberassign/_version.py
#
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
#
# Path: py/fiberassign/targets.py
# class TargetTagalong(object):
# def __init__(self, columns, outnames={}, aliases={}):
# def get_default(self, column):
# def get_output_name(self, column):
# def add_data(self, targetids, tabledata, fake={}):
# def set_data(self, targetids, tabledata):
# def get_for_ids(self, targetids, names):
# def create_tagalong(plate_radec=True):
# def str_to_target_type(input):
# def default_main_sciencemask():
# def default_main_stdmask(rundate=None):
# def default_main_gaia_stdmask():
# def default_main_skymask():
# def default_main_suppskymask():
# def default_main_safemask():
# def default_main_excludemask():
# def default_sv3_sciencemask():
# def default_sv3_stdmask():
# def default_sv3_skymask():
# def default_sv3_suppskymask():
# def default_sv3_safemask():
# def default_sv3_excludemask():
# def default_sv2_sciencemask():
# def default_sv2_stdmask():
# def default_sv2_skymask():
# def default_sv2_suppskymask():
# def default_sv2_safemask():
# def default_sv2_excludemask():
# def default_sv1_sciencemask():
# def default_sv1_stdmask():
# def default_sv1_skymask():
# def default_sv1_suppskymask():
# def default_sv1_safemask():
# def default_sv1_excludemask():
# def default_cmx_sciencemask():
# def default_cmx_stdmask():
# def default_cmx_skymask():
# def default_cmx_suppskymask():
# def default_cmx_safemask():
# def default_cmx_excludemask():
# def desi_target_type(desi_target, mws_target, sciencemask, stdmask,
# skymask, suppskymask, safemask, excludemask, gaia_stdmask):
# def default_survey_target_masks(survey, rundate=None):
# def default_target_masks(data, rundate=None):
# def append_target_table(tgs, tagalong, tgdata, survey, typeforce, typecol,
# sciencemask,
# stdmask, skymask, suppskymask, safemask, excludemask, gaia_stdmask):
# def load_target_table(tgs, tagalong, tgdata, survey=None, typeforce=None, typecol=None,
# sciencemask=None, stdmask=None, skymask=None,
# suppskymask=None, safemask=None, excludemask=None, gaia_stdmask=None,
# rundate=None):
# def load_target_file(tgs, tagalong, tfile, survey=None, typeforce=None, typecol=None,
# sciencemask=None, stdmask=None, skymask=None,
# suppskymask=None, safemask=None, excludemask=None,
# rowbuffer=1000000, gaia_stdmask=None, rundate=None):
# def targets_in_tiles(hw, tgs, tiles, tagalong):
# def _radec2kd(ra, dec):
# def _radec2xyz(ra, dec):
# def _kd_query_radec(kd, ra, dec, radius_deg):
#
# Path: py/fiberassign/hardware.py
# def expand_closed_curve(xx, yy, margin):
# def load_hardware(focalplane=None, rundate=None,
# add_margins={}):
# def get_exclusions(exclname):
# def radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# ra, dec, use_cs5, threads=0):
# def xy2radec(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# x, y, use_cs5, threads=0):
# def xy2cs5(x, y):
# def cs52xy(x, y):
# N = len(xx)
which might include code, classes, or functions. Output only the next line. | gt = GlobalTimers.get() |
Given snippet: <|code_start|> """
gt = GlobalTimers.get()
log = Logger.get()
def print_counts(when=None):
counts = asgn.get_counts(start_tile, stop_tile)
tiles = list(counts.keys())
tiles.sort()
for tile in tiles:
msg = 'Tile %i: ' % tile
if when is not None:
msg += when
tilecounts = counts[tile]
keys = [('SCIENCE',True), ('SCIENCE not STANDARD',False), ('STANDARD',True),
('SKY',True), ('SUPPSKY',False), ('SAFE',False)]
ss = []
for k,always in keys:
n = tilecounts.get(k, None)
if n is None:
log.warning('Key', k, 'missing from Assignment.get_counts return value')
else:
if n>0 or always:
ss.append('%s: %i' % (k,n))
log.info(msg + ', '.join(ss))
print_counts('Start: ')
# First-pass assignment of science targets
gt.start("Assign unused fibers to science targets")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import re
import numpy as np
import multiprocessing as mp
import fitsio
import desimodel.focalplane
from astropy import units
from astropy.coordinates import SkyCoord
from multiprocessing.sharedctypes import RawArray
from functools import partial
from collections import OrderedDict
from types import SimpleNamespace
from desiutil.depend import add_dependencies, setdep
from desiutil.dust import SFDMap
from desitarget.targetmask import desi_mask
from ._version import __version__
from .utils import Logger, Timer, default_mp_proc, GlobalTimers
from .targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY, TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE, desi_target_type,
default_target_masks, default_survey_target_masks)
from .hardware import (FIBER_STATE_UNASSIGNED, FIBER_STATE_STUCK,
FIBER_STATE_BROKEN, FIBER_STATE_RESTRICT,
radec2xy, xy2radec, xy2cs5)
from ._internal import Assignment
and context:
# Path: py/fiberassign/_version.py
#
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
#
# Path: py/fiberassign/targets.py
# class TargetTagalong(object):
# def __init__(self, columns, outnames={}, aliases={}):
# def get_default(self, column):
# def get_output_name(self, column):
# def add_data(self, targetids, tabledata, fake={}):
# def set_data(self, targetids, tabledata):
# def get_for_ids(self, targetids, names):
# def create_tagalong(plate_radec=True):
# def str_to_target_type(input):
# def default_main_sciencemask():
# def default_main_stdmask(rundate=None):
# def default_main_gaia_stdmask():
# def default_main_skymask():
# def default_main_suppskymask():
# def default_main_safemask():
# def default_main_excludemask():
# def default_sv3_sciencemask():
# def default_sv3_stdmask():
# def default_sv3_skymask():
# def default_sv3_suppskymask():
# def default_sv3_safemask():
# def default_sv3_excludemask():
# def default_sv2_sciencemask():
# def default_sv2_stdmask():
# def default_sv2_skymask():
# def default_sv2_suppskymask():
# def default_sv2_safemask():
# def default_sv2_excludemask():
# def default_sv1_sciencemask():
# def default_sv1_stdmask():
# def default_sv1_skymask():
# def default_sv1_suppskymask():
# def default_sv1_safemask():
# def default_sv1_excludemask():
# def default_cmx_sciencemask():
# def default_cmx_stdmask():
# def default_cmx_skymask():
# def default_cmx_suppskymask():
# def default_cmx_safemask():
# def default_cmx_excludemask():
# def desi_target_type(desi_target, mws_target, sciencemask, stdmask,
# skymask, suppskymask, safemask, excludemask, gaia_stdmask):
# def default_survey_target_masks(survey, rundate=None):
# def default_target_masks(data, rundate=None):
# def append_target_table(tgs, tagalong, tgdata, survey, typeforce, typecol,
# sciencemask,
# stdmask, skymask, suppskymask, safemask, excludemask, gaia_stdmask):
# def load_target_table(tgs, tagalong, tgdata, survey=None, typeforce=None, typecol=None,
# sciencemask=None, stdmask=None, skymask=None,
# suppskymask=None, safemask=None, excludemask=None, gaia_stdmask=None,
# rundate=None):
# def load_target_file(tgs, tagalong, tfile, survey=None, typeforce=None, typecol=None,
# sciencemask=None, stdmask=None, skymask=None,
# suppskymask=None, safemask=None, excludemask=None,
# rowbuffer=1000000, gaia_stdmask=None, rundate=None):
# def targets_in_tiles(hw, tgs, tiles, tagalong):
# def _radec2kd(ra, dec):
# def _radec2xyz(ra, dec):
# def _kd_query_radec(kd, ra, dec, radius_deg):
#
# Path: py/fiberassign/hardware.py
# def expand_closed_curve(xx, yy, margin):
# def load_hardware(focalplane=None, rundate=None,
# add_margins={}):
# def get_exclusions(exclname):
# def radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# ra, dec, use_cs5, threads=0):
# def xy2radec(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
# x, y, use_cs5, threads=0):
# def xy2cs5(x, y):
# def cs52xy(x, y):
# N = len(xx)
which might include code, classes, or functions. Output only the next line. | asgn.assign_unused(TARGET_TYPE_SCIENCE, -1, -1, "POS", start_tile, stop_tile) |
Predict the next line for this snippet: <|code_start|>"""
Test fiberassign target operations.
"""
class TestHardware(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_read(self):
<|code_end|>
with the help of current file imports:
import unittest
import numpy as np
import desimodel.io as dmio
from datetime import datetime
from fiberassign.utils import Timer
from fiberassign.hardware import load_hardware
from .simulate import test_assign_date
and context from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
, which may contain function names, class names, or code. Output only the next line. | hw = load_hardware(rundate=test_assign_date) |
Using the snippet: <|code_start|>
class TestAssign(unittest.TestCase):
def setUp(self):
self.saved_skybricks = os.environ.get('STUCKSKY_DIR')
if self.saved_skybricks is not None:
del os.environ['SKYBRICKS_DIR']
self.density_science = 5000
self.density_standards = 5000
self.density_sky = 100
self.density_suppsky = 5000
pass
def tearDown(self):
if self.saved_skybricks is not None:
os.environ['STUCKSKY_DIR'] = self.saved_skybricks
def test_io(self):
np.random.seed(123456789)
<|code_end|>
, determine the next line of code. You have imports:
import os
import shutil
import unittest
import glob
import json
import numpy as np
import fitsio
import desimodel
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable, targets_in_tiles,
LocationsAvailable, load_target_file, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results,
read_assignment_fits_tile, run)
from fiberassign.stucksky import stuck_on_sky
from fiberassign.qa import qa_tiles
from fiberassign.vis import plot_tiles, plot_qa
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date,
sim_stuck_sky)
and context (class names, function names, or code) available:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | test_dir = test_subdir_create("assign_test_io") |
Predict the next line after this snippet: <|code_start|> )
tgoff += nstd
nsky = sim_targets(
input_sky,
TARGET_TYPE_SKY,
tgoff,
density=self.density_sky
)
tgoff += nsky
nsuppsky = sim_targets(
input_suppsky,
TARGET_TYPE_SUPPSKY,
tgoff,
density=self.density_suppsky
)
tgs = Targets()
tagalong = create_tagalong(plate_radec=False)
load_target_file(tgs, tagalong, input_mtl)
load_target_file(tgs, tagalong, input_std)
load_target_file(tgs, tagalong, input_sky)
load_target_file(tgs, tagalong, input_suppsky)
# Compute the targets available to each fiber for each tile.
fp, exclude, state = sim_focalplane(rundate=test_assign_date)
hw = load_hardware(
focalplane=(fp, exclude, state),
rundate=test_assign_date
)
tfile = os.path.join(test_dir, "footprint.fits")
<|code_end|>
using the current file's imports:
import os
import shutil
import unittest
import glob
import json
import numpy as np
import fitsio
import desimodel
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable, targets_in_tiles,
LocationsAvailable, load_target_file, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results,
read_assignment_fits_tile, run)
from fiberassign.stucksky import stuck_on_sky
from fiberassign.qa import qa_tiles
from fiberassign.vis import plot_tiles, plot_qa
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date,
sim_stuck_sky)
and any relevant context from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | sim_tiles(tfile) |
Given the code snippet: <|code_start|>
class TestAssign(unittest.TestCase):
def setUp(self):
self.saved_skybricks = os.environ.get('STUCKSKY_DIR')
if self.saved_skybricks is not None:
del os.environ['SKYBRICKS_DIR']
self.density_science = 5000
self.density_standards = 5000
self.density_sky = 100
self.density_suppsky = 5000
pass
def tearDown(self):
if self.saved_skybricks is not None:
os.environ['STUCKSKY_DIR'] = self.saved_skybricks
def test_io(self):
np.random.seed(123456789)
test_dir = test_subdir_create("assign_test_io")
input_mtl = os.path.join(test_dir, "mtl.fits")
input_std = os.path.join(test_dir, "standards.fits")
input_sky = os.path.join(test_dir, "sky.fits")
input_suppsky = os.path.join(test_dir, "suppsky.fits")
tgoff = 0
<|code_end|>
, generate the next line using the imports in this file:
import os
import shutil
import unittest
import glob
import json
import numpy as np
import fitsio
import desimodel
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable, targets_in_tiles,
LocationsAvailable, load_target_file, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results,
read_assignment_fits_tile, run)
from fiberassign.stucksky import stuck_on_sky
from fiberassign.qa import qa_tiles
from fiberassign.vis import plot_tiles, plot_qa
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date,
sim_stuck_sky)
and context (functions, classes, or occasionally code) from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | nscience = sim_targets( |
Continue the code snippet: <|code_start|> tgoff += nscience
nstd = sim_targets(
input_std,
TARGET_TYPE_STANDARD,
tgoff,
density=self.density_standards
)
tgoff += nstd
nsky = sim_targets(
input_sky,
TARGET_TYPE_SKY,
tgoff,
density=self.density_sky
)
tgoff += nsky
nsuppsky = sim_targets(
input_suppsky,
TARGET_TYPE_SUPPSKY,
tgoff,
density=self.density_suppsky
)
tgs = Targets()
tagalong = create_tagalong(plate_radec=False)
load_target_file(tgs, tagalong, input_mtl)
load_target_file(tgs, tagalong, input_std)
load_target_file(tgs, tagalong, input_sky)
load_target_file(tgs, tagalong, input_suppsky)
# Compute the targets available to each fiber for each tile.
<|code_end|>
. Use current file imports:
import os
import shutil
import unittest
import glob
import json
import numpy as np
import fitsio
import desimodel
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable, targets_in_tiles,
LocationsAvailable, load_target_file, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results,
read_assignment_fits_tile, run)
from fiberassign.stucksky import stuck_on_sky
from fiberassign.qa import qa_tiles
from fiberassign.vis import plot_tiles, plot_qa
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date,
sim_stuck_sky)
and context (classes, functions, or code) from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | fp, exclude, state = sim_focalplane(rundate=test_assign_date) |
Given the following code snippet before the placeholder: <|code_start|> tgoff,
density=self.density_science
)
tgoff += nscience
nstd = sim_targets(
input_std,
TARGET_TYPE_STANDARD,
tgoff,
density=self.density_standards
)
tgoff += nstd
nsky = sim_targets(
input_sky,
TARGET_TYPE_SKY,
tgoff,
density=self.density_sky
)
tgoff += nsky
nsuppsky = sim_targets(
input_suppsky,
TARGET_TYPE_SUPPSKY,
tgoff,
density=self.density_suppsky
)
# Simulate the tiles
tfile = os.path.join(test_dir, "footprint.fits")
sim_tiles(tfile)
# petal mapping
<|code_end|>
, predict the next line using imports from the current file:
import os
import shutil
import unittest
import glob
import json
import numpy as np
import fitsio
import desimodel
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable, targets_in_tiles,
LocationsAvailable, load_target_file, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results,
read_assignment_fits_tile, run)
from fiberassign.stucksky import stuck_on_sky
from fiberassign.qa import qa_tiles
from fiberassign.vis import plot_tiles, plot_qa
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date,
sim_stuck_sky)
and context including class names, function names, and sometimes code from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | rotator = petal_rotation(1, reverse=False) |
Given snippet: <|code_start|> tgoff += nscience
nstd = sim_targets(
input_std,
TARGET_TYPE_STANDARD,
tgoff,
density=self.density_standards
)
tgoff += nstd
nsky = sim_targets(
input_sky,
TARGET_TYPE_SKY,
tgoff,
density=self.density_sky
)
tgoff += nsky
nsuppsky = sim_targets(
input_suppsky,
TARGET_TYPE_SUPPSKY,
tgoff,
density=self.density_suppsky
)
tgs = Targets()
tagalong = create_tagalong(plate_radec=False)
load_target_file(tgs, tagalong, input_mtl)
load_target_file(tgs, tagalong, input_std)
load_target_file(tgs, tagalong, input_sky)
load_target_file(tgs, tagalong, input_suppsky)
# Compute the targets available to each fiber for each tile.
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import shutil
import unittest
import glob
import json
import numpy as np
import fitsio
import desimodel
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable, targets_in_tiles,
LocationsAvailable, load_target_file, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results,
read_assignment_fits_tile, run)
from fiberassign.stucksky import stuck_on_sky
from fiberassign.qa import qa_tiles
from fiberassign.vis import plot_tiles, plot_qa
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date,
sim_stuck_sky)
and context:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
which might include code, classes, or functions. Output only the next line. | fp, exclude, state = sim_focalplane(rundate=test_assign_date) |
Predict the next line after this snippet: <|code_start|> tgoff += nstd
nsky = sim_targets(
input_sky,
TARGET_TYPE_SKY,
tgoff,
density=self.density_sky
)
tgoff += nsky
nsuppsky = sim_targets(
input_suppsky,
TARGET_TYPE_SUPPSKY,
tgoff,
density=self.density_suppsky
)
tgs = Targets()
tagalong = create_tagalong(plate_radec=False)
load_target_file(tgs, tagalong, input_mtl)
load_target_file(tgs, tagalong, input_std)
load_target_file(tgs, tagalong, input_sky)
load_target_file(tgs, tagalong, input_suppsky)
# Read hardware properties
fp, exclude, state = sim_focalplane(rundate=test_assign_date)
hw = load_hardware(focalplane=(fp, exclude, state), rundate=test_assign_date)
tfile = os.path.join(test_dir, "footprint.fits")
sim_tiles(tfile)
tiles = load_tiles(tiles_file=tfile)
if do_stucksky:
<|code_end|>
using the current file's imports:
import os
import shutil
import unittest
import glob
import json
import numpy as np
import fitsio
import desimodel
from datetime import datetime
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable, targets_in_tiles,
LocationsAvailable, load_target_file, create_tagalong)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results,
read_assignment_fits_tile, run)
from fiberassign.stucksky import stuck_on_sky
from fiberassign.qa import qa_tiles
from fiberassign.vis import plot_tiles, plot_qa
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date,
sim_stuck_sky)
and any relevant context from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | sim_stuck_sky(test_dir, hw, tiles) |
Based on the snippet: <|code_start|>"""
Test fiberassign tile operations.
"""
class TestTiles(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_read(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import unittest
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles
from .simulate import (
test_subdir_create,
sim_tiles,
test_assign_date
)
from astropy.table import Table
from astropy.time import Time
and context (classes, functions, sometimes code) from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | test_dir = test_subdir_create("tiles_test_read") |
Here is a snippet: <|code_start|>"""
Test fiberassign tile operations.
"""
class TestTiles(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_read(self):
test_dir = test_subdir_create("tiles_test_read")
print('test_dir', test_dir)
hw = load_hardware()
tfile = os.path.join(test_dir, "footprint.fits")
sfile = os.path.join(test_dir, "footprint_keep.txt")
<|code_end|>
. Write the next line using the current file imports:
import os
import unittest
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles
from .simulate import (
test_subdir_create,
sim_tiles,
test_assign_date
)
from astropy.table import Table
from astropy.time import Time
and context from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
, which may include functions, classes, or code. Output only the next line. | sim_tiles(tfile, selectfile=sfile) |
Based on the snippet: <|code_start|> args = parser.parse_args()
else:
args = parser.parse_args(optlist)
# Check directory
if not os.path.isfile(args.qafile):
raise RuntimeError("Input file {} does not exist".format(args.qafile))
if args.outroot is None:
args.outroot = os.path.splitext(args.qafile)[0]
return args
def run_plot_qa(args):
"""Run QA plotting.
This uses the previously parsed options to read input data and make a
plot of the QA results.
Args:
args (namespace): The parsed arguments.
Returns:
None
"""
qadata = None
with open(args.qafile, "r") as f:
qadata = json.load(f)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import argparse
import json
from ..vis import plot_qa
and context (classes, functions, sometimes code) from other files:
# Path: py/fiberassign/vis.py
# def plot_qa(data, outroot, outformat="pdf", labels=False):
# """Make plots of QA data.
# """
# set_matplotlib_pdf_backend()
# # Imported here, to ensure that the backend has been set.
# from matplotlib.patches import Patch
#
# hw = load_hardware()
# tile_radius = hw.focalplane_radius_deg
#
# fontpt = 1
# linewidth = 0.1
#
# fig = plt.figure(figsize=(12, 10))
#
# plot_param = [
# ("Total Fibers Assigned Per Tile", ["assign_total"], 5000, 5),
# ("Standards Assigned Per Tile", ["assign_std"], 100, 2),
# ("Sky Assigned Per Tile", ["assign_sky", "assign_suppsky"], 400, 2),
# ]
#
# pindx = 1
# for title, key, desired, incr in plot_param:
# ax = fig.add_subplot(3, 1, pindx)
# ax.set_aspect("equal")
# xmin = 360.0
# xmax = 0.0
# ymin = 90.0
# ymax = -90.0
# for tid, props in data.items():
# xcent = props["tile_ra"]
# ycent = props["tile_dec"]
# if xcent > xmax:
# xmax = xcent
# if xcent < xmin:
# xmin = xcent
# if ycent > ymax:
# ymax = ycent
# if ycent < ymin:
# ymin = ycent
# keytot = np.sum([props[x] for x in key])
# color = plot_qa_tile_color(desired, keytot, incr)
# circ = plt.Circle((xcent, ycent), radius=tile_radius, fc="none",
# ec=color, linewidth=linewidth)
# ax.add_artist(circ)
# if labels:
# ax.text(xcent, ycent, "{}".format(tid),
# color=color, fontsize=fontpt,
# horizontalalignment='center',
# verticalalignment='center',
# bbox=None)
#
# margin = 1.1 * tile_radius
#
# xmin -= margin
# xmax += margin
# ymin -= margin
# ymax += margin
# if xmin < 0.0:
# xmin = 0.0
# if xmax > 360.0:
# xmax = 360.0
# if ymin < -90.0:
# ymin = -90.0
# if ymax > 90.0:
# ymax = 90.0
#
# ax.set_xlim(left=xmin, right=xmax)
# ax.set_ylim(bottom=ymin, top=ymax)
# ax.set_xlabel("RA (degrees)", fontsize="large")
# ax.set_ylabel("DEC (degrees)", fontsize="large")
# ax.set_title(title)
#
# c_high = plot_qa_tile_color(desired, desired+1, incr)
# c_exact = plot_qa_tile_color(desired, desired, incr)
# c_low_one = plot_qa_tile_color(desired, desired-incr, incr)
# c_low_two = plot_qa_tile_color(desired, desired-2*incr, incr)
# c_low = plot_qa_tile_color(desired, 0, incr)
#
# c_low_two_val = desired - incr
# c_low_val = desired - 2 * incr
#
# legend_elements = [
# Patch(facecolor=c_high, edgecolor="none",
# label="> {} assigned".format(desired)),
# Patch(facecolor=c_exact, edgecolor="none",
# label="Exactly {} assigned".format(desired)),
# Patch(facecolor=c_low_one, edgecolor="none",
# label="< {} assigned".format(desired)),
# Patch(facecolor=c_low_two, edgecolor="none",
# label="< {} assigned".format(c_low_two_val)),
# Patch(facecolor=c_low, edgecolor="none",
# label="< {} assigned".format(c_low_val)),
# ]
# ax.legend(handles=legend_elements, loc="best",
# fontsize="x-small")
# pindx += 1
#
# outfile = "{}.{}".format(outroot, outformat)
# plt.savefig(outfile, dpi=300, format="pdf")
#
# return
. Output only the next line. | plot_qa(qadata, args.outroot, labels=args.labels) |
Continue the code snippet: <|code_start|>"""
Test fiberassign target operations.
"""
class TestTargets(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_available(self):
<|code_end|>
. Use current file imports:
import os
import unittest
import numpy as np
from desitarget.targetmask import desi_mask, mws_mask
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, load_target_file,
desi_target_type, default_main_sciencemask,
default_main_skymask, default_main_stdmask,
default_main_suppskymask,
default_main_safemask,
default_main_excludemask,
default_main_gaia_stdmask,
Targets, TargetsAvailable,
LocationsAvailable, targets_in_tiles, create_tagalong)
from .simulate import (test_subdir_create, sim_tiles, sim_targets, test_assign_date)
and context (classes, functions, or code) from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | test_dir = test_subdir_create("targets_test_available") |
Predict the next line for this snippet: <|code_start|> test_dir = test_subdir_create("targets_test_available")
input_mtl = os.path.join(test_dir, "mtl.fits")
input_std = os.path.join(test_dir, "standards.fits")
input_sky = os.path.join(test_dir, "sky.fits")
input_suppsky = os.path.join(test_dir, "suppsky.fits")
tgoff = 0
nscience = sim_targets(input_mtl, TARGET_TYPE_SCIENCE, tgoff)
tgoff += nscience
nstd = sim_targets(input_std, TARGET_TYPE_STANDARD, tgoff)
tgoff += nstd
nsky = sim_targets(input_sky, TARGET_TYPE_SKY, tgoff)
tgoff += nsky
nsuppsky = sim_targets(input_suppsky, TARGET_TYPE_SUPPSKY, tgoff)
tgs = Targets()
tagalong = create_tagalong(plate_radec=False)
load_target_file(tgs, tagalong, input_mtl)
load_target_file(tgs, tagalong, input_std)
load_target_file(tgs, tagalong, input_sky)
load_target_file(tgs, tagalong, input_suppsky)
print(tgs)
# Test access
ids = tgs.ids()
tt = tgs.get(ids[0])
tt.subpriority = 0.99
# Compute the targets available to each fiber for each tile.
hw = load_hardware()
tfile = os.path.join(test_dir, "footprint.fits")
<|code_end|>
with the help of current file imports:
import os
import unittest
import numpy as np
from desitarget.targetmask import desi_mask, mws_mask
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, load_target_file,
desi_target_type, default_main_sciencemask,
default_main_skymask, default_main_stdmask,
default_main_suppskymask,
default_main_safemask,
default_main_excludemask,
default_main_gaia_stdmask,
Targets, TargetsAvailable,
LocationsAvailable, targets_in_tiles, create_tagalong)
from .simulate import (test_subdir_create, sim_tiles, sim_targets, test_assign_date)
and context from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
, which may contain function names, class names, or code. Output only the next line. | sim_tiles(tfile) |
Given snippet: <|code_start|>"""
Test fiberassign target operations.
"""
class TestTargets(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_available(self):
test_dir = test_subdir_create("targets_test_available")
input_mtl = os.path.join(test_dir, "mtl.fits")
input_std = os.path.join(test_dir, "standards.fits")
input_sky = os.path.join(test_dir, "sky.fits")
input_suppsky = os.path.join(test_dir, "suppsky.fits")
tgoff = 0
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import unittest
import numpy as np
from desitarget.targetmask import desi_mask, mws_mask
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, load_target_file,
desi_target_type, default_main_sciencemask,
default_main_skymask, default_main_stdmask,
default_main_suppskymask,
default_main_safemask,
default_main_excludemask,
default_main_gaia_stdmask,
Targets, TargetsAvailable,
LocationsAvailable, targets_in_tiles, create_tagalong)
from .simulate import (test_subdir_create, sim_tiles, sim_targets, test_assign_date)
and context:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
which might include code, classes, or functions. Output only the next line. | nscience = sim_targets(input_mtl, TARGET_TYPE_SCIENCE, tgoff) |
Here is a snippet: <|code_start|> if simple:
plot_positioner_simple(
ax, patrol_mm, lid, center, theta, theta_arm[lid],
phi, phi_arm[lid], color=col, linewidth=linewidth
)
else:
plot_positioner(
ax, patrol_mm, lid, center, shptheta, shpphi,
color=col, linewidth=linewidth
)
xend = xoff
yend = yoff
ax.text(xend, yend, "{}".format(inc),
color='k', fontsize=fontpt,
horizontalalignment='center',
verticalalignment='center',
bbox=dict(fc='w', ec='none', pad=1, alpha=1.0))
pxcent = center[0]
pycent = center[1]
half_width = 0.5 * width
half_height = 0.5 * height
ax.set_xlabel("Millimeters", fontsize="large")
ax.set_ylabel("Millimeters", fontsize="large")
ax.set_xlim([pxcent-half_width, pxcent+half_width])
ax.set_ylim([pycent-half_height, pycent+half_height])
outfile = os.path.join(dir, "test_plotpos_{}.pdf".format(suffix))
plt.savefig(outfile, dpi=300, format="pdf")
plt.close()
def test_plotpos(self):
<|code_end|>
. Write the next line using the current file imports:
import os
import unittest
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from fiberassign.hardware import load_hardware
from fiberassign.vis import (
plot_positioner,
plot_positioner_simple,
Shape,
set_matplotlib_pdf_backend
)
from .simulate import test_subdir_create, sim_focalplane, test_assign_date
and context from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
, which may include functions, classes, or code. Output only the next line. | test_dir = test_subdir_create("vis_test_plotpos") |
Given the code snippet: <|code_start|> color=color, linewidth=linewidth
)
outfile = os.path.join(dir, "test_plotfp_{}.pdf".format(suffix))
ax.set_xlabel("Millimeters", fontsize="large")
ax.set_ylabel("Millimeters", fontsize="large")
plt.savefig(outfile, dpi=300, format="pdf")
plt.close()
def test_plotfp(self):
test_dir = test_subdir_create("vis_test_plotfp")
time = test_assign_date
hw = load_hardware(rundate=time)
suffix = "{}_simple".format(time)
self._load_and_plotfp(hw, test_dir, suffix, simple=True)
suffix = "{}".format(time)
self._load_and_plotfp(hw, test_dir, suffix, simple=False)
# time = "2012-12-12T00:00:00"
# hw = load_hardware(rundate=time)
# suffix = "{}_simple".format(time)
# self._load_and_plotfp(hw, test_dir, suffix, simple=True)
# suffix = "{}".format(time)
# self._load_and_plotfp(hw, test_dir, suffix, simple=False)
return
def test_plot_fakefp(self):
test_dir = test_subdir_create("vis_test_fakefp")
time = test_assign_date
# Simulate a fake focalplane
<|code_end|>
, generate the next line using the imports in this file:
import os
import unittest
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from fiberassign.hardware import load_hardware
from fiberassign.vis import (
plot_positioner,
plot_positioner_simple,
Shape,
set_matplotlib_pdf_backend
)
from .simulate import test_subdir_create, sim_focalplane, test_assign_date
and context (functions, classes, or occasionally code) from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | fp, exclude, state = sim_focalplane(rundate=test_assign_date, fakepos=True) |
Given the code snippet: <|code_start|> plot_positioner_simple(
ax, patrol_mm, lid, center, theta, theta_arm[lid],
phi, phi_arm[lid], color=col, linewidth=linewidth
)
else:
plot_positioner(
ax, patrol_mm, lid, center, shptheta, shpphi,
color=col, linewidth=linewidth
)
xend = xoff
yend = yoff
ax.text(xend, yend, "{}".format(inc),
color='k', fontsize=fontpt,
horizontalalignment='center',
verticalalignment='center',
bbox=dict(fc='w', ec='none', pad=1, alpha=1.0))
pxcent = center[0]
pycent = center[1]
half_width = 0.5 * width
half_height = 0.5 * height
ax.set_xlabel("Millimeters", fontsize="large")
ax.set_ylabel("Millimeters", fontsize="large")
ax.set_xlim([pxcent-half_width, pxcent+half_width])
ax.set_ylim([pycent-half_height, pycent+half_height])
outfile = os.path.join(dir, "test_plotpos_{}.pdf".format(suffix))
plt.savefig(outfile, dpi=300, format="pdf")
plt.close()
def test_plotpos(self):
test_dir = test_subdir_create("vis_test_plotpos")
<|code_end|>
, generate the next line using the imports in this file:
import os
import unittest
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from fiberassign.hardware import load_hardware
from fiberassign.vis import (
plot_positioner,
plot_positioner_simple,
Shape,
set_matplotlib_pdf_backend
)
from .simulate import test_subdir_create, sim_focalplane, test_assign_date
and context (functions, classes, or occasionally code) from other files:
# Path: py/fiberassign/test/simulate.py
# def sim_data_dir():
# def test_subdir_create(name):
# def sim_science_fractions():
# def sim_focalplane(rundate=None, fakepos=False):
# def sim_stuck_sky(dirnm, hw, tiles):
# def radec2xyz(r, d):
# def sim_tiles(path, selectfile=None):
# def sim_targets(path, tgtype, tgoffset, density=5000.0, science_frac=None):
# def petal_rotation(npos, reverse=False):
# J = np.flatnonzero(r2 < maxr2)
. Output only the next line. | time = test_assign_date |
Using the snippet: <|code_start|>
def expand_closed_curve(xx, yy, margin):
'''
For the --margin-{pos,petal,gfa} options, we can add a buffer zone
around the positioner keep-out polygons. This function implements
the geometry to achieve this.
Given a RIGHT-HANDED closed polygon xx,yy and margin, returns new
x,y coordinates expanded by a margin of `margin`.
(By right-handed, I mean that the points are listed
counter-clockwise, and if you walk the boundary, the inside of the
shape is to the left; the expanded points will be to the right.)
If the order of the polygon is reversed, the "expanded" points
will actually be on the *inside* of the polygon. Setting the
margin negative counteracts this.
Note that we strictly require a closed curve. Collinear polygon
segments will cause problems!
'''
ex, ey = [],[]
# These are closed curves (last point = first point)
# (this isn't strictly required by the fundamental algorithm, but is assumed
# in the way we select previous and next points in the loop below.)
if (xx[0] != xx[-1]) or (yy[0] != yy[-1]):
<|code_end|>
, determine the next line of code. You have imports:
from datetime import datetime, timezone
from scipy.interpolate import interp1d
from .utils import Logger
from ._internal import (
Hardware,
FIBER_STATE_OK,
FIBER_STATE_UNASSIGNED,
FIBER_STATE_STUCK,
FIBER_STATE_BROKEN,
FIBER_STATE_RESTRICT,
Circle,
Segments,
Shape,
)
from astropy.time import Time
from desimeter.fiberassign import fiberassign_radec2xy_cs5, fiberassign_radec2xy_flat
from desimeter.fiberassign import fiberassign_cs5_xy2radec, fiberassign_flat_xy2radec
from astropy.time import Time
from desimeter.transform.pos2ptl import flat2ptl
from desimeter.transform.pos2ptl import ptl2flat
import numpy as np
import desimodel.io as dmio
and context (class names, function names, or code) available:
# Path: py/fiberassign/utils.py
# def option_list(opts):
# def assert_isoformat_utc(time_str):
# def get_date_cutoff(datetype, cutoff_case):
# def get_svn_version(svn_dir):
# def get_last_line(fn):
# def read_ecsv_keys(fn):
. Output only the next line. | log = Logger.get() |
Given snippet: <|code_start|> STRUCT(2 AS row_num, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' AS string_col),
STRUCT(3 AS row_num, 'γγγ«γ‘γ―' AS string_col) ]) AS `strings`
INNER JOIN
UNNEST([
STRUCT(1 AS row_num, CAST('00:00:00.000000' AS TIME) AS time_col),
STRUCT(2 AS row_num, CAST('09:08:07.654321' AS TIME) AS time_col),
STRUCT(3 AS row_num, CAST('23:59:59.999999' AS TIME) AS time_col) ]) AS `times`
INNER JOIN
UNNEST([
STRUCT(1 AS row_num, TIMESTAMP('1998-09-04 12:34:56.789101') AS timestamp_col),
STRUCT(2 AS row_num, TIMESTAMP('2011-10-01 00:01:02.345678') AS timestamp_col),
STRUCT(3 AS row_num, TIMESTAMP('2018-04-11 23:59:59.999999') AS timestamp_col) ]) AS `timestamps`
WHERE
`bools`.row_num = `dates`.row_num
AND `bools`.row_num = `bytes`.row_num
AND `bools`.row_num = `datetimes`.row_num
AND `bools`.row_num = `floats`.row_num
AND `bools`.row_num = `ints`.row_num
AND `bools`.row_num = `numerics`.row_num
AND `bools`.row_num = `strings`.row_num
AND `bools`.row_num = `times`.row_num
AND `bools`.row_num = `timestamps`.row_num
ORDER BY row_num ASC
""",
expected=pandas.DataFrame(
{
"row_num": pandas.Series([1, 2, 3], dtype="Int64"),
"bool_col": pandas.Series(
[True, False, True],
dtype="boolean"
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import collections
import datetime
import decimal
import random
import db_dtypes
import pandas
import pandas.testing
import pytest
from google.cloud import bigquery
from pandas_gbq.features import FEATURES
and context:
# Path: pandas_gbq/features.py
# FEATURES = Features()
which might include code, classes, or functions. Output only the next line. | if FEATURES.pandas_has_boolean_dtype |
Next line prediction: <|code_start|># Copyright (c) 2017 pandas-gbq Authors All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# -*- coding: utf-8 -*-
def test_get_credentials_private_key_raises_notimplementederror(monkeypatch):
private_key = json.dumps(
{
"private_key": "some_key",
"client_email": "service-account@example.com",
"project_id": "private-key-project",
}
)
with pytest.raises(NotImplementedError, match="private_key"):
<|code_end|>
. Use current file imports:
(import json
import pytest
import google.auth
import google.auth.credentials
import google.cloud.bigquery
import pydata_google_auth
import google.auth
import google.auth.credentials
import pydata_google_auth
import pydata_google_auth.cache
import pydata_google_auth.cache
from unittest import mock
from pandas_gbq import auth)
and context including class names, function names, or small code snippets from other files:
# Path: pandas_gbq/auth.py
# CREDENTIALS_CACHE_DIRNAME = "pandas_gbq"
# CREDENTIALS_CACHE_FILENAME = "bigquery_credentials.dat"
# SCOPES = ["https://www.googleapis.com/auth/bigquery"]
# CLIENT_ID = "725825577420-unm2gnkiprugilg743tkbig250f4sfsj.apps.googleusercontent.com"
# CLIENT_SECRET = "4hqze9yI8fxShls8eJWkeMdJ"
# def get_credentials(
# private_key=None, project_id=None, reauth=False, auth_local_webserver=False
# ):
# def get_credentials_cache(reauth):
. Output only the next line. | auth.get_credentials(private_key=private_key) |
Next line prediction: <|code_start|> dataframe = dataframe.assign(**{column_name: cast_column})
return dataframe
def load_parquet(
client: bigquery.Client,
dataframe: pandas.DataFrame,
destination_table_ref: bigquery.TableReference,
location: Optional[str],
schema: Optional[Dict[str, Any]],
billing_project: Optional[str] = None,
):
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = "WRITE_APPEND"
job_config.source_format = "PARQUET"
if schema is not None:
schema = pandas_gbq.schema.remove_policy_tags(schema)
job_config.schema = pandas_gbq.schema.to_google_cloud_bigquery(schema)
dataframe = cast_dataframe_for_parquet(dataframe, schema)
try:
client.load_table_from_dataframe(
dataframe,
destination_table_ref,
job_config=job_config,
location=location,
project=billing_project,
).result()
except pyarrow.lib.ArrowInvalid as exc:
<|code_end|>
. Use current file imports:
(import decimal
import io
import db_dtypes
import pandas
import pyarrow.lib
import pandas_gbq.schema
from typing import Any, Callable, Dict, List, Optional
from google.cloud import bigquery
from pandas_gbq import exceptions
from pandas_gbq.features import FEATURES)
and context including class names, function names, or small code snippets from other files:
# Path: pandas_gbq/exceptions.py
# class GenericGBQException(ValueError):
# class AccessDenied(ValueError):
# class ConversionError(GenericGBQException):
# class InvalidPrivateKeyFormat(ValueError):
# class PerformanceWarning(RuntimeWarning):
#
# Path: pandas_gbq/features.py
# FEATURES = Features()
. Output only the next line. | raise exceptions.ConversionError( |
Next line prediction: <|code_start|> project=billing_project,
).result()
finally:
chunk_buffer.close()
return load_csv(dataframe, chunksize, bq_schema, load_chunk)
def load_chunks(
client,
dataframe,
destination_table_ref,
chunksize=None,
schema=None,
location=None,
api_method="load_parquet",
billing_project: Optional[str] = None,
):
if api_method == "load_parquet":
load_parquet(
client,
dataframe,
destination_table_ref,
location,
schema,
billing_project=billing_project,
)
# TODO: yield progress depending on result() with timeout
return [0]
elif api_method == "load_csv":
<|code_end|>
. Use current file imports:
(import decimal
import io
import db_dtypes
import pandas
import pyarrow.lib
import pandas_gbq.schema
from typing import Any, Callable, Dict, List, Optional
from google.cloud import bigquery
from pandas_gbq import exceptions
from pandas_gbq.features import FEATURES)
and context including class names, function names, or small code snippets from other files:
# Path: pandas_gbq/exceptions.py
# class GenericGBQException(ValueError):
# class AccessDenied(ValueError):
# class ConversionError(GenericGBQException):
# class InvalidPrivateKeyFormat(ValueError):
# class PerformanceWarning(RuntimeWarning):
#
# Path: pandas_gbq/features.py
# FEATURES = Features()
. Output only the next line. | if FEATURES.bigquery_has_from_dataframe_with_csv: |
Using the snippet: <|code_start|> except self.http_error as ex:
self.process_http_error(ex)
def run_query(self, query, max_results=None, progress_bar_type=None, **kwargs):
job_config = {
"query": {
"useLegacySql": self.dialect
== "legacy"
# 'allowLargeResults', 'createDisposition',
# 'preserveNulls', destinationTable, useQueryCache
}
}
config = kwargs.get("configuration")
if config is not None:
job_config.update(config)
self._start_timer()
try:
logger.debug("Requesting query... ")
query_reply = self.client.query(
query,
job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
location=self.location,
project=self.project_id,
)
logger.debug("Query running...")
except (RefreshError, ValueError):
if self.private_key:
<|code_end|>
, determine the next line of code. You have imports:
import copy
import concurrent.futures
import logging
import re
import time
import typing
import warnings
import numpy as np
import pandas
import pandas_gbq.schema
import pandas_gbq.timestamp
import tqdm # noqa
import pkg_resources # noqa
import db_dtypes # noqa
import pydata_google_auth # noqa
import google.auth # noqa
import google.api_core.client_info
import pandas
import pandas
import db_dtypes
import db_dtypes
import pandas.api.types
from datetime import datetime
from typing import Any, Dict, Optional, Sequence, Union
from pandas_gbq.exceptions import (
AccessDenied,
GenericGBQException,
)
from pandas_gbq.features import FEATURES
from google_auth_oauthlib.flow import InstalledAppFlow # noqa
from google.cloud import bigquery # noqa
from google.api_core.exceptions import GoogleAPIError
from google.api_core.exceptions import ClientError
from pandas_gbq import auth
from google.cloud import bigquery
from google.cloud import bigquery
from google.auth.exceptions import RefreshError
from google.cloud import bigquery
from pandas_gbq import load
from google.api_core import exceptions as google_exceptions
from google.cloud import bigquery
from pandas_gbq import schema
from google.cloud.bigquery import DatasetReference
from google.cloud.bigquery import TableReference
from google.api_core.exceptions import NotFound
from google.cloud.bigquery import DatasetReference
from google.cloud.bigquery import Table
from google.cloud.bigquery import TableReference
from google.api_core.exceptions import NotFound
from google.cloud.bigquery import DatasetReference
from google.api_core.exceptions import NotFound
from google.cloud.bigquery import Dataset
and context (class names, function names, or code) available:
# Path: pandas_gbq/exceptions.py
# class AccessDenied(ValueError):
# """
# Raised when invalid credentials are provided, or tokens have expired.
# """
#
# class GenericGBQException(ValueError):
# """
# Raised when an unrecognized Google API Error occurs.
# """
#
# Path: pandas_gbq/features.py
# FEATURES = Features()
. Output only the next line. | raise AccessDenied("The service account credentials are not valid") |
Predict the next line for this snippet: <|code_start|>
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
@staticmethod
def sizeof_fmt(num, suffix="B"):
fmt = "%3.1f %s%s"
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(num) < 1024.0:
return fmt % (num, unit, suffix)
num /= 1024.0
return fmt % (num, "Y", suffix)
def get_client(self):
client_info = google.api_core.client_info.ClientInfo(
user_agent="pandas-{}".format(pandas.__version__)
)
return bigquery.Client(
project=self.project_id,
credentials=self.credentials,
client_info=client_info,
)
@staticmethod
def process_http_error(ex):
# See `BigQuery Troubleshooting Errors
# <https://cloud.google.com/bigquery/troubleshooting-errors>`__
if "cancelled" in ex.message:
raise QueryTimeout("Reason: {0}".format(ex))
<|code_end|>
with the help of current file imports:
import copy
import concurrent.futures
import logging
import re
import time
import typing
import warnings
import numpy as np
import pandas
import pandas_gbq.schema
import pandas_gbq.timestamp
import tqdm # noqa
import pkg_resources # noqa
import db_dtypes # noqa
import pydata_google_auth # noqa
import google.auth # noqa
import google.api_core.client_info
import pandas
import pandas
import db_dtypes
import db_dtypes
import pandas.api.types
from datetime import datetime
from typing import Any, Dict, Optional, Sequence, Union
from pandas_gbq.exceptions import (
AccessDenied,
GenericGBQException,
)
from pandas_gbq.features import FEATURES
from google_auth_oauthlib.flow import InstalledAppFlow # noqa
from google.cloud import bigquery # noqa
from google.api_core.exceptions import GoogleAPIError
from google.api_core.exceptions import ClientError
from pandas_gbq import auth
from google.cloud import bigquery
from google.cloud import bigquery
from google.auth.exceptions import RefreshError
from google.cloud import bigquery
from pandas_gbq import load
from google.api_core import exceptions as google_exceptions
from google.cloud import bigquery
from pandas_gbq import schema
from google.cloud.bigquery import DatasetReference
from google.cloud.bigquery import TableReference
from google.api_core.exceptions import NotFound
from google.cloud.bigquery import DatasetReference
from google.cloud.bigquery import Table
from google.cloud.bigquery import TableReference
from google.api_core.exceptions import NotFound
from google.cloud.bigquery import DatasetReference
from google.api_core.exceptions import NotFound
from google.cloud.bigquery import Dataset
and context from other files:
# Path: pandas_gbq/exceptions.py
# class AccessDenied(ValueError):
# """
# Raised when invalid credentials are provided, or tokens have expired.
# """
#
# class GenericGBQException(ValueError):
# """
# Raised when an unrecognized Google API Error occurs.
# """
#
# Path: pandas_gbq/features.py
# FEATURES = Features()
, which may contain function names, class names, or code. Output only the next line. | raise GenericGBQException("Reason: {0}".format(ex)) |
Given the following code snippet before the placeholder: <|code_start|> # Avoid attempting to download results from DML queries, which have no
# destination.
if query_reply.destination is None:
return pandas.DataFrame()
rows_iter = self.client.list_rows(
query_reply.destination, max_results=max_results
)
return self._download_results(
rows_iter,
max_results=max_results,
progress_bar_type=progress_bar_type,
user_dtypes=dtypes,
)
def _download_results(
self, rows_iter, max_results=None, progress_bar_type=None, user_dtypes=None,
):
# No results are desired, so don't bother downloading anything.
if max_results == 0:
return None
if user_dtypes is None:
user_dtypes = {}
create_bqstorage_client = self.use_bqstorage_api
if max_results is not None:
create_bqstorage_client = False
to_dataframe_kwargs = {}
<|code_end|>
, predict the next line using imports from the current file:
import copy
import concurrent.futures
import logging
import re
import time
import typing
import warnings
import numpy as np
import pandas
import pandas_gbq.schema
import pandas_gbq.timestamp
import tqdm # noqa
import pkg_resources # noqa
import db_dtypes # noqa
import pydata_google_auth # noqa
import google.auth # noqa
import google.api_core.client_info
import pandas
import pandas
import db_dtypes
import db_dtypes
import pandas.api.types
from datetime import datetime
from typing import Any, Dict, Optional, Sequence, Union
from pandas_gbq.exceptions import (
AccessDenied,
GenericGBQException,
)
from pandas_gbq.features import FEATURES
from google_auth_oauthlib.flow import InstalledAppFlow # noqa
from google.cloud import bigquery # noqa
from google.api_core.exceptions import GoogleAPIError
from google.api_core.exceptions import ClientError
from pandas_gbq import auth
from google.cloud import bigquery
from google.cloud import bigquery
from google.auth.exceptions import RefreshError
from google.cloud import bigquery
from pandas_gbq import load
from google.api_core import exceptions as google_exceptions
from google.cloud import bigquery
from pandas_gbq import schema
from google.cloud.bigquery import DatasetReference
from google.cloud.bigquery import TableReference
from google.api_core.exceptions import NotFound
from google.cloud.bigquery import DatasetReference
from google.cloud.bigquery import Table
from google.cloud.bigquery import TableReference
from google.api_core.exceptions import NotFound
from google.cloud.bigquery import DatasetReference
from google.api_core.exceptions import NotFound
from google.cloud.bigquery import Dataset
and context including class names, function names, and sometimes code from other files:
# Path: pandas_gbq/exceptions.py
# class AccessDenied(ValueError):
# """
# Raised when invalid credentials are provided, or tokens have expired.
# """
#
# class GenericGBQException(ValueError):
# """
# Raised when an unrecognized Google API Error occurs.
# """
#
# Path: pandas_gbq/features.py
# FEATURES = Features()
. Output only the next line. | if FEATURES.bigquery_needs_date_as_object: |
Based on the snippet: <|code_start|># Copyright (c) 2017 pandas-gbq Authors All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""System tests for fetching Google BigQuery credentials."""
IS_RUNNING_ON_CI = "CIRCLE_BUILD_NUM" in os.environ or "KOKORO_BUILD_ID" in os.environ
def mock_default_credentials(scopes=None, request=None):
return (None, None)
def test_should_be_able_to_get_valid_credentials(project_id):
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import pytest
import google.auth
import google.auth
import google.auth
from unittest import mock
from pandas_gbq import auth
from google.auth.credentials import Credentials
and context (classes, functions, sometimes code) from other files:
# Path: pandas_gbq/auth.py
# CREDENTIALS_CACHE_DIRNAME = "pandas_gbq"
# CREDENTIALS_CACHE_FILENAME = "bigquery_credentials.dat"
# SCOPES = ["https://www.googleapis.com/auth/bigquery"]
# CLIENT_ID = "725825577420-unm2gnkiprugilg743tkbig250f4sfsj.apps.googleusercontent.com"
# CLIENT_SECRET = "4hqze9yI8fxShls8eJWkeMdJ"
# def get_credentials(
# private_key=None, project_id=None, reauth=False, auth_local_webserver=False
# ):
# def get_credentials_cache(reauth):
. Output only the next line. | credentials, _ = auth.get_credentials(project_id=project_id) |
Here is a snippet: <|code_start|> with pytest.raises(ValueError, match="Got unexpected api_method:"):
load.load_chunks(None, None, None, api_method="not_a_thing")
def test_load_parquet_allows_client_to_generate_schema(mock_bigquery_client):
df = pandas.DataFrame({"int_col": [1, 2, 3]})
destination = google.cloud.bigquery.TableReference.from_string(
"my-project.my_dataset.my_table"
)
load.load_parquet(mock_bigquery_client, df, destination, None, None)
mock_load = mock_bigquery_client.load_table_from_dataframe
assert mock_load.called
_, kwargs = mock_load.call_args
assert "job_config" in kwargs
assert kwargs["job_config"].schema is None
def test_load_parquet_with_bad_conversion(mock_bigquery_client):
mock_bigquery_client.load_table_from_dataframe.side_effect = (
pyarrow.lib.ArrowInvalid()
)
df = pandas.DataFrame({"int_col": [1, 2, 3]})
destination = google.cloud.bigquery.TableReference.from_string(
"my-project.my_dataset.my_table"
)
<|code_end|>
. Write the next line using the current file imports:
import datetime
import decimal
import textwrap
import db_dtypes
import numpy
import pandas
import pandas.testing
import pytest
import google.cloud.bigquery
import google.cloud.bigquery
import google.cloud.bigquery
import google.cloud.bigquery
import google.cloud.bigquery
import pyarrow
from io import StringIO
from unittest import mock
from pandas_gbq import exceptions
from pandas_gbq.features import FEATURES
from pandas_gbq import load
and context from other files:
# Path: pandas_gbq/exceptions.py
# class GenericGBQException(ValueError):
# class AccessDenied(ValueError):
# class ConversionError(GenericGBQException):
# class InvalidPrivateKeyFormat(ValueError):
# class PerformanceWarning(RuntimeWarning):
#
# Path: pandas_gbq/features.py
# FEATURES = Features()
#
# Path: pandas_gbq/load.py
# def encode_chunk(dataframe):
# def split_dataframe(dataframe, chunksize=None):
# def cast_dataframe_for_parquet(
# dataframe: pandas.DataFrame, schema: Optional[Dict[str, Any]],
# ) -> pandas.DataFrame:
# def load_parquet(
# client: bigquery.Client,
# dataframe: pandas.DataFrame,
# destination_table_ref: bigquery.TableReference,
# location: Optional[str],
# schema: Optional[Dict[str, Any]],
# billing_project: Optional[str] = None,
# ):
# def load_csv(
# dataframe: pandas.DataFrame,
# chunksize: Optional[int],
# bq_schema: Optional[List[bigquery.SchemaField]],
# load_chunk: Callable,
# ):
# def load_csv_from_dataframe(
# client: bigquery.Client,
# dataframe: pandas.DataFrame,
# destination_table_ref: bigquery.TableReference,
# location: Optional[str],
# chunksize: Optional[int],
# schema: Optional[Dict[str, Any]],
# billing_project: Optional[str] = None,
# ):
# def load_chunk(chunk, job_config):
# def load_csv_from_file(
# client: bigquery.Client,
# dataframe: pandas.DataFrame,
# destination_table_ref: bigquery.TableReference,
# location: Optional[str],
# chunksize: Optional[int],
# schema: Optional[Dict[str, Any]],
# billing_project: Optional[str] = None,
# ):
# def load_chunk(chunk, job_config):
# def load_chunks(
# client,
# dataframe,
# destination_table_ref,
# chunksize=None,
# schema=None,
# location=None,
# api_method="load_parquet",
# billing_project: Optional[str] = None,
# ):
, which may include functions, classes, or code. Output only the next line. | with pytest.raises(exceptions.ConversionError): |
Given the following code snippet before the placeholder: <|code_start|># Copyright (c) 2017 pandas-gbq Authors All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# -*- coding: utf-8 -*-
def load_method(bqclient, api_method):
if not FEATURES.bigquery_has_from_dataframe_with_csv and api_method == "load_csv":
return bqclient.load_table_from_file
return bqclient.load_table_from_dataframe
def test_encode_chunk_with_unicode():
"""Test that a dataframe containing unicode can be encoded as a file.
See: https://github.com/pydata/pandas-gbq/issues/106
"""
df = pandas.DataFrame(
numpy.random.randn(6, 4), index=range(6), columns=list("ABCD")
)
df["s"] = u"δΏ‘η¨ε‘"
<|code_end|>
, predict the next line using imports from the current file:
import datetime
import decimal
import textwrap
import db_dtypes
import numpy
import pandas
import pandas.testing
import pytest
import google.cloud.bigquery
import google.cloud.bigquery
import google.cloud.bigquery
import google.cloud.bigquery
import google.cloud.bigquery
import pyarrow
from io import StringIO
from unittest import mock
from pandas_gbq import exceptions
from pandas_gbq.features import FEATURES
from pandas_gbq import load
and context including class names, function names, and sometimes code from other files:
# Path: pandas_gbq/exceptions.py
# class GenericGBQException(ValueError):
# class AccessDenied(ValueError):
# class ConversionError(GenericGBQException):
# class InvalidPrivateKeyFormat(ValueError):
# class PerformanceWarning(RuntimeWarning):
#
# Path: pandas_gbq/features.py
# FEATURES = Features()
#
# Path: pandas_gbq/load.py
# def encode_chunk(dataframe):
# def split_dataframe(dataframe, chunksize=None):
# def cast_dataframe_for_parquet(
# dataframe: pandas.DataFrame, schema: Optional[Dict[str, Any]],
# ) -> pandas.DataFrame:
# def load_parquet(
# client: bigquery.Client,
# dataframe: pandas.DataFrame,
# destination_table_ref: bigquery.TableReference,
# location: Optional[str],
# schema: Optional[Dict[str, Any]],
# billing_project: Optional[str] = None,
# ):
# def load_csv(
# dataframe: pandas.DataFrame,
# chunksize: Optional[int],
# bq_schema: Optional[List[bigquery.SchemaField]],
# load_chunk: Callable,
# ):
# def load_csv_from_dataframe(
# client: bigquery.Client,
# dataframe: pandas.DataFrame,
# destination_table_ref: bigquery.TableReference,
# location: Optional[str],
# chunksize: Optional[int],
# schema: Optional[Dict[str, Any]],
# billing_project: Optional[str] = None,
# ):
# def load_chunk(chunk, job_config):
# def load_csv_from_file(
# client: bigquery.Client,
# dataframe: pandas.DataFrame,
# destination_table_ref: bigquery.TableReference,
# location: Optional[str],
# chunksize: Optional[int],
# schema: Optional[Dict[str, Any]],
# billing_project: Optional[str] = None,
# ):
# def load_chunk(chunk, job_config):
# def load_chunks(
# client,
# dataframe,
# destination_table_ref,
# chunksize=None,
# schema=None,
# location=None,
# api_method="load_parquet",
# billing_project: Optional[str] = None,
# ):
. Output only the next line. | csv_buffer = load.encode_chunk(df) |
Predict the next line for this snippet: <|code_start|>"""
KAIST λ¨μΌμΈμ¦μλΉμ€ μ΄λλ―Ό νμ΄μ§ μ€μ .
Django λ΄μ₯ μΈμ¦ 체κ³μ μ΄λλ―Ό νμ΄μ§μ λ³ν©νμ¬ λ¨μΌν μΈμ¦ μμ€ν
κ΄λ¦¬λ‘
ꡬνν©λλ€.
"""
def portal_info_protection(func):
"""
μ¬μ©μ 리μ€νΈμμ μ 보μ 곡 λ―Έλμ μ μ μ ν¬ν κ³μ μ 보λ₯Ό μΆλ ₯νμ§ μλλ‘
μ²λ¦¬νλ λ°μ½λ μ΄ν°.
"""
def decorator(self, e):
if not e.portal_info.is_signed_up:
raise ObjectDoesNotExist
return func(self, e)
return decorator
class PortalInfoInline(admin.StackedInline):
"""
μ¬μ©μ μΈμ€ν΄μ€ μμ μ ν¬ν κ³μ μ 보 μΆλ ₯μ μν μΈλΌμΈ.
"""
<|code_end|>
with the help of current file imports:
from django import forms
from django.contrib import admin
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from .models import PortalInfo
and context from other files:
# Path: apps/ksso/models.py
# class PortalInfo(models.Model):
# """
# μ¬μ©μμ ν¬ν κ³μ μ 보λ₯Ό μ μ₯νλ λͺ¨λΈ.
# """
#
# user = models.OneToOneField(
# 'auth.User',
# primary_key=True, related_name='portal_info',
# verbose_name=_("μ μ μΈμ€ν΄μ€"))
#
# kaist_uid = models.CharField(
# _("KAIST UID"),
# max_length=128, unique=True)
#
# ku_kname = models.CharField(
# _("μ΄λ¦"),
# max_length=128, blank=True)
#
# ku_acad_prog = models.CharField(
# _("κ³Όμ "),
# max_length=32, blank=True)
#
# ku_std_no = models.CharField(
# _("νλ²"),
# max_length=32, blank=True)
#
# ku_psft_user_status_kor = models.CharField(
# _("νμ μν"),
# max_length=32, blank=True)
#
# ku_born_date = models.CharField(
# _("μλ
μμΌ"),
# max_length=32, blank=True)
#
# ku_sex = models.CharField(
# _("μ±λ³"),
# max_length=32, blank=True)
#
# ou = models.CharField(
# _("νκ³Ό"),
# max_length=32, blank=True)
#
# mail = models.CharField(
# _("λ©μΌμ£Όμ"),
# max_length=32, blank=True)
#
# mobile = models.CharField(
# _("μ νλ²νΈ"),
# max_length=32, blank=True)
#
# is_signed_up = models.BooleanField(
# _("μ 보μ 곡 λμμ¬λΆ"),
# default=False,
# help_text=_(
# "μ 보μ 곡μ λ°λνλ©΄ κ³μ μμ μ²λ¦¬κ° λλ, μμ§ λμμ¬λΆλ₯Ό "
# "μ ννμ§ μμ μ΅μ΄κ°μ
μμ κ²½μ° μμ¬νν μκΉμ§ μ λ³΄κ° "
# "μμμ μ₯λ©λλ€. μ΄ νΉμκ²½μ°μλ μ 보λ₯Ό νμ©νμ§ μμμΌ ν©λλ€."))
#
# #: μ 보μ 곡 λμν μ¬μ©μλ§ λ€λ£¨λ 컀μ€ν
λ§€λμ .
# objects = PortalInfoManager()
#
# #: λͺ¨λ μ¬μ©μλ₯Ό λ€λ£¨λ κΈ°μ‘΄ λ§€λμ .
# all_objects = models.Manager()
#
# def __str__(self):
# return self.ku_kname
#
# @classmethod
# def create(cls, user, kaist_uid):
# """
# ν΄λμ€ μΈμ€ν΄μ€ μμ± λ©μλ.
#
# μ¬μ©μ μΈμ€ν΄μ€μ μ¬μ©μ UIDλ₯Ό μ
λ ₯λ°μ΅λλ€.
# """
# return cls(user=user, kaist_uid=kaist_uid)
#
# @property
# def enter_year(self):
# if self.ku_std_no and len(self.ku_std_no) == 8:
# return self.ku_std_no[2:4]
# return None
, which may contain function names, class names, or code. Output only the next line. | model = PortalInfo |
Given the following code snippet before the placeholder: <|code_start|>
class MainView(TemplateView):
template_name = 'main.html'
def get_context_data(self, **kwargs):
context = super(MainView, self).get_context_data(**kwargs)
<|code_end|>
, predict the next line using imports from the current file:
from django.views.generic import TemplateView
from ..util import vote_available
from ..models.user import Freshman
and context including class names, function names, and sometimes code from other files:
# Path: apps/ot/util.py
# def vote_available(user):
# if is_vote_period():
# return is_freshman(user)
# else:
# return is_tester(user)
#
# Path: apps/ot/models/user.py
# class Freshman(models.Model):
# def __str__(self):
# return self.user.username
#
# user = models.OneToOneField(User, related_name='freshman')
# voted_clubs = models.ManyToManyField(Club, related_name='votes')
#
# sizes = (
# ('S', 'S'),
# ('M', 'M'),
# ('L', 'L'),
# ('XL', 'XL'),
# ('2XL', '2XL'),
# ('3XL', '3XL'),
# )
# tsize = models.CharField(null=False, max_length=5, choices=sizes)
#
# BAND_VOTE_LIMIT = 4
# NON_BAND_VOTE_LIMIT = 3
#
# def vote_limit_exceeded(self, is_band):
# return self.voted_clubs.filter(is_band=is_band).count() > 5
. Output only the next line. | context['available'] = vote_available(self.request.user) |
Using the snippet: <|code_start|>
class MainView(TemplateView):
template_name = 'main.html'
def get_context_data(self, **kwargs):
context = super(MainView, self).get_context_data(**kwargs)
context['available'] = vote_available(self.request.user)
if hasattr(self.request.user, 'freshman'):
voted_clubs = self.request.user.freshman.voted_clubs
context['voted_band'] = voted_clubs.filter(is_band=True)
context['voted_non_band'] = voted_clubs.filter(is_band=False)
else:
context['voted_band'] = []
context['voted_non_band'] = []
<|code_end|>
, determine the next line of code. You have imports:
from django.views.generic import TemplateView
from ..util import vote_available
from ..models.user import Freshman
and context (class names, function names, or code) available:
# Path: apps/ot/util.py
# def vote_available(user):
# if is_vote_period():
# return is_freshman(user)
# else:
# return is_tester(user)
#
# Path: apps/ot/models/user.py
# class Freshman(models.Model):
# def __str__(self):
# return self.user.username
#
# user = models.OneToOneField(User, related_name='freshman')
# voted_clubs = models.ManyToManyField(Club, related_name='votes')
#
# sizes = (
# ('S', 'S'),
# ('M', 'M'),
# ('L', 'L'),
# ('XL', 'XL'),
# ('2XL', '2XL'),
# ('3XL', '3XL'),
# )
# tsize = models.CharField(null=False, max_length=5, choices=sizes)
#
# BAND_VOTE_LIMIT = 4
# NON_BAND_VOTE_LIMIT = 3
#
# def vote_limit_exceeded(self, is_band):
# return self.voted_clubs.filter(is_band=is_band).count() > 5
. Output only the next line. | context['band_limit'] = Freshman.BAND_VOTE_LIMIT |
Using the snippet: <|code_start|> fields = '__all__'
class PostSerializer(serializers.ModelSerializer):
attachedfile_set = AttachedFileSerializer(many=True, read_only=True)
is_permitted_to_read = serializers.SerializerMethodField()
is_permitted_to_edit = serializers.SerializerMethodField()
is_permitted_to_delete = serializers.SerializerMethodField()
absolute_url = serializers.SerializerMethodField()
def get_request_user(self):
user = None
request = self.context.get("request")
if request and hasattr(request, "user"):
user = request.user
return user
def get_is_permitted_to_read(self, obj):
return obj.is_permitted(self.get_request_user(), PERM_READ)
def get_is_permitted_to_edit(self, obj):
return obj.is_permitted(self.get_request_user(), PERM_EDIT)
def get_is_permitted_to_delete(self, obj):
return obj.is_permitted(self.get_request_user(), PERM_DELETE)
def get_absolute_url(self, obj):
return obj.get_absolute_url()
class Meta:
<|code_end|>
, determine the next line of code. You have imports:
from rest_framework import serializers
from apps.board.models import Post, AttachedFile
from apps.manager.constants import *
and context (class names, function names, or code) available:
# Path: apps/board/models.py
# class Post(BasePost):
# """
# κ²μκΈμ ꡬνν λͺ¨λΈ.
# """
#
# board = models.ForeignKey(
# Board,
# verbose_name=_("λ±λ‘ κ²μν"))
#
# board_tab = models.ManyToManyField(
# BoardTab,
# blank=True,
# verbose_name=_("λ±λ‘ ν"))
#
# title = models.CharField(
# _("μ λͺ©"),
# max_length=128)
#
# tag = models.ForeignKey(
# Tag,
# verbose_name=_("νκ·Έ"),
# null=True, blank=True)
#
# is_notice = models.BooleanField(
# _("곡μ§κΈ"),
# default=False)
#
# class Meta:
# verbose_name = _('ν¬μ€νΈ')
# verbose_name_plural = _('ν¬μ€νΈ(λ€)')
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# # return os.path.join(self.board.get_absolute_url(), str(self.id))
# return self.board.get_absolute_url()+'/'+str(self.id)
#
# def get_first_tab(self):
# return self.board_tab.all().first()
#
# def pre_permitted(self, user, permission):
# """
# κ²μκΈ κΆν νμΈ μ΄μ μ κ²μν μ κ·ΌκΆνμ νμΈνλ λ©μλ.
# """
# return self.board.is_permitted(user, PERM_ACCESS)
#
# def post_permitted(self, user, permission):
# """
# κ²μκΈ κΆν νμΈ μ΄νμ λμΌ κΆνμ΄ κ²μνμλ μλμ§ νμΈνλ λ©μλ.
# """
# return self.board.is_permitted(user, permission)
#
# class AttachedFile(models.Model):
# """
# ν¬μ€νΈ, λκΈ μ²¨λΆνμΌμ ꡬνν λͺ¨λΈ.
# """
#
# post = models.ForeignKey(
# BasePost,
# verbose_name=_("μ°κ²°λ ν¬μ€νΈ"))
#
# file = models.FileField(
# _("첨λΆνμΌ"),
# upload_to=get_upload_path)
#
# class Meta:
# verbose_name = _('첨λΆνμΌ')
# verbose_name_plural = _('첨λΆνμΌ(λ€)')
#
# def __str__(self):
# return os.path.basename(self.file.name)
#
# def get_absolute_url(self):
# return os.path.join(MEDIA_URL, self.file.name)
#
# def get_file_size(self):
# """
# νμΌ ν¬κΈ°λ₯Ό λ°ννλ λ©μλ.
# """
# try:
# return self.file.size
# except:
# return 0
. Output only the next line. | model = Post |
Predict the next line for this snippet: <|code_start|>
urlpatterns = [
url(r'^$', MainView.as_view(), name='main'),
url(r'^result/$', ResultView.as_view()),
<|code_end|>
with the help of current file imports:
from django.conf.urls import url
from .views.main import MainView
from .views.club import ClubDetailView, ClubListView
from .views.user import TSizeView, ResultView
and context from other files:
# Path: apps/ot/views/main.py
# class MainView(TemplateView):
# template_name = 'main.html'
#
# def get_context_data(self, **kwargs):
# context = super(MainView, self).get_context_data(**kwargs)
#
# context['available'] = vote_available(self.request.user)
#
# if hasattr(self.request.user, 'freshman'):
# voted_clubs = self.request.user.freshman.voted_clubs
# context['voted_band'] = voted_clubs.filter(is_band=True)
# context['voted_non_band'] = voted_clubs.filter(is_band=False)
# else:
# context['voted_band'] = []
# context['voted_non_band'] = []
#
# context['band_limit'] = Freshman.BAND_VOTE_LIMIT
# context['non_band_limit'] = Freshman.NON_BAND_VOTE_LIMIT
# context['total_limit'] = Freshman.BAND_VOTE_LIMIT + Freshman.NON_BAND_VOTE_LIMIT
#
# return context
#
# def post(self, request, *args, **kwargs):
# pass
#
# Path: apps/ot/views/club.py
# class ClubDetailView(DetailView):
# template_name = 'club_detail.html'
# model = Club
#
# def get_context_data(self, **kwargs):
# context = super(ClubDetailView, self).get_context_data(**kwargs)
#
# user = self.request.user
# available = vote_available(user)
# context['available'] = available
#
# if hasattr(user, 'freshman'):
# club = self.object
#
# # μ μ κ° μ΄λ―Έ ν΄λΉ λμ리μ ν¬ννλμ§ μ¬λΆ
# context['voted'] = bool(user.freshman.voted_clubs.filter(pk=club.pk))
# # μ μ κ° ν΄λΉ λμ리 κ³μ΄(λ°΄λ/λΉλ°΄λ) μ΅λ ν¬νμλ₯Ό λμλμ§ μ¬λΆ
# context['exceeded'] = user.freshman.vote_limit_exceeded(club.is_band)
#
# return context
#
# @method_decorator(login_required())
# def post(self, request, *args, **kwargs):
# """
# νμ¬ λμ리μ ν¬ν
# """
# if vote_available(request.user):
# club = self.get_object()
# freshman = request.user.freshman
#
# voted = freshman.voted_clubs.filter(pk=club.pk)
#
# if voted:
# freshman.voted_clubs.remove(club)
# freshman.save()
# elif not freshman.vote_limit_exceeded(club.is_band):
# freshman.voted_clubs.add(club)
# freshman.save()
#
# return redirect('.')
#
# class ClubListView(ListView):
# template_name = 'club_list.html'
# context_object_name = 'clubs'
#
# def get_queryset(self):
# if 'is_band' in self.request.GET:
# if self.request.GET['is_band'] == '0':
# queryset = Club.objects.filter(is_band=False)
# elif self.request.GET['is_band'] == '1':
# queryset = Club.objects.filter(is_band=True)
# else:
# raise Http404
# else:
# queryset = Club.objects.all()
#
# sorted_queryset = list()
#
# for pk in [20, 18, 15, 17, 9, 21, 16, 8, 23, 13, 11, 10, 14]:
# sorted_queryset.append(queryset.get(pk=pk))
#
# return sorted_queryset
#
# # return queryset.order_by('?')
#
# Path: apps/ot/views/user.py
# class TSizeView(FormView):
# template_name = 'tsize.html'
# form_class = TSizeForm
# success_url = '/ot/'
#
# def form_valid(self, form):
# if hasattr(self.request.user, 'freshman'):
# self.request.user.freshman.tsize = form.cleaned_data['tsize']
# self.request.user.freshman.save()
# else:
# freshman = form.save(commit=False)
# freshman.user = self.request.user
# freshman.save()
#
# return super(TSizeView, self).form_valid(form)
#
# def get_initial(self):
# initial = super(TSizeView, self).get_initial()
#
# if hasattr(self.request.user, 'freshman'):
# initial['tsize'] = self.request.user.freshman.tsize
#
# return initial
#
# class ResultView(TemplateView):
# template_name = 'result.html'
#
# def get_context_data(self, **kwargs):
# context = super(ResultView, self).get_context_data(**kwargs)
#
# context['clubs'] = Club.objects.all().annotate(cnt=Count('votes')).order_by('-cnt')
# context['bands'] = Club.objects.filter(is_band=True).annotate(cnt=Count('votes')).order_by('-cnt')
# context['non_bands'] = Club.objects.filter(is_band=False).annotate(cnt=Count('votes')).order_by('-cnt')
#
# context['cnt_voted'] = Freshman.objects.all().count()
#
# # Personal T shirts size
# freshmen = Freshman.objects.all()
#
# size_list = ""
# for freshman in freshmen:
# info = freshman.user.portal_info
# size_list += "<p>%s,%s,%s</p>" % (info.ku_kname, info.ku_std_no, freshman.tsize)
# context['size_list'] = size_list
#
# return context
, which may contain function names, class names, or code. Output only the next line. | url(r'^tshirt/$', TSizeView.as_view(), name='t_size'), |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.