Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given the code snippet: <|code_start|>
# check config
if not config:
config = DefaultConfig()
if config.__class__ is type:
err = 'Config must be an object, got class instead.'
raise x.BootstrapException(err)
# check flask params
flask_params = flask_params or dict()
flask_params['import_name'] = name
# configure static assets
if config.get('FLASK_STATIC_URL') is not None:
flask_params['static_url_path'] = config.get('FLASK_STATIC_URL')
if config.get('FLASK_STATIC_PATH') is not None:
flask_params['static_folder'] = config.get('FLASK_STATIC_PATH')
# create an app with default config
app = Flask(**flask_params)
app.config.from_object(DefaultConfig())
# apply custom config
if config:
app.config.from_object(config)
# enable csrf protection
CSRFProtect(app)
# register error handler
<|code_end|>
, generate the next line using the imports in this file:
import os
from os import path
from flask import Flask
from flask import g
from flask import request
from werkzeug.utils import import_string
from werkzeug.utils import ImportStringError
from jinja2 import ChoiceLoader, FileSystemLoader
from flask_wtf import CSRFProtect
from boiler.config import DefaultConfig
from boiler.timer import restart_timer
from boiler.errors import register_error_handler
from boiler.jinja import functions as jinja_functions
from boiler import exceptions as x
from boiler.feature.routing import routing_feature
from boiler.feature.mail import mail_feature
from boiler.feature.orm import orm_feature
from boiler.feature.logging import logging_feature
from boiler.feature.localization import localization_feature
and context (functions, classes, or occasionally code) from other files:
# Path: boiler/config.py
# class DefaultConfig(Config):
# """
# Default project configuration
# Sets up defaults used and/or overridden in environments and deployments
# """
# ENV = 'production'
#
# SERVER_NAME = None
#
# # secret key
# SECRET_KEY = os.getenv('APP_SECRET_KEY')
#
# TIME_RESTARTS = False
# TESTING = False
# DEBUG = False
# DEBUG_TB_ENABLED = False
# DEBUG_TB_PROFILER_ENABLED = False
# DEBUG_TB_INTERCEPT_REDIRECTS = False
#
# # where built-in server and url_for look for static files (None for default)
# FLASK_STATIC_URL = None
# FLASK_STATIC_PATH = None
#
# # asset helper settings (server must be capable of serving these files)
# ASSETS_VERSION = None
# ASSETS_PATH = None # None falls back to url_for('static')
#
# # do not expose our urls on 404s
# ERROR_404_HELP = False
#
# # uploads
# MAX_CONTENT_LENGTH = 1024 * 1024 * 16 # megabytes
#
# # database
# # 'mysql://user:password@server/db?charset=utf8mb4'
# # 'mysql+pymysql://user:password@server/db?charset=utf8mb4'
# # 'mysql+mysqlconnector://user:password@host:3306/database?charset=utf8mb4'
# SQLALCHEMY_ECHO = False
# SQLALCHEMY_TRACK_MODIFICATIONS = False
# MIGRATIONS_PATH = os.path.join(os.getcwd(), 'migrations')
# SQLALCHEMY_DATABASE_URI = os.getenv('APP_DATABASE_URI')
# TEST_DB_PATH = os.path.join(
# os.getcwd(), 'var', 'data', 'test-db', 'sqlite.db'
# )
#
# # mail server settings
# MAIL_DEBUG = False
# MAIL_SERVER = 'smtp.gmail.com'
# MAIL_PORT = 587
# MAIL_USE_TLS = True
# MAIL_USE_SSL = False
# MAIL_USERNAME = None
# MAIL_PASSWORD = None
# MAIL_DEFAULT_SENDER = ('Webapp Mailer', 'mygmail@gmail.com')
#
# # logging
# ADMINS = ['you@domain']
# LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS = False
#
# # localization (babel)
# DEFAULT_LOCALE = 'en_GB'
# DEFAULT_TIMEZONE = 'UTC'
#
# # csrf protection
# WTF_CSRF_ENABLED = True
#
# # recaptcha
# RECAPTCHA_PUBLIC_KEY = os.getenv('APP_RECAPTCHA_PUBLIC_KEY')
# RECAPTCHA_PRIVATE_KEY = os.getenv('APP_RECAPTCHA_PRIVATE_KEY')
#
# Path: boiler/timer/restart_timer.py
# def time_restarts(data_path):
#
# Path: boiler/errors.py
# def register_error_handler(app, handler=None):
# """
# Register error handler
# Registers an exception handler on the app instance for every type of
# exception code werkzeug is aware about.
#
# :param app: flask.Flask - flask application instance
# :param handler: function - the handler
# :return: None
# """
# if not handler:
# handler = default_error_handler
#
# for code in exceptions.default_exceptions.keys():
# app.register_error_handler(code, handler)
#
# Path: boiler/jinja/functions.py
# def asset(url=None):
# def dev_proxy():
#
# Path: boiler/exceptions.py
# class BoilerException(Exception):
# class BootstrapException(BoilerException, RuntimeError):
. Output only the next line. | register_error_handler(app) |
Next line prediction: <|code_start|> flask_params['import_name'] = name
# configure static assets
if config.get('FLASK_STATIC_URL') is not None:
flask_params['static_url_path'] = config.get('FLASK_STATIC_URL')
if config.get('FLASK_STATIC_PATH') is not None:
flask_params['static_folder'] = config.get('FLASK_STATIC_PATH')
# create an app with default config
app = Flask(**flask_params)
app.config.from_object(DefaultConfig())
# apply custom config
if config:
app.config.from_object(config)
# enable csrf protection
CSRFProtect(app)
# register error handler
register_error_handler(app)
# use kernel templates
kernel_templates_path = path.realpath(path.dirname(__file__)+'/templates')
fallback_loader = FileSystemLoader([kernel_templates_path])
custom_loader = ChoiceLoader([app.jinja_loader, fallback_loader])
app.jinja_loader = custom_loader
# register custom jinja functions
app.jinja_env.globals.update(dict(
<|code_end|>
. Use current file imports:
(import os
from os import path
from flask import Flask
from flask import g
from flask import request
from werkzeug.utils import import_string
from werkzeug.utils import ImportStringError
from jinja2 import ChoiceLoader, FileSystemLoader
from flask_wtf import CSRFProtect
from boiler.config import DefaultConfig
from boiler.timer import restart_timer
from boiler.errors import register_error_handler
from boiler.jinja import functions as jinja_functions
from boiler import exceptions as x
from boiler.feature.routing import routing_feature
from boiler.feature.mail import mail_feature
from boiler.feature.orm import orm_feature
from boiler.feature.logging import logging_feature
from boiler.feature.localization import localization_feature)
and context including class names, function names, or small code snippets from other files:
# Path: boiler/config.py
# class DefaultConfig(Config):
# """
# Default project configuration
# Sets up defaults used and/or overridden in environments and deployments
# """
# ENV = 'production'
#
# SERVER_NAME = None
#
# # secret key
# SECRET_KEY = os.getenv('APP_SECRET_KEY')
#
# TIME_RESTARTS = False
# TESTING = False
# DEBUG = False
# DEBUG_TB_ENABLED = False
# DEBUG_TB_PROFILER_ENABLED = False
# DEBUG_TB_INTERCEPT_REDIRECTS = False
#
# # where built-in server and url_for look for static files (None for default)
# FLASK_STATIC_URL = None
# FLASK_STATIC_PATH = None
#
# # asset helper settings (server must be capable of serving these files)
# ASSETS_VERSION = None
# ASSETS_PATH = None # None falls back to url_for('static')
#
# # do not expose our urls on 404s
# ERROR_404_HELP = False
#
# # uploads
# MAX_CONTENT_LENGTH = 1024 * 1024 * 16 # megabytes
#
# # database
# # 'mysql://user:password@server/db?charset=utf8mb4'
# # 'mysql+pymysql://user:password@server/db?charset=utf8mb4'
# # 'mysql+mysqlconnector://user:password@host:3306/database?charset=utf8mb4'
# SQLALCHEMY_ECHO = False
# SQLALCHEMY_TRACK_MODIFICATIONS = False
# MIGRATIONS_PATH = os.path.join(os.getcwd(), 'migrations')
# SQLALCHEMY_DATABASE_URI = os.getenv('APP_DATABASE_URI')
# TEST_DB_PATH = os.path.join(
# os.getcwd(), 'var', 'data', 'test-db', 'sqlite.db'
# )
#
# # mail server settings
# MAIL_DEBUG = False
# MAIL_SERVER = 'smtp.gmail.com'
# MAIL_PORT = 587
# MAIL_USE_TLS = True
# MAIL_USE_SSL = False
# MAIL_USERNAME = None
# MAIL_PASSWORD = None
# MAIL_DEFAULT_SENDER = ('Webapp Mailer', 'mygmail@gmail.com')
#
# # logging
# ADMINS = ['you@domain']
# LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS = False
#
# # localization (babel)
# DEFAULT_LOCALE = 'en_GB'
# DEFAULT_TIMEZONE = 'UTC'
#
# # csrf protection
# WTF_CSRF_ENABLED = True
#
# # recaptcha
# RECAPTCHA_PUBLIC_KEY = os.getenv('APP_RECAPTCHA_PUBLIC_KEY')
# RECAPTCHA_PRIVATE_KEY = os.getenv('APP_RECAPTCHA_PRIVATE_KEY')
#
# Path: boiler/timer/restart_timer.py
# def time_restarts(data_path):
#
# Path: boiler/errors.py
# def register_error_handler(app, handler=None):
# """
# Register error handler
# Registers an exception handler on the app instance for every type of
# exception code werkzeug is aware about.
#
# :param app: flask.Flask - flask application instance
# :param handler: function - the handler
# :return: None
# """
# if not handler:
# handler = default_error_handler
#
# for code in exceptions.default_exceptions.keys():
# app.register_error_handler(code, handler)
#
# Path: boiler/jinja/functions.py
# def asset(url=None):
# def dev_proxy():
#
# Path: boiler/exceptions.py
# class BoilerException(Exception):
# class BootstrapException(BoilerException, RuntimeError):
. Output only the next line. | asset=jinja_functions.asset, |
Based on the snippet: <|code_start|>
def get_config():
"""
Imports config based on environment.
:return:
"""
flask_config = os.getenv('FLASK_CONFIG')
if not flask_config:
err = 'Unable to bootstrap application FLASK_CONFIG is not defined'
<|code_end|>
, predict the immediate next line with the help of imports:
import os
from os import path
from flask import Flask
from flask import g
from flask import request
from werkzeug.utils import import_string
from werkzeug.utils import ImportStringError
from jinja2 import ChoiceLoader, FileSystemLoader
from flask_wtf import CSRFProtect
from boiler.config import DefaultConfig
from boiler.timer import restart_timer
from boiler.errors import register_error_handler
from boiler.jinja import functions as jinja_functions
from boiler import exceptions as x
from boiler.feature.routing import routing_feature
from boiler.feature.mail import mail_feature
from boiler.feature.orm import orm_feature
from boiler.feature.logging import logging_feature
from boiler.feature.localization import localization_feature
and context (classes, functions, sometimes code) from other files:
# Path: boiler/config.py
# class DefaultConfig(Config):
# """
# Default project configuration
# Sets up defaults used and/or overridden in environments and deployments
# """
# ENV = 'production'
#
# SERVER_NAME = None
#
# # secret key
# SECRET_KEY = os.getenv('APP_SECRET_KEY')
#
# TIME_RESTARTS = False
# TESTING = False
# DEBUG = False
# DEBUG_TB_ENABLED = False
# DEBUG_TB_PROFILER_ENABLED = False
# DEBUG_TB_INTERCEPT_REDIRECTS = False
#
# # where built-in server and url_for look for static files (None for default)
# FLASK_STATIC_URL = None
# FLASK_STATIC_PATH = None
#
# # asset helper settings (server must be capable of serving these files)
# ASSETS_VERSION = None
# ASSETS_PATH = None # None falls back to url_for('static')
#
# # do not expose our urls on 404s
# ERROR_404_HELP = False
#
# # uploads
# MAX_CONTENT_LENGTH = 1024 * 1024 * 16 # megabytes
#
# # database
# # 'mysql://user:password@server/db?charset=utf8mb4'
# # 'mysql+pymysql://user:password@server/db?charset=utf8mb4'
# # 'mysql+mysqlconnector://user:password@host:3306/database?charset=utf8mb4'
# SQLALCHEMY_ECHO = False
# SQLALCHEMY_TRACK_MODIFICATIONS = False
# MIGRATIONS_PATH = os.path.join(os.getcwd(), 'migrations')
# SQLALCHEMY_DATABASE_URI = os.getenv('APP_DATABASE_URI')
# TEST_DB_PATH = os.path.join(
# os.getcwd(), 'var', 'data', 'test-db', 'sqlite.db'
# )
#
# # mail server settings
# MAIL_DEBUG = False
# MAIL_SERVER = 'smtp.gmail.com'
# MAIL_PORT = 587
# MAIL_USE_TLS = True
# MAIL_USE_SSL = False
# MAIL_USERNAME = None
# MAIL_PASSWORD = None
# MAIL_DEFAULT_SENDER = ('Webapp Mailer', 'mygmail@gmail.com')
#
# # logging
# ADMINS = ['you@domain']
# LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS = False
#
# # localization (babel)
# DEFAULT_LOCALE = 'en_GB'
# DEFAULT_TIMEZONE = 'UTC'
#
# # csrf protection
# WTF_CSRF_ENABLED = True
#
# # recaptcha
# RECAPTCHA_PUBLIC_KEY = os.getenv('APP_RECAPTCHA_PUBLIC_KEY')
# RECAPTCHA_PRIVATE_KEY = os.getenv('APP_RECAPTCHA_PRIVATE_KEY')
#
# Path: boiler/timer/restart_timer.py
# def time_restarts(data_path):
#
# Path: boiler/errors.py
# def register_error_handler(app, handler=None):
# """
# Register error handler
# Registers an exception handler on the app instance for every type of
# exception code werkzeug is aware about.
#
# :param app: flask.Flask - flask application instance
# :param handler: function - the handler
# :return: None
# """
# if not handler:
# handler = default_error_handler
#
# for code in exceptions.default_exceptions.keys():
# app.register_error_handler(code, handler)
#
# Path: boiler/jinja/functions.py
# def asset(url=None):
# def dev_proxy():
#
# Path: boiler/exceptions.py
# class BoilerException(Exception):
# class BootstrapException(BoilerException, RuntimeError):
. Output only the next line. | raise x.BootstrapException(err) |
Based on the snippet: <|code_start|>
"""
Create app for testing
This is not a real application, we only use it to run tests against.
Templates resolution
Default template location for flask apps is wherever the application module is
located. This is alright for regular applications because we bootstrap them from
their root, but for testing application our template root becomes
boiler/tests/templates. There is however a way to set up application root path
on the flask app.
In order for it to be able to find default boiler templates, we will need to set
templates directory, otherwise it will automagically resolve to this file's
parent dir.
On how flask resolves template path see 'template_folder' here:
@see http://flask.pocoo.org/docs/0.12/api/
"""
# set path to boiler templates (test app only)
flask_params = dict(template_folder='../../templates')
# create app
<|code_end|>
, predict the immediate next line with the help of imports:
from boiler import bootstrap
from boiler.config import TestingConfig
and context (classes, functions, sometimes code) from other files:
# Path: boiler/bootstrap.py
# def get_config():
# def get_app():
# def test_import_name(name):
# def create_app(name, config=None, flask_params=None):
# def detect_dev_proxy():
# def add_routing(app):
# def add_mail(app):
# def add_orm(app):
# def add_logging(app):
# def add_localization(app):
#
# Path: boiler/config.py
# class TestingConfig(Config):
# """ Default testing config """
# ENV = 'testing'
# TESTING = True
# MAIL_DEBUG = True
#
# # use sqlite in testing
# test_db = 'sqlite:///{}'.format(DefaultConfig.TEST_DB_PATH)
# SQLALCHEMY_DATABASE_URI = test_db
#
# # hash quickly in testing
# WTF_CSRF_ENABLED = False
# PASSLIB_ALGO = 'md5_crypt'
. Output only the next line. | app = bootstrap.create_app( |
Next line prediction: <|code_start|>
"""
Create app for testing
This is not a real application, we only use it to run tests against.
Templates resolution
Default template location for flask apps is wherever the application module is
located. This is alright for regular applications because we bootstrap them from
their root, but for testing application our template root becomes
boiler/tests/templates. There is however a way to set up application root path
on the flask app.
In order for it to be able to find default boiler templates, we will need to set
templates directory, otherwise it will automagically resolve to this file's
parent dir.
On how flask resolves template path see 'template_folder' here:
@see http://flask.pocoo.org/docs/0.12/api/
"""
# set path to boiler templates (test app only)
flask_params = dict(template_folder='../../templates')
# create app
app = bootstrap.create_app(
__name__,
<|code_end|>
. Use current file imports:
(from boiler import bootstrap
from boiler.config import TestingConfig)
and context including class names, function names, or small code snippets from other files:
# Path: boiler/bootstrap.py
# def get_config():
# def get_app():
# def test_import_name(name):
# def create_app(name, config=None, flask_params=None):
# def detect_dev_proxy():
# def add_routing(app):
# def add_mail(app):
# def add_orm(app):
# def add_logging(app):
# def add_localization(app):
#
# Path: boiler/config.py
# class TestingConfig(Config):
# """ Default testing config """
# ENV = 'testing'
# TESTING = True
# MAIL_DEBUG = True
#
# # use sqlite in testing
# test_db = 'sqlite:///{}'.format(DefaultConfig.TEST_DB_PATH)
# SQLALCHEMY_DATABASE_URI = test_db
#
# # hash quickly in testing
# WTF_CSRF_ENABLED = False
# PASSLIB_ALGO = 'md5_crypt'
. Output only the next line. | config=TestingConfig(), |
Based on the snippet: <|code_start|>
def create_fake_data(self, how_many=1):
""" Create a fake data set to test our collection """
fake = Factory.create()
items = []
for i in range(how_many):
user = User(
email=fake.email(),
password=fake.password()
)
db.session.add(user)
db.session.commit()
items.append(user)
return items
def serializer(self, obj):
"""
Serializer
To test serialization capabilities we'll use this simple serializer
"""
return obj.__repr__()
# ------------------------------------------------------------------------
# General
# ------------------------------------------------------------------------
def test_can_create_instance(self):
""" Can create an instance of collection """
serializer = self.serializer
<|code_end|>
, predict the immediate next line with the help of imports:
from unittest import mock
from nose.plugins.attrib import attr
from tests.base_testcase import BoilerTestCase
from faker import Factory
from boiler.collections import ApiCollection
from boiler.feature.orm import db
from tests.boiler_test_app.models import User
and context (classes, functions, sometimes code) from other files:
# Path: tests/base_testcase.py
# class BoilerTestCase(FlaskTestCase):
# """
# Boiler test case
# Every boiler test should extend from this base class as it sets up
# boiler-specific test app
# """
# def setUp(self):
# super().setUp(app)
#
# Path: boiler/collections/api_collection.py
# class ApiCollection(PaginatedCollection):
# """
# API Collection
# Works the same way as a paginated collection, but also applies
# serializer to each item. Useful in API responses.
# """
# def __init__(self, query, *_, serialize_function, **kwargs):
# self.serializer = serialize_function
# super().__init__(query, **kwargs)
#
# def __iter__(self):
# """ Performs generator-based iteration through page items """
# offset = 0
# while offset < len(self.items):
# item = self.items[offset]
# offset += 1
# yield self.serializer(item)
#
# def dict(self):
# """ Returns current collection as a dictionary """
# collection = super().dict()
# serialized_items = []
# for item in collection['items']:
# serialized_items.append(self.serializer(item))
#
# collection['items'] = serialized_items
# return collection
#
# def json(self):
# """ Returns a json representation of collection """
# return dumps(self.dict())
#
# Path: boiler/feature/orm.py
# def orm_feature(app):
#
# Path: tests/boiler_test_app/models.py
# class User(db.Model):
# """
# User model
# Represents a very basic user entity with the functionality to register,
# via email and password or an OAuth provider, login, recover password and
# be authorised and authenticated.
#
# Please not this object must only be instantiated from flask app context
# as it will try to pull config settings from current_app.config.
# """
#
# id = db.Column(db.Integer, primary_key=True, nullable=False)
# _email = db.Column('email', db.String(128), nullable=False, unique=True)
# _password = db.Column('password', db.String(256))
#
# # -------------------------------------------------------------------------
# # Public API
# # -------------------------------------------------------------------------
#
# def __init__(self, *args, **kwargs):
# """ Instantiate with optional keyword data to set """
# if 'id' in kwargs:
# del kwargs['id']
# super().__init__(*args, **kwargs)
#
# def __repr__(self):
# """ Printable representation of user """
# u = '<User id="{}" email="{}">'
# return u.format(self.id, self.email_secure)
#
# def generate_hash(self, length=30):
# """ Generate random string of given length """
# import random, string
# chars = string.ascii_letters + string.digits
# ran = random.SystemRandom().choice
# hash = ''.join(ran(chars) for i in range(length))
# return hash
#
# # -------------------------------------------------------------------------
# # Email
# # -------------------------------------------------------------------------
#
# @hybrid_property
# def email(self):
# """ Hybrid getter """
# return self._email
#
# @property
# def email_secure(self):
# """ Obfuscated email used for display """
# email = self._email
# if not email: return ''
# address, host = email.split('@')
# if len(address) <= 2: return ('*' * len(address)) + '@' + host
#
# import re
# host = '@' + host
# obfuscated = re.sub(r'[a-zA-z0-9]', '*', address[1:-1])
# return address[:1] + obfuscated + address[-1:] + host
#
# @email.setter
# def email(self, email):
# """ Set email and generate confirmation """
# if email == self.email:
# return
#
# email = email.lower()
# if self._email is None:
# self._email = email
# else:
# self.email_new = email
# self.require_email_confirmation()
#
# # -------------------------------------------------------------------------
# # Password
# # -------------------------------------------------------------------------
#
# @hybrid_property
# def password(self):
# """ Hybrid password getter """
# return self._password
#
# @password.setter
# def password(self, password):
# """ Encode a string and set as password """
# self._password = password
. Output only the next line. | collection = ApiCollection(User.query, serialize_function=serializer) |
Predict the next line after this snippet: <|code_start|>
@attr('kernel', 'collections', 'api_collection')
class ApiCollectionTests(BoilerTestCase):
"""
API collection tests
These test pretty much repeat what we did for paginated collection.
Once again these are integration tests and will require an actual database.
"""
def setUp(self):
super().setUp()
self.create_db()
def create_fake_data(self, how_many=1):
""" Create a fake data set to test our collection """
fake = Factory.create()
items = []
for i in range(how_many):
user = User(
email=fake.email(),
password=fake.password()
)
<|code_end|>
using the current file's imports:
from unittest import mock
from nose.plugins.attrib import attr
from tests.base_testcase import BoilerTestCase
from faker import Factory
from boiler.collections import ApiCollection
from boiler.feature.orm import db
from tests.boiler_test_app.models import User
and any relevant context from other files:
# Path: tests/base_testcase.py
# class BoilerTestCase(FlaskTestCase):
# """
# Boiler test case
# Every boiler test should extend from this base class as it sets up
# boiler-specific test app
# """
# def setUp(self):
# super().setUp(app)
#
# Path: boiler/collections/api_collection.py
# class ApiCollection(PaginatedCollection):
# """
# API Collection
# Works the same way as a paginated collection, but also applies
# serializer to each item. Useful in API responses.
# """
# def __init__(self, query, *_, serialize_function, **kwargs):
# self.serializer = serialize_function
# super().__init__(query, **kwargs)
#
# def __iter__(self):
# """ Performs generator-based iteration through page items """
# offset = 0
# while offset < len(self.items):
# item = self.items[offset]
# offset += 1
# yield self.serializer(item)
#
# def dict(self):
# """ Returns current collection as a dictionary """
# collection = super().dict()
# serialized_items = []
# for item in collection['items']:
# serialized_items.append(self.serializer(item))
#
# collection['items'] = serialized_items
# return collection
#
# def json(self):
# """ Returns a json representation of collection """
# return dumps(self.dict())
#
# Path: boiler/feature/orm.py
# def orm_feature(app):
#
# Path: tests/boiler_test_app/models.py
# class User(db.Model):
# """
# User model
# Represents a very basic user entity with the functionality to register,
# via email and password or an OAuth provider, login, recover password and
# be authorised and authenticated.
#
# Please not this object must only be instantiated from flask app context
# as it will try to pull config settings from current_app.config.
# """
#
# id = db.Column(db.Integer, primary_key=True, nullable=False)
# _email = db.Column('email', db.String(128), nullable=False, unique=True)
# _password = db.Column('password', db.String(256))
#
# # -------------------------------------------------------------------------
# # Public API
# # -------------------------------------------------------------------------
#
# def __init__(self, *args, **kwargs):
# """ Instantiate with optional keyword data to set """
# if 'id' in kwargs:
# del kwargs['id']
# super().__init__(*args, **kwargs)
#
# def __repr__(self):
# """ Printable representation of user """
# u = '<User id="{}" email="{}">'
# return u.format(self.id, self.email_secure)
#
# def generate_hash(self, length=30):
# """ Generate random string of given length """
# import random, string
# chars = string.ascii_letters + string.digits
# ran = random.SystemRandom().choice
# hash = ''.join(ran(chars) for i in range(length))
# return hash
#
# # -------------------------------------------------------------------------
# # Email
# # -------------------------------------------------------------------------
#
# @hybrid_property
# def email(self):
# """ Hybrid getter """
# return self._email
#
# @property
# def email_secure(self):
# """ Obfuscated email used for display """
# email = self._email
# if not email: return ''
# address, host = email.split('@')
# if len(address) <= 2: return ('*' * len(address)) + '@' + host
#
# import re
# host = '@' + host
# obfuscated = re.sub(r'[a-zA-z0-9]', '*', address[1:-1])
# return address[:1] + obfuscated + address[-1:] + host
#
# @email.setter
# def email(self, email):
# """ Set email and generate confirmation """
# if email == self.email:
# return
#
# email = email.lower()
# if self._email is None:
# self._email = email
# else:
# self.email_new = email
# self.require_email_confirmation()
#
# # -------------------------------------------------------------------------
# # Password
# # -------------------------------------------------------------------------
#
# @hybrid_property
# def password(self):
# """ Hybrid password getter """
# return self._password
#
# @password.setter
# def password(self, password):
# """ Encode a string and set as password """
# self._password = password
. Output only the next line. | db.session.add(user) |
Here is a snippet: <|code_start|>
@attr('kernel', 'collections', 'api_collection')
class ApiCollectionTests(BoilerTestCase):
"""
API collection tests
These test pretty much repeat what we did for paginated collection.
Once again these are integration tests and will require an actual database.
"""
def setUp(self):
super().setUp()
self.create_db()
def create_fake_data(self, how_many=1):
""" Create a fake data set to test our collection """
fake = Factory.create()
items = []
for i in range(how_many):
<|code_end|>
. Write the next line using the current file imports:
from unittest import mock
from nose.plugins.attrib import attr
from tests.base_testcase import BoilerTestCase
from faker import Factory
from boiler.collections import ApiCollection
from boiler.feature.orm import db
from tests.boiler_test_app.models import User
and context from other files:
# Path: tests/base_testcase.py
# class BoilerTestCase(FlaskTestCase):
# """
# Boiler test case
# Every boiler test should extend from this base class as it sets up
# boiler-specific test app
# """
# def setUp(self):
# super().setUp(app)
#
# Path: boiler/collections/api_collection.py
# class ApiCollection(PaginatedCollection):
# """
# API Collection
# Works the same way as a paginated collection, but also applies
# serializer to each item. Useful in API responses.
# """
# def __init__(self, query, *_, serialize_function, **kwargs):
# self.serializer = serialize_function
# super().__init__(query, **kwargs)
#
# def __iter__(self):
# """ Performs generator-based iteration through page items """
# offset = 0
# while offset < len(self.items):
# item = self.items[offset]
# offset += 1
# yield self.serializer(item)
#
# def dict(self):
# """ Returns current collection as a dictionary """
# collection = super().dict()
# serialized_items = []
# for item in collection['items']:
# serialized_items.append(self.serializer(item))
#
# collection['items'] = serialized_items
# return collection
#
# def json(self):
# """ Returns a json representation of collection """
# return dumps(self.dict())
#
# Path: boiler/feature/orm.py
# def orm_feature(app):
#
# Path: tests/boiler_test_app/models.py
# class User(db.Model):
# """
# User model
# Represents a very basic user entity with the functionality to register,
# via email and password or an OAuth provider, login, recover password and
# be authorised and authenticated.
#
# Please not this object must only be instantiated from flask app context
# as it will try to pull config settings from current_app.config.
# """
#
# id = db.Column(db.Integer, primary_key=True, nullable=False)
# _email = db.Column('email', db.String(128), nullable=False, unique=True)
# _password = db.Column('password', db.String(256))
#
# # -------------------------------------------------------------------------
# # Public API
# # -------------------------------------------------------------------------
#
# def __init__(self, *args, **kwargs):
# """ Instantiate with optional keyword data to set """
# if 'id' in kwargs:
# del kwargs['id']
# super().__init__(*args, **kwargs)
#
# def __repr__(self):
# """ Printable representation of user """
# u = '<User id="{}" email="{}">'
# return u.format(self.id, self.email_secure)
#
# def generate_hash(self, length=30):
# """ Generate random string of given length """
# import random, string
# chars = string.ascii_letters + string.digits
# ran = random.SystemRandom().choice
# hash = ''.join(ran(chars) for i in range(length))
# return hash
#
# # -------------------------------------------------------------------------
# # Email
# # -------------------------------------------------------------------------
#
# @hybrid_property
# def email(self):
# """ Hybrid getter """
# return self._email
#
# @property
# def email_secure(self):
# """ Obfuscated email used for display """
# email = self._email
# if not email: return ''
# address, host = email.split('@')
# if len(address) <= 2: return ('*' * len(address)) + '@' + host
#
# import re
# host = '@' + host
# obfuscated = re.sub(r'[a-zA-z0-9]', '*', address[1:-1])
# return address[:1] + obfuscated + address[-1:] + host
#
# @email.setter
# def email(self, email):
# """ Set email and generate confirmation """
# if email == self.email:
# return
#
# email = email.lower()
# if self._email is None:
# self._email = email
# else:
# self.email_new = email
# self.require_email_confirmation()
#
# # -------------------------------------------------------------------------
# # Password
# # -------------------------------------------------------------------------
#
# @hybrid_property
# def password(self):
# """ Hybrid password getter """
# return self._password
#
# @password.setter
# def password(self, password):
# """ Encode a string and set as password """
# self._password = password
, which may include functions, classes, or code. Output only the next line. | user = User( |
Continue the code snippet: <|code_start|>
@attr('kernel', 'collections', 'pagination')
class PaginationTest(BoilerTestCase):
def test_first_doesnt_go_below_zero(self):
""" First page doesn't go below zero"""
<|code_end|>
. Use current file imports:
from unittest import mock
from nose.plugins.attrib import attr
from tests.base_testcase import BoilerTestCase
from boiler.collections.pagination import paginate
from pprint import pprint as pp
and context (classes, functions, or code) from other files:
# Path: tests/base_testcase.py
# class BoilerTestCase(FlaskTestCase):
# """
# Boiler test case
# Every boiler test should extend from this base class as it sets up
# boiler-specific test app
# """
# def setUp(self):
# super().setUp(app)
#
# Path: boiler/collections/pagination.py
# def paginate(page, total_items, total_pages, slice_size=5):
# """
# Paginate
# Does some maths to generate ranged pagination. Returns a dictionary
# of page numbers to be used in url builders that allows to go to first
# page, previous page, next page, last page and one of the pages in
# range around current page with possibility to jump in slices. The
# result will look like this:
#
# {
# page: 2,
# total_pages: 100,
# total_items: 1000,
# pagination: {
# first: 1
# previous: 1,
# previous_slice: 1
# pages: [1, 2, 3, 4, 5, 6, 7 ... etc]
# next_slice: 14
# next: 3,
# last: 100
# }
#
# }
# :return: boiler.collections.paginated_collection.PaginatedCollection
# """
# if slice_size > total_pages:
# slice_size = total_pages
#
# # paginate (can be out of bounds for now)
# first = 1
# previous = page - 1
# next = page + 1
# last = total_pages
# previous_slice = page - slice_size
# next_slice = page + slice_size
#
# # assemble
# links = dict(
# first=None,
# previous=None,
# next=None,
# last=None
# )
#
# # previous/next
# if total_pages > 1:
# if page == 1:
# links['next'] = next
# links['last'] = last
# elif page == total_pages:
# links['first'] = first
# links['previous'] = previous
# else:
# links['first'] = first
# links['previous'] = previous
# links['next'] = next
# links['last'] = last
#
# # previous_slice
# links['previous_slice'] = previous_slice
# if page - slice_size <= 0:
# links['previous_slice'] = None
# if page != 1:
# links['previous_slice'] = first
#
# # next slice
# links['next_slice'] = next_slice
# if page + slice_size > total_pages:
# links['next_slice'] = None
# if page != total_pages and total_pages != 0:
# links['next_slice'] = last
#
# # slice pages
# delta = math.ceil(slice_size / 2)
# if page - delta > total_pages - slice_size:
# left_bound = total_pages - slice_size + 1
# right_bound = total_pages
# else:
# if page - delta < 0:
# delta = page
#
# offset = page - delta
# left_bound = offset + 1
# right_bound = offset + slice_size
#
# # append page range
# links['pages'] = list(range(left_bound, right_bound + 1))
#
# # discard slice navigation if no next/prev slice
# if links['pages']:
# if links['previous_slice'] == links['pages'][0]:
# links['previous_slice'] = None
# if links['next_slice'] == links['pages'][-1]:
# links['next_slice'] = None
#
# # and return
# pagination = dict(
# page=page,
# total_pages=total_pages,
# total_items=total_items,
# pagination=links
# )
#
# return pagination
. Output only the next line. | pagination = paginate( |
Using the snippet: <|code_start|>"""
A note on URLs: please define your URLS with a trailing slash (unless it has
an extension of course)! This way they will work both with and without trailing
slash. If it's missing - Flask will just add it.
"""
urls = dict()
<|code_end|>
, determine the next line of code. You have imports:
from boiler.routes.route import route
and context (class names, function names, or code) available:
# Path: boiler/routes/route.py
# def route(view, endpoint=None, methods=None, defaults=None, **options):
# """
# Route: a shorthand for route declaration
# Import and use it in your app.urls file by calling:
# url['/path/to/view'] = route('module.views.view', 'route_name')
# """
# if not endpoint:
# endpoint = view
# if not methods:
# methods = ['GET']
# return dict(
# view_func=LazyView(view),
# endpoint=endpoint,
# methods=methods,
# defaults=defaults,
# **options
# )
. Output only the next line. | urls['/'] = route('backend.views.home', 'home') |
Given the following code snippet before the placeholder: <|code_start|>
def routing_feature(app):
"""
Add routing feature
Allows to define application routes un urls.py file and use lazy views.
Additionally enables regular exceptions in route definitions
"""
# enable regex routes
<|code_end|>
, predict the next line using imports from the current file:
from werkzeug.utils import import_string
from boiler.routes.regex import RegexConverter
and context including class names, function names, and sometimes code from other files:
# Path: boiler/routes/regex.py
# class RegexConverter(BaseConverter):
# """
# Regex converter
# Allows to use regular expressions in flask urls definitions.
# An example of route definition: '/<regex("[abcABC0-9]{4,6}"):user>-<slug>/
# Will produce: user and slug variables.
# """
# def __init__(self, url_map, *items):
# super(RegexConverter, self).__init__(url_map)
# self.regex = items[0]
. Output only the next line. | app.url_map.converters['regex'] = RegexConverter |
Continue the code snippet: <|code_start|>
class PaginatedCollection:
"""
Paginated collection
Accepts an SQLAlchemy query object on initialization along with some
pagination settings and then allows you to iterate over itself in a
paginated manner: iterate over items in current page then call next_page()
to fetch next slice of data.
"""
def __init__(self, query, *_, page=1, per_page=10, pagination_range=5):
"""
Initialise collection
Creates an instance of collection. Requires an query object to
iterate through. Will issue 2 queries: one to count total items and
second to fetch actual items. Optionally generates a page range
to print range-like paginations of a given slice size.
:param query:
:param _: args, ignored
:param page: int, page to fetch
:param per_page: int, items per page
:param pagination_range: int, number of pages in pagination
"""
self._query = query
self.page = page
self.per_page = per_page
self.total_items = self._query.count()
self.total_pages = ceil(self.total_items / per_page)
# paginate
<|code_end|>
. Use current file imports:
from math import ceil
from boiler.collections.pagination import paginate
from pprint import pprint as pp
and context (classes, functions, or code) from other files:
# Path: boiler/collections/pagination.py
# def paginate(page, total_items, total_pages, slice_size=5):
# """
# Paginate
# Does some maths to generate ranged pagination. Returns a dictionary
# of page numbers to be used in url builders that allows to go to first
# page, previous page, next page, last page and one of the pages in
# range around current page with possibility to jump in slices. The
# result will look like this:
#
# {
# page: 2,
# total_pages: 100,
# total_items: 1000,
# pagination: {
# first: 1
# previous: 1,
# previous_slice: 1
# pages: [1, 2, 3, 4, 5, 6, 7 ... etc]
# next_slice: 14
# next: 3,
# last: 100
# }
#
# }
# :return: boiler.collections.paginated_collection.PaginatedCollection
# """
# if slice_size > total_pages:
# slice_size = total_pages
#
# # paginate (can be out of bounds for now)
# first = 1
# previous = page - 1
# next = page + 1
# last = total_pages
# previous_slice = page - slice_size
# next_slice = page + slice_size
#
# # assemble
# links = dict(
# first=None,
# previous=None,
# next=None,
# last=None
# )
#
# # previous/next
# if total_pages > 1:
# if page == 1:
# links['next'] = next
# links['last'] = last
# elif page == total_pages:
# links['first'] = first
# links['previous'] = previous
# else:
# links['first'] = first
# links['previous'] = previous
# links['next'] = next
# links['last'] = last
#
# # previous_slice
# links['previous_slice'] = previous_slice
# if page - slice_size <= 0:
# links['previous_slice'] = None
# if page != 1:
# links['previous_slice'] = first
#
# # next slice
# links['next_slice'] = next_slice
# if page + slice_size > total_pages:
# links['next_slice'] = None
# if page != total_pages and total_pages != 0:
# links['next_slice'] = last
#
# # slice pages
# delta = math.ceil(slice_size / 2)
# if page - delta > total_pages - slice_size:
# left_bound = total_pages - slice_size + 1
# right_bound = total_pages
# else:
# if page - delta < 0:
# delta = page
#
# offset = page - delta
# left_bound = offset + 1
# right_bound = offset + slice_size
#
# # append page range
# links['pages'] = list(range(left_bound, right_bound + 1))
#
# # discard slice navigation if no next/prev slice
# if links['pages']:
# if links['previous_slice'] == links['pages'][0]:
# links['previous_slice'] = None
# if links['next_slice'] == links['pages'][-1]:
# links['next_slice'] = None
#
# # and return
# pagination = dict(
# page=page,
# total_pages=total_pages,
# total_items=total_items,
# pagination=links
# )
#
# return pagination
. Output only the next line. | self.pagination = paginate( |
Here is a snippet: <|code_start|>
def create_impulse_component(model, glm, latent):
typ = model['impulse']['type'].lower()
if typ.lower() == 'basis':
return LinearBasisImpulses(model)
elif typ.lower() == 'normalized':
return NormalizedBasisImpulses(model)
elif typ.lower() == 'dirichlet':
return DirichletImpulses(model)
elif typ.lower() == 'exponential':
return ExponentialImpulses(model)
<|code_end|>
. Write the next line using the current file imports:
import theano
import theano.tensor as T
from pyglm.utils.basis import *
from pyglm.components.component import Component
from pyglm.components.priors import create_prior
from theano.tensor.signal.conv import conv2d
and context from other files:
# Path: pyglm/components/component.py
# class Component(object):
# """
# """
#
# def __init__(self, model):
# """ Initialize the component with the parameters from the given model.
# """
# pass
#
# def get_variables(self):
# """ Get a dictionary of (name : Theano variable) items for all the
# symbolic variables associated with this component.
# """
# return {}
#
# def get_state(self):
# return {}
#
# def preprocess_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_hyperparameters(self, model):
# """ Set hyperparameters of the model
# """
# pass
#
# def sample(self, acc):
# """
# return a sample of the variables
# """
# return {}
#
# Path: pyglm/components/priors.py
# def create_prior(model, **kwargs):
# typ = model['type'].lower()
# if typ == 'normal' or \
# typ == 'gaussian':
# return Gaussian(model, **kwargs)
# elif typ == 'categorical':
# return Categorical(model, **kwargs)
# elif typ == 'jointcategorical' or \
# typ == 'joint_categorical':
# return JointCategorical(model, **kwargs)
# elif typ == 'spherical_gaussian':
# return SphericalGaussian(model, **kwargs)
# elif typ == 'group_lasso' or \
# typ == 'grouplasso':
# return GroupLasso(model, **kwargs)
# elif typ == 'dpp':
# return DeterminenalPointProcess(model, **kwargs)
# elif typ == 'dirichlet':
# return Dirichlet(model)
# else:
# raise Exception("Unrecognized prior type: %s" % typ)
, which may include functions, classes, or code. Output only the next line. | class LinearBasisImpulses(Component): |
Continue the code snippet: <|code_start|>
def create_impulse_component(model, glm, latent):
typ = model['impulse']['type'].lower()
if typ.lower() == 'basis':
return LinearBasisImpulses(model)
elif typ.lower() == 'normalized':
return NormalizedBasisImpulses(model)
elif typ.lower() == 'dirichlet':
return DirichletImpulses(model)
elif typ.lower() == 'exponential':
return ExponentialImpulses(model)
class LinearBasisImpulses(Component):
""" Linear impulse response functions. Here we make use of Theano's
tensordot to sum up the currents from each presynaptic neuron.
"""
def __init__(self, model):
self.model = model
self.imp_model = model['impulse']
<|code_end|>
. Use current file imports:
import theano
import theano.tensor as T
from pyglm.utils.basis import *
from pyglm.components.component import Component
from pyglm.components.priors import create_prior
from theano.tensor.signal.conv import conv2d
and context (classes, functions, or code) from other files:
# Path: pyglm/components/component.py
# class Component(object):
# """
# """
#
# def __init__(self, model):
# """ Initialize the component with the parameters from the given model.
# """
# pass
#
# def get_variables(self):
# """ Get a dictionary of (name : Theano variable) items for all the
# symbolic variables associated with this component.
# """
# return {}
#
# def get_state(self):
# return {}
#
# def preprocess_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_hyperparameters(self, model):
# """ Set hyperparameters of the model
# """
# pass
#
# def sample(self, acc):
# """
# return a sample of the variables
# """
# return {}
#
# Path: pyglm/components/priors.py
# def create_prior(model, **kwargs):
# typ = model['type'].lower()
# if typ == 'normal' or \
# typ == 'gaussian':
# return Gaussian(model, **kwargs)
# elif typ == 'categorical':
# return Categorical(model, **kwargs)
# elif typ == 'jointcategorical' or \
# typ == 'joint_categorical':
# return JointCategorical(model, **kwargs)
# elif typ == 'spherical_gaussian':
# return SphericalGaussian(model, **kwargs)
# elif typ == 'group_lasso' or \
# typ == 'grouplasso':
# return GroupLasso(model, **kwargs)
# elif typ == 'dpp':
# return DeterminenalPointProcess(model, **kwargs)
# elif typ == 'dirichlet':
# return Dirichlet(model)
# else:
# raise Exception("Unrecognized prior type: %s" % typ)
. Output only the next line. | self.prior = create_prior(self.imp_model['prior']) |
Here is a snippet: <|code_start|>
return data
def load_imports_on_client(dview):
""" Import required modules on client
"""
dview.execute('from population import Population')
dview.execute('popn = None')
def initialize_client(model, data):
""" Initialize a population objsect on the client
"""
# Initialize a model with N neurons
print "Initializing GLM"
global popn
popn = Population(model)
# Initialize the GLM with the data
popn.set_data(data)
def run_parallel_map():
""" Run a test with synthetic data and MCMC inference
"""
# Parse command line args
(options, args) = parse_cmd_line_args()
# Load the data
data = load_data(options)
# Get a model for the data
model_type = 'standard_glm'
<|code_end|>
. Write the next line using the current file imports:
import cPickle
from IPython.parallel import Client
from synth_harness import parse_cmd_line_args
from pyglm.models.model_factory import make_model
from population import Population
and context from other files:
# Path: pyglm/models/model_factory.py
# def make_model(template, N=None, dt=None):
# """ Construct a model from a template and update the specified parameters
# """
# if isinstance(template, str):
# # Create the specified model
# if template.lower() == 'standard_glm' or \
# template.lower() == 'standardglm':
# model = copy.deepcopy(StandardGlm)
# elif template.lower() == 'spatiotemporal_glm':
# model = copy.deepcopy(SpatiotemporalGlm)
# elif template.lower() == 'shared_tuning_curve':
# model = copy.deepcopy(SharedTuningCurveGlm)
# elif template.lower() == 'simple_weighted_model' or \
# template.lower() == 'simpleweightedmodel':
# model = copy.deepcopy(SimpleWeightedModel)
# elif template.lower() == 'simple_sparse_model' or \
# template.lower() == 'simplesparsemodel':
# model = copy.deepcopy(SimpleSparseModel)
# elif template.lower() == 'sparse_weighted_model' or \
# template.lower() == 'sparseweightedmodel':
# model = copy.deepcopy(SparseWeightedModel)
# elif template.lower() == 'sbm_weighted_model' or \
# template.lower() == 'sbmweightedmodel':
# model = copy.deepcopy(SbmWeightedModel)
# elif template.lower() == 'distance_weighted_model' or \
# template.lower() == 'distanceweightedmodel':
# model = copy.deepcopy(DistanceWeightedModel)
# else:
# raise Exception("Unrecognized template model: %s!" % template)
# elif isinstance(template, dict):
# model = copy.deepcopy(template)
# else:
# raise Exception("Unrecognized template model!")
#
# # Override template model parameters
# if N is not None:
# model['N'] = N
#
# if dt is not None:
# model['dt'] = dt
#
# # # Update other parameters as necessary
# # if template.lower() == 'distance_weighted_model' or \
# # template.lower() == 'distanceweightedmodel':
# # #model['network']['graph']['location_prior']['sigma'] = N/2.0/3.0
# # model['network']['graph']['location_prior']['mu'] = \
# # np.tile(np.arange(N).reshape((N,1)),
# # [1,model['network']['graph']['N_dims']]).ravel()
#
# return model
, which may include functions, classes, or code. Output only the next line. | model = make_model(model_type, N=data['N']) |
Given snippet: <|code_start|>
def create_bias_component(model, glm, latent):
type = model['bias']['type'].lower()
if type == 'constant':
bias = ConstantBias(model)
else:
raise Exception("Unrecognized bias model: %s" % type)
return bias
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
import theano.tensor as T
from pyglm.components.component import Component
and context:
# Path: pyglm/components/component.py
# class Component(object):
# """
# """
#
# def __init__(self, model):
# """ Initialize the component with the parameters from the given model.
# """
# pass
#
# def get_variables(self):
# """ Get a dictionary of (name : Theano variable) items for all the
# symbolic variables associated with this component.
# """
# return {}
#
# def get_state(self):
# return {}
#
# def preprocess_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_hyperparameters(self, model):
# """ Set hyperparameters of the model
# """
# pass
#
# def sample(self, acc):
# """
# return a sample of the variables
# """
# return {}
which might include code, classes, or functions. Output only the next line. | class ConstantBias(Component): |
Next line prediction: <|code_start|> raise Exception("Unrecognized weight model: %s" % type)
return weight
class ConstantWeightModel(Component):
def __init__(self, model):
""" Initialize the filtered stim model
"""
self.model = model
N = model['N']
prms = model['network']['weight']
self.value = prms['value']
# Define weight matrix
self.W = self.value * T.ones((N,N))
# Define log probability
self.log_p = T.constant(0.0)
def get_state(self):
return {'W': self.W}
class GaussianWeightModel(Component):
def __init__(self, model):
""" Initialize the filtered stim model
"""
self.model = model
N = model['N']
prms = model['network']['weight']
<|code_end|>
. Use current file imports:
(import numpy as np
import theano.tensor as T
from component import Component
from pyglm.components.priors import create_prior)
and context including class names, function names, or small code snippets from other files:
# Path: pyglm/components/priors.py
# def create_prior(model, **kwargs):
# typ = model['type'].lower()
# if typ == 'normal' or \
# typ == 'gaussian':
# return Gaussian(model, **kwargs)
# elif typ == 'categorical':
# return Categorical(model, **kwargs)
# elif typ == 'jointcategorical' or \
# typ == 'joint_categorical':
# return JointCategorical(model, **kwargs)
# elif typ == 'spherical_gaussian':
# return SphericalGaussian(model, **kwargs)
# elif typ == 'group_lasso' or \
# typ == 'grouplasso':
# return GroupLasso(model, **kwargs)
# elif typ == 'dpp':
# return DeterminenalPointProcess(model, **kwargs)
# elif typ == 'dirichlet':
# return Dirichlet(model)
# else:
# raise Exception("Unrecognized prior type: %s" % typ)
. Output only the next line. | self.prior = create_prior(prms['prior']) |
Here is a snippet: <|code_start|> Check that the model satisfies whatever criteria are appropriate
for this model.
"""
self.population = population
def plot(self, sample, ax=None):
"""
Plot the sample or sequence of samples
"""
pass
class NetworkPlotProvider(PlotProvider):
"""
Class to plot the connectivity network
"""
def __init__(self, population):
super(NetworkPlotProvider, self).__init__(population)
# TODO: Check that the model has a network?
# All models should have a network
def plot(self, xs, ax=None, title=None, vmin=None, vmax=None, cmap=rwb_cmap):
# Ensure sample is a list
if not isinstance(xs, list):
xs = [xs]
# Get the weight matrix and adjacency matrix
wvars = self.population.network.weights.get_variables()
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import matplotlib.pyplot as plt
from hips.plotting.colormaps import gradient_cmap
from pyglm.utils.theano_func_wrapper import seval
and context from other files:
# Path: pyglm/utils/theano_func_wrapper.py
# def seval(expr, syms, vals, defaults=None, givens=[]):
# """
# Evaluate the symbolic expression which depends on a set of symbolic variables,
# given a set of variable bindings.
# expr : Theano variable to be evaluated
# syms : dictionary of symbolic variables
# vals : dictionary of values to assign to the symbolic vars in syms
# key structure should mimic that of syms
#
# defaults : an optional dictionary providing backup values for
# syms keys not found in vals.
# """
# # Look for a function handle corresponding to this expression with these givens
# hash_value = lambda v: hashlib.sha1(v).hexdigest() if \
# isinstance(v, np.ndarray) else v
# hashable_givens = tuple(map(lambda (k,v): (k, hash_value(v)), givens))
# hashable_syms = tuple(map(lambda v: hash_value(v), _flatten(syms)))
# key = (expr, hashable_syms, hashable_givens)
# if key in _func_cache.keys():
# #print "DBG: Found key %s in cache" % str(key)
# f = _func_cache[key]
# #print "DBG: Calling expr: %s" % str(expr)
# else:
# # Create a callable theano function and cache it
# # print "DBG: Compiling expr: %s" % str(expr)
# sargs = _flatten(syms)
# if len(givens) == 0:
# # print "DBG: Compiling expression %s with no givens" % expr
# f = theano.function(sargs, expr,
# on_unused_input='ignore')
# else:
# # print "DBG: Compiling expression %s with givens: %s" % (expr, str(givens))
# f = theano.function(sargs, expr,
# givens=givens,
# on_unused_input='ignore')
# _func_cache[key] = f
#
# # Easiest thing to do is call the function with all symbolic variables
# args = _extract_vals(syms,vals,defaults)
# return f(*args)
, which may include functions, classes, or code. Output only the next line. | Ws = np.array([seval(self.population.network.weights.W, |
Next line prediction: <|code_start|> ratios = np.zeros(B-1)
# Sample the intermediate distributions
for (n,beta) in zip(range(1,B), betas[1:]):
# print "M: %d\tBeta: %.3f" % (m,beta)
# Set the likelihood scale (beta) in the graph model
_set_beta(dview, beta)
# Take many samples to mix over this beta
for s in range(steps_per_B):
sys.stdout.write("M: %d\tBeta: %.3f\tSample: %d \r" % (m,beta,s))
sys.stdout.flush()
# Go through variables, sampling one at a time, in parallel where possible
for i in range(n_parallel_updates):
xs = dview.map_async(_parallel_update,
[i]*N,
[x]*N,
range(N))
# wait_watching_stdout(xs, interval=interval)
concatenate_parallel_updates(xs.get(), x)
# Sample serial updates
for i in range(n_serial_updates):
x = master.apply(_serial_update, i, x).get()
# Compute the ratio of this sample under this distribution and the previous distribution
<|code_end|>
. Use current file imports:
(import sys
import numpy as np
import os
import cPickle
from scipy.misc import logsumexp
from IPython.parallel.util import interactive
from pyglm.utils.parallel_util import parallel_compute_log_p)
and context including class names, function names, or small code snippets from other files:
# Path: pyglm/utils/parallel_util.py
# def parallel_compute_log_p(dview,
# master,
# v,
# N):
# """ Compute the log prob in parallel
# """
#
# # Compute the log probabaility of global variables
# # (e.g. the network) on the first node
# lp_tot = 0
#
# @interactive
# def _compute_network_lp(vs):
# print "Computing log prob for network"
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs,0)
# lp = seval(popn.network.log_p,
# syms,
# nvars)
# #lp = popn.network.log_p.eval(dict(zip(_flatten(tmpsyms),
# # _flatten(tmpnvars))),
# #on_unused_input='ignore')
# return lp
#
# lp_tot += master.apply_sync(_compute_network_lp, v)
#
# # Decorate with @interactive to ensure that the function runs
# # in the __main__ namespace that contains 'popn'
# @interactive
# def _compute_glm_lp(n, vs):
# print "Computing lp for GLM %d" % n
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs, n)
# lp = seval(popn.glm.log_p,
# syms,
# nvars)
# return lp
#
# lp_glms = dview.map_async(_compute_glm_lp,
# range(N),
# [v]*N)
# # print lp_glms.get()
# # lp_glms.display_outputs()
#
# lp_tot += sum(lp_glms.get())
# return lp_tot
. Output only the next line. | curr_lkhd = parallel_compute_log_p(dview, master, x, N) |
Given the following code snippet before the placeholder: <|code_start|> """
(T,D) = stim.shape
(Tb,Db) = basis.shape
# assert D==Db, "Spatial dimension of basis must match spatial dimension of stimulus."
# import scipy.signal as sig
# First, by convention, the impulse responses are apply to times
# (t-R:t-1). That means we need to prepend a row of zeros to make
# sure the basis remains causal
basis = np.vstack((np.zeros((1,Db)),basis))
# Flip the spatial dimension for convolution
# We are convolving the stimulus with the filter, so the temporal part does
# NOT need to be flipped
basis = basis[:,::-1]
# Compute convolution using FFT
if D==Db and shape[1] == 'valid':
raise Warning("Use low rank convolution when D==Db!")
# Look for fft_stim in _fft_cache
fft_stim = None
for (cache_stim, cache_fft_stim) in _fft_cache:
if np.allclose(stim[-128:],cache_stim[-128:]) and \
np.allclose(stim[:128],cache_stim[:128]):
fft_stim = cache_fft_stim
break
if not fft_stim is None:
<|code_end|>
, predict the next line using imports from the current file:
import os
import numpy as np
import scipy
import scipy.signal as sig
import scipy.signal as sig
import pdb; pdb.set_trace()
from pyglm.utils import fftconv
and context including class names, function names, and sometimes code from other files:
# Path: pyglm/utils/fftconv.py
# def fftconvolve(in1, in2, mode="full", fft_in1=None, fft_in2=None):
# def _centered(arr, newsize):
. Output only the next line. | fstim,_ = fftconv.fftconvolve(stim, basis, 'full', |
Here is a snippet: <|code_start|> def _create_samplers():
global serial_updates
global parallel_updates
serial_updates, parallel_updates = initialize_updates(popn)
# Return the number of parallel_updates
return len(serial_updates), len(parallel_updates)
n_serial_updates, n_parallel_updates = dview.apply(_create_samplers).get()[0]
# Create map-able functions to sample in parallel
@interactive
def _parallel_update(i, x, n):
return parallel_updates[i].update(x, n)
@interactive
def _serial_update(i, x):
return serial_updates[i].update(x)
## DEBUG Profile the Gibbs sampling loop
# import cProfile, pstats, StringIO
# pr = cProfile.Profile()
# pr.enable()
## END DEBUG
# Alternate fitting the network and fitting the GLMs
lp_smpls = np.zeros(N_samples+1)
lp_smpls[0] = parallel_compute_log_p(dview, master, x0, N)
ll_smpls = np.zeros(N_samples+1)
<|code_end|>
. Write the next line using the current file imports:
import copy
import numpy as np
import os
import cPickle
import time
from IPython.parallel.util import interactive
from pyglm.utils.parallel_util import parallel_compute_ll, \
parallel_compute_log_p, \
parallel_compute_log_prior
and context from other files:
# Path: pyglm/utils/parallel_util.py
# def parallel_compute_ll(dview,
# v,
# N):
# """ Compute the log prob in parallel
# """
# ll_tot = 0
# # Decorate with @interactive to ensure that the function runs
# # in the __main__ namespace that contains 'popn'
# @interactive
# def _compute_glm_ll(n, vs):
# print "Computing log lkhd for GLM %d" % n
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs, n)
# lp = seval(popn.glm.ll,
# syms,
# nvars)
# return lp
#
# ll_glms = dview.map_async(_compute_glm_ll,
# range(N),
# [v]*N)
#
# ll_tot += sum(ll_glms.get())
# return ll_tot
#
# def parallel_compute_log_p(dview,
# master,
# v,
# N):
# """ Compute the log prob in parallel
# """
#
# # Compute the log probabaility of global variables
# # (e.g. the network) on the first node
# lp_tot = 0
#
# @interactive
# def _compute_network_lp(vs):
# print "Computing log prob for network"
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs,0)
# lp = seval(popn.network.log_p,
# syms,
# nvars)
# #lp = popn.network.log_p.eval(dict(zip(_flatten(tmpsyms),
# # _flatten(tmpnvars))),
# #on_unused_input='ignore')
# return lp
#
# lp_tot += master.apply_sync(_compute_network_lp, v)
#
# # Decorate with @interactive to ensure that the function runs
# # in the __main__ namespace that contains 'popn'
# @interactive
# def _compute_glm_lp(n, vs):
# print "Computing lp for GLM %d" % n
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs, n)
# lp = seval(popn.glm.log_p,
# syms,
# nvars)
# return lp
#
# lp_glms = dview.map_async(_compute_glm_lp,
# range(N),
# [v]*N)
# # print lp_glms.get()
# # lp_glms.display_outputs()
#
# lp_tot += sum(lp_glms.get())
# return lp_tot
#
# def parallel_compute_log_prior(dview,
# master,
# v,
# N):
# """ Compute the log prob in parallel
# """
#
# # Compute the log probabaility of global variables
# # (e.g. the network) on the first node
# lp_tot = 0
#
# @interactive
# def _compute_network_lp(vs):
# print "Computing log prob for network"
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs,0)
# lp = seval(popn.network.log_p,
# syms,
# nvars)
# #lp = popn.network.log_p.eval(dict(zip(_flatten(tmpsyms),
# # _flatten(tmpnvars))),
# #on_unused_input='ignore')
# return lp
#
# lp_tot += master.apply_sync(_compute_network_lp, v)
#
# # Decorate with @interactive to ensure that the function runs
# # in the __main__ namespace that contains 'popn'
# @interactive
# def _compute_glm_lp(n, vs):
# print "Computing log prior for GLM %d" % n
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs, n)
# lp = seval(popn.glm.log_prior,
# syms,
# nvars)
# return lp
#
# lp_glms = dview.map_async(_compute_glm_lp,
# range(N),
# [v]*N)
# # print lp_glms.get()
# # lp_glms.display_outputs()
#
# lp_tot += sum(lp_glms.get())
# return lp_tot
, which may include functions, classes, or code. Output only the next line. | ll_smpls[0] = parallel_compute_ll(dview, x0, N) |
Given the code snippet: <|code_start|>
# Create parallel samplers
@interactive
def _create_samplers():
global serial_updates
global parallel_updates
serial_updates, parallel_updates = initialize_updates(popn)
# Return the number of parallel_updates
return len(serial_updates), len(parallel_updates)
n_serial_updates, n_parallel_updates = dview.apply(_create_samplers).get()[0]
# Create map-able functions to sample in parallel
@interactive
def _parallel_update(i, x, n):
return parallel_updates[i].update(x, n)
@interactive
def _serial_update(i, x):
return serial_updates[i].update(x)
## DEBUG Profile the Gibbs sampling loop
# import cProfile, pstats, StringIO
# pr = cProfile.Profile()
# pr.enable()
## END DEBUG
# Alternate fitting the network and fitting the GLMs
lp_smpls = np.zeros(N_samples+1)
<|code_end|>
, generate the next line using the imports in this file:
import copy
import numpy as np
import os
import cPickle
import time
from IPython.parallel.util import interactive
from pyglm.utils.parallel_util import parallel_compute_ll, \
parallel_compute_log_p, \
parallel_compute_log_prior
and context (functions, classes, or occasionally code) from other files:
# Path: pyglm/utils/parallel_util.py
# def parallel_compute_ll(dview,
# v,
# N):
# """ Compute the log prob in parallel
# """
# ll_tot = 0
# # Decorate with @interactive to ensure that the function runs
# # in the __main__ namespace that contains 'popn'
# @interactive
# def _compute_glm_ll(n, vs):
# print "Computing log lkhd for GLM %d" % n
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs, n)
# lp = seval(popn.glm.ll,
# syms,
# nvars)
# return lp
#
# ll_glms = dview.map_async(_compute_glm_ll,
# range(N),
# [v]*N)
#
# ll_tot += sum(ll_glms.get())
# return ll_tot
#
# def parallel_compute_log_p(dview,
# master,
# v,
# N):
# """ Compute the log prob in parallel
# """
#
# # Compute the log probabaility of global variables
# # (e.g. the network) on the first node
# lp_tot = 0
#
# @interactive
# def _compute_network_lp(vs):
# print "Computing log prob for network"
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs,0)
# lp = seval(popn.network.log_p,
# syms,
# nvars)
# #lp = popn.network.log_p.eval(dict(zip(_flatten(tmpsyms),
# # _flatten(tmpnvars))),
# #on_unused_input='ignore')
# return lp
#
# lp_tot += master.apply_sync(_compute_network_lp, v)
#
# # Decorate with @interactive to ensure that the function runs
# # in the __main__ namespace that contains 'popn'
# @interactive
# def _compute_glm_lp(n, vs):
# print "Computing lp for GLM %d" % n
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs, n)
# lp = seval(popn.glm.log_p,
# syms,
# nvars)
# return lp
#
# lp_glms = dview.map_async(_compute_glm_lp,
# range(N),
# [v]*N)
# # print lp_glms.get()
# # lp_glms.display_outputs()
#
# lp_tot += sum(lp_glms.get())
# return lp_tot
#
# def parallel_compute_log_prior(dview,
# master,
# v,
# N):
# """ Compute the log prob in parallel
# """
#
# # Compute the log probabaility of global variables
# # (e.g. the network) on the first node
# lp_tot = 0
#
# @interactive
# def _compute_network_lp(vs):
# print "Computing log prob for network"
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs,0)
# lp = seval(popn.network.log_p,
# syms,
# nvars)
# #lp = popn.network.log_p.eval(dict(zip(_flatten(tmpsyms),
# # _flatten(tmpnvars))),
# #on_unused_input='ignore')
# return lp
#
# lp_tot += master.apply_sync(_compute_network_lp, v)
#
# # Decorate with @interactive to ensure that the function runs
# # in the __main__ namespace that contains 'popn'
# @interactive
# def _compute_glm_lp(n, vs):
# print "Computing log prior for GLM %d" % n
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs, n)
# lp = seval(popn.glm.log_prior,
# syms,
# nvars)
# return lp
#
# lp_glms = dview.map_async(_compute_glm_lp,
# range(N),
# [v]*N)
# # print lp_glms.get()
# # lp_glms.display_outputs()
#
# lp_tot += sum(lp_glms.get())
# return lp_tot
. Output only the next line. | lp_smpls[0] = parallel_compute_log_p(dview, master, x0, N) |
Predict the next line for this snippet: <|code_start|> global parallel_updates
serial_updates, parallel_updates = initialize_updates(popn)
# Return the number of parallel_updates
return len(serial_updates), len(parallel_updates)
n_serial_updates, n_parallel_updates = dview.apply(_create_samplers).get()[0]
# Create map-able functions to sample in parallel
@interactive
def _parallel_update(i, x, n):
return parallel_updates[i].update(x, n)
@interactive
def _serial_update(i, x):
return serial_updates[i].update(x)
## DEBUG Profile the Gibbs sampling loop
# import cProfile, pstats, StringIO
# pr = cProfile.Profile()
# pr.enable()
## END DEBUG
# Alternate fitting the network and fitting the GLMs
lp_smpls = np.zeros(N_samples+1)
lp_smpls[0] = parallel_compute_log_p(dview, master, x0, N)
ll_smpls = np.zeros(N_samples+1)
ll_smpls[0] = parallel_compute_ll(dview, x0, N)
<|code_end|>
with the help of current file imports:
import copy
import numpy as np
import os
import cPickle
import time
from IPython.parallel.util import interactive
from pyglm.utils.parallel_util import parallel_compute_ll, \
parallel_compute_log_p, \
parallel_compute_log_prior
and context from other files:
# Path: pyglm/utils/parallel_util.py
# def parallel_compute_ll(dview,
# v,
# N):
# """ Compute the log prob in parallel
# """
# ll_tot = 0
# # Decorate with @interactive to ensure that the function runs
# # in the __main__ namespace that contains 'popn'
# @interactive
# def _compute_glm_ll(n, vs):
# print "Computing log lkhd for GLM %d" % n
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs, n)
# lp = seval(popn.glm.ll,
# syms,
# nvars)
# return lp
#
# ll_glms = dview.map_async(_compute_glm_ll,
# range(N),
# [v]*N)
#
# ll_tot += sum(ll_glms.get())
# return ll_tot
#
# def parallel_compute_log_p(dview,
# master,
# v,
# N):
# """ Compute the log prob in parallel
# """
#
# # Compute the log probabaility of global variables
# # (e.g. the network) on the first node
# lp_tot = 0
#
# @interactive
# def _compute_network_lp(vs):
# print "Computing log prob for network"
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs,0)
# lp = seval(popn.network.log_p,
# syms,
# nvars)
# #lp = popn.network.log_p.eval(dict(zip(_flatten(tmpsyms),
# # _flatten(tmpnvars))),
# #on_unused_input='ignore')
# return lp
#
# lp_tot += master.apply_sync(_compute_network_lp, v)
#
# # Decorate with @interactive to ensure that the function runs
# # in the __main__ namespace that contains 'popn'
# @interactive
# def _compute_glm_lp(n, vs):
# print "Computing lp for GLM %d" % n
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs, n)
# lp = seval(popn.glm.log_p,
# syms,
# nvars)
# return lp
#
# lp_glms = dview.map_async(_compute_glm_lp,
# range(N),
# [v]*N)
# # print lp_glms.get()
# # lp_glms.display_outputs()
#
# lp_tot += sum(lp_glms.get())
# return lp_tot
#
# def parallel_compute_log_prior(dview,
# master,
# v,
# N):
# """ Compute the log prob in parallel
# """
#
# # Compute the log probabaility of global variables
# # (e.g. the network) on the first node
# lp_tot = 0
#
# @interactive
# def _compute_network_lp(vs):
# print "Computing log prob for network"
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs,0)
# lp = seval(popn.network.log_p,
# syms,
# nvars)
# #lp = popn.network.log_p.eval(dict(zip(_flatten(tmpsyms),
# # _flatten(tmpnvars))),
# #on_unused_input='ignore')
# return lp
#
# lp_tot += master.apply_sync(_compute_network_lp, v)
#
# # Decorate with @interactive to ensure that the function runs
# # in the __main__ namespace that contains 'popn'
# @interactive
# def _compute_glm_lp(n, vs):
# print "Computing log prior for GLM %d" % n
# syms = popn.get_variables()
# nvars = popn.extract_vars(vs, n)
# lp = seval(popn.glm.log_prior,
# syms,
# nvars)
# return lp
#
# lp_glms = dview.map_async(_compute_glm_lp,
# range(N),
# [v]*N)
# # print lp_glms.get()
# # lp_glms.display_outputs()
#
# lp_tot += sum(lp_glms.get())
# return lp_tot
, which may contain function names, class names, or code. Output only the next line. | lprior = parallel_compute_log_prior(dview, master, x0, N) |
Here is a snippet: <|code_start|># Run as script using 'python -m test.synth'
def geweke_test(population,
data,
N_samples=1000):
"""
Sample the posterior distribution over parameters using MCMC.
"""
N = population.model['N']
# Draw initial state from prior
x0 = population.sample()
# Create updates for this population
<|code_end|>
. Write the next line using the current file imports:
import cPickle
import os
import matplotlib.pyplot as plt
import cProfile, pstats, StringIO
import time
import matplotlib.mlab as mlab
from pyglm.models.model_factory import *
from pyglm.inference.gibbs import initialize_updates
from population import Population
from optparse import OptionParser
from scipy.stats import norm
from scipy.stats import gamma
and context from other files:
# Path: pyglm/inference/gibbs.py
# def initialize_updates(population):
# """ Compute the set of updates required for the given population.
# TODO: Figure out how to do this in a really principled way.
# """
# serial_updates = []
# parallel_updates = []
#
# print "Initializing latent variable samplers"
#
# print "Ignoring shared tuning curve update"
# tc_sampler = SharedTuningCurveUpdate()
# tc_sampler.preprocess(population)
# serial_updates.append(tc_sampler)
#
#
# loc_sampler = LatentLocationUpdate()
# loc_sampler.preprocess(population)
# serial_updates.append(loc_sampler)
#
# type_sampler = LatentTypeUpdate()
# type_sampler.preprocess(population)
# serial_updates.append(type_sampler)
#
# # All populations have a parallel GLM sampler
# print "Initializing GLM samplers"
# # glm_sampler = HmcGlmUpdate()
# # glm_sampler.preprocess(population)
# # parallel_updates.append(glm_sampler)
#
# bias_sampler = HmcBiasUpdate()
# bias_sampler.preprocess(population)
# parallel_updates.append(bias_sampler)
#
# bkgd_sampler = HmcBkgdUpdate()
# bkgd_sampler.preprocess(population)
# parallel_updates.append(bkgd_sampler)
#
# from pyglm.components.impulse import DirichletImpulses
# if isinstance(population.glm.imp_model, DirichletImpulses):
# imp_sampler = HmcDirichletImpulseUpdate()
# else:
# imp_sampler = HmcImpulseUpdate()
# imp_sampler.preprocess(population)
# parallel_updates.append(imp_sampler)
#
# # All populations have a network sampler
# print "Initializing network sampler"
# # net_sampler = GibbsNetworkColumnUpdate()
# net_sampler = CollapsedGibbsNetworkColumnUpdate()
# net_sampler.preprocess(population)
# parallel_updates.append(net_sampler)
#
# # If the graph model is a latent distance model, add its update
# # from components.graph import LatentDistanceGraphModel
# # if isinstance(population.network.graph, LatentDistanceGraphModel):
# # print "Initializing latent location sampler"
# # loc_sampler = LatentLocationUpdate()
# # loc_sampler.preprocess(population)
# # serial_updates.append(loc_sampler)
#
# return serial_updates, parallel_updates
, which may include functions, classes, or code. Output only the next line. | serial_updates, parallel_updates = initialize_updates(population) |
Predict the next line after this snippet: <|code_start|>
class Population:
"""
Population connected GLMs.
"""
def __init__(self, model):
"""
Initialize the population of GLMs connected by a network.
"""
self.model = model
self.N = model['N']
# Initialize a list of data sequences
self.data_sequences = []
# Initialize latent variables of the population
<|code_end|>
using the current file's imports:
import numpy as np
from pyglm.components.latent import LatentVariables
from pyglm.components.network import Network
from glm import Glm
from pyglm.utils.theano_func_wrapper import seval
and any relevant context from other files:
# Path: pyglm/components/latent.py
# class LatentVariables(Component):
# """
# Container for a set of latent variables, e.g. neuron types/locations
# """
# def __init__(self, model):
# """
# Go through the items in the model, each of which specifies a latent variable component
# """
# self.model = model
# if 'latent' in model.keys():
# self.latent_model = model['latent']
# else:
# self.latent_model = {}
#
# self.log_p = T.constant(0.0)
#
# # Enumerate and create latent variable component
# self.latentlist = []
# self.latentdict = {}
#
# for (k,v) in self.latent_model.items():
# # Create the latent component
# latent_component = create_latent_component(model, v)
# self.log_p += latent_component.log_p
#
# # Add to the list of latent variable components
# self.latentlist.append(latent_component)
# self.latentdict[latent_component.name] = latent_component
#
# def get_variables(self):
# v = {}
# for (name, comp) in self.latentdict.items():
# v[name] = comp.get_variables()
#
# return v
#
# def get_state(self):
# st = {}
# for (name, comp) in self.latentdict.items():
# st[name] = comp.get_state()
#
# return st
#
# def sample(self, acc):
# s = {}
# for (name, comp) in self.latentdict.items():
# s[name] = comp.sample(acc)
# return s
#
# def set_data(self, data):
# for comp in self.latentlist:
# comp.set_data(data)
#
# # Allow consumers to access this container as a dict
# def __getitem__(self, item):
# return self.latentdict[item]
#
# Path: pyglm/components/network.py
# class Network(Component):
# """ The network component encapsulates the impulse responses, weights,
# and adjacency matrix of the spiking interactions.
# """
#
# def __init__(self, model, latent):
# """ Initialize the filtered stim model
# """
# self.model = model
# self.latent = latent
# self.prms = model['network']
#
# # Keep track of the number of variables
# self.n_vars = 0
#
# # Create the graph model for the adjacency matrix
# self.graph = create_graph_component(model, latent)
#
# # Create the weight model for the weight matrix
# self.weights = create_weight_component(model, latent)
#
# # Compute log probability
# self.log_p = self.graph.log_p + self.weights.log_p
#
# def get_variables(self):
# """ Get the theano variables associated with this model.
# """
# return {'graph' : self.graph.get_variables(),
# 'weights' : self.weights.get_variables()}
#
# def get_state(self):
# """ Get the state of the graph and weights
# """
# state = {'graph' : self.graph.get_state(),
# 'weights' : self.weights.get_state()}
# return state
#
# def set_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# self.graph.set_data(data)
# self.weights.set_data(data)
#
# def sample(self, acc):
# """
# return a sample of the variables
# """
# return {'graph' : self.graph.sample(acc),
# 'weights' : self.weights.sample(acc)}
#
# Path: pyglm/utils/theano_func_wrapper.py
# def seval(expr, syms, vals, defaults=None, givens=[]):
# """
# Evaluate the symbolic expression which depends on a set of symbolic variables,
# given a set of variable bindings.
# expr : Theano variable to be evaluated
# syms : dictionary of symbolic variables
# vals : dictionary of values to assign to the symbolic vars in syms
# key structure should mimic that of syms
#
# defaults : an optional dictionary providing backup values for
# syms keys not found in vals.
# """
# # Look for a function handle corresponding to this expression with these givens
# hash_value = lambda v: hashlib.sha1(v).hexdigest() if \
# isinstance(v, np.ndarray) else v
# hashable_givens = tuple(map(lambda (k,v): (k, hash_value(v)), givens))
# hashable_syms = tuple(map(lambda v: hash_value(v), _flatten(syms)))
# key = (expr, hashable_syms, hashable_givens)
# if key in _func_cache.keys():
# #print "DBG: Found key %s in cache" % str(key)
# f = _func_cache[key]
# #print "DBG: Calling expr: %s" % str(expr)
# else:
# # Create a callable theano function and cache it
# # print "DBG: Compiling expr: %s" % str(expr)
# sargs = _flatten(syms)
# if len(givens) == 0:
# # print "DBG: Compiling expression %s with no givens" % expr
# f = theano.function(sargs, expr,
# on_unused_input='ignore')
# else:
# # print "DBG: Compiling expression %s with givens: %s" % (expr, str(givens))
# f = theano.function(sargs, expr,
# givens=givens,
# on_unused_input='ignore')
# _func_cache[key] = f
#
# # Easiest thing to do is call the function with all symbolic variables
# args = _extract_vals(syms,vals,defaults)
# return f(*args)
. Output only the next line. | self.latent = LatentVariables(model) |
Here is a snippet: <|code_start|>
class Population:
"""
Population connected GLMs.
"""
def __init__(self, model):
"""
Initialize the population of GLMs connected by a network.
"""
self.model = model
self.N = model['N']
# Initialize a list of data sequences
self.data_sequences = []
# Initialize latent variables of the population
self.latent = LatentVariables(model)
# Create a network model to connect the GLMs
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
from pyglm.components.latent import LatentVariables
from pyglm.components.network import Network
from glm import Glm
from pyglm.utils.theano_func_wrapper import seval
and context from other files:
# Path: pyglm/components/latent.py
# class LatentVariables(Component):
# """
# Container for a set of latent variables, e.g. neuron types/locations
# """
# def __init__(self, model):
# """
# Go through the items in the model, each of which specifies a latent variable component
# """
# self.model = model
# if 'latent' in model.keys():
# self.latent_model = model['latent']
# else:
# self.latent_model = {}
#
# self.log_p = T.constant(0.0)
#
# # Enumerate and create latent variable component
# self.latentlist = []
# self.latentdict = {}
#
# for (k,v) in self.latent_model.items():
# # Create the latent component
# latent_component = create_latent_component(model, v)
# self.log_p += latent_component.log_p
#
# # Add to the list of latent variable components
# self.latentlist.append(latent_component)
# self.latentdict[latent_component.name] = latent_component
#
# def get_variables(self):
# v = {}
# for (name, comp) in self.latentdict.items():
# v[name] = comp.get_variables()
#
# return v
#
# def get_state(self):
# st = {}
# for (name, comp) in self.latentdict.items():
# st[name] = comp.get_state()
#
# return st
#
# def sample(self, acc):
# s = {}
# for (name, comp) in self.latentdict.items():
# s[name] = comp.sample(acc)
# return s
#
# def set_data(self, data):
# for comp in self.latentlist:
# comp.set_data(data)
#
# # Allow consumers to access this container as a dict
# def __getitem__(self, item):
# return self.latentdict[item]
#
# Path: pyglm/components/network.py
# class Network(Component):
# """ The network component encapsulates the impulse responses, weights,
# and adjacency matrix of the spiking interactions.
# """
#
# def __init__(self, model, latent):
# """ Initialize the filtered stim model
# """
# self.model = model
# self.latent = latent
# self.prms = model['network']
#
# # Keep track of the number of variables
# self.n_vars = 0
#
# # Create the graph model for the adjacency matrix
# self.graph = create_graph_component(model, latent)
#
# # Create the weight model for the weight matrix
# self.weights = create_weight_component(model, latent)
#
# # Compute log probability
# self.log_p = self.graph.log_p + self.weights.log_p
#
# def get_variables(self):
# """ Get the theano variables associated with this model.
# """
# return {'graph' : self.graph.get_variables(),
# 'weights' : self.weights.get_variables()}
#
# def get_state(self):
# """ Get the state of the graph and weights
# """
# state = {'graph' : self.graph.get_state(),
# 'weights' : self.weights.get_state()}
# return state
#
# def set_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# self.graph.set_data(data)
# self.weights.set_data(data)
#
# def sample(self, acc):
# """
# return a sample of the variables
# """
# return {'graph' : self.graph.sample(acc),
# 'weights' : self.weights.sample(acc)}
#
# Path: pyglm/utils/theano_func_wrapper.py
# def seval(expr, syms, vals, defaults=None, givens=[]):
# """
# Evaluate the symbolic expression which depends on a set of symbolic variables,
# given a set of variable bindings.
# expr : Theano variable to be evaluated
# syms : dictionary of symbolic variables
# vals : dictionary of values to assign to the symbolic vars in syms
# key structure should mimic that of syms
#
# defaults : an optional dictionary providing backup values for
# syms keys not found in vals.
# """
# # Look for a function handle corresponding to this expression with these givens
# hash_value = lambda v: hashlib.sha1(v).hexdigest() if \
# isinstance(v, np.ndarray) else v
# hashable_givens = tuple(map(lambda (k,v): (k, hash_value(v)), givens))
# hashable_syms = tuple(map(lambda v: hash_value(v), _flatten(syms)))
# key = (expr, hashable_syms, hashable_givens)
# if key in _func_cache.keys():
# #print "DBG: Found key %s in cache" % str(key)
# f = _func_cache[key]
# #print "DBG: Calling expr: %s" % str(expr)
# else:
# # Create a callable theano function and cache it
# # print "DBG: Compiling expr: %s" % str(expr)
# sargs = _flatten(syms)
# if len(givens) == 0:
# # print "DBG: Compiling expression %s with no givens" % expr
# f = theano.function(sargs, expr,
# on_unused_input='ignore')
# else:
# # print "DBG: Compiling expression %s with givens: %s" % (expr, str(givens))
# f = theano.function(sargs, expr,
# givens=givens,
# on_unused_input='ignore')
# _func_cache[key] = f
#
# # Easiest thing to do is call the function with all symbolic variables
# args = _extract_vals(syms,vals,defaults)
# return f(*args)
, which may include functions, classes, or code. Output only the next line. | self.network = Network(model, self.latent) |
Continue the code snippet: <|code_start|>"""
Weight models for the Network GLM
"""
def create_graph_component(model, latent):
type = model['network']['graph']['type'].lower()
if type == 'complete':
graph = CompleteGraphModel(model)
elif type == 'erdos_renyi' or \
type == 'erdosrenyi':
graph = ErdosRenyiGraphModel(model)
elif type == 'sbm':
graph = StochasticBlockGraphModel(model, latent)
elif type == 'distance':
graph = LatentDistanceGraphModel(model, latent)
else:
raise Exception("Unrecognized graph model: %s" % type)
return graph
<|code_end|>
. Use current file imports:
import numpy as np
import theano
import theano.tensor as T
from pyglm.components.component import Component
and context (classes, functions, or code) from other files:
# Path: pyglm/components/component.py
# class Component(object):
# """
# """
#
# def __init__(self, model):
# """ Initialize the component with the parameters from the given model.
# """
# pass
#
# def get_variables(self):
# """ Get a dictionary of (name : Theano variable) items for all the
# symbolic variables associated with this component.
# """
# return {}
#
# def get_state(self):
# return {}
#
# def preprocess_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_hyperparameters(self, model):
# """ Set hyperparameters of the model
# """
# pass
#
# def sample(self, acc):
# """
# return a sample of the variables
# """
# return {}
. Output only the next line. | class CompleteGraphModel(Component): |
Given the code snippet: <|code_start|> x_true=None,
resdir=None,
do_plot_connectivity=True,
do_plot_stim_resp=True,
do_plot_imp_responses=True,
do_plot_firing_rates=True,
do_plot_ks=True,
do_plot_logpr=True):
""" Plot the inferred stimulus tuning curves and impulse responses
"""
if not resdir:
resdir = '.'
true_given = x_true is not None and popn_true is not None
# Make sure we have a list of x's
if not isinstance(x_inf, list):
x_inf = [x_inf]
# Evaluate the state for each of the parameter settings
N_samples = len(x_inf)
s_inf = []
for x in x_inf:
s_inf.append(population.eval_state(x))
s_true = None
if true_given:
s_true = popn_true.eval_state(x_true)
# Average the inferred states
<|code_end|>
, generate the next line using the imports in this file:
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import cPickle
from matplotlib.colors import LinearSegmentedColormap
from pyglm.utils.avg_dicts import average_list_of_dicts, std_list_of_dicts
from matplotlib.colorbar import ColorbarBase
from matplotlib.patches import Rectangle
from matplotlib.patches import Polygon
from optparse import OptionParser
from test.synth_harness import initialize_test_harness
and context (functions, classes, or occasionally code) from other files:
# Path: pyglm/utils/avg_dicts.py
# def average_list_of_dicts(smpls):
# """ Average a list of dictionaries, e.g. a list of samples from
# the MCMC loop. The dictionary elements may themselves be
# dictionaries, so work recursively and only average the leaves.
# """
# N_smpls = len(smpls)
# import copy
# avg = copy.deepcopy(smpls[0])
#
# def inc_avg(avg, smpl):
# """ Helper to recrusively add to the average
# """
# if isinstance(smpl, dict):
# for (key,val) in smpl.items():
# if isinstance(val, dict) or \
# isinstance(val, list):
# # Recurse if the entry is another dict
# avg[key] = inc_avg(avg[key], val)
# elif isinstance(val, np.ndarray):
# # Otherwise increment the value
# avg[key] = avg[key].astype(np.float) + \
# val.astype(np.float)
# else:
# # Who knows if this will work without casting!
# avg[key] += val
#
# elif isinstance(smpl, list):
# for (i,val) in enumerate(smpl):
# if isinstance(val, list) or \
# isinstance(val, dict):
# # Recurse if the entry is another iterable
# avg[i] = inc_avg(avg[i], val)
# elif isinstance(val, np.ndarray):
# avg[i] = avg[i].astype(np.float) + \
# val.astype(np.float)
# else:
# # Who knows if this will work without casting!
# avg[i] += float(val)
# return avg
#
# for smpl in smpls[1:]:
# avg = inc_avg(avg, smpl)
#
# def norm_avg(avg, N_smpls):
# """ Normalize the average by dividing by N_smpls
# """
# if isinstance(avg, dict):
# for (key,val) in avg.items():
# if isinstance(val, dict) or \
# isinstance(val, list):
# avg[key] = norm_avg(val, N_smpls)
# else:
# avg[key] /= float(N_smpls)
# elif isinstance(avg, list):
# for (i,val) in enumerate(avg):
# if isinstance(val, list) or \
# isinstance(val, dict):
# # Recurse if the entry is another iterable
# avg[i] = norm_avg(val, N_smpls)
# else:
# avg[i] /= float(N_smpls)
#
# return avg
#
# avg = norm_avg(avg, N_smpls)
# return avg
#
# def std_list_of_dicts(smpls, avg=None):
# """ Compute the std of a list of dictionaries, e.g. a list of samples from
# the MCMC loop. The dictionary elements may themselves be
# dictionaries, so work recursively and only average the leaves.
# """
# N_smpls = len(smpls)
# var = variance_list_of_dicts(smpls, avg=avg)
#
# def sqrt_helper(smpl):
# """ Helper to recursively take the square root
# """
# if isinstance(smpl, dict):
# for (key,val) in smpl.items():
# if isinstance(val, dict) or \
# isinstance(val, list):
# # Recurse if the entry is another dict
# smpl[key] = sqrt_helper(val)
# else:
# # Take the square root
# smpl[key] = np.sqrt(val)
#
# elif isinstance(smpl, list):
# for (i,val) in enumerate(smpl):
# if isinstance(val, list) or \
# isinstance(val, dict):
# # Recurse if the entry is another iterable
# smpl[i] = sqrt_helper(val)
# else:
# smpl[i] = np.sqrt(val)
# return smpl
#
# std = sqrt_helper(var)
# return std
. Output only the next line. | s_avg = average_list_of_dicts(s_inf) |
Using the snippet: <|code_start|> resdir=None,
do_plot_connectivity=True,
do_plot_stim_resp=True,
do_plot_imp_responses=True,
do_plot_firing_rates=True,
do_plot_ks=True,
do_plot_logpr=True):
""" Plot the inferred stimulus tuning curves and impulse responses
"""
if not resdir:
resdir = '.'
true_given = x_true is not None and popn_true is not None
# Make sure we have a list of x's
if not isinstance(x_inf, list):
x_inf = [x_inf]
# Evaluate the state for each of the parameter settings
N_samples = len(x_inf)
s_inf = []
for x in x_inf:
s_inf.append(population.eval_state(x))
s_true = None
if true_given:
s_true = popn_true.eval_state(x_true)
# Average the inferred states
s_avg = average_list_of_dicts(s_inf)
<|code_end|>
, determine the next line of code. You have imports:
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import cPickle
from matplotlib.colors import LinearSegmentedColormap
from pyglm.utils.avg_dicts import average_list_of_dicts, std_list_of_dicts
from matplotlib.colorbar import ColorbarBase
from matplotlib.patches import Rectangle
from matplotlib.patches import Polygon
from optparse import OptionParser
from test.synth_harness import initialize_test_harness
and context (class names, function names, or code) available:
# Path: pyglm/utils/avg_dicts.py
# def average_list_of_dicts(smpls):
# """ Average a list of dictionaries, e.g. a list of samples from
# the MCMC loop. The dictionary elements may themselves be
# dictionaries, so work recursively and only average the leaves.
# """
# N_smpls = len(smpls)
# import copy
# avg = copy.deepcopy(smpls[0])
#
# def inc_avg(avg, smpl):
# """ Helper to recrusively add to the average
# """
# if isinstance(smpl, dict):
# for (key,val) in smpl.items():
# if isinstance(val, dict) or \
# isinstance(val, list):
# # Recurse if the entry is another dict
# avg[key] = inc_avg(avg[key], val)
# elif isinstance(val, np.ndarray):
# # Otherwise increment the value
# avg[key] = avg[key].astype(np.float) + \
# val.astype(np.float)
# else:
# # Who knows if this will work without casting!
# avg[key] += val
#
# elif isinstance(smpl, list):
# for (i,val) in enumerate(smpl):
# if isinstance(val, list) or \
# isinstance(val, dict):
# # Recurse if the entry is another iterable
# avg[i] = inc_avg(avg[i], val)
# elif isinstance(val, np.ndarray):
# avg[i] = avg[i].astype(np.float) + \
# val.astype(np.float)
# else:
# # Who knows if this will work without casting!
# avg[i] += float(val)
# return avg
#
# for smpl in smpls[1:]:
# avg = inc_avg(avg, smpl)
#
# def norm_avg(avg, N_smpls):
# """ Normalize the average by dividing by N_smpls
# """
# if isinstance(avg, dict):
# for (key,val) in avg.items():
# if isinstance(val, dict) or \
# isinstance(val, list):
# avg[key] = norm_avg(val, N_smpls)
# else:
# avg[key] /= float(N_smpls)
# elif isinstance(avg, list):
# for (i,val) in enumerate(avg):
# if isinstance(val, list) or \
# isinstance(val, dict):
# # Recurse if the entry is another iterable
# avg[i] = norm_avg(val, N_smpls)
# else:
# avg[i] /= float(N_smpls)
#
# return avg
#
# avg = norm_avg(avg, N_smpls)
# return avg
#
# def std_list_of_dicts(smpls, avg=None):
# """ Compute the std of a list of dictionaries, e.g. a list of samples from
# the MCMC loop. The dictionary elements may themselves be
# dictionaries, so work recursively and only average the leaves.
# """
# N_smpls = len(smpls)
# var = variance_list_of_dicts(smpls, avg=avg)
#
# def sqrt_helper(smpl):
# """ Helper to recursively take the square root
# """
# if isinstance(smpl, dict):
# for (key,val) in smpl.items():
# if isinstance(val, dict) or \
# isinstance(val, list):
# # Recurse if the entry is another dict
# smpl[key] = sqrt_helper(val)
# else:
# # Take the square root
# smpl[key] = np.sqrt(val)
#
# elif isinstance(smpl, list):
# for (i,val) in enumerate(smpl):
# if isinstance(val, list) or \
# isinstance(val, dict):
# # Recurse if the entry is another iterable
# smpl[i] = sqrt_helper(val)
# else:
# smpl[i] = np.sqrt(val)
# return smpl
#
# std = sqrt_helper(var)
# return std
. Output only the next line. | s_std = std_list_of_dicts(s_inf, s_avg) |
Here is a snippet: <|code_start|>
def create_prior(model, **kwargs):
typ = model['type'].lower()
if typ == 'normal' or \
typ == 'gaussian':
return Gaussian(model, **kwargs)
elif typ == 'categorical':
return Categorical(model, **kwargs)
elif typ == 'jointcategorical' or \
typ == 'joint_categorical':
return JointCategorical(model, **kwargs)
elif typ == 'spherical_gaussian':
return SphericalGaussian(model, **kwargs)
elif typ == 'group_lasso' or \
typ == 'grouplasso':
return GroupLasso(model, **kwargs)
elif typ == 'dpp':
return DeterminenalPointProcess(model, **kwargs)
elif typ == 'dirichlet':
return Dirichlet(model)
else:
raise Exception("Unrecognized prior type: %s" % typ)
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import theano
import theano.tensor as T
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from pyglm.components.component import Component
from theano.sandbox.linalg.ops import Det
from hips.inference.hmc import hmc
and context from other files:
# Path: pyglm/components/component.py
# class Component(object):
# """
# """
#
# def __init__(self, model):
# """ Initialize the component with the parameters from the given model.
# """
# pass
#
# def get_variables(self):
# """ Get a dictionary of (name : Theano variable) items for all the
# symbolic variables associated with this component.
# """
# return {}
#
# def get_state(self):
# return {}
#
# def preprocess_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_hyperparameters(self, model):
# """ Set hyperparameters of the model
# """
# pass
#
# def sample(self, acc):
# """
# return a sample of the variables
# """
# return {}
, which may include functions, classes, or code. Output only the next line. | class Categorical(Component): |
Given snippet: <|code_start|> # master['x0'] = x0
# Also initialize with intelligent parameters from the data
# dview['x0d'] = x0
# @interactive
# def _initialize_with_data(n):
# initialize_with_data(popn, data, x0d, Ns=n)
# x0s = dview.map_async(_initialize_with_data,
# range(N))
# x0s = x0s.get()
# Extract the initial parameters for each GLM
#x0['glms'] = [x0s['glms'][n] for n in np.arange(N)]
print "Preparing Theano functions for inference"
# Compute log prob, gradient, and hessian wrt network parameters
dview.execute('net_inf_prms = prep_network_inference(popn,'
'use_hessian=%s,'
'use_rop=%s)' % (str(use_hessian), str(use_rop)),
block=True)
# Compute gradients of the log prob wrt the GLM parameters
dview.execute('glm_inf_prms = prep_glm_inference(popn,'
'use_hessian=%s,'
'use_rop=%s)' % (str(use_hessian), str(use_rop)),
block=True)
# Parallel function to fit GLMs
@interactive
def _parallel_fit_glm(n, x, use_hessian=False, use_rop=False):
nvars = popn.extract_vars(x, n)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
from IPython.parallel.util import interactive
from pyglm.inference.coord_descent import fit_glm
from pyglm.utils.progress_report import wait_watching_stdout
and context:
# Path: pyglm/inference/coord_descent.py
# def fit_glm(xn, n,
# (glm_syms, glm_nll, g_glm_nll)):
# """ Fit the GLM parameters in state dict x
# """
# # Get the differentiable variables for the n-th GLM
# dnvars = get_vars(glm_syms, xn['glm'])
# x_glm_0, shapes = packdict(dnvars)
#
# # Create lambda functions to compute the nll and its gradient and Hessian
# def nll(x_glm_vec):
# y = glm_nll(x_glm_vec, xn)
# if np.isnan(y):
# y = 1e16
#
# return y
#
# def grad_nll(x_glm_vec):
# g = g_glm_nll(x_glm_vec, xn)
# if np.any(np.isnan(g)):
# g = np.zeros_like(g)
#
# return g
#
# # Callback to print progress. In order to count iters, we need to
# # pass the current iteration via a list
# ncg_iter_ls = [0]
# def progress_report(x_curr, ncg_iter_ls):
# ll = -1.0*nll(x_curr)
# print "Newton iter %d.\tNeuron %d. LL: %.1f" % (ncg_iter_ls[0],n,ll)
# ncg_iter_ls[0] += 1
# cbk = lambda x_curr: progress_report(x_curr, ncg_iter_ls)
#
# # Call the appropriate scipy optimization function
# res = opt.minimize(nll, x_glm_0,
# method="bfgs",
# jac=grad_nll,
# options={'disp': True,
# 'maxiter' : 225},
# callback=cbk)
# xn_opt = res.x
#
# # Unpack the optimized parameters back into the state dict
# x_glm_n = unpackdict(xn_opt, shapes)
# set_vars(glm_syms, xn['glm'], x_glm_n)
#
# Path: pyglm/utils/progress_report.py
# def wait_watching_stdout(ar, interval=1, truncate=100):
# """ Print the outputs of each worker
# """
# stdoffs = [0]*len(ar.stdout)
# while not ar.ready():
# stdouts = ar.stdout
# if not any(stdouts):
# continue
#
# for eid, stdout in enumerate(stdouts):
# if stdout:
# newoff = len(stdout)
# if newoff > stdoffs[eid]:
# print "[ stdout %2i ]\n%s" % (eid, stdout[stdoffs[eid]:])
# stdoffs[eid] = newoff
#
# print '-' * 30
# print "%.3fs elapsed. Progress: %d/%d" % (ar.elapsed, ar.progress, len(ar))
# print ""
#
# sys.stdout.flush()
# time.sleep(interval)
which might include code, classes, or functions. Output only the next line. | fit_glm(nvars, n, glm_inf_prms, use_hessian, use_rop) |
Predict the next line for this snippet: <|code_start|> block=True)
# Parallel function to fit GLMs
@interactive
def _parallel_fit_glm(n, x, use_hessian=False, use_rop=False):
nvars = popn.extract_vars(x, n)
fit_glm(nvars, n, glm_inf_prms, use_hessian, use_rop)
return nvars['glm']
# Alternate fitting the network and fitting the GLMs
x = x0
lp_prev = parallel_compute_log_p(dview, master, x, N)
converged = False
iter = 0
while not converged and iter < maxiter:
iter += 1
print "Coordinate descent iteration %d." % iter
# TODO Fit the network on the first engine
# fit_network(x, net_inf_prms, use_hessian, use_rop)
# Fit the GLMs in parallel
x_glms = dview.map_async(_parallel_fit_glm,
range(N),
[x]*N)
# Print progress report ever interval seconds
interval = 15.0
# Timeout after specified number of seconds (-1 = Inf?)
#timeout = -1
<|code_end|>
with the help of current file imports:
import numpy as np
from IPython.parallel.util import interactive
from pyglm.inference.coord_descent import fit_glm
from pyglm.utils.progress_report import wait_watching_stdout
and context from other files:
# Path: pyglm/inference/coord_descent.py
# def fit_glm(xn, n,
# (glm_syms, glm_nll, g_glm_nll)):
# """ Fit the GLM parameters in state dict x
# """
# # Get the differentiable variables for the n-th GLM
# dnvars = get_vars(glm_syms, xn['glm'])
# x_glm_0, shapes = packdict(dnvars)
#
# # Create lambda functions to compute the nll and its gradient and Hessian
# def nll(x_glm_vec):
# y = glm_nll(x_glm_vec, xn)
# if np.isnan(y):
# y = 1e16
#
# return y
#
# def grad_nll(x_glm_vec):
# g = g_glm_nll(x_glm_vec, xn)
# if np.any(np.isnan(g)):
# g = np.zeros_like(g)
#
# return g
#
# # Callback to print progress. In order to count iters, we need to
# # pass the current iteration via a list
# ncg_iter_ls = [0]
# def progress_report(x_curr, ncg_iter_ls):
# ll = -1.0*nll(x_curr)
# print "Newton iter %d.\tNeuron %d. LL: %.1f" % (ncg_iter_ls[0],n,ll)
# ncg_iter_ls[0] += 1
# cbk = lambda x_curr: progress_report(x_curr, ncg_iter_ls)
#
# # Call the appropriate scipy optimization function
# res = opt.minimize(nll, x_glm_0,
# method="bfgs",
# jac=grad_nll,
# options={'disp': True,
# 'maxiter' : 225},
# callback=cbk)
# xn_opt = res.x
#
# # Unpack the optimized parameters back into the state dict
# x_glm_n = unpackdict(xn_opt, shapes)
# set_vars(glm_syms, xn['glm'], x_glm_n)
#
# Path: pyglm/utils/progress_report.py
# def wait_watching_stdout(ar, interval=1, truncate=100):
# """ Print the outputs of each worker
# """
# stdoffs = [0]*len(ar.stdout)
# while not ar.ready():
# stdouts = ar.stdout
# if not any(stdouts):
# continue
#
# for eid, stdout in enumerate(stdouts):
# if stdout:
# newoff = len(stdout)
# if newoff > stdoffs[eid]:
# print "[ stdout %2i ]\n%s" % (eid, stdout[stdoffs[eid]:])
# stdoffs[eid] = newoff
#
# print '-' * 30
# print "%.3fs elapsed. Progress: %d/%d" % (ar.elapsed, ar.progress, len(ar))
# print ""
#
# sys.stdout.flush()
# time.sleep(interval)
, which may contain function names, class names, or code. Output only the next line. | wait_watching_stdout(x_glms, interval=interval) |
Given the following code snippet before the placeholder: <|code_start|>""" Fit a Network GLM with MAP estimation. For some models, the log posterior
is concave and has a unique maximum.
"""
def map_estimate(network_glm, x0=None):
"""
Compute the maximum a posterior parameter estimate using Theano to compute
gradients of the log probability.
"""
N = network_glm.model['N']
network = network_glm.network
# Make sure the network is a complete adjacency matrix with constant weights
# and basis function impulse responses
if not isinstance(network.graph, CompleteGraphModel):
raise Exception("MAP inference can only be performed with the complete graph model.")
<|code_end|>
, predict the next line using imports from the current file:
import scipy.optimize as opt
from pyglm.utils.packvec import *
from components.graph import CompleteGraphModel
from pyglm.components.weights import ConstantWeightModel
and context including class names, function names, and sometimes code from other files:
# Path: pyglm/components/weights.py
# class ConstantWeightModel(Component):
# def __init__(self, model):
# """ Initialize the filtered stim model
# """
# self.model = model
# N = model['N']
#
# prms = model['network']['weight']
# self.value = prms['value']
#
# # Define weight matrix
# self.W = self.value * T.ones((N,N))
#
# # Define log probability
# self.log_p = T.constant(0.0)
#
# def get_state(self):
# return {'W': self.W}
. Output only the next line. | if not isinstance(network.weights, ConstantWeightModel): |
Next line prediction: <|code_start|>
data_dir = '/Users/scott/Projects/pyglm/data/synth/dist/N16T300/2014_07_22-10_01/'
with open(os.path.join(data_dir, 'data.pkl')) as f:
data = cPickle.load(f)
<|code_end|>
. Use current file imports:
(import cPickle
import os
from pyglm.utils.io import segment_data)
and context including class names, function names, or small code snippets from other files:
# Path: pyglm/utils/io.py
# def segment_data(data, (T_start, T_stop)):
# """ Extract a segment of the data
# """
# import copy
# new_data = copy.deepcopy(data)
#
# # Check that T_start and T_stop are within the range of the data
# assert T_start >= 0 and T_start <= data['T']
# assert T_stop >= 0 and T_stop <= data['T']
# assert T_start < T_stop
#
# # Set the new T's
# new_data['T'] = T_stop - T_start
#
# # Get indices for start and stop of spike train
# i_start = T_start // data['dt']
# i_stop = T_stop // data['dt']
# new_data['S'] = new_data['S'][i_start:i_stop, :]
#
# # Get indices for start and stop of stim
# i_start = T_start // data['dt_stim']
# i_stop = T_stop // data['dt_stim']
# new_data['stim'] = new_data['stim'][i_start:i_stop, :]
#
# return new_data
. Output only the next line. | data_test = segment_data(data, (240,300)) |
Based on the snippet: <|code_start|>
def create_bkgd_component(model, glm, latent):
type = model['bkgd']['type'].lower()
if type == 'no_stimulus' or \
type == 'none' or \
type == 'nostimulus':
bkgd = NoStimulus(model)
elif type == 'basis':
bkgd = BasisStimulus(model)
elif type == 'spatiotemporal':
bkgd = SpatiotemporalStimulus(model)
elif type == 'sharedtuningcurve':
bkgd = SharedTuningCurveStimulus(model, glm, latent)
else:
raise Exception("Unrecognized backgound model: %s" % type)
return bkgd
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import hashlib
import cPickle
import theano
import theano.tensor as T
from os.path import expanduser
from pyglm.components.component import Component
from pyglm.utils.basis import *
and context (classes, functions, sometimes code) from other files:
# Path: pyglm/components/component.py
# class Component(object):
# """
# """
#
# def __init__(self, model):
# """ Initialize the component with the parameters from the given model.
# """
# pass
#
# def get_variables(self):
# """ Get a dictionary of (name : Theano variable) items for all the
# symbolic variables associated with this component.
# """
# return {}
#
# def get_state(self):
# return {}
#
# def preprocess_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_hyperparameters(self, model):
# """ Set hyperparameters of the model
# """
# pass
#
# def sample(self, acc):
# """
# return a sample of the variables
# """
# return {}
. Output only the next line. | class NoStimulus(Component): |
Here is a snippet: <|code_start|> the log probability with respect to the differentiable GLM
parameters, e.g. the weight matrix if it exists.
"""
glm = population.glm
syms = population.get_variables()
# Compute gradients of the log prob wrt the GLM parameters
glm_syms = differentiable(syms['glm'])
print "Computing gradient of the prior w.r.t. the differentiable GLM parameters"
g_glm_logprior, _ = grad_wrt_list(glm.log_prior, _flatten(glm_syms))
print "Computing gradient of the GLM likelihood w.r.t. the differentiable GLM parameters"
g_glm_ll, _ = grad_wrt_list(glm.ll, _flatten(glm_syms))
# TODO: Replace this with a function that just gets the shapes?
x0 = population.sample()
nvars = population.extract_vars(x0, 0)
dnvars = get_vars(glm_syms, nvars['glm'])
_,glm_shapes = packdict(dnvars)
# Private function to compute the log probability and its gradient
# with respect to a set of parameters
def nlp(x_glm_vec, x):
"""
Helper function to compute the negative log posterior for a given set
of GLM parameters. The parameters are passed in as a vector.
"""
x_glm = unpackdict(x_glm_vec, glm_shapes)
set_vars(glm_syms, x['glm'], x_glm)
<|code_end|>
. Write the next line using the current file imports:
import copy
import scipy.optimize as opt
from pyglm.utils.theano_func_wrapper import seval, _flatten
from pyglm.utils.packvec import *
from pyglm.utils.grads import *
from pyglm.components.graph import CompleteGraphModel
from pyglm.inference.smart_init import initialize_with_data
and context from other files:
# Path: pyglm/utils/theano_func_wrapper.py
# def seval(expr, syms, vals, defaults=None, givens=[]):
# """
# Evaluate the symbolic expression which depends on a set of symbolic variables,
# given a set of variable bindings.
# expr : Theano variable to be evaluated
# syms : dictionary of symbolic variables
# vals : dictionary of values to assign to the symbolic vars in syms
# key structure should mimic that of syms
#
# defaults : an optional dictionary providing backup values for
# syms keys not found in vals.
# """
# # Look for a function handle corresponding to this expression with these givens
# hash_value = lambda v: hashlib.sha1(v).hexdigest() if \
# isinstance(v, np.ndarray) else v
# hashable_givens = tuple(map(lambda (k,v): (k, hash_value(v)), givens))
# hashable_syms = tuple(map(lambda v: hash_value(v), _flatten(syms)))
# key = (expr, hashable_syms, hashable_givens)
# if key in _func_cache.keys():
# #print "DBG: Found key %s in cache" % str(key)
# f = _func_cache[key]
# #print "DBG: Calling expr: %s" % str(expr)
# else:
# # Create a callable theano function and cache it
# # print "DBG: Compiling expr: %s" % str(expr)
# sargs = _flatten(syms)
# if len(givens) == 0:
# # print "DBG: Compiling expression %s with no givens" % expr
# f = theano.function(sargs, expr,
# on_unused_input='ignore')
# else:
# # print "DBG: Compiling expression %s with givens: %s" % (expr, str(givens))
# f = theano.function(sargs, expr,
# givens=givens,
# on_unused_input='ignore')
# _func_cache[key] = f
#
# # Easiest thing to do is call the function with all symbolic variables
# args = _extract_vals(syms,vals,defaults)
# return f(*args)
#
# def _flatten(d):
# """ Pack a hierarchical dictionary of variables into a list
# Sorting is important as it ensures the function is called with
# the inputs in the same order each time!
# """
# l = []
# # This sorting is important!
# for (k,v) in sorted(d.items(), key=lambda t: t[0]):
# if isinstance(v, dict):
# lv = _flatten(v)
# for v2 in lv:
# l.append(v2)
# else:
# l.append(v)
# return l
#
# Path: pyglm/components/graph.py
# class CompleteGraphModel(Component):
# def __init__(self, model):
# """ Initialize the filtered stim model
# """
# self.model = model
# N = model['N']
#
# # Define complete adjacency matrix
# self.A = T.ones((N, N))
#
# # Define log probability
# self.log_p = T.constant(0.0)
#
# def get_state(self):
# return {'A': self.A}
#
# Path: pyglm/inference/smart_init.py
# def initialize_with_data(population, data, x0, Ns=None):
# """ Initialize the parameters x0 with smart draws from the data
# """
# initialize_stim_with_sta(population, data, x0, Ns=Ns)
# initialize_with_dense_graph(population, data, x0)
# # initialize_with_no_coupling(population, data, x0)
, which may include functions, classes, or code. Output only the next line. | lp = seval(glm.log_prior, |
Using the snippet: <|code_start|>""" Fit a Network GLM with MAP estimation. For some models, the log posterior
is concave and has a unique maximum.
"""
def prep_first_order_glm_inference(population):
""" Initialize functions that compute the gradient and Hessian of
the log probability with respect to the differentiable GLM
parameters, e.g. the weight matrix if it exists.
"""
glm = population.glm
syms = population.get_variables()
# Compute gradients of the log prob wrt the GLM parameters
glm_syms = differentiable(syms['glm'])
print "Computing gradient of the prior w.r.t. the differentiable GLM parameters"
<|code_end|>
, determine the next line of code. You have imports:
import copy
import scipy.optimize as opt
from pyglm.utils.theano_func_wrapper import seval, _flatten
from pyglm.utils.packvec import *
from pyglm.utils.grads import *
from pyglm.components.graph import CompleteGraphModel
from pyglm.inference.smart_init import initialize_with_data
and context (class names, function names, or code) available:
# Path: pyglm/utils/theano_func_wrapper.py
# def seval(expr, syms, vals, defaults=None, givens=[]):
# """
# Evaluate the symbolic expression which depends on a set of symbolic variables,
# given a set of variable bindings.
# expr : Theano variable to be evaluated
# syms : dictionary of symbolic variables
# vals : dictionary of values to assign to the symbolic vars in syms
# key structure should mimic that of syms
#
# defaults : an optional dictionary providing backup values for
# syms keys not found in vals.
# """
# # Look for a function handle corresponding to this expression with these givens
# hash_value = lambda v: hashlib.sha1(v).hexdigest() if \
# isinstance(v, np.ndarray) else v
# hashable_givens = tuple(map(lambda (k,v): (k, hash_value(v)), givens))
# hashable_syms = tuple(map(lambda v: hash_value(v), _flatten(syms)))
# key = (expr, hashable_syms, hashable_givens)
# if key in _func_cache.keys():
# #print "DBG: Found key %s in cache" % str(key)
# f = _func_cache[key]
# #print "DBG: Calling expr: %s" % str(expr)
# else:
# # Create a callable theano function and cache it
# # print "DBG: Compiling expr: %s" % str(expr)
# sargs = _flatten(syms)
# if len(givens) == 0:
# # print "DBG: Compiling expression %s with no givens" % expr
# f = theano.function(sargs, expr,
# on_unused_input='ignore')
# else:
# # print "DBG: Compiling expression %s with givens: %s" % (expr, str(givens))
# f = theano.function(sargs, expr,
# givens=givens,
# on_unused_input='ignore')
# _func_cache[key] = f
#
# # Easiest thing to do is call the function with all symbolic variables
# args = _extract_vals(syms,vals,defaults)
# return f(*args)
#
# def _flatten(d):
# """ Pack a hierarchical dictionary of variables into a list
# Sorting is important as it ensures the function is called with
# the inputs in the same order each time!
# """
# l = []
# # This sorting is important!
# for (k,v) in sorted(d.items(), key=lambda t: t[0]):
# if isinstance(v, dict):
# lv = _flatten(v)
# for v2 in lv:
# l.append(v2)
# else:
# l.append(v)
# return l
#
# Path: pyglm/components/graph.py
# class CompleteGraphModel(Component):
# def __init__(self, model):
# """ Initialize the filtered stim model
# """
# self.model = model
# N = model['N']
#
# # Define complete adjacency matrix
# self.A = T.ones((N, N))
#
# # Define log probability
# self.log_p = T.constant(0.0)
#
# def get_state(self):
# return {'A': self.A}
#
# Path: pyglm/inference/smart_init.py
# def initialize_with_data(population, data, x0, Ns=None):
# """ Initialize the parameters x0 with smart draws from the data
# """
# initialize_stim_with_sta(population, data, x0, Ns=Ns)
# initialize_with_dense_graph(population, data, x0)
# # initialize_with_no_coupling(population, data, x0)
. Output only the next line. | g_glm_logprior, _ = grad_wrt_list(glm.log_prior, _flatten(glm_syms)) |
Given the code snippet: <|code_start|> cbk = lambda x_curr: progress_report(x_curr, ncg_iter_ls)
# Call the appropriate scipy optimization function
res = opt.minimize(nll, x_glm_0,
method="bfgs",
jac=grad_nll,
options={'disp': True,
'maxiter' : 225},
callback=cbk)
xn_opt = res.x
# Unpack the optimized parameters back into the state dict
x_glm_n = unpackdict(xn_opt, shapes)
set_vars(glm_syms, xn['glm'], x_glm_n)
def coord_descent(population,
x0=None,
maxiter=50,
atol=1e-5):
"""
Compute the maximum a posterior parameter estimate using Theano to compute
gradients of the log probability.
"""
N = population.model['N']
network = population.network
glm = population.glm
syms = population.get_variables()
# Make sure the network is a complete adjacency matrix because we
# do not do integer programming
<|code_end|>
, generate the next line using the imports in this file:
import copy
import scipy.optimize as opt
from pyglm.utils.theano_func_wrapper import seval, _flatten
from pyglm.utils.packvec import *
from pyglm.utils.grads import *
from pyglm.components.graph import CompleteGraphModel
from pyglm.inference.smart_init import initialize_with_data
and context (functions, classes, or occasionally code) from other files:
# Path: pyglm/utils/theano_func_wrapper.py
# def seval(expr, syms, vals, defaults=None, givens=[]):
# """
# Evaluate the symbolic expression which depends on a set of symbolic variables,
# given a set of variable bindings.
# expr : Theano variable to be evaluated
# syms : dictionary of symbolic variables
# vals : dictionary of values to assign to the symbolic vars in syms
# key structure should mimic that of syms
#
# defaults : an optional dictionary providing backup values for
# syms keys not found in vals.
# """
# # Look for a function handle corresponding to this expression with these givens
# hash_value = lambda v: hashlib.sha1(v).hexdigest() if \
# isinstance(v, np.ndarray) else v
# hashable_givens = tuple(map(lambda (k,v): (k, hash_value(v)), givens))
# hashable_syms = tuple(map(lambda v: hash_value(v), _flatten(syms)))
# key = (expr, hashable_syms, hashable_givens)
# if key in _func_cache.keys():
# #print "DBG: Found key %s in cache" % str(key)
# f = _func_cache[key]
# #print "DBG: Calling expr: %s" % str(expr)
# else:
# # Create a callable theano function and cache it
# # print "DBG: Compiling expr: %s" % str(expr)
# sargs = _flatten(syms)
# if len(givens) == 0:
# # print "DBG: Compiling expression %s with no givens" % expr
# f = theano.function(sargs, expr,
# on_unused_input='ignore')
# else:
# # print "DBG: Compiling expression %s with givens: %s" % (expr, str(givens))
# f = theano.function(sargs, expr,
# givens=givens,
# on_unused_input='ignore')
# _func_cache[key] = f
#
# # Easiest thing to do is call the function with all symbolic variables
# args = _extract_vals(syms,vals,defaults)
# return f(*args)
#
# def _flatten(d):
# """ Pack a hierarchical dictionary of variables into a list
# Sorting is important as it ensures the function is called with
# the inputs in the same order each time!
# """
# l = []
# # This sorting is important!
# for (k,v) in sorted(d.items(), key=lambda t: t[0]):
# if isinstance(v, dict):
# lv = _flatten(v)
# for v2 in lv:
# l.append(v2)
# else:
# l.append(v)
# return l
#
# Path: pyglm/components/graph.py
# class CompleteGraphModel(Component):
# def __init__(self, model):
# """ Initialize the filtered stim model
# """
# self.model = model
# N = model['N']
#
# # Define complete adjacency matrix
# self.A = T.ones((N, N))
#
# # Define log probability
# self.log_p = T.constant(0.0)
#
# def get_state(self):
# return {'A': self.A}
#
# Path: pyglm/inference/smart_init.py
# def initialize_with_data(population, data, x0, Ns=None):
# """ Initialize the parameters x0 with smart draws from the data
# """
# initialize_stim_with_sta(population, data, x0, Ns=Ns)
# initialize_with_dense_graph(population, data, x0)
# # initialize_with_no_coupling(population, data, x0)
. Output only the next line. | if not isinstance(network.graph, CompleteGraphModel): |
Next line prediction: <|code_start|> xn_opt = res.x
# Unpack the optimized parameters back into the state dict
x_glm_n = unpackdict(xn_opt, shapes)
set_vars(glm_syms, xn['glm'], x_glm_n)
def coord_descent(population,
x0=None,
maxiter=50,
atol=1e-5):
"""
Compute the maximum a posterior parameter estimate using Theano to compute
gradients of the log probability.
"""
N = population.model['N']
network = population.network
glm = population.glm
syms = population.get_variables()
# Make sure the network is a complete adjacency matrix because we
# do not do integer programming
if not isinstance(network.graph, CompleteGraphModel):
print " WARNING: MAP inference via coordinate descent can only be performed "\
"with the complete graph model."
# Draw initial state from prior if not given
if x0 is None:
x0 = population.sample()
# Also initialize with intelligent parameters from the data
<|code_end|>
. Use current file imports:
(import copy
import scipy.optimize as opt
from pyglm.utils.theano_func_wrapper import seval, _flatten
from pyglm.utils.packvec import *
from pyglm.utils.grads import *
from pyglm.components.graph import CompleteGraphModel
from pyglm.inference.smart_init import initialize_with_data)
and context including class names, function names, or small code snippets from other files:
# Path: pyglm/utils/theano_func_wrapper.py
# def seval(expr, syms, vals, defaults=None, givens=[]):
# """
# Evaluate the symbolic expression which depends on a set of symbolic variables,
# given a set of variable bindings.
# expr : Theano variable to be evaluated
# syms : dictionary of symbolic variables
# vals : dictionary of values to assign to the symbolic vars in syms
# key structure should mimic that of syms
#
# defaults : an optional dictionary providing backup values for
# syms keys not found in vals.
# """
# # Look for a function handle corresponding to this expression with these givens
# hash_value = lambda v: hashlib.sha1(v).hexdigest() if \
# isinstance(v, np.ndarray) else v
# hashable_givens = tuple(map(lambda (k,v): (k, hash_value(v)), givens))
# hashable_syms = tuple(map(lambda v: hash_value(v), _flatten(syms)))
# key = (expr, hashable_syms, hashable_givens)
# if key in _func_cache.keys():
# #print "DBG: Found key %s in cache" % str(key)
# f = _func_cache[key]
# #print "DBG: Calling expr: %s" % str(expr)
# else:
# # Create a callable theano function and cache it
# # print "DBG: Compiling expr: %s" % str(expr)
# sargs = _flatten(syms)
# if len(givens) == 0:
# # print "DBG: Compiling expression %s with no givens" % expr
# f = theano.function(sargs, expr,
# on_unused_input='ignore')
# else:
# # print "DBG: Compiling expression %s with givens: %s" % (expr, str(givens))
# f = theano.function(sargs, expr,
# givens=givens,
# on_unused_input='ignore')
# _func_cache[key] = f
#
# # Easiest thing to do is call the function with all symbolic variables
# args = _extract_vals(syms,vals,defaults)
# return f(*args)
#
# def _flatten(d):
# """ Pack a hierarchical dictionary of variables into a list
# Sorting is important as it ensures the function is called with
# the inputs in the same order each time!
# """
# l = []
# # This sorting is important!
# for (k,v) in sorted(d.items(), key=lambda t: t[0]):
# if isinstance(v, dict):
# lv = _flatten(v)
# for v2 in lv:
# l.append(v2)
# else:
# l.append(v)
# return l
#
# Path: pyglm/components/graph.py
# class CompleteGraphModel(Component):
# def __init__(self, model):
# """ Initialize the filtered stim model
# """
# self.model = model
# N = model['N']
#
# # Define complete adjacency matrix
# self.A = T.ones((N, N))
#
# # Define log probability
# self.log_p = T.constant(0.0)
#
# def get_state(self):
# return {'A': self.A}
#
# Path: pyglm/inference/smart_init.py
# def initialize_with_data(population, data, x0, Ns=None):
# """ Initialize the parameters x0 with smart draws from the data
# """
# initialize_stim_with_sta(population, data, x0, Ns=Ns)
# initialize_with_dense_graph(population, data, x0)
# # initialize_with_no_coupling(population, data, x0)
. Output only the next line. | initialize_with_data(population, population.data_sequences[-1], x0) |
Given snippet: <|code_start|>
def create_nlin_component(model):
type = model['nonlinearity']['type'].lower()
if type == 'exp':
nlin = ExpNonlinearity(model)
elif type == 'explinear':
nlin = ExpLinearNonlinearity(model)
else:
raise Exception("Unrecognized nonlinearity model: %s" % type)
return nlin
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
import theano.tensor as T
from pyglm.components.component import Component
and context:
# Path: pyglm/components/component.py
# class Component(object):
# """
# """
#
# def __init__(self, model):
# """ Initialize the component with the parameters from the given model.
# """
# pass
#
# def get_variables(self):
# """ Get a dictionary of (name : Theano variable) items for all the
# symbolic variables associated with this component.
# """
# return {}
#
# def get_state(self):
# return {}
#
# def preprocess_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_data(self, data):
# """ Set the shared memory variables that depend on the data
# """
# pass
#
# def set_hyperparameters(self, model):
# """ Set hyperparameters of the model
# """
# pass
#
# def sample(self, acc):
# """
# return a sample of the variables
# """
# return {}
which might include code, classes, or functions. Output only the next line. | class ExpNonlinearity(Component): |
Predict the next line after this snippet: <|code_start|>
def get_state(self):
return {str(self.alpha) : self.alpha,
str(self.Y) : self.Y}
def sample(self, acc):
"""
return a sample of the variables
"""
# Sample alpha from a Dirichlet prior
# alpha = np.random.dirichlet(self.alpha0*np.ones(self.R))
alpha = self.alpha_prior.sample(acc)
# Sample Y from categorical dist
Y = np.random.choice(self.R, size=self.N, p=alpha)
return {str(self.alpha) : alpha,
str(self.Y) : Y}
class LatentTypeWithTuningCurve(LatentType):
"""
Extent the basic latent type component to also include tuning curves
"""
def __init__(self, model, type_model):
super(LatentTypeWithTuningCurve, self).__init__(model, type_model)
# Also initialize the tuning curves
self.mu = self.type_model['mu']
self.sigma = self.type_model['sigma']
# Create a basis for the stimulus response
<|code_end|>
using the current file's imports:
import numpy as np
import theano.tensor as T
from component import Component
from pyglm.utils.basis import create_basis
from pyglm.components.priors import create_prior
from pyglm.components.priors import Categorical, JointCategorical
and any relevant context from other files:
# Path: pyglm/utils/basis.py
# def create_basis(prms):
# """ Create a basis for impulse response functions
# """
# type = prms['type'].lower()
# if type == 'exp':
# basis = create_exp_basis(prms)
# elif type == 'cosine':
# basis = create_cosine_basis(prms)
# elif type == 'gaussian':
# basis = create_gaussian_basis(prms)
# elif type == 'identity' or type == 'eye':
# basis = create_identity_basis(prms)
# elif type == 'file':
# if os.path.exists(prms["fname"]):
# basis = load_basis_from_file(prms['fname'])
# else:
# raise Exception("Unrecognized basis type: %s", type)
# return basis
#
# Path: pyglm/components/priors.py
# def create_prior(model, **kwargs):
# typ = model['type'].lower()
# if typ == 'normal' or \
# typ == 'gaussian':
# return Gaussian(model, **kwargs)
# elif typ == 'categorical':
# return Categorical(model, **kwargs)
# elif typ == 'jointcategorical' or \
# typ == 'joint_categorical':
# return JointCategorical(model, **kwargs)
# elif typ == 'spherical_gaussian':
# return SphericalGaussian(model, **kwargs)
# elif typ == 'group_lasso' or \
# typ == 'grouplasso':
# return GroupLasso(model, **kwargs)
# elif typ == 'dpp':
# return DeterminenalPointProcess(model, **kwargs)
# elif typ == 'dirichlet':
# return Dirichlet(model)
# else:
# raise Exception("Unrecognized prior type: %s" % typ)
. Output only the next line. | self.spatial_basis = create_basis(self.type_model['spatial_basis']) |
Continue the code snippet: <|code_start|> def sample(self, acc):
s = {}
for (name, comp) in self.latentdict.items():
s[name] = comp.sample(acc)
return s
def set_data(self, data):
for comp in self.latentlist:
comp.set_data(data)
# Allow consumers to access this container as a dict
def __getitem__(self, item):
return self.latentdict[item]
class LatentType(Component):
def __init__(self, model, type_model):
self.model = model
self.type_model = type_model
self.name = self.type_model['name']
# There are N neurons to assign types to
self.N = type_model['N']
# There are has R latent types
self.R = self.type_model['R']
# Each neuron has a latent type Y
self.Y = T.lvector('Y')
# A probability of each type with a symmetric Dirichlet prior
self.alpha = T.dvector('alpha')
<|code_end|>
. Use current file imports:
import numpy as np
import theano.tensor as T
from component import Component
from pyglm.utils.basis import create_basis
from pyglm.components.priors import create_prior
from pyglm.components.priors import Categorical, JointCategorical
and context (classes, functions, or code) from other files:
# Path: pyglm/utils/basis.py
# def create_basis(prms):
# """ Create a basis for impulse response functions
# """
# type = prms['type'].lower()
# if type == 'exp':
# basis = create_exp_basis(prms)
# elif type == 'cosine':
# basis = create_cosine_basis(prms)
# elif type == 'gaussian':
# basis = create_gaussian_basis(prms)
# elif type == 'identity' or type == 'eye':
# basis = create_identity_basis(prms)
# elif type == 'file':
# if os.path.exists(prms["fname"]):
# basis = load_basis_from_file(prms['fname'])
# else:
# raise Exception("Unrecognized basis type: %s", type)
# return basis
#
# Path: pyglm/components/priors.py
# def create_prior(model, **kwargs):
# typ = model['type'].lower()
# if typ == 'normal' or \
# typ == 'gaussian':
# return Gaussian(model, **kwargs)
# elif typ == 'categorical':
# return Categorical(model, **kwargs)
# elif typ == 'jointcategorical' or \
# typ == 'joint_categorical':
# return JointCategorical(model, **kwargs)
# elif typ == 'spherical_gaussian':
# return SphericalGaussian(model, **kwargs)
# elif typ == 'group_lasso' or \
# typ == 'grouplasso':
# return GroupLasso(model, **kwargs)
# elif typ == 'dpp':
# return DeterminenalPointProcess(model, **kwargs)
# elif typ == 'dirichlet':
# return Dirichlet(model)
# else:
# raise Exception("Unrecognized prior type: %s" % typ)
. Output only the next line. | self.alpha_prior = create_prior(self.type_model['alpha_prior']) |
Predict the next line for this snippet: <|code_start|>
# Evaluate the state
mcmc_state = []
for x in x_mcmc:
mcmc_state.append(popn_mcmc.eval_state(x))
# Now compute the true and false positive rates for MCMC
# For MCMC results, only consider the tail of the samples
N_samples = len(mcmc_state)
sample_frac = 0.2
start_smpl = int(np.floor(N_samples - sample_frac*N_samples))
(mcmc_tpr, mcmc_fpr) = compute_roc_from_sparse_glm_smpls(true_state, mcmc_state[start_smpl:])
mcmc_tprs.append(mcmc_tpr)
mcmc_fprs.append(mcmc_fpr)
# Pickle the roc results
with open(PKL_FNAME, 'w') as f:
# TODO Dump the MCMC results too
cPickle.dump({'map_tprs' : map_tprs,
'map_fprs' : map_fprs},
f,
protocol=-1)
# Plot the actual ROC curve
# Subsample to get about 10 errorbars
subsample = N*N//10
f = plt.figure()
ax = f.add_subplot(111)
<|code_end|>
with the help of current file imports:
import os
import cPickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
from pyglm.plotting.roc import plot_roc_curve
from population import Population
from models.model_factory import make_model
and context from other files:
# Path: pyglm/plotting/roc.py
# def plot_roc_curve(tprs, fprs, color='k', ax=None, subsample=1, label=None):
# """ Plot an ROC curve for the given true and false positive rates.
# If multiple rates are given, e.g. corresponding to multiple
# networks inferred using the same procedure, compute error bars
# (both horizontal and vertical) for the ROC curve.
#
# Plot in specified color, default black.
#
# Plot on the specified axes, or create a new axis necessary.
#
# Subsample allows you to subsample the errorbar
# """
# if ax is None:
# plt.figure(frameon=False)
# ax = plt.subplot(111)
#
# if not isinstance(tprs, list):
# tprs = [tprs]
# if not isinstance(fprs, list):
# fprs = [fprs]
#
# # Make sure all tprs and fprs are the same length
# N = tprs[0].size
# for (i,tpr) in enumerate(tprs):
# if not tpr.size == N:
# raise Exception("All TPRs must be vectors of length %d." % N)
# tprs[i] = tpr.reshape((N,1))
# for (i,fpr) in enumerate(fprs):
# if not fpr.size == N:
# raise Exception("All FPRs must be vectors of length %d." % N)
# fprs[i] = fpr.reshape((N,1))
#
# # Stack tprs and fprs to make matrices
# tprs = np.concatenate(tprs, axis=1)
# fprs = np.concatenate(fprs, axis=1)
#
# # Compute error bars (for both tpr and fpr)
# mean_tprs = np.mean(tprs, axis=1)
# std_tprs = np.std(tprs, axis=1)
#
# mean_fprs = np.mean(fprs, axis=1)
# std_fprs = np.std(fprs, axis=1)
#
# # Plot the error bars
# # plt.errorbar(mean_fprs, mean_tprs,
# # xerr=std_fprs, yerr=std_tprs,
# # ecolor=color, color=color,
# # axes=ax)
# err = np.concatenate([np.array([mean_fprs-std_fprs, mean_tprs+std_tprs]).T,
# np.flipud(np.array([mean_fprs+std_fprs, mean_tprs-std_tprs]).T)])
# from matplotlib.patches import PathPatch
# from matplotlib.path import Path
#
# plt.gca().add_patch(PathPatch(Path(err),
# facecolor=color,
# alpha=0.5,
# edgecolor='none',
# linewidth=0))
# # plt.plot(err[:,0], err[:, 1],
# # linestyle='--',
# # color=color)
#
# plt.plot(mean_fprs, mean_tprs,
# linestyle='-',
# color=color,
# linewidth=2,
# label=label)
#
# # Plot the random guessing line
# plt.plot([0,1],[0,1], '--k')
#
# #plt.legend(loc='lower right')
#
# plt.xlabel('FPR')
# plt.ylabel('TPR')
# plt.xlim((-0.01,1))
# plt.ylim((0.0,1))
#
# return ax
, which may contain function names, class names, or code. Output only the next line. | plot_roc_curve(map_tprs, map_fprs, ax=ax, color='b', subsample=subsample) |
Given snippet: <|code_start|># import inspect
class TestFastResonator(unittest.TestCase):
# Run before every test
def setUp(self):
np.random.seed(0)
pass
# Run after every test
def tearDown(self):
pass
def test_fast_resonator_py_V_C_1(self):
n_resonators = 5
size = 10
decimal = 14
freq_a = np.random.randn(size)
R_S = np.random.randn(n_resonators)
Q = np.random.randn(n_resonators)
freq_R = np.random.randn(n_resonators)
impedance_py = np.zeros(len(freq_a), complex)
for i in range(0, n_resonators):
impedance_py[1:] += R_S[i] / (1 + 1j * Q[i] *
(freq_a[1:] / freq_R[i] -
freq_R[i] / freq_a[1:]))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import numpy as np
import scipy.integrate
import scipy.integrate
import scipy.integrate
import scipy.integrate
from blond.utils import bmath as bm
and context:
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
which might include code, classes, or functions. Output only the next line. | impedance_c = bm.fast_resonator(R_S, Q, freq_a, freq_R)
|
Using the snippet: <|code_start|> if (self.cavityFB is not None) and (self.profile is None):
# ProfileError
raise RuntimeError("ERROR in RingAndRFTracker: Please specify a" +
" Profile object to use the CavityFeedback class")
if (self.rf_params.empty is True) and (self.periodicity is True):
# PeriodicityError
raise RuntimeError("ERROR in RingAndRFTracker: Empty RFStation" +
" with periodicity not yet implemented!")
if (self.cavityFB is not None) and (self.interpolation is False):
self.interpolation = True
warnings.warn('Setting interpolation to TRUE')
# self.logger.warning("Setting interpolation to TRUE")
def kick(self, beam_dt, beam_dE, index):
"""Function updating the particle energy due to the RF kick in a given
RF station. The kicks are summed over the different harmonic RF systems
in the station. The cavity phase can be shifted by the user via
phi_offset. The main RF (harmonic[0]) has by definition phase = 0 at
time = 0 below transition. The phases of all other RF systems are
defined w.r.t.\ to the main RF. The increment in energy is given by the
discrete equation of motion:
.. math::
\Delta E^{n+1} = \Delta E^n + \sum_{k=0}^{n_{\mathsf{rf}}-1}{e V_k^n \\sin{\\left(\omega_{\mathsf{rf,k}}^n \\Delta t^n + \phi_{\mathsf{rf,k}}^n \\right)}} - (E_s^{n+1} - E_s^n)
"""
# voltage_kick = np.ascontiguousarray(self.charge*self.voltage[:, index])
# omegarf_kick = np.ascontiguousarray(self.omega_rf[:, index])
# phirf_kick = np.ascontiguousarray(self.phi_rf[:, index])
<|code_end|>
, determine the next line of code. You have imports:
from builtins import range, object
from scipy.integrate import cumtrapz
from ..utils import bmath as bm
import numpy as np
import ctypes
import warnings
and context (class names, function names, or code) available:
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
. Output only the next line. | bm.kick(beam_dt, beam_dE, self.voltage[:, index], |
Continue the code snippet: <|code_start|> n = self.delay + 1
while n < Ring.t_rev.size:
summa = 0
while summa < self.dt:
try:
summa += Ring.t_rev[n]
n += 1
except:
self.on_time = np.append(self.on_time, 0)
return
self.on_time = np.append(self.on_time, n-1)
else:
self.on_time = np.arange(Ring.t_rev.size)
def beam_phase(self):
'''
*Beam phase measured at the main RF frequency and phase. The beam is
convolved with the window function of the band-pass filter of the
machine. The coefficients of sine and cosine components determine the
beam phase, projected to the range -Pi/2 to 3/2 Pi. Note that this beam
phase is already w.r.t. the instantaneous RF phase.*
'''
# Main RF frequency at the present turn
omega_rf = self.rf_station.omega_rf[0, self.rf_station.counter[0]]
phi_rf = self.rf_station.phi_rf[0, self.rf_station.counter[0]]
if self.time_offset is None:
# indexes = np.ones(self.profile.n_slices, dtype=bool)
# time_offset = 0.0
<|code_end|>
. Use current file imports:
from builtins import object
from ..utils import bmath as bm
import numpy as np
and context (classes, functions, or code) from other files:
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
. Output only the next line. | coeff = bm.beam_phase(self.profile.bin_centers, |
Here is a snippet: <|code_start|> self.wake_length = self.n_induced_voltage * self.profile.bin_size
# Frequency resolution in Hz
elif (self.wake_length_input == None
and self.frequency_resolution_input == None):
# By default the wake_length is the slicing frame length
self.wake_length = (self.profile.cut_right -
self.profile.cut_left)
self.n_induced_voltage = self.profile.n_slices
else:
raise RuntimeError('Error: only one of wake_length or ' +
'frequency_resolution can be specified.')
if self.multi_turn_wake:
# Number of points of the memory array for multi-turn wake
self.n_mtw_memory = self.n_induced_voltage
self.front_wake_buffer = 0
if self.mtw_mode == 'freq':
# In frequency domain, an extra buffer for a revolution turn is
# needed due to the circular time shift in frequency domain
self.buffer_size = \
np.ceil(np.max(self.RFParams.t_rev) /
self.profile.bin_size)
# Extending the buffer to reduce the effect of the front wake
self.buffer_size += \
np.ceil(np.max(self.buffer_extra) / self.profile.bin_size)
self.n_mtw_memory += int(self.buffer_size)
# Using next regular for FFTs speedup
if self.use_regular_fft:
<|code_end|>
. Write the next line using the current file imports:
from builtins import range, object
from ctypes import c_uint, c_double, c_void_p
from scipy.constants import e
from ..toolbox.next_regular import next_regular
from ..utils import bmath as bm
import numpy as np
and context from other files:
# Path: blond/toolbox/next_regular.py
# def next_regular(target):
# """
# Find the next regular number greater than or equal to target.
# Regular numbers are composites of the prime factors 2, 3, and 5.
# Also known as 5-smooth numbers or Hamming numbers, these are the optimal
# size for inputs to FFTPACK.
#
# Target must be a positive integer.
# """
# if target <= 6:
# return target
#
# # Quickly check if it's already a power of 2
# if not (target & (target-1)):
# return target
#
# target = -(-target // 2)
#
# match = float('inf') # Anything found will be smaller
# p5 = 1
# while p5 < target:
# p35 = p5
# while p35 < target:
# # Ceiling integer division, avoiding conversion to float
# # (quotient = ceil(target / p35))
# quotient = -(-target // p35)
#
# # Quickly find next power of 2 >= quotient
# try:
# p2 = 2**((quotient - 1).bit_length())
# except AttributeError:
# # Fallback for Python <2.7
# p2 = 2**(len(bin(quotient - 1)) - 2)
#
# N = p2 * p35
# if N == target:
# return N * 2
# elif N < match:
# match = N
# p35 *= 3
# if p35 == target:
# return p35 * 2
# if p35 < match:
# match = p35
# p5 *= 5
# if p5 == target:
# return p5 * 2
# if p5 < match:
# match = p5
#
# return match * 2
#
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
, which may include functions, classes, or code. Output only the next line. | self.n_mtw_fft = next_regular(self.n_mtw_memory) |
Based on the snippet: <|code_start|>
Attributes
----------
beam : object
Copy of the Beam object in order to access the beam info
profile : object
Copy of the Profile object in order to access the profile info
induced_voltage_list : object list
List of objects for which induced voltages have to be calculated
induced_voltage : float array
Array to store the computed induced voltage [V]
time_array : float array
Time array corresponding to induced_voltage [s]
"""
def __init__(self, Beam, Profile, induced_voltage_list):
"""
Constructor.
"""
# Copy of the Beam object in order to access the beam info.
self.beam = Beam
# Copy of the Profile object in order to access the profile info.
self.profile = Profile
# Induced voltage list.
self.induced_voltage_list = induced_voltage_list
# Induced voltage from the sum of the wake sources in V
self.induced_voltage = np.zeros(
<|code_end|>
, predict the immediate next line with the help of imports:
from builtins import range, object
from ctypes import c_uint, c_double, c_void_p
from scipy.constants import e
from ..toolbox.next_regular import next_regular
from ..utils import bmath as bm
import numpy as np
and context (classes, functions, sometimes code) from other files:
# Path: blond/toolbox/next_regular.py
# def next_regular(target):
# """
# Find the next regular number greater than or equal to target.
# Regular numbers are composites of the prime factors 2, 3, and 5.
# Also known as 5-smooth numbers or Hamming numbers, these are the optimal
# size for inputs to FFTPACK.
#
# Target must be a positive integer.
# """
# if target <= 6:
# return target
#
# # Quickly check if it's already a power of 2
# if not (target & (target-1)):
# return target
#
# target = -(-target // 2)
#
# match = float('inf') # Anything found will be smaller
# p5 = 1
# while p5 < target:
# p35 = p5
# while p35 < target:
# # Ceiling integer division, avoiding conversion to float
# # (quotient = ceil(target / p35))
# quotient = -(-target // p35)
#
# # Quickly find next power of 2 >= quotient
# try:
# p2 = 2**((quotient - 1).bit_length())
# except AttributeError:
# # Fallback for Python <2.7
# p2 = 2**(len(bin(quotient - 1)) - 2)
#
# N = p2 * p35
# if N == target:
# return N * 2
# elif N < match:
# match = N
# p35 *= 3
# if p35 == target:
# return p35 * 2
# if p35 < match:
# match = p35
# p5 *= 5
# if p5 == target:
# return p5 * 2
# if p5 < match:
# match = p5
#
# return match * 2
#
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
. Output only the next line. | int(self.profile.n_slices), dtype=bm.precision.real_t, order='C') |
Predict the next line for this snippet: <|code_start|> self.I4 = self.ring.ring_circumference * self.ring.alpha_0[0, 0] / self.rho**2.0
self.jz = 2.0 + self.I4 / self.I2
# Calculate synchrotron radiation parameters
self.calculate_SR_params()
# Initialize the random number array if quantum excitation is included
if quantum_excitation:
self.random_array = np.zeros(self.beam.n_macroparticles)
# Displace the beam in phase to account for the energy loss due to
# synchrotron radiation (temporary until bunch generation is updated)
if (shift_beam) and (self.rf_params.section_index == 0):
self.beam_phase_to_compensate_SR = np.abs(np.arcsin(
self.U0 / (self.ring.Particle.charge * self.rf_params.voltage[0][0]) ))
self.beam_position_to_compensate_SR = self.beam_phase_to_compensate_SR \
* self.rf_params.t_rf[0, 0] / (2.0*np.pi)
self.beam.dt -= self.beam_position_to_compensate_SR
# Select the right method for the tracker according to the selected
# settings
if python:
if quantum_excitation:
self.track = self.track_full_python
else:
self.track = self.track_SR_python
else:
if quantum_excitation:
if seed is not None:
<|code_end|>
with the help of current file imports:
from builtins import range, object
from ..utils import bmath as bm
import numpy as np
and context from other files:
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
, which may contain function names, class names, or code. Output only the next line. | bm.set_random_seed(seed) |
Predict the next line after this snippet: <|code_start|> self.__omega_R = 2 * np.pi * frequency_R
# Resonant angular frequency in rad/s
@property
def omega_R(self):
return self.__omega_R
@omega_R.setter
def omega_R(self, omega_R):
self.__frequency_R = omega_R / 2 / np.pi
self.__omega_R = omega_R
def wake_calc(self, time_array):
r"""
Wake calculation method as a function of time.
Parameters
----------
time_array : float array
Input time array in s
Attributes
----------
time_array : float array
Input time array in s
wake : float array
Output wake in :math:`\Omega / s`
"""
self.time_array = time_array
<|code_end|>
using the current file's imports:
from builtins import range, object
from scipy.constants import c, physical_constants
from scipy.special import gamma as gamma_func
from scipy.special import kv, airy, polygamma
from scipy import integrate
from ..utils import bmath as bm
import numpy as np
import mpmath
and any relevant context from other files:
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
. Output only the next line. | self.wake = np.zeros(self.time_array.shape, dtype=bm.precision.real_t, order='C') |
Here is a snippet: <|code_start|># granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
"""
Example for llrf.filters and llrf.cavity_feedback
:Authors: **Helga Timko**
"""
n = 10000
n_ma = 100
iterations = 10
time = np.linspace(0, 10.e-6, n)
tau = 1.e-6
signal = 1 - np.exp(-time/tau) + 0.01*(np.random.randn(n) - 0.5)
time = np.linspace(0, 20.e-6, 2*n)
signal = np.concatenate((signal, signal))
plt.figure('Moving average')
plt.clf()
plt.plot(1e6*time, signal)
prev = np.zeros(n_ma-1)
for i in range(iterations):
print("Average of end of previous signal", np.mean(prev))
tmp = signal[-n_ma+1:]
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import matplotlib.pyplot as plt
from blond.llrf.signal_processing import moving_average
and context from other files:
# Path: blond/llrf/signal_processing.py
# def moving_average(x, N, x_prev=None):
# """Function to calculate the moving average (or running mean) of the input
# data.
#
# Parameters
# ----------
# x : float array
# Data to be smoothed
# N : int
# Window size in points
# x_prev : float array
# Data to pad with in front
#
# Returns
# -------
# float array
# Smoothed data array of size
# * len(x) - N + 1, if x_prev = None
# * len(x) + len(x_prev) - N + 1, if x_prev given
#
# """
#
# if x_prev is not None:
# # Pad in front with x_prev signal
# x = np.concatenate((x_prev, x))
#
# # based on https://stackoverflow.com/a/14314054
# mov_avg = np.cumsum(x)
# mov_avg[N:] = mov_avg[N:] - mov_avg[:-N]
# return mov_avg[N-1:] / N
, which may include functions, classes, or code. Output only the next line. | signal = moving_average(signal, n_ma, prev)
|
Continue the code snippet: <|code_start|>'''
BLonD math and physics core functions
@author Stefan Hegglin, Konstantinos Iliakis
@date 20.10.2017
'''
# from functools import wraps
# from ..utils import bphysics_wrap
<|code_end|>
. Use current file imports:
import numpy as np
from ..utils import butils_wrap
and context (classes, functions, or code) from other files:
# Path: blond/utils/butils_wrap.py
# class Precision:
# class c_complex128(ct.Structure):
# class c_complex64(ct.Structure):
# def __init__(self, precision='double'):
# def __getPointer(x):
# def __getLen(x):
# def __c_real(x):
# def __init__(self, pycomplex):
# def to_complex(self):
# def __init__(self, pycomplex):
# def to_complex(self):
# def where(x, more_than=None, less_than=None, result=None):
# def add(a, b, result=None, inplace=False):
# def mul(a, b, result=None):
# def argmin(x):
# def argmax(x):
# def linspace(start, stop, num=50, retstep=False, result=None):
# def arange(start, stop, step, dtype=float, result=None):
# def sum(x):
# def sort(x, reverse=False):
# def convolve(signal, kernel, mode='full', result=None):
# def mean(x):
# def std(x):
# def sin(x, result=None):
# def cos(x, result=None):
# def exp(x, result=None):
# def interp(x, xp, yp, left=None, right=None, result=None):
# def interp_const_space(x, xp, yp, left=None, right=None, result=None):
# def rfft(a, n=0, result=None):
# def irfft(a, n=0, result=None):
# def rfftfreq(n, d=1.0, result=None):
# def irfft_packed(signal, fftsize=0, result=None):
# def cumtrapz(y, x=None, dx=1.0, initial=None, result=None):
# def trapz(y, x=None, dx=1.0):
# def beam_phase(bin_centers, profile, alpha, omegarf, phirf, bin_size):
# def rf_volt_comp(voltages, omega_rf, phi_rf, bin_centers):
# def kick(dt, dE, voltage, omega_rf, phi_rf, charge, n_rf, acceleration_kick):
# def drift(dt, dE, solver, t_rev, length_ratio, alpha_order, eta_0,
# eta_1, eta_2, alpha_0, alpha_1, alpha_2, beta, energy):
# def linear_interp_kick(dt, dE, voltage,
# bin_centers, charge,
# acceleration_kick):
# def linear_interp_kick_n_drift(dt, dE, total_voltage, bin_centers, charge, acc_kick,
# solver, t_rev, length_ratio, alpha_order, eta_0, eta_1,
# eta_2, beta, energy):
# def slice(dt, profile, cut_left, cut_right):
# def slice_smooth(dt, profile, cut_left, cut_right):
# def sparse_histogram(dt, profile, cut_left, cut_right, bunch_indexes, n_slices_bucket):
# def music_track(dt, dE, induced_voltage, array_parameters,
# alpha, omega_bar,
# const, coeff1, coeff2, coeff3, coeff4):
# def music_track_multiturn(dt, dE, induced_voltage, array_parameters,
# alpha, omega_bar,
# const, coeff1, coeff2, coeff3, coeff4):
# def synchrotron_radiation(dE, U0, n_kicks, tau_z):
# def synchrotron_radiation_full(dE, U0, n_kicks, tau_z, sigma_dE, energy):
# def set_random_seed(seed):
# def fast_resonator(R_S, Q, frequency_array, frequency_R, impedance=None):
# R_S = R_S.astype(dtype=precision.real_t, order='C', copy=False)
# Q = Q.astype(dtype=precision.real_t, order='C', copy=False)
. Output only the next line. | precision = butils_wrap.precision |
Given snippet: <|code_start|> def scale_histo(self):
if not bm.mpiMode():
raise RuntimeError(
'ERROR: Cannot use this routine unless in MPI Mode')
if self.Beam.is_splitted:
bm.mul(self.n_macroparticles, worker.workers, self.n_macroparticles)
def _slice_smooth(self, reduce=True):
"""
At the moment 4x slower than _slice but smoother (filtered).
"""
bm.slice_smooth(self.Beam.dt, self.n_macroparticles, self.cut_left,
self.cut_right)
if bm.mpiMode():
self.reduce_histo(dtype=np.float64)
def apply_fit(self):
"""
It applies Gaussian fit to the profile.
"""
if self.bunchLength == 0:
p0 = [max(self.n_macroparticles), np.mean(self.Beam.dt),
np.std(self.Beam.dt)]
else:
p0 = [max(self.n_macroparticles), self.bunchPosition,
self.bunchLength/4]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from builtins import object
from scipy import ndimage
from ..toolbox import filters_and_fitting as ffroutines
from ..utils import bmath as bm
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
import numpy as np
and context:
# Path: blond/toolbox/filters_and_fitting.py
# def beam_profile_filter_chebyshev(Y_array, X_array, filter_option):
# def gaussian_fit(Y_array, X_array, p0):
# def gauss(x, *p):
# def rms(Y_array, X_array):
# def fwhm(Y_array, X_array, shift=0):
# def fwhm_multibunch(Y_array, X_array, n_bunches,
# bunch_spacing_buckets, bucket_size_tau,
# bucket_tolerance=0.40, shift=0):
# def rms_multibunch(Y_array, X_array, n_bunches,
# bunch_spacing_buckets, bucket_size_tau,
# bucket_tolerance=0.40):
#
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
which might include code, classes, or functions. Output only the next line. | self.fitExtraOptions = ffroutines.gaussian_fit(self.n_macroparticles,
|
Continue the code snippet: <|code_start|>
if cut_left is not None:
self.cut_left = float(cut_left)
else:
self.cut_left = cut_left
if cut_right is not None:
self.cut_right = float(cut_right)
else:
self.cut_right = cut_right
self.n_slices = int(n_slices)
if n_sigma is not None:
self.n_sigma = float(n_sigma)
else:
self.n_sigma = n_sigma
self.cuts_unit = str(cuts_unit)
self.RFParams = RFSectionParameters
if self.cuts_unit == 'rad' and self.RFParams is None:
# CutError
raise RuntimeError('You should pass an RFParams object to ' +
'convert from radians to seconds')
if self.cuts_unit != 'rad' and self.cuts_unit != 's':
# CutError
raise RuntimeError('cuts_unit should be "s" or "rad"')
<|code_end|>
. Use current file imports:
from builtins import object
from scipy import ndimage
from ..toolbox import filters_and_fitting as ffroutines
from ..utils import bmath as bm
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
import numpy as np
and context (classes, functions, or code) from other files:
# Path: blond/toolbox/filters_and_fitting.py
# def beam_profile_filter_chebyshev(Y_array, X_array, filter_option):
# def gaussian_fit(Y_array, X_array, p0):
# def gauss(x, *p):
# def rms(Y_array, X_array):
# def fwhm(Y_array, X_array, shift=0):
# def fwhm_multibunch(Y_array, X_array, n_bunches,
# bunch_spacing_buckets, bucket_size_tau,
# bucket_tolerance=0.40, shift=0):
# def rms_multibunch(Y_array, X_array, n_bunches,
# bunch_spacing_buckets, bucket_size_tau,
# bucket_tolerance=0.40):
#
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
. Output only the next line. | self.edges = np.zeros(n_slices + 1, dtype=bm.precision.real_t, order='C')
|
Based on the snippet: <|code_start|> elif dtype == 'int64':
op = add_op_int64
elif dtype == 'uint16':
op = add_op_uint16
elif dtype == 'uint32':
op = add_op_uint32
elif dtype == 'uint64':
op = add_op_uint64
elif dtype == 'float32':
op = add_op_float32
elif dtype == 'float64':
op = add_op_float64
else:
print('Error: Not recognized dtype:{}'.format(dtype))
exit(-1)
elif operator == 'sum':
op = MPI.SUM
elif operator == 'max':
op = MPI.MAX
elif operator == 'min':
op = MPI.MIN
elif operator == 'prod':
op = MPI.PROD
elif operator in ['mean', 'avg']:
op = MPI.SUM
elif operator == 'std':
recvbuf = self.gather(sendbuf)
if worker.isMaster:
assert len(recvbuf) == 3 * self.workers
totals = np.sum((recvbuf[2::3] - 1) * recvbuf[1::3]**2 +
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import os
import numpy as np
import logging
from functools import wraps
from ..utils import bmath as bm
from mpi4py import MPI
and context (classes, functions, sometimes code) from other files:
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
. Output only the next line. | recvbuf[2::3] * (recvbuf[1::3] - bm.mean(recvbuf[0::3]))**2) |
Using the snippet: <|code_start|> # Multiply by desired noise probability density
if transform==None or transform=='r':
s = np.sqrt(2*fmax*spectrum) # in [rad]
elif transform=='c':
s = np.sqrt(fmax*spectrum) # in [rad]
dPf = s*Gf.real + 1j*s*Gf.imag # in [rad]
# FFT back to time domain to get final phase shift
if transform==None or transform=='r':
dPt = np.fft.irfft(dPf) # in [rad]
elif transform=='c':
dPt = np.fft.ifft(dPf) # in [rad]
# Use only real part for the phase shift and normalize
self.t = np.linspace(0, float(nt*dt), nt)
self.dphi_output = dPt.real
def generate(self):
for i in range(0, np.int(np.ceil(self.n_turns/self.corr))):
# Scale amplitude to keep area (phase noise amplitude) constant
k = i*self.corr # current time step
ampl = self.A_i*self.fs[0]/self.fs[k]
# Calculate the frequency step
f_max = self.f0[k]/2
n_points_pos_f_incl_zero = int(np.ceil(f_max/self.delta_f) + 1)
nt = 2*(n_points_pos_f_incl_zero - 1)
<|code_end|>
, determine the next line of code. You have imports:
from builtins import range, object
from scipy.constants import c
from ..plots.plot import *
from ..plots.plot_llrf import *
from ..toolbox.next_regular import next_regular
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
and context (class names, function names, or code) available:
# Path: blond/toolbox/next_regular.py
# def next_regular(target):
# """
# Find the next regular number greater than or equal to target.
# Regular numbers are composites of the prime factors 2, 3, and 5.
# Also known as 5-smooth numbers or Hamming numbers, these are the optimal
# size for inputs to FFTPACK.
#
# Target must be a positive integer.
# """
# if target <= 6:
# return target
#
# # Quickly check if it's already a power of 2
# if not (target & (target-1)):
# return target
#
# target = -(-target // 2)
#
# match = float('inf') # Anything found will be smaller
# p5 = 1
# while p5 < target:
# p35 = p5
# while p35 < target:
# # Ceiling integer division, avoiding conversion to float
# # (quotient = ceil(target / p35))
# quotient = -(-target // p35)
#
# # Quickly find next power of 2 >= quotient
# try:
# p2 = 2**((quotient - 1).bit_length())
# except AttributeError:
# # Fallback for Python <2.7
# p2 = 2**(len(bin(quotient - 1)) - 2)
#
# N = p2 * p35
# if N == target:
# return N * 2
# elif N < match:
# match = N
# p35 *= 3
# if p35 == target:
# return p35 * 2
# if p35 < match:
# match = p35
# p5 *= 5
# if p5 == target:
# return p5 * 2
# if p5 < match:
# match = p5
#
# return match * 2
. Output only the next line. | nt_regular = next_regular(int(nt)) |
Given snippet: <|code_start|># coding: utf8
# Copyright 2014-2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
"""
Unittest for utils.bmath
:Authors: **Konstantinos Iliakis**
"""
# import inspect
class TestFFTS(unittest.TestCase):
# Run before every test
def setUp(self):
np.random.seed(0)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import numpy as np
from numpy import fft
from blond.utils import bmath as bm
and context:
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
which might include code, classes, or functions. Output only the next line. | bm.use_fftw()
|
Given the code snippet: <|code_start|> self.n_particles/(self.n_macroparticles*self.Q)
self.induced_voltage = np.zeros(len(self.beam.dt))
self.induced_voltage[0] = self.const/2
self.coeff1 = -self.alpha/self.omega_bar
self.coeff2 = -self.R_S*self.omega_R/(self.Q*self.omega_bar)
self.coeff3 = self.omega_R*self.Q/(self.R_S*self.omega_bar)
self.coeff4 = self.alpha/self.omega_bar
self.input_first_component = 1
self.input_second_component = 0
self.t_rev = t_rev
self.last_dt = self.beam.dt[-1]
self.array_parameters = np.array([self.input_first_component,
self.input_second_component, self.t_rev, self.last_dt])
def track_cpp(self):
r"""
Voltage in time domain (single-turn) using MuSiC (C++ code).
Note: this method should also be called at turn number 1 when
multi-turn voltage computations are needed.
Examples
--------
>>> import impedances.music as musClass
>>> from setup_cpp import libblond
>>>
>>> music_cpp = musClass.Music(my_beam, [R_S, 2*np.pi*frequency_R, Q],
>>> n_macroparticles, n_particles, t_rev)
>>> music_cpp.track_cpp()
"""
<|code_end|>
, generate the next line using the imports in this file:
from builtins import range, object
from scipy.constants import e
from ..utils import bmath as bm
import numpy as np
import ctypes
and context (functions, classes, or occasionally code) from other files:
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
. Output only the next line. | bm.music_track(self.beam.dt, self.beam.dE, self.induced_voltage,
|
Given snippet: <|code_start|> # itemindex = bm.where(self.id, 0)
self.mean_dt = bm.mean(self.dt[itemindex])
self.sigma_dt = bm.std(self.dt[itemindex])
self._sumsq_dt = np.dot(self.dt[itemindex], self.dt[itemindex])
# self.min_dt = np.min(self.dt[itemindex])
# self.max_dt = np.max(self.dt[itemindex])
self.mean_dE = bm.mean(self.dE[itemindex])
self.sigma_dE = bm.std(self.dE[itemindex])
self._sumsq_dE = np.dot(self.dE[itemindex], self.dE[itemindex])
# self.min_dE = np.min(self.dE[itemindex])
# self.max_dE = np.max(self.dE[itemindex])
# R.m.s. emittance in Gaussian approximation
self.epsn_rms_l = np.pi*self.sigma_dE*self.sigma_dt # in eVs
def losses_separatrix(self, Ring, RFStation):
'''Beam losses based on separatrix.
Set to 0 all the particle's id not in the separatrix anymore.
Parameters
----------
Ring : Ring
Used to call the function is_in_separatrix.
RFStation : RFStation
Used to call the function is_in_separatrix.
'''
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from builtins import object
from scipy.constants import m_p, m_e, e, c, epsilon_0, hbar
from ..trackers.utilities import is_in_separatrix
from ..utils import exceptions as blExcept
from ..utils import bmath as bm
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
import numpy as np
import itertools as itl
import random
and context:
# Path: blond/trackers/utilities.py
# def is_in_separatrix(Ring, RFStation, Beam, dt, dE,
# total_voltage = None):
# r"""Function checking whether coordinate pair(s) are inside the separatrix.
# Uses the single-RF sinusoidal Hamiltonian.
#
# Parameters
# ----------
# Ring : class
# A Ring type class
# RFStation : class
# An RFStation type class
# Beam : class
# A Beam type class
# dt : float array
# Time coordinates of the particles to be checked
# dE : float array
# Energy coordinates of the particles to be checked
# total_voltage : float array
# Total voltage to be used if not single-harmonic RF
#
# Returns
# -------
# bool array
# True/False array for the given coordinates
#
# """
#
# warnings.filterwarnings("once")
#
# if Ring.n_sections > 1:
# warnings.warn("WARNING: in is_in_separatrix(): the usage of several"+
# " sections is not yet implemented!")
# if RFStation.n_rf > 1:
# warnings.warn("WARNING in is_in_separatrix(): taking into account" +
# " the first harmonic only!")
#
#
# counter = RFStation.counter[0]
# dt_sep = (np.pi - RFStation.phi_s[counter]
# - RFStation.phi_rf_d[0,counter])/ \
# RFStation.omega_rf[0,counter]
#
# Hsep = hamiltonian(Ring, RFStation, Beam, dt_sep, 0,
# total_voltage = None)
# isin = np.fabs(hamiltonian(Ring, RFStation, Beam,
# dt, dE, total_voltage = None)) < np.fabs(Hsep)
#
# return isin
#
# Path: blond/utils/exceptions.py
# class MassError(Exception):
# class AllParticlesLost(Exception):
# class ParticleAdditionError(Exception):
# class DistributionError(Exception):
# class GenerationError(Exception):
# class CutError(Exception):
# class ProfileDerivativeError(Exception):
# class WakeLengthError(Exception):
# class FrequencyResolutionError(Exception):
# class ResonatorError(Exception):
# class WrongCalcError(Exception):
# class MissingParameterError(Exception):
# class MomentumError(Exception):
# class PhaseLoopError(Exception):
# class PhaseNoiseError(Exception):
# class FeedbackError(Exception):
# class ImpulseError(Exception):
# class PhaseSpaceError(Exception):
# class NoiseDiffusionError(Exception):
# class PotentialWellError(Exception):
# class SolverError(Exception):
# class PeriodicityError(Exception):
# class ProfileError(Exception):
# class SynchrotronMotionError(Exception):
# class ConvolutionError(Exception):
# class IntegrationError(Exception):
# class SortError(Exception):
# class InterpolationError(Exception):
# class InputDataError(Exception):
#
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
which might include code, classes, or functions. Output only the next line. | itemindex = np.where(is_in_separatrix(Ring, RFStation, self, |
Predict the next line for this snippet: <|code_start|> '''Beam losses based on lower energy cut.
Set to 0 all the particle's id with dE below dE_min.
Parameters
----------
dE_min : float
minimum dE.
'''
itemindex = np.where((self.dE - dE_min) < 0)[0]
if itemindex.size != 0:
self.id[itemindex] = 0
def add_particles(self, new_particles):
'''
Method to add array of new particles to beam object
New particles are given id numbers sequential from last id of this beam
Parameters
----------
new_particles : array-like
(2, n) array of (dt, dE) for new particles
'''
try:
newdt = new_particles[0]
newdE = new_particles[1]
if len(newdt) != len(newdE):
<|code_end|>
with the help of current file imports:
from builtins import object
from scipy.constants import m_p, m_e, e, c, epsilon_0, hbar
from ..trackers.utilities import is_in_separatrix
from ..utils import exceptions as blExcept
from ..utils import bmath as bm
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
import numpy as np
import itertools as itl
import random
and context from other files:
# Path: blond/trackers/utilities.py
# def is_in_separatrix(Ring, RFStation, Beam, dt, dE,
# total_voltage = None):
# r"""Function checking whether coordinate pair(s) are inside the separatrix.
# Uses the single-RF sinusoidal Hamiltonian.
#
# Parameters
# ----------
# Ring : class
# A Ring type class
# RFStation : class
# An RFStation type class
# Beam : class
# A Beam type class
# dt : float array
# Time coordinates of the particles to be checked
# dE : float array
# Energy coordinates of the particles to be checked
# total_voltage : float array
# Total voltage to be used if not single-harmonic RF
#
# Returns
# -------
# bool array
# True/False array for the given coordinates
#
# """
#
# warnings.filterwarnings("once")
#
# if Ring.n_sections > 1:
# warnings.warn("WARNING: in is_in_separatrix(): the usage of several"+
# " sections is not yet implemented!")
# if RFStation.n_rf > 1:
# warnings.warn("WARNING in is_in_separatrix(): taking into account" +
# " the first harmonic only!")
#
#
# counter = RFStation.counter[0]
# dt_sep = (np.pi - RFStation.phi_s[counter]
# - RFStation.phi_rf_d[0,counter])/ \
# RFStation.omega_rf[0,counter]
#
# Hsep = hamiltonian(Ring, RFStation, Beam, dt_sep, 0,
# total_voltage = None)
# isin = np.fabs(hamiltonian(Ring, RFStation, Beam,
# dt, dE, total_voltage = None)) < np.fabs(Hsep)
#
# return isin
#
# Path: blond/utils/exceptions.py
# class MassError(Exception):
# class AllParticlesLost(Exception):
# class ParticleAdditionError(Exception):
# class DistributionError(Exception):
# class GenerationError(Exception):
# class CutError(Exception):
# class ProfileDerivativeError(Exception):
# class WakeLengthError(Exception):
# class FrequencyResolutionError(Exception):
# class ResonatorError(Exception):
# class WrongCalcError(Exception):
# class MissingParameterError(Exception):
# class MomentumError(Exception):
# class PhaseLoopError(Exception):
# class PhaseNoiseError(Exception):
# class FeedbackError(Exception):
# class ImpulseError(Exception):
# class PhaseSpaceError(Exception):
# class NoiseDiffusionError(Exception):
# class PotentialWellError(Exception):
# class SolverError(Exception):
# class PeriodicityError(Exception):
# class ProfileError(Exception):
# class SynchrotronMotionError(Exception):
# class ConvolutionError(Exception):
# class IntegrationError(Exception):
# class SortError(Exception):
# class InterpolationError(Exception):
# class InputDataError(Exception):
#
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
, which may contain function names, class names, or code. Output only the next line. | raise blExcept.ParticleAdditionError( |
Here is a snippet: <|code_start|> See Also
---------
distributions.matched_from_line_density:
match a beam with a given bunch profile.
distributions.matched_from_distribution_function:
match a beam with a given distribution function in phase space.
Examples
--------
>>> from input_parameters.ring import Ring
>>> from beam.beam import Beam
>>>
>>> n_turns = 10
>>> C = 100
>>> eta = 0.03
>>> momentum = 26e9
>>> ring = Ring(n_turns, C, eta, momentum, 'proton')
>>> n_macroparticle = 1e6
>>> intensity = 1e11
>>>
>>> my_beam = Beam(ring, n_macroparticle, intensity)
"""
def __init__(self, Ring, n_macroparticles, intensity):
self.Particle = Ring.Particle
self.beta = Ring.beta[0][0]
self.gamma = Ring.gamma[0][0]
self.energy = Ring.energy[0][0]
self.momentum = Ring.momentum[0][0]
<|code_end|>
. Write the next line using the current file imports:
from builtins import object
from scipy.constants import m_p, m_e, e, c, epsilon_0, hbar
from ..trackers.utilities import is_in_separatrix
from ..utils import exceptions as blExcept
from ..utils import bmath as bm
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
from ..utils.mpi_config import worker
import numpy as np
import itertools as itl
import random
and context from other files:
# Path: blond/trackers/utilities.py
# def is_in_separatrix(Ring, RFStation, Beam, dt, dE,
# total_voltage = None):
# r"""Function checking whether coordinate pair(s) are inside the separatrix.
# Uses the single-RF sinusoidal Hamiltonian.
#
# Parameters
# ----------
# Ring : class
# A Ring type class
# RFStation : class
# An RFStation type class
# Beam : class
# A Beam type class
# dt : float array
# Time coordinates of the particles to be checked
# dE : float array
# Energy coordinates of the particles to be checked
# total_voltage : float array
# Total voltage to be used if not single-harmonic RF
#
# Returns
# -------
# bool array
# True/False array for the given coordinates
#
# """
#
# warnings.filterwarnings("once")
#
# if Ring.n_sections > 1:
# warnings.warn("WARNING: in is_in_separatrix(): the usage of several"+
# " sections is not yet implemented!")
# if RFStation.n_rf > 1:
# warnings.warn("WARNING in is_in_separatrix(): taking into account" +
# " the first harmonic only!")
#
#
# counter = RFStation.counter[0]
# dt_sep = (np.pi - RFStation.phi_s[counter]
# - RFStation.phi_rf_d[0,counter])/ \
# RFStation.omega_rf[0,counter]
#
# Hsep = hamiltonian(Ring, RFStation, Beam, dt_sep, 0,
# total_voltage = None)
# isin = np.fabs(hamiltonian(Ring, RFStation, Beam,
# dt, dE, total_voltage = None)) < np.fabs(Hsep)
#
# return isin
#
# Path: blond/utils/exceptions.py
# class MassError(Exception):
# class AllParticlesLost(Exception):
# class ParticleAdditionError(Exception):
# class DistributionError(Exception):
# class GenerationError(Exception):
# class CutError(Exception):
# class ProfileDerivativeError(Exception):
# class WakeLengthError(Exception):
# class FrequencyResolutionError(Exception):
# class ResonatorError(Exception):
# class WrongCalcError(Exception):
# class MissingParameterError(Exception):
# class MomentumError(Exception):
# class PhaseLoopError(Exception):
# class PhaseNoiseError(Exception):
# class FeedbackError(Exception):
# class ImpulseError(Exception):
# class PhaseSpaceError(Exception):
# class NoiseDiffusionError(Exception):
# class PotentialWellError(Exception):
# class SolverError(Exception):
# class PeriodicityError(Exception):
# class ProfileError(Exception):
# class SynchrotronMotionError(Exception):
# class ConvolutionError(Exception):
# class IntegrationError(Exception):
# class SortError(Exception):
# class InterpolationError(Exception):
# class InputDataError(Exception):
#
# Path: blond/utils/bmath.py
# def use_mpi():
# def mpiMode():
# def use_fftw():
# def use_precision(_precision='double'):
# def update_active_dict(new_dict):
, which may include functions, classes, or code. Output only the next line. | self.dt = np.zeros([int(n_macroparticles)], dtype=bm.precision.real_t) |
Predict the next line after this snippet: <|code_start|># coding: utf-8
# Copyright 2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
Unit-tests for coasting beam generation.
'''
#General imports
#BLonD imports
class testCoastingBeamModule(unittest.TestCase):
def setUp(self):
<|code_end|>
using the current file's imports:
import numpy as np
import unittest
import matplotlib.pyplot as plt
import scipy.optimize as spOpt
import blond.beam.coasting_beam as cBeam
import blond.beam.beam as bBeam
import blond.input_parameters.ring as Ring
import blond.utils.exceptions as blExcept
from blond.beam.beam import Proton
and any relevant context from other files:
# Path: blond/beam/beam.py
# class Proton(Particle):
# """ Implements a proton `Particle`.
# """
#
# def __init__(self):
#
# Particle.__init__(self, m_p*c**2/e, 1)
. Output only the next line. | self.ring = Ring.Ring(2*np.pi*25, 1/4.4**2, 1E9, Proton()) |
Here is a snippet: <|code_start|>
class World:
def __init__(self, render_engine, scene, visible_layers, aspect_ratio):
self.background_shader = BackgroundShader(render_engine, scene.world)
self.camera = Camera(render_engine, scene.camera, aspect_ratio)
self.lights = []
# Collect infinite-extent light sources.
# TODO: also get sun lamps inside group instances.
for ob in scene.objects:
if ob.type == 'LAMP' and ob.data.type == 'SUN':
<|code_end|>
. Write the next line using the current file imports:
import bpy
from math import degrees, tan, atan
from mathutils import Vector, Matrix
from .util import escape_name, mat2str, ExportCancelled
and context from other files:
# Path: psychoblend/util.py
# def escape_name(name):
# name = name.replace("\\", "\\\\")
# name = name.replace(" ", "\\ ")
# name = name.replace("$", "\\$")
# name = name.replace("[", "\\[")
# name = name.replace("]", "\\]")
# name = name.replace("{", "\\{")
# name = name.replace("}", "\\}")
# return name
#
# def mat2str(m):
# """ Converts a matrix into a single-line string of values.
# """
# s = ""
# for j in range(4):
# for i in range(4):
# s += (" %f" % m[i][j])
# return s[1:]
#
# class ExportCancelled(Exception):
# """ Indicates that the render was cancelled in the middle of exporting
# the scene file.
# """
# pass
, which may include functions, classes, or code. Output only the next line. | name = escape_name(ob.name) |
Given the following code snippet before the placeholder: <|code_start|>
# Dof distance
if self.ob.data.dof_object == None:
self.focal_distances += [self.ob.data.dof_distance]
else:
# TODO: implement DoF object tracking here
self.focal_distances += [0.0]
print("WARNING: DoF object tracking not yet implemented.")
# Transform
mat = self.ob.matrix_world.copy()
matz = Matrix()
matz[2][2] = -1
self.xforms += [mat * matz]
def export(self, render_engine, w):
render_engine.update_stats("", "Psychopath: Exporting %s" % self.ob.name)
w.write("Camera {\n")
w.indent()
for fov in self.fovs:
w.write("Fov [%f]\n" % fov)
for rad in self.aperture_radii:
w.write("ApertureRadius [%f]\n" % rad)
for dist in self.focal_distances:
w.write("FocalDistance [%f]\n" % dist)
for mat in self.xforms:
<|code_end|>
, predict the next line using imports from the current file:
import bpy
from math import degrees, tan, atan
from mathutils import Vector, Matrix
from .util import escape_name, mat2str, ExportCancelled
and context including class names, function names, and sometimes code from other files:
# Path: psychoblend/util.py
# def escape_name(name):
# name = name.replace("\\", "\\\\")
# name = name.replace(" ", "\\ ")
# name = name.replace("$", "\\$")
# name = name.replace("[", "\\[")
# name = name.replace("]", "\\]")
# name = name.replace("{", "\\{")
# name = name.replace("}", "\\}")
# return name
#
# def mat2str(m):
# """ Converts a matrix into a single-line string of values.
# """
# s = ""
# for j in range(4):
# for i in range(4):
# s += (" %f" % m[i][j])
# return s[1:]
#
# class ExportCancelled(Exception):
# """ Indicates that the render was cancelled in the middle of exporting
# the scene file.
# """
# pass
. Output only the next line. | w.write("Transform [%s]\n" % mat2str(mat)) |
Next line prediction: <|code_start|>
class World:
def __init__(self, render_engine, scene, visible_layers, aspect_ratio):
self.background_shader = BackgroundShader(render_engine, scene.world)
self.camera = Camera(render_engine, scene.camera, aspect_ratio)
self.lights = []
# Collect infinite-extent light sources.
# TODO: also get sun lamps inside group instances.
for ob in scene.objects:
if ob.type == 'LAMP' and ob.data.type == 'SUN':
name = escape_name(ob.name)
self.lights += [DistantDiskLamp(ob, name)]
def take_sample(self, render_engine, scene, time):
self.camera.take_sample(render_engine, scene, time)
for light in self.lights:
# Check if render is cancelled
if render_engine.test_break():
<|code_end|>
. Use current file imports:
(import bpy
from math import degrees, tan, atan
from mathutils import Vector, Matrix
from .util import escape_name, mat2str, ExportCancelled)
and context including class names, function names, or small code snippets from other files:
# Path: psychoblend/util.py
# def escape_name(name):
# name = name.replace("\\", "\\\\")
# name = name.replace(" ", "\\ ")
# name = name.replace("$", "\\$")
# name = name.replace("[", "\\[")
# name = name.replace("]", "\\]")
# name = name.replace("{", "\\{")
# name = name.replace("}", "\\}")
# return name
#
# def mat2str(m):
# """ Converts a matrix into a single-line string of values.
# """
# s = ""
# for j in range(4):
# for i in range(4):
# s += (" %f" % m[i][j])
# return s[1:]
#
# class ExportCancelled(Exception):
# """ Indicates that the render was cancelled in the middle of exporting
# the scene file.
# """
# pass
. Output only the next line. | raise ExportCancelled() |
Here is a snippet: <|code_start|> self.name = group_prefix
self.translation_offset = translation_offset
self.render_engine = render_engine
self.materials = []
self.objects = []
self.instances = []
self.material_names = set()
self.mesh_names = set()
self.assembly_names = set()
# Collect all the objects, materials, instances, etc.
for ob in objects:
# Check if render is cancelled
if render_engine.test_break():
raise ExportCancelled()
# Check if the object is visible for rendering
vis_layer = False
for i in range(len(ob.layers)):
vis_layer = vis_layer or (ob.layers[i] and visible_layers[i])
if ob.hide_render or not vis_layer:
continue
# Store object data
name = None
if ob.type == 'EMPTY':
if ob.dupli_type == 'GROUP':
<|code_end|>
. Write the next line using the current file imports:
import bpy
from .util import escape_name, mat2str, needs_def_mb, needs_xform_mb, ExportCancelled
and context from other files:
# Path: psychoblend/util.py
# def escape_name(name):
# name = name.replace("\\", "\\\\")
# name = name.replace(" ", "\\ ")
# name = name.replace("$", "\\$")
# name = name.replace("[", "\\[")
# name = name.replace("]", "\\]")
# name = name.replace("{", "\\{")
# name = name.replace("}", "\\}")
# return name
#
# def mat2str(m):
# """ Converts a matrix into a single-line string of values.
# """
# s = ""
# for j in range(4):
# for i in range(4):
# s += (" %f" % m[i][j])
# return s[1:]
#
# def needs_def_mb(ob):
# """ Determines if the given object needs to be exported with
# deformation motion blur or not.
# """
# anim = ob.animation_data
# no_anim_data = anim == None or (anim.action == None and len(anim.nla_tracks) == 0 and len(anim.drivers) == 0)
#
# for mod in ob.modifiers:
# if mod.type == 'SUBSURF':
# pass
# elif mod.type == 'MULTIRES':
# pass
# elif mod.type == 'MIRROR':
# if mod.mirror_object == None:
# pass
# else:
# return True
# elif mod.type == 'BEVEL' and no_anim_data:
# pass
# elif mod.type == 'EDGE_SPLIT' and no_anim_data:
# pass
# elif mod.type == 'SOLIDIFY' and no_anim_data:
# pass
# elif mod.type == 'MASK' and no_anim_data:
# pass
# elif mod.type == 'REMESH' and no_anim_data:
# pass
# elif mod.type == 'TRIANGULATE' and no_anim_data:
# pass
# elif mod.type == 'WIREFRAME' and no_anim_data:
# pass
# else:
# return True
#
# if ob.type == 'MESH':
# if ob.data.shape_keys == None:
# pass
# else:
# return True
#
# return False
#
# def needs_xform_mb(ob):
# """ Determines if the given object needs to be exported with
# transformation motion blur or not.
# """
# if ob.animation_data != None:
# return True
#
# if len(ob.constraints) > 0:
# return True
#
# if ob.parent != None:
# return needs_xform_mb(ob.parent)
#
# return False
#
# class ExportCancelled(Exception):
# """ Indicates that the render was cancelled in the middle of exporting
# the scene file.
# """
# pass
, which may include functions, classes, or code. Output only the next line. | name = group_prefix + "__" + escape_name(ob.dupli_group.name) |
Given the code snippet: <|code_start|> for dim in self.time_dim:
w.write("Dimensions [%f %f]\n" % dim)
w.unindent()
w.write("}\n")
class Instance:
def __init__(self, render_engine, ob, data_name):
self.ob = ob
self.data_name = data_name
self.needs_mb = needs_xform_mb(self.ob)
self.time_xforms = []
def take_sample(self, render_engine, time, translation_offset):
if len(self.time_xforms) == 0 or self.needs_mb:
render_engine.update_stats("", "Psychopath: Collecting '{}' xforms at time {}".format(self.ob.name, time))
mat = self.ob.matrix_world.copy()
mat[0][3] += translation_offset[0]
mat[1][3] += translation_offset[1]
mat[2][3] += translation_offset[2]
self.time_xforms += [mat]
def export(self, render_engine, w):
render_engine.update_stats("", "Psychopath: Exporting %s" % self.ob.name)
w.write("Instance {\n")
w.indent()
w.write("Data [$%s]\n" % self.data_name)
for mat in self.time_xforms:
<|code_end|>
, generate the next line using the imports in this file:
import bpy
from .util import escape_name, mat2str, needs_def_mb, needs_xform_mb, ExportCancelled
and context (functions, classes, or occasionally code) from other files:
# Path: psychoblend/util.py
# def escape_name(name):
# name = name.replace("\\", "\\\\")
# name = name.replace(" ", "\\ ")
# name = name.replace("$", "\\$")
# name = name.replace("[", "\\[")
# name = name.replace("]", "\\]")
# name = name.replace("{", "\\{")
# name = name.replace("}", "\\}")
# return name
#
# def mat2str(m):
# """ Converts a matrix into a single-line string of values.
# """
# s = ""
# for j in range(4):
# for i in range(4):
# s += (" %f" % m[i][j])
# return s[1:]
#
# def needs_def_mb(ob):
# """ Determines if the given object needs to be exported with
# deformation motion blur or not.
# """
# anim = ob.animation_data
# no_anim_data = anim == None or (anim.action == None and len(anim.nla_tracks) == 0 and len(anim.drivers) == 0)
#
# for mod in ob.modifiers:
# if mod.type == 'SUBSURF':
# pass
# elif mod.type == 'MULTIRES':
# pass
# elif mod.type == 'MIRROR':
# if mod.mirror_object == None:
# pass
# else:
# return True
# elif mod.type == 'BEVEL' and no_anim_data:
# pass
# elif mod.type == 'EDGE_SPLIT' and no_anim_data:
# pass
# elif mod.type == 'SOLIDIFY' and no_anim_data:
# pass
# elif mod.type == 'MASK' and no_anim_data:
# pass
# elif mod.type == 'REMESH' and no_anim_data:
# pass
# elif mod.type == 'TRIANGULATE' and no_anim_data:
# pass
# elif mod.type == 'WIREFRAME' and no_anim_data:
# pass
# else:
# return True
#
# if ob.type == 'MESH':
# if ob.data.shape_keys == None:
# pass
# else:
# return True
#
# return False
#
# def needs_xform_mb(ob):
# """ Determines if the given object needs to be exported with
# transformation motion blur or not.
# """
# if ob.animation_data != None:
# return True
#
# if len(ob.constraints) > 0:
# return True
#
# if ob.parent != None:
# return needs_xform_mb(ob.parent)
#
# return False
#
# class ExportCancelled(Exception):
# """ Indicates that the render was cancelled in the middle of exporting
# the scene file.
# """
# pass
. Output only the next line. | w.write("Transform [%s]\n" % mat2str(mat.inverted())) |
Predict the next line for this snippet: <|code_start|>
def take_sample(self, render_engine, scene, time):
for mat in self.materials:
# Check if render is cancelled
if render_engine.test_break():
raise ExportCancelled()
mat.take_sample(render_engine, scene, time)
for ob in self.objects:
# Check if render is cancelled
if render_engine.test_break():
raise ExportCancelled()
ob.take_sample(render_engine, scene, time)
for inst in self.instances:
# Check if render is cancelled
if render_engine.test_break():
raise ExportCancelled()
inst.take_sample(render_engine, time, self.translation_offset)
def cleanup(self):
for mat in self.materials:
mat.cleanup()
for ob in self.objects:
ob.cleanup()
def get_mesh(self, ob, group_prefix):
# Figure out if we need to export or not and figure out what name to
# export with.
has_modifiers = len(ob.modifiers) > 0
<|code_end|>
with the help of current file imports:
import bpy
from .util import escape_name, mat2str, needs_def_mb, needs_xform_mb, ExportCancelled
and context from other files:
# Path: psychoblend/util.py
# def escape_name(name):
# name = name.replace("\\", "\\\\")
# name = name.replace(" ", "\\ ")
# name = name.replace("$", "\\$")
# name = name.replace("[", "\\[")
# name = name.replace("]", "\\]")
# name = name.replace("{", "\\{")
# name = name.replace("}", "\\}")
# return name
#
# def mat2str(m):
# """ Converts a matrix into a single-line string of values.
# """
# s = ""
# for j in range(4):
# for i in range(4):
# s += (" %f" % m[i][j])
# return s[1:]
#
# def needs_def_mb(ob):
# """ Determines if the given object needs to be exported with
# deformation motion blur or not.
# """
# anim = ob.animation_data
# no_anim_data = anim == None or (anim.action == None and len(anim.nla_tracks) == 0 and len(anim.drivers) == 0)
#
# for mod in ob.modifiers:
# if mod.type == 'SUBSURF':
# pass
# elif mod.type == 'MULTIRES':
# pass
# elif mod.type == 'MIRROR':
# if mod.mirror_object == None:
# pass
# else:
# return True
# elif mod.type == 'BEVEL' and no_anim_data:
# pass
# elif mod.type == 'EDGE_SPLIT' and no_anim_data:
# pass
# elif mod.type == 'SOLIDIFY' and no_anim_data:
# pass
# elif mod.type == 'MASK' and no_anim_data:
# pass
# elif mod.type == 'REMESH' and no_anim_data:
# pass
# elif mod.type == 'TRIANGULATE' and no_anim_data:
# pass
# elif mod.type == 'WIREFRAME' and no_anim_data:
# pass
# else:
# return True
#
# if ob.type == 'MESH':
# if ob.data.shape_keys == None:
# pass
# else:
# return True
#
# return False
#
# def needs_xform_mb(ob):
# """ Determines if the given object needs to be exported with
# transformation motion blur or not.
# """
# if ob.animation_data != None:
# return True
#
# if len(ob.constraints) > 0:
# return True
#
# if ob.parent != None:
# return needs_xform_mb(ob.parent)
#
# return False
#
# class ExportCancelled(Exception):
# """ Indicates that the render was cancelled in the middle of exporting
# the scene file.
# """
# pass
, which may contain function names, class names, or code. Output only the next line. | deform_mb = needs_def_mb(ob) |
Next line prediction: <|code_start|> self.time_dim += [(self.ob.data.size, self.ob.data.size_y)]
else:
self.time_dim += [(self.ob.data.size, self.ob.data.size)]
def cleanup(self):
pass
def export(self, render_engine, w):
render_engine.update_stats("", "Psychopath: Exporting %s" % self.ob.name)
w.write("RectangleLight $%s {\n" % self.name)
w.indent()
for col in self.time_col:
if col[0] == 'Rec709':
w.write("Color [rec709, %f %f %f]\n" % (col[1][0], col[1][1], col[1][2]))
elif col[0] == 'Blackbody':
w.write("Color [blackbody, %f %f]\n" % (col[1], col[2]))
elif col[0] == 'ColorTemperature':
w.write("Color [color_temperature, %f %f]\n" % (col[1], col[2]))
for dim in self.time_dim:
w.write("Dimensions [%f %f]\n" % dim)
w.unindent()
w.write("}\n")
class Instance:
def __init__(self, render_engine, ob, data_name):
self.ob = ob
self.data_name = data_name
<|code_end|>
. Use current file imports:
(import bpy
from .util import escape_name, mat2str, needs_def_mb, needs_xform_mb, ExportCancelled)
and context including class names, function names, or small code snippets from other files:
# Path: psychoblend/util.py
# def escape_name(name):
# name = name.replace("\\", "\\\\")
# name = name.replace(" ", "\\ ")
# name = name.replace("$", "\\$")
# name = name.replace("[", "\\[")
# name = name.replace("]", "\\]")
# name = name.replace("{", "\\{")
# name = name.replace("}", "\\}")
# return name
#
# def mat2str(m):
# """ Converts a matrix into a single-line string of values.
# """
# s = ""
# for j in range(4):
# for i in range(4):
# s += (" %f" % m[i][j])
# return s[1:]
#
# def needs_def_mb(ob):
# """ Determines if the given object needs to be exported with
# deformation motion blur or not.
# """
# anim = ob.animation_data
# no_anim_data = anim == None or (anim.action == None and len(anim.nla_tracks) == 0 and len(anim.drivers) == 0)
#
# for mod in ob.modifiers:
# if mod.type == 'SUBSURF':
# pass
# elif mod.type == 'MULTIRES':
# pass
# elif mod.type == 'MIRROR':
# if mod.mirror_object == None:
# pass
# else:
# return True
# elif mod.type == 'BEVEL' and no_anim_data:
# pass
# elif mod.type == 'EDGE_SPLIT' and no_anim_data:
# pass
# elif mod.type == 'SOLIDIFY' and no_anim_data:
# pass
# elif mod.type == 'MASK' and no_anim_data:
# pass
# elif mod.type == 'REMESH' and no_anim_data:
# pass
# elif mod.type == 'TRIANGULATE' and no_anim_data:
# pass
# elif mod.type == 'WIREFRAME' and no_anim_data:
# pass
# else:
# return True
#
# if ob.type == 'MESH':
# if ob.data.shape_keys == None:
# pass
# else:
# return True
#
# return False
#
# def needs_xform_mb(ob):
# """ Determines if the given object needs to be exported with
# transformation motion blur or not.
# """
# if ob.animation_data != None:
# return True
#
# if len(ob.constraints) > 0:
# return True
#
# if ob.parent != None:
# return needs_xform_mb(ob.parent)
#
# return False
#
# class ExportCancelled(Exception):
# """ Indicates that the render was cancelled in the middle of exporting
# the scene file.
# """
# pass
. Output only the next line. | self.needs_mb = needs_xform_mb(self.ob) |
Given the code snippet: <|code_start|>
class Assembly:
def __init__(self, render_engine, objects, visible_layers, group_prefix="", translation_offset=(0,0,0)):
self.name = group_prefix
self.translation_offset = translation_offset
self.render_engine = render_engine
self.materials = []
self.objects = []
self.instances = []
self.material_names = set()
self.mesh_names = set()
self.assembly_names = set()
# Collect all the objects, materials, instances, etc.
for ob in objects:
# Check if render is cancelled
if render_engine.test_break():
<|code_end|>
, generate the next line using the imports in this file:
import bpy
from .util import escape_name, mat2str, needs_def_mb, needs_xform_mb, ExportCancelled
and context (functions, classes, or occasionally code) from other files:
# Path: psychoblend/util.py
# def escape_name(name):
# name = name.replace("\\", "\\\\")
# name = name.replace(" ", "\\ ")
# name = name.replace("$", "\\$")
# name = name.replace("[", "\\[")
# name = name.replace("]", "\\]")
# name = name.replace("{", "\\{")
# name = name.replace("}", "\\}")
# return name
#
# def mat2str(m):
# """ Converts a matrix into a single-line string of values.
# """
# s = ""
# for j in range(4):
# for i in range(4):
# s += (" %f" % m[i][j])
# return s[1:]
#
# def needs_def_mb(ob):
# """ Determines if the given object needs to be exported with
# deformation motion blur or not.
# """
# anim = ob.animation_data
# no_anim_data = anim == None or (anim.action == None and len(anim.nla_tracks) == 0 and len(anim.drivers) == 0)
#
# for mod in ob.modifiers:
# if mod.type == 'SUBSURF':
# pass
# elif mod.type == 'MULTIRES':
# pass
# elif mod.type == 'MIRROR':
# if mod.mirror_object == None:
# pass
# else:
# return True
# elif mod.type == 'BEVEL' and no_anim_data:
# pass
# elif mod.type == 'EDGE_SPLIT' and no_anim_data:
# pass
# elif mod.type == 'SOLIDIFY' and no_anim_data:
# pass
# elif mod.type == 'MASK' and no_anim_data:
# pass
# elif mod.type == 'REMESH' and no_anim_data:
# pass
# elif mod.type == 'TRIANGULATE' and no_anim_data:
# pass
# elif mod.type == 'WIREFRAME' and no_anim_data:
# pass
# else:
# return True
#
# if ob.type == 'MESH':
# if ob.data.shape_keys == None:
# pass
# else:
# return True
#
# return False
#
# def needs_xform_mb(ob):
# """ Determines if the given object needs to be exported with
# transformation motion blur or not.
# """
# if ob.animation_data != None:
# return True
#
# if len(ob.constraints) > 0:
# return True
#
# if ob.parent != None:
# return needs_xform_mb(ob.parent)
#
# return False
#
# class ExportCancelled(Exception):
# """ Indicates that the render was cancelled in the middle of exporting
# the scene file.
# """
# pass
. Output only the next line. | raise ExportCancelled() |
Continue the code snippet: <|code_start|>try:
except ImportError:
class UserViewSet(viewsets.GenericViewSet,
viewsets.mixins.ListModelMixin,
viewsets.mixins.CreateModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.mixins.UpdateModelMixin):
"""
User endpoint, GET(list, detail), PATCH to change
This ViewSet has `permission_classes` set and `/me/` has the default permissions
so that the default permission class can be set to IsAuthenticatedOrReadOnly
without leaking user API tokens.
"""
queryset = User.objects.all().order_by('id')
serializer_class = UserSerializer
<|code_end|>
. Use current file imports:
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from rest_framework import viewsets, status
from rest_framework.decorators import action
from rest_framework.decorators import list_route as action
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from rest_framework.settings import api_settings
from yawn.user.permissions import ModelPermissions
from .serializers import UserSerializer, LoginSerializer
and context (classes, functions, or code) from other files:
# Path: yawn/user/permissions.py
# class ModelPermissions(BasePermission):
# """Allow authenticated users to read, and staff to write"""
#
# def has_permission(self, request, view):
# # Workaround to ensure DjangoModelPermissions are not applied
# # to the root view when using DefaultRouter.
# if getattr(view, '_ignore_model_permissions', False):
# return True
#
# if not request.user.is_authenticated:
# return False
#
# if request.user.is_staff:
# return True
#
# # authenticated, but not staff: only allow read
# return request.method in ('GET', 'HEAD', 'OPTIONS')
#
# Path: yawn/user/serializers.py
# class UserSerializer(serializers.ModelSerializer):
# api_token = serializers.CharField(source='auth_token.key', read_only=True)
# refresh_token = serializers.BooleanField(write_only=True, default=False)
# password = serializers.CharField(write_only=True)
#
# class Meta:
# model = User
# fields = ('username', 'first_name', 'last_name', 'api_token', 'email',
# 'refresh_token', 'id', 'password', 'is_staff')
#
# def create(self, validated_data):
# validated_data.pop('refresh_token', None)
# return User.objects.create_user(**validated_data)
#
# def update(self, instance, validated_data):
# # support refreshing the token:
# if validated_data.pop('refresh_token', False):
# if hasattr(instance, 'auth_token'):
# instance.auth_token.delete()
# instance.auth_token = Token.objects.create(user=instance)
# # and setting the password, which causes an unhandled logout...
# if validated_data.get('password'):
# instance.set_password(validated_data.pop('password'))
# return super().update(instance, validated_data)
#
# class LoginSerializer(serializers.Serializer):
# username = serializers.CharField()
# password = serializers.CharField()
. Output only the next line. | permission_classes = (ModelPermissions,) |
Given the following code snippet before the placeholder: <|code_start|>try:
except ImportError:
class UserViewSet(viewsets.GenericViewSet,
viewsets.mixins.ListModelMixin,
viewsets.mixins.CreateModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.mixins.UpdateModelMixin):
"""
User endpoint, GET(list, detail), PATCH to change
This ViewSet has `permission_classes` set and `/me/` has the default permissions
so that the default permission class can be set to IsAuthenticatedOrReadOnly
without leaking user API tokens.
"""
queryset = User.objects.all().order_by('id')
<|code_end|>
, predict the next line using imports from the current file:
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from rest_framework import viewsets, status
from rest_framework.decorators import action
from rest_framework.decorators import list_route as action
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from rest_framework.settings import api_settings
from yawn.user.permissions import ModelPermissions
from .serializers import UserSerializer, LoginSerializer
and context including class names, function names, and sometimes code from other files:
# Path: yawn/user/permissions.py
# class ModelPermissions(BasePermission):
# """Allow authenticated users to read, and staff to write"""
#
# def has_permission(self, request, view):
# # Workaround to ensure DjangoModelPermissions are not applied
# # to the root view when using DefaultRouter.
# if getattr(view, '_ignore_model_permissions', False):
# return True
#
# if not request.user.is_authenticated:
# return False
#
# if request.user.is_staff:
# return True
#
# # authenticated, but not staff: only allow read
# return request.method in ('GET', 'HEAD', 'OPTIONS')
#
# Path: yawn/user/serializers.py
# class UserSerializer(serializers.ModelSerializer):
# api_token = serializers.CharField(source='auth_token.key', read_only=True)
# refresh_token = serializers.BooleanField(write_only=True, default=False)
# password = serializers.CharField(write_only=True)
#
# class Meta:
# model = User
# fields = ('username', 'first_name', 'last_name', 'api_token', 'email',
# 'refresh_token', 'id', 'password', 'is_staff')
#
# def create(self, validated_data):
# validated_data.pop('refresh_token', None)
# return User.objects.create_user(**validated_data)
#
# def update(self, instance, validated_data):
# # support refreshing the token:
# if validated_data.pop('refresh_token', False):
# if hasattr(instance, 'auth_token'):
# instance.auth_token.delete()
# instance.auth_token = Token.objects.create(user=instance)
# # and setting the password, which causes an unhandled logout...
# if validated_data.get('password'):
# instance.set_password(validated_data.pop('password'))
# return super().update(instance, validated_data)
#
# class LoginSerializer(serializers.Serializer):
# username = serializers.CharField()
# password = serializers.CharField()
. Output only the next line. | serializer_class = UserSerializer |
Here is a snippet: <|code_start|>try:
except ImportError:
class UserViewSet(viewsets.GenericViewSet,
viewsets.mixins.ListModelMixin,
viewsets.mixins.CreateModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.mixins.UpdateModelMixin):
"""
User endpoint, GET(list, detail), PATCH to change
This ViewSet has `permission_classes` set and `/me/` has the default permissions
so that the default permission class can be set to IsAuthenticatedOrReadOnly
without leaking user API tokens.
"""
queryset = User.objects.all().order_by('id')
serializer_class = UserSerializer
permission_classes = (ModelPermissions,)
@action(methods=['get'], permission_classes=api_settings.DEFAULT_PERMISSION_CLASSES, detail=False)
def me(self, request):
serializer = self.get_serializer(request.user)
return Response(serializer.data)
@action(methods=['patch'], permission_classes=[AllowAny], detail=False)
def login(self, request):
<|code_end|>
. Write the next line using the current file imports:
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from rest_framework import viewsets, status
from rest_framework.decorators import action
from rest_framework.decorators import list_route as action
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from rest_framework.settings import api_settings
from yawn.user.permissions import ModelPermissions
from .serializers import UserSerializer, LoginSerializer
and context from other files:
# Path: yawn/user/permissions.py
# class ModelPermissions(BasePermission):
# """Allow authenticated users to read, and staff to write"""
#
# def has_permission(self, request, view):
# # Workaround to ensure DjangoModelPermissions are not applied
# # to the root view when using DefaultRouter.
# if getattr(view, '_ignore_model_permissions', False):
# return True
#
# if not request.user.is_authenticated:
# return False
#
# if request.user.is_staff:
# return True
#
# # authenticated, but not staff: only allow read
# return request.method in ('GET', 'HEAD', 'OPTIONS')
#
# Path: yawn/user/serializers.py
# class UserSerializer(serializers.ModelSerializer):
# api_token = serializers.CharField(source='auth_token.key', read_only=True)
# refresh_token = serializers.BooleanField(write_only=True, default=False)
# password = serializers.CharField(write_only=True)
#
# class Meta:
# model = User
# fields = ('username', 'first_name', 'last_name', 'api_token', 'email',
# 'refresh_token', 'id', 'password', 'is_staff')
#
# def create(self, validated_data):
# validated_data.pop('refresh_token', None)
# return User.objects.create_user(**validated_data)
#
# def update(self, instance, validated_data):
# # support refreshing the token:
# if validated_data.pop('refresh_token', False):
# if hasattr(instance, 'auth_token'):
# instance.auth_token.delete()
# instance.auth_token = Token.objects.create(user=instance)
# # and setting the password, which causes an unhandled logout...
# if validated_data.get('password'):
# instance.set_password(validated_data.pop('password'))
# return super().update(instance, validated_data)
#
# class LoginSerializer(serializers.Serializer):
# username = serializers.CharField()
# password = serializers.CharField()
, which may include functions, classes, or code. Output only the next line. | credentials = LoginSerializer(data=request.data) |
Given the code snippet: <|code_start|>
router = routers.DefaultRouter()
router.include_format_suffixes = False
router.register(r'names', WorkflowNameViewSet)
router.register(r'workflows', WorkflowViewSet)
router.register(r'runs', RunViewSet)
<|code_end|>
, generate the next line using the imports in this file:
from django.conf.urls import include, url
from django.http import HttpResponse
from rest_framework import routers
from yawn.task.views import TaskViewSet, ExecutionViewSet
from yawn.worker.views import QueueViewSet, WorkerViewSet
from yawn.workflow.views import WorkflowViewSet, WorkflowNameViewSet, RunViewSet
from yawn.user.views import UserViewSet
and context (functions, classes, or occasionally code) from other files:
# Path: yawn/task/views.py
# class TaskViewSet(viewsets.GenericViewSet,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# GET a task, and its executions. PATCH to re-run or terminate.
# """
# queryset = Task.objects.all().order_by('id').prefetch_related(
# 'execution_set__worker').select_related('template__workflow__name')
#
# serializer_class = TaskDetailSerializer
#
# class ExecutionViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin):
# """
# GET a list of Executions
# """
# queryset = Execution.objects.all().order_by('-id').select_related(
# 'worker', 'task__template__workflow__name')
#
# serializer_class = ExecutionListSerializer
#
# def get_queryset(self):
# """
# Optionally filter to the executions for a given worker
# """
# queryset = self.queryset
# worker = self.request.query_params.get('worker')
# if worker is not None:
# # worker page filters and reverses the order
# queryset = queryset.filter(worker_id=worker)
# return queryset
#
# Path: yawn/worker/views.py
# class QueueViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Queue endpoint GET(list,detail), PATCH to clear all messages
# """
# queryset = Queue.objects.all().annotate(Count('message')).order_by('id')
#
# serializer_class = QueueSerializer
#
# class WorkerViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin):
# """
# Worker endpoint, GET(list)
# """
# queryset = Worker.objects.all().order_by('-id')
#
# serializer_class = WorkerSerializer
#
# Path: yawn/workflow/views.py
# class WorkflowViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin,
# viewsets.mixins.CreateModelMixin):
# """
# Workflow endpoint.
#
# The POST action is unusual as it checks for an existing, identical
# workflow and return it if found instead of creating a new object.
# """
# queryset = Workflow.objects.select_related('name').order_by('id')
#
# serializer_class = WorkflowSerializer
#
# class WorkflowNameViewSet(viewsets.ReadOnlyModelViewSet):
# """
# WorkflowName endpoint. Provides the name, current version number,
# current_version_id, and the select_version_id if a version was
# requested in the query string.
#
# Used to list the workflows and power version switcher.
# """
# queryset = WorkflowName.objects.select_related('current_version').annotate(
# task_count=Count('current_version__template')).order_by('id')
# page_size = 100000 # not much pagination
#
# serializer_class = WorkflowNameSerializer
#
# class RunViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Run endpoint: GET(list,detail), POST(list) and PATCH(detail)
# """
#
# serializer_class = RunSerializer
#
# queryset = Run.objects.prefetch_related('task_set__template').order_by('id')
#
# def get_queryset(self):
# """
# Optionally filter to the runs for a given workflow
# """
# queryset = self.queryset
# workflow = self.request.query_params.get('workflow', None)
# if workflow is not None:
# queryset = queryset.filter(workflow_id=workflow)
# return queryset
#
# Path: yawn/user/views.py
# class UserViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# User endpoint, GET(list, detail), PATCH to change
#
# This ViewSet has `permission_classes` set and `/me/` has the default permissions
# so that the default permission class can be set to IsAuthenticatedOrReadOnly
# without leaking user API tokens.
# """
# queryset = User.objects.all().order_by('id')
#
# serializer_class = UserSerializer
# permission_classes = (ModelPermissions,)
#
# @action(methods=['get'], permission_classes=api_settings.DEFAULT_PERMISSION_CLASSES, detail=False)
# def me(self, request):
# serializer = self.get_serializer(request.user)
# return Response(serializer.data)
#
# @action(methods=['patch'], permission_classes=[AllowAny], detail=False)
# def login(self, request):
# credentials = LoginSerializer(data=request.data)
# credentials.is_valid(raise_exception=True)
# user = authenticate(request, **credentials.data)
# if not user:
# return Response({'detail': 'Login failed'}, status.HTTP_401_UNAUTHORIZED)
#
# login(request, user)
# return Response({'detail': 'Login succeeded'})
#
# @action(methods=['delete'], detail=False)
# def logout(self, request):
# logout(request)
# return Response({'detail': 'Logout succeeded'})
. Output only the next line. | router.register(r'tasks', TaskViewSet) |
Predict the next line after this snippet: <|code_start|>
router = routers.DefaultRouter()
router.include_format_suffixes = False
router.register(r'names', WorkflowNameViewSet)
router.register(r'workflows', WorkflowViewSet)
router.register(r'runs', RunViewSet)
router.register(r'tasks', TaskViewSet)
router.register(r'queues', QueueViewSet)
router.register(r'workers', WorkerViewSet)
<|code_end|>
using the current file's imports:
from django.conf.urls import include, url
from django.http import HttpResponse
from rest_framework import routers
from yawn.task.views import TaskViewSet, ExecutionViewSet
from yawn.worker.views import QueueViewSet, WorkerViewSet
from yawn.workflow.views import WorkflowViewSet, WorkflowNameViewSet, RunViewSet
from yawn.user.views import UserViewSet
and any relevant context from other files:
# Path: yawn/task/views.py
# class TaskViewSet(viewsets.GenericViewSet,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# GET a task, and its executions. PATCH to re-run or terminate.
# """
# queryset = Task.objects.all().order_by('id').prefetch_related(
# 'execution_set__worker').select_related('template__workflow__name')
#
# serializer_class = TaskDetailSerializer
#
# class ExecutionViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin):
# """
# GET a list of Executions
# """
# queryset = Execution.objects.all().order_by('-id').select_related(
# 'worker', 'task__template__workflow__name')
#
# serializer_class = ExecutionListSerializer
#
# def get_queryset(self):
# """
# Optionally filter to the executions for a given worker
# """
# queryset = self.queryset
# worker = self.request.query_params.get('worker')
# if worker is not None:
# # worker page filters and reverses the order
# queryset = queryset.filter(worker_id=worker)
# return queryset
#
# Path: yawn/worker/views.py
# class QueueViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Queue endpoint GET(list,detail), PATCH to clear all messages
# """
# queryset = Queue.objects.all().annotate(Count('message')).order_by('id')
#
# serializer_class = QueueSerializer
#
# class WorkerViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin):
# """
# Worker endpoint, GET(list)
# """
# queryset = Worker.objects.all().order_by('-id')
#
# serializer_class = WorkerSerializer
#
# Path: yawn/workflow/views.py
# class WorkflowViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin,
# viewsets.mixins.CreateModelMixin):
# """
# Workflow endpoint.
#
# The POST action is unusual as it checks for an existing, identical
# workflow and return it if found instead of creating a new object.
# """
# queryset = Workflow.objects.select_related('name').order_by('id')
#
# serializer_class = WorkflowSerializer
#
# class WorkflowNameViewSet(viewsets.ReadOnlyModelViewSet):
# """
# WorkflowName endpoint. Provides the name, current version number,
# current_version_id, and the select_version_id if a version was
# requested in the query string.
#
# Used to list the workflows and power version switcher.
# """
# queryset = WorkflowName.objects.select_related('current_version').annotate(
# task_count=Count('current_version__template')).order_by('id')
# page_size = 100000 # not much pagination
#
# serializer_class = WorkflowNameSerializer
#
# class RunViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Run endpoint: GET(list,detail), POST(list) and PATCH(detail)
# """
#
# serializer_class = RunSerializer
#
# queryset = Run.objects.prefetch_related('task_set__template').order_by('id')
#
# def get_queryset(self):
# """
# Optionally filter to the runs for a given workflow
# """
# queryset = self.queryset
# workflow = self.request.query_params.get('workflow', None)
# if workflow is not None:
# queryset = queryset.filter(workflow_id=workflow)
# return queryset
#
# Path: yawn/user/views.py
# class UserViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# User endpoint, GET(list, detail), PATCH to change
#
# This ViewSet has `permission_classes` set and `/me/` has the default permissions
# so that the default permission class can be set to IsAuthenticatedOrReadOnly
# without leaking user API tokens.
# """
# queryset = User.objects.all().order_by('id')
#
# serializer_class = UserSerializer
# permission_classes = (ModelPermissions,)
#
# @action(methods=['get'], permission_classes=api_settings.DEFAULT_PERMISSION_CLASSES, detail=False)
# def me(self, request):
# serializer = self.get_serializer(request.user)
# return Response(serializer.data)
#
# @action(methods=['patch'], permission_classes=[AllowAny], detail=False)
# def login(self, request):
# credentials = LoginSerializer(data=request.data)
# credentials.is_valid(raise_exception=True)
# user = authenticate(request, **credentials.data)
# if not user:
# return Response({'detail': 'Login failed'}, status.HTTP_401_UNAUTHORIZED)
#
# login(request, user)
# return Response({'detail': 'Login succeeded'})
#
# @action(methods=['delete'], detail=False)
# def logout(self, request):
# logout(request)
# return Response({'detail': 'Logout succeeded'})
. Output only the next line. | router.register(r'executions', ExecutionViewSet) |
Predict the next line after this snippet: <|code_start|>
router = routers.DefaultRouter()
router.include_format_suffixes = False
router.register(r'names', WorkflowNameViewSet)
router.register(r'workflows', WorkflowViewSet)
router.register(r'runs', RunViewSet)
router.register(r'tasks', TaskViewSet)
<|code_end|>
using the current file's imports:
from django.conf.urls import include, url
from django.http import HttpResponse
from rest_framework import routers
from yawn.task.views import TaskViewSet, ExecutionViewSet
from yawn.worker.views import QueueViewSet, WorkerViewSet
from yawn.workflow.views import WorkflowViewSet, WorkflowNameViewSet, RunViewSet
from yawn.user.views import UserViewSet
and any relevant context from other files:
# Path: yawn/task/views.py
# class TaskViewSet(viewsets.GenericViewSet,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# GET a task, and its executions. PATCH to re-run or terminate.
# """
# queryset = Task.objects.all().order_by('id').prefetch_related(
# 'execution_set__worker').select_related('template__workflow__name')
#
# serializer_class = TaskDetailSerializer
#
# class ExecutionViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin):
# """
# GET a list of Executions
# """
# queryset = Execution.objects.all().order_by('-id').select_related(
# 'worker', 'task__template__workflow__name')
#
# serializer_class = ExecutionListSerializer
#
# def get_queryset(self):
# """
# Optionally filter to the executions for a given worker
# """
# queryset = self.queryset
# worker = self.request.query_params.get('worker')
# if worker is not None:
# # worker page filters and reverses the order
# queryset = queryset.filter(worker_id=worker)
# return queryset
#
# Path: yawn/worker/views.py
# class QueueViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Queue endpoint GET(list,detail), PATCH to clear all messages
# """
# queryset = Queue.objects.all().annotate(Count('message')).order_by('id')
#
# serializer_class = QueueSerializer
#
# class WorkerViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin):
# """
# Worker endpoint, GET(list)
# """
# queryset = Worker.objects.all().order_by('-id')
#
# serializer_class = WorkerSerializer
#
# Path: yawn/workflow/views.py
# class WorkflowViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin,
# viewsets.mixins.CreateModelMixin):
# """
# Workflow endpoint.
#
# The POST action is unusual as it checks for an existing, identical
# workflow and return it if found instead of creating a new object.
# """
# queryset = Workflow.objects.select_related('name').order_by('id')
#
# serializer_class = WorkflowSerializer
#
# class WorkflowNameViewSet(viewsets.ReadOnlyModelViewSet):
# """
# WorkflowName endpoint. Provides the name, current version number,
# current_version_id, and the select_version_id if a version was
# requested in the query string.
#
# Used to list the workflows and power version switcher.
# """
# queryset = WorkflowName.objects.select_related('current_version').annotate(
# task_count=Count('current_version__template')).order_by('id')
# page_size = 100000 # not much pagination
#
# serializer_class = WorkflowNameSerializer
#
# class RunViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Run endpoint: GET(list,detail), POST(list) and PATCH(detail)
# """
#
# serializer_class = RunSerializer
#
# queryset = Run.objects.prefetch_related('task_set__template').order_by('id')
#
# def get_queryset(self):
# """
# Optionally filter to the runs for a given workflow
# """
# queryset = self.queryset
# workflow = self.request.query_params.get('workflow', None)
# if workflow is not None:
# queryset = queryset.filter(workflow_id=workflow)
# return queryset
#
# Path: yawn/user/views.py
# class UserViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# User endpoint, GET(list, detail), PATCH to change
#
# This ViewSet has `permission_classes` set and `/me/` has the default permissions
# so that the default permission class can be set to IsAuthenticatedOrReadOnly
# without leaking user API tokens.
# """
# queryset = User.objects.all().order_by('id')
#
# serializer_class = UserSerializer
# permission_classes = (ModelPermissions,)
#
# @action(methods=['get'], permission_classes=api_settings.DEFAULT_PERMISSION_CLASSES, detail=False)
# def me(self, request):
# serializer = self.get_serializer(request.user)
# return Response(serializer.data)
#
# @action(methods=['patch'], permission_classes=[AllowAny], detail=False)
# def login(self, request):
# credentials = LoginSerializer(data=request.data)
# credentials.is_valid(raise_exception=True)
# user = authenticate(request, **credentials.data)
# if not user:
# return Response({'detail': 'Login failed'}, status.HTTP_401_UNAUTHORIZED)
#
# login(request, user)
# return Response({'detail': 'Login succeeded'})
#
# @action(methods=['delete'], detail=False)
# def logout(self, request):
# logout(request)
# return Response({'detail': 'Logout succeeded'})
. Output only the next line. | router.register(r'queues', QueueViewSet) |
Continue the code snippet: <|code_start|>
router = routers.DefaultRouter()
router.include_format_suffixes = False
router.register(r'names', WorkflowNameViewSet)
router.register(r'workflows', WorkflowViewSet)
router.register(r'runs', RunViewSet)
router.register(r'tasks', TaskViewSet)
router.register(r'queues', QueueViewSet)
<|code_end|>
. Use current file imports:
from django.conf.urls import include, url
from django.http import HttpResponse
from rest_framework import routers
from yawn.task.views import TaskViewSet, ExecutionViewSet
from yawn.worker.views import QueueViewSet, WorkerViewSet
from yawn.workflow.views import WorkflowViewSet, WorkflowNameViewSet, RunViewSet
from yawn.user.views import UserViewSet
and context (classes, functions, or code) from other files:
# Path: yawn/task/views.py
# class TaskViewSet(viewsets.GenericViewSet,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# GET a task, and its executions. PATCH to re-run or terminate.
# """
# queryset = Task.objects.all().order_by('id').prefetch_related(
# 'execution_set__worker').select_related('template__workflow__name')
#
# serializer_class = TaskDetailSerializer
#
# class ExecutionViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin):
# """
# GET a list of Executions
# """
# queryset = Execution.objects.all().order_by('-id').select_related(
# 'worker', 'task__template__workflow__name')
#
# serializer_class = ExecutionListSerializer
#
# def get_queryset(self):
# """
# Optionally filter to the executions for a given worker
# """
# queryset = self.queryset
# worker = self.request.query_params.get('worker')
# if worker is not None:
# # worker page filters and reverses the order
# queryset = queryset.filter(worker_id=worker)
# return queryset
#
# Path: yawn/worker/views.py
# class QueueViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Queue endpoint GET(list,detail), PATCH to clear all messages
# """
# queryset = Queue.objects.all().annotate(Count('message')).order_by('id')
#
# serializer_class = QueueSerializer
#
# class WorkerViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin):
# """
# Worker endpoint, GET(list)
# """
# queryset = Worker.objects.all().order_by('-id')
#
# serializer_class = WorkerSerializer
#
# Path: yawn/workflow/views.py
# class WorkflowViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin,
# viewsets.mixins.CreateModelMixin):
# """
# Workflow endpoint.
#
# The POST action is unusual as it checks for an existing, identical
# workflow and return it if found instead of creating a new object.
# """
# queryset = Workflow.objects.select_related('name').order_by('id')
#
# serializer_class = WorkflowSerializer
#
# class WorkflowNameViewSet(viewsets.ReadOnlyModelViewSet):
# """
# WorkflowName endpoint. Provides the name, current version number,
# current_version_id, and the select_version_id if a version was
# requested in the query string.
#
# Used to list the workflows and power version switcher.
# """
# queryset = WorkflowName.objects.select_related('current_version').annotate(
# task_count=Count('current_version__template')).order_by('id')
# page_size = 100000 # not much pagination
#
# serializer_class = WorkflowNameSerializer
#
# class RunViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Run endpoint: GET(list,detail), POST(list) and PATCH(detail)
# """
#
# serializer_class = RunSerializer
#
# queryset = Run.objects.prefetch_related('task_set__template').order_by('id')
#
# def get_queryset(self):
# """
# Optionally filter to the runs for a given workflow
# """
# queryset = self.queryset
# workflow = self.request.query_params.get('workflow', None)
# if workflow is not None:
# queryset = queryset.filter(workflow_id=workflow)
# return queryset
#
# Path: yawn/user/views.py
# class UserViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# User endpoint, GET(list, detail), PATCH to change
#
# This ViewSet has `permission_classes` set and `/me/` has the default permissions
# so that the default permission class can be set to IsAuthenticatedOrReadOnly
# without leaking user API tokens.
# """
# queryset = User.objects.all().order_by('id')
#
# serializer_class = UserSerializer
# permission_classes = (ModelPermissions,)
#
# @action(methods=['get'], permission_classes=api_settings.DEFAULT_PERMISSION_CLASSES, detail=False)
# def me(self, request):
# serializer = self.get_serializer(request.user)
# return Response(serializer.data)
#
# @action(methods=['patch'], permission_classes=[AllowAny], detail=False)
# def login(self, request):
# credentials = LoginSerializer(data=request.data)
# credentials.is_valid(raise_exception=True)
# user = authenticate(request, **credentials.data)
# if not user:
# return Response({'detail': 'Login failed'}, status.HTTP_401_UNAUTHORIZED)
#
# login(request, user)
# return Response({'detail': 'Login succeeded'})
#
# @action(methods=['delete'], detail=False)
# def logout(self, request):
# logout(request)
# return Response({'detail': 'Logout succeeded'})
. Output only the next line. | router.register(r'workers', WorkerViewSet) |
Predict the next line for this snippet: <|code_start|>
router = routers.DefaultRouter()
router.include_format_suffixes = False
router.register(r'names', WorkflowNameViewSet)
<|code_end|>
with the help of current file imports:
from django.conf.urls import include, url
from django.http import HttpResponse
from rest_framework import routers
from yawn.task.views import TaskViewSet, ExecutionViewSet
from yawn.worker.views import QueueViewSet, WorkerViewSet
from yawn.workflow.views import WorkflowViewSet, WorkflowNameViewSet, RunViewSet
from yawn.user.views import UserViewSet
and context from other files:
# Path: yawn/task/views.py
# class TaskViewSet(viewsets.GenericViewSet,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# GET a task, and its executions. PATCH to re-run or terminate.
# """
# queryset = Task.objects.all().order_by('id').prefetch_related(
# 'execution_set__worker').select_related('template__workflow__name')
#
# serializer_class = TaskDetailSerializer
#
# class ExecutionViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin):
# """
# GET a list of Executions
# """
# queryset = Execution.objects.all().order_by('-id').select_related(
# 'worker', 'task__template__workflow__name')
#
# serializer_class = ExecutionListSerializer
#
# def get_queryset(self):
# """
# Optionally filter to the executions for a given worker
# """
# queryset = self.queryset
# worker = self.request.query_params.get('worker')
# if worker is not None:
# # worker page filters and reverses the order
# queryset = queryset.filter(worker_id=worker)
# return queryset
#
# Path: yawn/worker/views.py
# class QueueViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Queue endpoint GET(list,detail), PATCH to clear all messages
# """
# queryset = Queue.objects.all().annotate(Count('message')).order_by('id')
#
# serializer_class = QueueSerializer
#
# class WorkerViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin):
# """
# Worker endpoint, GET(list)
# """
# queryset = Worker.objects.all().order_by('-id')
#
# serializer_class = WorkerSerializer
#
# Path: yawn/workflow/views.py
# class WorkflowViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin,
# viewsets.mixins.CreateModelMixin):
# """
# Workflow endpoint.
#
# The POST action is unusual as it checks for an existing, identical
# workflow and return it if found instead of creating a new object.
# """
# queryset = Workflow.objects.select_related('name').order_by('id')
#
# serializer_class = WorkflowSerializer
#
# class WorkflowNameViewSet(viewsets.ReadOnlyModelViewSet):
# """
# WorkflowName endpoint. Provides the name, current version number,
# current_version_id, and the select_version_id if a version was
# requested in the query string.
#
# Used to list the workflows and power version switcher.
# """
# queryset = WorkflowName.objects.select_related('current_version').annotate(
# task_count=Count('current_version__template')).order_by('id')
# page_size = 100000 # not much pagination
#
# serializer_class = WorkflowNameSerializer
#
# class RunViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Run endpoint: GET(list,detail), POST(list) and PATCH(detail)
# """
#
# serializer_class = RunSerializer
#
# queryset = Run.objects.prefetch_related('task_set__template').order_by('id')
#
# def get_queryset(self):
# """
# Optionally filter to the runs for a given workflow
# """
# queryset = self.queryset
# workflow = self.request.query_params.get('workflow', None)
# if workflow is not None:
# queryset = queryset.filter(workflow_id=workflow)
# return queryset
#
# Path: yawn/user/views.py
# class UserViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# User endpoint, GET(list, detail), PATCH to change
#
# This ViewSet has `permission_classes` set and `/me/` has the default permissions
# so that the default permission class can be set to IsAuthenticatedOrReadOnly
# without leaking user API tokens.
# """
# queryset = User.objects.all().order_by('id')
#
# serializer_class = UserSerializer
# permission_classes = (ModelPermissions,)
#
# @action(methods=['get'], permission_classes=api_settings.DEFAULT_PERMISSION_CLASSES, detail=False)
# def me(self, request):
# serializer = self.get_serializer(request.user)
# return Response(serializer.data)
#
# @action(methods=['patch'], permission_classes=[AllowAny], detail=False)
# def login(self, request):
# credentials = LoginSerializer(data=request.data)
# credentials.is_valid(raise_exception=True)
# user = authenticate(request, **credentials.data)
# if not user:
# return Response({'detail': 'Login failed'}, status.HTTP_401_UNAUTHORIZED)
#
# login(request, user)
# return Response({'detail': 'Login succeeded'})
#
# @action(methods=['delete'], detail=False)
# def logout(self, request):
# logout(request)
# return Response({'detail': 'Logout succeeded'})
, which may contain function names, class names, or code. Output only the next line. | router.register(r'workflows', WorkflowViewSet) |
Using the snippet: <|code_start|>
router = routers.DefaultRouter()
router.include_format_suffixes = False
router.register(r'names', WorkflowNameViewSet)
router.register(r'workflows', WorkflowViewSet)
<|code_end|>
, determine the next line of code. You have imports:
from django.conf.urls import include, url
from django.http import HttpResponse
from rest_framework import routers
from yawn.task.views import TaskViewSet, ExecutionViewSet
from yawn.worker.views import QueueViewSet, WorkerViewSet
from yawn.workflow.views import WorkflowViewSet, WorkflowNameViewSet, RunViewSet
from yawn.user.views import UserViewSet
and context (class names, function names, or code) available:
# Path: yawn/task/views.py
# class TaskViewSet(viewsets.GenericViewSet,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# GET a task, and its executions. PATCH to re-run or terminate.
# """
# queryset = Task.objects.all().order_by('id').prefetch_related(
# 'execution_set__worker').select_related('template__workflow__name')
#
# serializer_class = TaskDetailSerializer
#
# class ExecutionViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin):
# """
# GET a list of Executions
# """
# queryset = Execution.objects.all().order_by('-id').select_related(
# 'worker', 'task__template__workflow__name')
#
# serializer_class = ExecutionListSerializer
#
# def get_queryset(self):
# """
# Optionally filter to the executions for a given worker
# """
# queryset = self.queryset
# worker = self.request.query_params.get('worker')
# if worker is not None:
# # worker page filters and reverses the order
# queryset = queryset.filter(worker_id=worker)
# return queryset
#
# Path: yawn/worker/views.py
# class QueueViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Queue endpoint GET(list,detail), PATCH to clear all messages
# """
# queryset = Queue.objects.all().annotate(Count('message')).order_by('id')
#
# serializer_class = QueueSerializer
#
# class WorkerViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin):
# """
# Worker endpoint, GET(list)
# """
# queryset = Worker.objects.all().order_by('-id')
#
# serializer_class = WorkerSerializer
#
# Path: yawn/workflow/views.py
# class WorkflowViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin,
# viewsets.mixins.CreateModelMixin):
# """
# Workflow endpoint.
#
# The POST action is unusual as it checks for an existing, identical
# workflow and return it if found instead of creating a new object.
# """
# queryset = Workflow.objects.select_related('name').order_by('id')
#
# serializer_class = WorkflowSerializer
#
# class WorkflowNameViewSet(viewsets.ReadOnlyModelViewSet):
# """
# WorkflowName endpoint. Provides the name, current version number,
# current_version_id, and the select_version_id if a version was
# requested in the query string.
#
# Used to list the workflows and power version switcher.
# """
# queryset = WorkflowName.objects.select_related('current_version').annotate(
# task_count=Count('current_version__template')).order_by('id')
# page_size = 100000 # not much pagination
#
# serializer_class = WorkflowNameSerializer
#
# class RunViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# Run endpoint: GET(list,detail), POST(list) and PATCH(detail)
# """
#
# serializer_class = RunSerializer
#
# queryset = Run.objects.prefetch_related('task_set__template').order_by('id')
#
# def get_queryset(self):
# """
# Optionally filter to the runs for a given workflow
# """
# queryset = self.queryset
# workflow = self.request.query_params.get('workflow', None)
# if workflow is not None:
# queryset = queryset.filter(workflow_id=workflow)
# return queryset
#
# Path: yawn/user/views.py
# class UserViewSet(viewsets.GenericViewSet,
# viewsets.mixins.ListModelMixin,
# viewsets.mixins.CreateModelMixin,
# viewsets.mixins.RetrieveModelMixin,
# viewsets.mixins.UpdateModelMixin):
# """
# User endpoint, GET(list, detail), PATCH to change
#
# This ViewSet has `permission_classes` set and `/me/` has the default permissions
# so that the default permission class can be set to IsAuthenticatedOrReadOnly
# without leaking user API tokens.
# """
# queryset = User.objects.all().order_by('id')
#
# serializer_class = UserSerializer
# permission_classes = (ModelPermissions,)
#
# @action(methods=['get'], permission_classes=api_settings.DEFAULT_PERMISSION_CLASSES, detail=False)
# def me(self, request):
# serializer = self.get_serializer(request.user)
# return Response(serializer.data)
#
# @action(methods=['patch'], permission_classes=[AllowAny], detail=False)
# def login(self, request):
# credentials = LoginSerializer(data=request.data)
# credentials.is_valid(raise_exception=True)
# user = authenticate(request, **credentials.data)
# if not user:
# return Response({'detail': 'Login failed'}, status.HTTP_401_UNAUTHORIZED)
#
# login(request, user)
# return Response({'detail': 'Login succeeded'})
#
# @action(methods=['delete'], detail=False)
# def logout(self, request):
# logout(request)
# return Response({'detail': 'Logout succeeded'})
. Output only the next line. | router.register(r'runs', RunViewSet) |
Using the snippet: <|code_start|>
@pytest.mark.no_transaction
def test_close_on_exception():
@close_on_exception
def example_disconnect():
with connection.cursor() as cursor:
# kill the current connection, which will raise a django.db.OperationalError
cursor.execute('select pg_terminate_backend(pg_backend_pid())')
# which means we never get here
cursor.execute('select 1')
assert False, 'exception was not raised'
# this will cause a disconnect:
example_disconnect()
# but the exception is caught, and on retry the database reconnects:
with connection.cursor() as cursor:
cursor.execute('select 1')
def test_current_time():
<|code_end|>
, determine the next line of code. You have imports:
import datetime
import pytest
from django.db import connection
from yawn.utilities import database
from yawn.utilities.database import close_on_exception
and context (class names, function names, or code) available:
# Path: yawn/utilities/database.py
# def close_on_exception(func):
# def wrapper(*args, **kwargs):
# def current_time():
#
# Path: yawn/utilities/database.py
# def close_on_exception(func):
# """
# A wrapper to close the database connection if a DB error occurs,
# so that it will get re-opened on the next use.
#
# Squashes the exception and logs it.
# """
#
# @functools.wraps(func)
# def wrapper(*args, **kwargs):
# try:
# func(*args, **kwargs)
# except OperationalError:
# logger.error('Database error, closing connection', exc_info=True)
# db.connection.close()
# assert db.connection.closed_in_transaction is False, \
# 'Could not close connection, probably because this wrapper ' \
# 'was used inside an transaction.atomic() block.'
#
# return wrapper
. Output only the next line. | assert isinstance(database.current_time(), datetime.datetime) |
Given the code snippet: <|code_start|>
class WorkerSerializer(serializers.ModelSerializer):
class Meta:
model = Worker
fields = '__all__'
class QueueSerializer(serializers.ModelSerializer):
message_count = serializers.IntegerField(read_only=True, source='message__count')
purge = serializers.BooleanField(write_only=True, default=False)
class Meta:
model = Queue
fields = '__all__'
def update(self, instance, validated_data):
if validated_data['purge'] is True:
instance.message_set.all().delete()
instance.message__count = 0
return instance
class MessageSerializer(serializers.ModelSerializer):
queue = serializers.CharField(source='queue.name')
class Meta:
<|code_end|>
, generate the next line using the imports in this file:
from rest_framework import serializers
from yawn.worker.models import Worker, Message, Queue
and context (functions, classes, or occasionally code) from other files:
# Path: yawn/worker/models.py
# class Worker(models.Model):
# """Information about current and past workers"""
# #
# # NOTE: consider instead taking an advisory lock for each worker,
# # and using it to check if a worker is still connected.
# # See `pg_try_advisory_lock` and `select * from pg_locks where locktype = 'advisory'`
# # That would give more immediate feedback, but its not clear we need to be faster.
# #
# ACTIVE = 'active'
# EXITED = 'exited'
# LOST = 'lost'
# STATUS_CHOICES = [(x, x) for x in (ACTIVE, EXITED, LOST)]
#
# name = models.TextField(blank=False)
# status = models.TextField(choices=STATUS_CHOICES, default=ACTIVE)
# start_timestamp = models.DateTimeField(default=functions.Now)
# last_heartbeat = models.DateTimeField(default=functions.Now)
#
# @staticmethod
# def find_lost(timeout):
# from yawn.task.models import Execution
#
# # Make a sparse index so looking up active workers is fast:
# # CREATE INDEX yawn_worker_active ON yawn_worker (status) WHERE status = 'active'
# lost = Worker.objects.filter(
# status=Worker.ACTIVE, last_heartbeat__lt=functions.Now() - timedelta(seconds=timeout)
# )
# for worker in lost:
# logger.warning('Marking %r as lost', worker)
# worker.status = Worker.LOST
# worker.save()
#
# executions = worker.execution_set.filter(status=Execution.RUNNING)
#
# for execution in executions:
# logger.warning('Marking %r as lost', execution)
# execution.mark_finished(lost=True)
#
# def __str__(self):
# return self.name
#
# class Message(models.Model):
# """The order of tasks waiting to be processed, like messages on a queue"""
#
# # I hope we never get to 9 Quintillion (9,223,372,036,854,775,807) messages
# id = models.BigAutoField(primary_key=True)
#
# queue = models.ForeignKey(Queue, models.PROTECT)
# task = models.ForeignKey('Task', models.PROTECT)
#
# class Queue(models.Model):
# """Arbitrary tag defining where tasks run."""
#
# name = models.TextField(unique=True)
#
# _default = None
#
# def __str__(self):
# return self.name
#
# @classmethod
# def get_default_queue(cls):
# if not cls._default:
# cls._default = Queue.objects.get_or_create(name='default')[0]
# return cls._default
. Output only the next line. | model = Message |
Next line prediction: <|code_start|>
class WorkerSerializer(serializers.ModelSerializer):
class Meta:
model = Worker
fields = '__all__'
class QueueSerializer(serializers.ModelSerializer):
message_count = serializers.IntegerField(read_only=True, source='message__count')
purge = serializers.BooleanField(write_only=True, default=False)
class Meta:
<|code_end|>
. Use current file imports:
(from rest_framework import serializers
from yawn.worker.models import Worker, Message, Queue)
and context including class names, function names, or small code snippets from other files:
# Path: yawn/worker/models.py
# class Worker(models.Model):
# """Information about current and past workers"""
# #
# # NOTE: consider instead taking an advisory lock for each worker,
# # and using it to check if a worker is still connected.
# # See `pg_try_advisory_lock` and `select * from pg_locks where locktype = 'advisory'`
# # That would give more immediate feedback, but its not clear we need to be faster.
# #
# ACTIVE = 'active'
# EXITED = 'exited'
# LOST = 'lost'
# STATUS_CHOICES = [(x, x) for x in (ACTIVE, EXITED, LOST)]
#
# name = models.TextField(blank=False)
# status = models.TextField(choices=STATUS_CHOICES, default=ACTIVE)
# start_timestamp = models.DateTimeField(default=functions.Now)
# last_heartbeat = models.DateTimeField(default=functions.Now)
#
# @staticmethod
# def find_lost(timeout):
# from yawn.task.models import Execution
#
# # Make a sparse index so looking up active workers is fast:
# # CREATE INDEX yawn_worker_active ON yawn_worker (status) WHERE status = 'active'
# lost = Worker.objects.filter(
# status=Worker.ACTIVE, last_heartbeat__lt=functions.Now() - timedelta(seconds=timeout)
# )
# for worker in lost:
# logger.warning('Marking %r as lost', worker)
# worker.status = Worker.LOST
# worker.save()
#
# executions = worker.execution_set.filter(status=Execution.RUNNING)
#
# for execution in executions:
# logger.warning('Marking %r as lost', execution)
# execution.mark_finished(lost=True)
#
# def __str__(self):
# return self.name
#
# class Message(models.Model):
# """The order of tasks waiting to be processed, like messages on a queue"""
#
# # I hope we never get to 9 Quintillion (9,223,372,036,854,775,807) messages
# id = models.BigAutoField(primary_key=True)
#
# queue = models.ForeignKey(Queue, models.PROTECT)
# task = models.ForeignKey('Task', models.PROTECT)
#
# class Queue(models.Model):
# """Arbitrary tag defining where tasks run."""
#
# name = models.TextField(unique=True)
#
# _default = None
#
# def __str__(self):
# return self.name
#
# @classmethod
# def get_default_queue(cls):
# if not cls._default:
# cls._default = Queue.objects.get_or_create(name='default')[0]
# return cls._default
. Output only the next line. | model = Queue |
Continue the code snippet: <|code_start|>
def some_function(*args):
pass
class SomeClass:
pass
def test_quoted_arg():
<|code_end|>
. Use current file imports:
from yawn.task.helpers import delay
and context (classes, functions, or code) from other files:
# Path: yawn/task/helpers.py
# def delay(func, *args, timeout=None, max_retries=0, queue=None):
# arguments = [shlex.quote(arg) for arg in args]
# command = 'yawn exec {0.__module__} {0.__name__} {1}'.format(
# func, ' '.join(arguments)).strip()
# task_name = '{0.__module__}.{0.__name__}({1})'.format(
# func, ', '.join(arguments))
#
# if queue:
# queue_obj, _ = Queue.objects.get_or_create(name=queue)
# else:
# queue_obj = Queue.get_default_queue()
#
# template, _ = Template.objects.get_or_create(
# name=task_name,
# command=command,
# queue=queue_obj,
# max_retries=max_retries,
# timeout=timeout
# )
# task = Task.objects.create(
# template=template
# )
# task.enqueue()
# return task
. Output only the next line. | task = delay(SomeClass, 'A small "taste" of chaos') |
Given the code snippet: <|code_start|>
# any change, new version created
data['tasks'][0]['max_retries'] = 2
response = client.post('/api/workflows/', data)
assert response.status_code == 201, response.data
assert response.data['version'] == 2
def test_invalid_fields(client, data):
data['name'] = ''
data['parameters'] = 'not a dict'
data['tasks'][1]['upstream'].append('invalid_task')
response = client.post('/api/workflows/', data)
assert response.status_code == 400, response.data
assert 'This field may not be blank' in response.data['name'][0]
assert 'must be a dictionary' in response.data['parameters'][0]
assert 'upstream task(s) invalid_task' in response.data['tasks'][0]
def test_more_invalid_fields(client, data):
data['parameters'] = {'invalid variable': 1}
data['tasks'] = []
response = client.post('/api/workflows/', data)
assert response.status_code == 400, response.data
assert 'Invalid parameter key' in response.data['parameters'][0]
assert 'Invalid parameter value' in response.data['parameters'][1]
assert 'non_field_errors' in response.data['tasks']
def test_list_workflow_names(client):
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from django.utils.dateparse import parse_datetime
from yawn.workflow.models import WorkflowName
from yawn.workflow.tests.utils import load_sample_workflow
and context (functions, classes, or occasionally code) from other files:
# Path: yawn/workflow/models.py
# class WorkflowName(models.Model):
# name = models.TextField(unique=True)
# current_version = models.OneToOneField('Workflow', on_delete=models.CASCADE,
# null=True, related_name='is_current')
#
# def new_version(self, **kwargs):
# """Create a new version of a workflow"""
# version = 0
# if self.current_version_id:
# version = self.current_version.version
#
# # disable the past schedule
# self.current_version.schedule_active = False
# self.current_version.save()
#
# workflow = Workflow.objects.create(name=self, version=version + 1, **kwargs)
# self.current_version = workflow
# self.save()
# return workflow
#
# Path: yawn/workflow/tests/utils.py
# def load_sample_workflow():
# filename = os.path.join(os.path.dirname(__file__), 'workflow.yaml')
# return yaml.safe_load(open(filename).read())
. Output only the next line. | name = WorkflowName.objects.create(name='workflow1') |
Given snippet: <|code_start|>
def test_get_workers(client):
worker = Worker.objects.create(name='worker1')
response = client.get('/api/workers/')
assert response.status_code == 200
workers = response.data['results']
assert len(workers) == 1
assert workers[0]['name'] == worker.name
assert workers[0]['status'] == worker.status
@pytest.fixture()
def queue():
queue = Queue.objects.create(name='queue1')
# need all this for a task...
name = WorkflowName.objects.create(name='workflow1')
workflow = name.new_version()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pytest
from yawn.task.models import Template
from yawn.worker.models import Worker, Queue, Message
from yawn.workflow.models import WorkflowName
and context:
# Path: yawn/task/models.py
# class Template(models.Model):
# workflow = models.ForeignKey('Workflow', models.PROTECT, editable=False, null=True)
# queue = models.ForeignKey(Queue, models.PROTECT)
# name = models.TextField()
#
# command = models.TextField()
# max_retries = models.IntegerField(default=0)
# timeout = models.IntegerField(null=True) # seconds
#
# # self-reference for upstream tasks
# upstream = models.ManyToManyField(
# 'Template', related_name='downstream', symmetrical=False, blank=True)
#
# class Meta:
# unique_together = ('workflow', 'name')
#
# def __str__(self):
# return self.name
#
# def save(self, *args, **kwargs):
# # can't set the default above because the value isn't serializable for migrations
# if not self.queue_id:
# self.queue = Queue.get_default_queue()
# super().save(*args, **kwargs)
#
# Path: yawn/worker/models.py
# class Worker(models.Model):
# """Information about current and past workers"""
# #
# # NOTE: consider instead taking an advisory lock for each worker,
# # and using it to check if a worker is still connected.
# # See `pg_try_advisory_lock` and `select * from pg_locks where locktype = 'advisory'`
# # That would give more immediate feedback, but its not clear we need to be faster.
# #
# ACTIVE = 'active'
# EXITED = 'exited'
# LOST = 'lost'
# STATUS_CHOICES = [(x, x) for x in (ACTIVE, EXITED, LOST)]
#
# name = models.TextField(blank=False)
# status = models.TextField(choices=STATUS_CHOICES, default=ACTIVE)
# start_timestamp = models.DateTimeField(default=functions.Now)
# last_heartbeat = models.DateTimeField(default=functions.Now)
#
# @staticmethod
# def find_lost(timeout):
# from yawn.task.models import Execution
#
# # Make a sparse index so looking up active workers is fast:
# # CREATE INDEX yawn_worker_active ON yawn_worker (status) WHERE status = 'active'
# lost = Worker.objects.filter(
# status=Worker.ACTIVE, last_heartbeat__lt=functions.Now() - timedelta(seconds=timeout)
# )
# for worker in lost:
# logger.warning('Marking %r as lost', worker)
# worker.status = Worker.LOST
# worker.save()
#
# executions = worker.execution_set.filter(status=Execution.RUNNING)
#
# for execution in executions:
# logger.warning('Marking %r as lost', execution)
# execution.mark_finished(lost=True)
#
# def __str__(self):
# return self.name
#
# class Queue(models.Model):
# """Arbitrary tag defining where tasks run."""
#
# name = models.TextField(unique=True)
#
# _default = None
#
# def __str__(self):
# return self.name
#
# @classmethod
# def get_default_queue(cls):
# if not cls._default:
# cls._default = Queue.objects.get_or_create(name='default')[0]
# return cls._default
#
# class Message(models.Model):
# """The order of tasks waiting to be processed, like messages on a queue"""
#
# # I hope we never get to 9 Quintillion (9,223,372,036,854,775,807) messages
# id = models.BigAutoField(primary_key=True)
#
# queue = models.ForeignKey(Queue, models.PROTECT)
# task = models.ForeignKey('Task', models.PROTECT)
#
# Path: yawn/workflow/models.py
# class WorkflowName(models.Model):
# name = models.TextField(unique=True)
# current_version = models.OneToOneField('Workflow', on_delete=models.CASCADE,
# null=True, related_name='is_current')
#
# def new_version(self, **kwargs):
# """Create a new version of a workflow"""
# version = 0
# if self.current_version_id:
# version = self.current_version.version
#
# # disable the past schedule
# self.current_version.schedule_active = False
# self.current_version.save()
#
# workflow = Workflow.objects.create(name=self, version=version + 1, **kwargs)
# self.current_version = workflow
# self.save()
# return workflow
which might include code, classes, or functions. Output only the next line. | Template.objects.create(workflow=workflow, name='task1', command=['']) |
Predict the next line after this snippet: <|code_start|>
def test_get_workers(client):
worker = Worker.objects.create(name='worker1')
response = client.get('/api/workers/')
assert response.status_code == 200
workers = response.data['results']
assert len(workers) == 1
assert workers[0]['name'] == worker.name
assert workers[0]['status'] == worker.status
@pytest.fixture()
def queue():
<|code_end|>
using the current file's imports:
import pytest
from yawn.task.models import Template
from yawn.worker.models import Worker, Queue, Message
from yawn.workflow.models import WorkflowName
and any relevant context from other files:
# Path: yawn/task/models.py
# class Template(models.Model):
# workflow = models.ForeignKey('Workflow', models.PROTECT, editable=False, null=True)
# queue = models.ForeignKey(Queue, models.PROTECT)
# name = models.TextField()
#
# command = models.TextField()
# max_retries = models.IntegerField(default=0)
# timeout = models.IntegerField(null=True) # seconds
#
# # self-reference for upstream tasks
# upstream = models.ManyToManyField(
# 'Template', related_name='downstream', symmetrical=False, blank=True)
#
# class Meta:
# unique_together = ('workflow', 'name')
#
# def __str__(self):
# return self.name
#
# def save(self, *args, **kwargs):
# # can't set the default above because the value isn't serializable for migrations
# if not self.queue_id:
# self.queue = Queue.get_default_queue()
# super().save(*args, **kwargs)
#
# Path: yawn/worker/models.py
# class Worker(models.Model):
# """Information about current and past workers"""
# #
# # NOTE: consider instead taking an advisory lock for each worker,
# # and using it to check if a worker is still connected.
# # See `pg_try_advisory_lock` and `select * from pg_locks where locktype = 'advisory'`
# # That would give more immediate feedback, but its not clear we need to be faster.
# #
# ACTIVE = 'active'
# EXITED = 'exited'
# LOST = 'lost'
# STATUS_CHOICES = [(x, x) for x in (ACTIVE, EXITED, LOST)]
#
# name = models.TextField(blank=False)
# status = models.TextField(choices=STATUS_CHOICES, default=ACTIVE)
# start_timestamp = models.DateTimeField(default=functions.Now)
# last_heartbeat = models.DateTimeField(default=functions.Now)
#
# @staticmethod
# def find_lost(timeout):
# from yawn.task.models import Execution
#
# # Make a sparse index so looking up active workers is fast:
# # CREATE INDEX yawn_worker_active ON yawn_worker (status) WHERE status = 'active'
# lost = Worker.objects.filter(
# status=Worker.ACTIVE, last_heartbeat__lt=functions.Now() - timedelta(seconds=timeout)
# )
# for worker in lost:
# logger.warning('Marking %r as lost', worker)
# worker.status = Worker.LOST
# worker.save()
#
# executions = worker.execution_set.filter(status=Execution.RUNNING)
#
# for execution in executions:
# logger.warning('Marking %r as lost', execution)
# execution.mark_finished(lost=True)
#
# def __str__(self):
# return self.name
#
# class Queue(models.Model):
# """Arbitrary tag defining where tasks run."""
#
# name = models.TextField(unique=True)
#
# _default = None
#
# def __str__(self):
# return self.name
#
# @classmethod
# def get_default_queue(cls):
# if not cls._default:
# cls._default = Queue.objects.get_or_create(name='default')[0]
# return cls._default
#
# class Message(models.Model):
# """The order of tasks waiting to be processed, like messages on a queue"""
#
# # I hope we never get to 9 Quintillion (9,223,372,036,854,775,807) messages
# id = models.BigAutoField(primary_key=True)
#
# queue = models.ForeignKey(Queue, models.PROTECT)
# task = models.ForeignKey('Task', models.PROTECT)
#
# Path: yawn/workflow/models.py
# class WorkflowName(models.Model):
# name = models.TextField(unique=True)
# current_version = models.OneToOneField('Workflow', on_delete=models.CASCADE,
# null=True, related_name='is_current')
#
# def new_version(self, **kwargs):
# """Create a new version of a workflow"""
# version = 0
# if self.current_version_id:
# version = self.current_version.version
#
# # disable the past schedule
# self.current_version.schedule_active = False
# self.current_version.save()
#
# workflow = Workflow.objects.create(name=self, version=version + 1, **kwargs)
# self.current_version = workflow
# self.save()
# return workflow
. Output only the next line. | queue = Queue.objects.create(name='queue1') |
Based on the snippet: <|code_start|>
def test_get_workers(client):
worker = Worker.objects.create(name='worker1')
response = client.get('/api/workers/')
assert response.status_code == 200
workers = response.data['results']
assert len(workers) == 1
assert workers[0]['name'] == worker.name
assert workers[0]['status'] == worker.status
@pytest.fixture()
def queue():
queue = Queue.objects.create(name='queue1')
# need all this for a task...
name = WorkflowName.objects.create(name='workflow1')
workflow = name.new_version()
Template.objects.create(workflow=workflow, name='task1', command=[''])
run = workflow.submit_run()
task = run.task_set.first()
# create some messages
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from yawn.task.models import Template
from yawn.worker.models import Worker, Queue, Message
from yawn.workflow.models import WorkflowName
and context (classes, functions, sometimes code) from other files:
# Path: yawn/task/models.py
# class Template(models.Model):
# workflow = models.ForeignKey('Workflow', models.PROTECT, editable=False, null=True)
# queue = models.ForeignKey(Queue, models.PROTECT)
# name = models.TextField()
#
# command = models.TextField()
# max_retries = models.IntegerField(default=0)
# timeout = models.IntegerField(null=True) # seconds
#
# # self-reference for upstream tasks
# upstream = models.ManyToManyField(
# 'Template', related_name='downstream', symmetrical=False, blank=True)
#
# class Meta:
# unique_together = ('workflow', 'name')
#
# def __str__(self):
# return self.name
#
# def save(self, *args, **kwargs):
# # can't set the default above because the value isn't serializable for migrations
# if not self.queue_id:
# self.queue = Queue.get_default_queue()
# super().save(*args, **kwargs)
#
# Path: yawn/worker/models.py
# class Worker(models.Model):
# """Information about current and past workers"""
# #
# # NOTE: consider instead taking an advisory lock for each worker,
# # and using it to check if a worker is still connected.
# # See `pg_try_advisory_lock` and `select * from pg_locks where locktype = 'advisory'`
# # That would give more immediate feedback, but its not clear we need to be faster.
# #
# ACTIVE = 'active'
# EXITED = 'exited'
# LOST = 'lost'
# STATUS_CHOICES = [(x, x) for x in (ACTIVE, EXITED, LOST)]
#
# name = models.TextField(blank=False)
# status = models.TextField(choices=STATUS_CHOICES, default=ACTIVE)
# start_timestamp = models.DateTimeField(default=functions.Now)
# last_heartbeat = models.DateTimeField(default=functions.Now)
#
# @staticmethod
# def find_lost(timeout):
# from yawn.task.models import Execution
#
# # Make a sparse index so looking up active workers is fast:
# # CREATE INDEX yawn_worker_active ON yawn_worker (status) WHERE status = 'active'
# lost = Worker.objects.filter(
# status=Worker.ACTIVE, last_heartbeat__lt=functions.Now() - timedelta(seconds=timeout)
# )
# for worker in lost:
# logger.warning('Marking %r as lost', worker)
# worker.status = Worker.LOST
# worker.save()
#
# executions = worker.execution_set.filter(status=Execution.RUNNING)
#
# for execution in executions:
# logger.warning('Marking %r as lost', execution)
# execution.mark_finished(lost=True)
#
# def __str__(self):
# return self.name
#
# class Queue(models.Model):
# """Arbitrary tag defining where tasks run."""
#
# name = models.TextField(unique=True)
#
# _default = None
#
# def __str__(self):
# return self.name
#
# @classmethod
# def get_default_queue(cls):
# if not cls._default:
# cls._default = Queue.objects.get_or_create(name='default')[0]
# return cls._default
#
# class Message(models.Model):
# """The order of tasks waiting to be processed, like messages on a queue"""
#
# # I hope we never get to 9 Quintillion (9,223,372,036,854,775,807) messages
# id = models.BigAutoField(primary_key=True)
#
# queue = models.ForeignKey(Queue, models.PROTECT)
# task = models.ForeignKey('Task', models.PROTECT)
#
# Path: yawn/workflow/models.py
# class WorkflowName(models.Model):
# name = models.TextField(unique=True)
# current_version = models.OneToOneField('Workflow', on_delete=models.CASCADE,
# null=True, related_name='is_current')
#
# def new_version(self, **kwargs):
# """Create a new version of a workflow"""
# version = 0
# if self.current_version_id:
# version = self.current_version.version
#
# # disable the past schedule
# self.current_version.schedule_active = False
# self.current_version.save()
#
# workflow = Workflow.objects.create(name=self, version=version + 1, **kwargs)
# self.current_version = workflow
# self.save()
# return workflow
. Output only the next line. | Message.objects.create(task=task, queue=queue) |
Given the following code snippet before the placeholder: <|code_start|>
def test_get_workers(client):
worker = Worker.objects.create(name='worker1')
response = client.get('/api/workers/')
assert response.status_code == 200
workers = response.data['results']
assert len(workers) == 1
assert workers[0]['name'] == worker.name
assert workers[0]['status'] == worker.status
@pytest.fixture()
def queue():
queue = Queue.objects.create(name='queue1')
# need all this for a task...
<|code_end|>
, predict the next line using imports from the current file:
import pytest
from yawn.task.models import Template
from yawn.worker.models import Worker, Queue, Message
from yawn.workflow.models import WorkflowName
and context including class names, function names, and sometimes code from other files:
# Path: yawn/task/models.py
# class Template(models.Model):
# workflow = models.ForeignKey('Workflow', models.PROTECT, editable=False, null=True)
# queue = models.ForeignKey(Queue, models.PROTECT)
# name = models.TextField()
#
# command = models.TextField()
# max_retries = models.IntegerField(default=0)
# timeout = models.IntegerField(null=True) # seconds
#
# # self-reference for upstream tasks
# upstream = models.ManyToManyField(
# 'Template', related_name='downstream', symmetrical=False, blank=True)
#
# class Meta:
# unique_together = ('workflow', 'name')
#
# def __str__(self):
# return self.name
#
# def save(self, *args, **kwargs):
# # can't set the default above because the value isn't serializable for migrations
# if not self.queue_id:
# self.queue = Queue.get_default_queue()
# super().save(*args, **kwargs)
#
# Path: yawn/worker/models.py
# class Worker(models.Model):
# """Information about current and past workers"""
# #
# # NOTE: consider instead taking an advisory lock for each worker,
# # and using it to check if a worker is still connected.
# # See `pg_try_advisory_lock` and `select * from pg_locks where locktype = 'advisory'`
# # That would give more immediate feedback, but its not clear we need to be faster.
# #
# ACTIVE = 'active'
# EXITED = 'exited'
# LOST = 'lost'
# STATUS_CHOICES = [(x, x) for x in (ACTIVE, EXITED, LOST)]
#
# name = models.TextField(blank=False)
# status = models.TextField(choices=STATUS_CHOICES, default=ACTIVE)
# start_timestamp = models.DateTimeField(default=functions.Now)
# last_heartbeat = models.DateTimeField(default=functions.Now)
#
# @staticmethod
# def find_lost(timeout):
# from yawn.task.models import Execution
#
# # Make a sparse index so looking up active workers is fast:
# # CREATE INDEX yawn_worker_active ON yawn_worker (status) WHERE status = 'active'
# lost = Worker.objects.filter(
# status=Worker.ACTIVE, last_heartbeat__lt=functions.Now() - timedelta(seconds=timeout)
# )
# for worker in lost:
# logger.warning('Marking %r as lost', worker)
# worker.status = Worker.LOST
# worker.save()
#
# executions = worker.execution_set.filter(status=Execution.RUNNING)
#
# for execution in executions:
# logger.warning('Marking %r as lost', execution)
# execution.mark_finished(lost=True)
#
# def __str__(self):
# return self.name
#
# class Queue(models.Model):
# """Arbitrary tag defining where tasks run."""
#
# name = models.TextField(unique=True)
#
# _default = None
#
# def __str__(self):
# return self.name
#
# @classmethod
# def get_default_queue(cls):
# if not cls._default:
# cls._default = Queue.objects.get_or_create(name='default')[0]
# return cls._default
#
# class Message(models.Model):
# """The order of tasks waiting to be processed, like messages on a queue"""
#
# # I hope we never get to 9 Quintillion (9,223,372,036,854,775,807) messages
# id = models.BigAutoField(primary_key=True)
#
# queue = models.ForeignKey(Queue, models.PROTECT)
# task = models.ForeignKey('Task', models.PROTECT)
#
# Path: yawn/workflow/models.py
# class WorkflowName(models.Model):
# name = models.TextField(unique=True)
# current_version = models.OneToOneField('Workflow', on_delete=models.CASCADE,
# null=True, related_name='is_current')
#
# def new_version(self, **kwargs):
# """Create a new version of a workflow"""
# version = 0
# if self.current_version_id:
# version = self.current_version.version
#
# # disable the past schedule
# self.current_version.schedule_active = False
# self.current_version.save()
#
# workflow = Workflow.objects.create(name=self, version=version + 1, **kwargs)
# self.current_version = workflow
# self.save()
# return workflow
. Output only the next line. | name = WorkflowName.objects.create(name='workflow1') |
Next line prediction: <|code_start|>class WorkflowName(models.Model):
name = models.TextField(unique=True)
current_version = models.OneToOneField('Workflow', on_delete=models.CASCADE,
null=True, related_name='is_current')
def new_version(self, **kwargs):
"""Create a new version of a workflow"""
version = 0
if self.current_version_id:
version = self.current_version.version
# disable the past schedule
self.current_version.schedule_active = False
self.current_version.save()
workflow = Workflow.objects.create(name=self, version=version + 1, **kwargs)
self.current_version = workflow
self.save()
return workflow
class Workflow(models.Model):
class Meta:
unique_together = (('name', 'version'),)
name = models.ForeignKey(WorkflowName, models.PROTECT)
version = models.IntegerField(editable=False) # serializer read-only
# scheduling is completely optional:
schedule_active = models.BooleanField(default=False)
<|code_end|>
. Use current file imports:
(from django.db import models
from django.contrib.postgres import fields
from django.db.models import functions
from yawn.utilities import cron, database
from yawn.utilities.cron import Crontab
from yawn.task.models import Task
from yawn.task.models import Task)
and context including class names, function names, or small code snippets from other files:
# Path: yawn/utilities/cron.py
# class Crontab:
# def __init__(self, schedule: str):
# def __repr__(self):
# def next_run(self, current_time: datetime.datetime) -> datetime.datetime:
# def parse(pattern: str, max_value: int):
# def cron_validator(crontab: str):
#
# Path: yawn/utilities/database.py
# def close_on_exception(func):
# def wrapper(*args, **kwargs):
# def current_time():
#
# Path: yawn/utilities/cron.py
# class Crontab:
# """
# Simplified Crontab
#
# Support "minute hour weekday" components of a standard cron job.
# - "*/15 2,7,15 1-5" means "every fifteen minutes, on hours 2 7 15, Monday-Friday"
# - Minutes are from 0-59, hours from 0-23, and days from 0(Sunday)-6(Saturday)
# - Fields can contain multiple comma-separated values
# - Values can be an integer or repeating pattern of the '*/2' variety
# """
#
# def __init__(self, schedule: str):
# self.schedule = schedule
# components = schedule.split(' ')
# if len(components) != 3:
# raise ValueError('Crontab must be three space-delimited components')
#
# minutes, hours, weekdays = components
# self.minutes = parse(minutes, 60)
# self.hours = parse(hours, 24)
# self.weekdays = parse(weekdays, 24)
#
# def __repr__(self):
# return '<Crontab: {}>'.format(self.schedule)
#
# def next_run(self, current_time: datetime.datetime) -> datetime.datetime:
# """Given the current time, when is the next scheduled run?"""
# # if next run is next day, get smallest hour, smallest minute
# # if next run is today, future hour, get smallest minute
# # if next run is today, this hour, get next greatest minute
# next_run = datetime.datetime(current_time.year, current_time.month, current_time.day,
# tzinfo=current_time.tzinfo)
# weekday = current_time.isoweekday()
# weekday = 0 if weekday == 7 else weekday # Move Sunday to day 0
# if weekday in self.weekdays:
# # could be a run today
# if current_time.hour in self.hours:
# # could be a run this hour
# for minute in self.minutes:
# if minute > current_time.minute:
# # there is a run this hour
# return next_run.replace(hour=current_time.hour, minute=minute)
# # no run this hour, check future hours
# for hour in self.hours:
# if hour > current_time.hour:
# # there is a run today
# return next_run.replace(hour=hour, minute=self.minutes[0])
# # no run today, look for next matching weekday
# for day in range(1, 7):
# next_run += datetime.timedelta(days=1)
# weekday = next_run.isoweekday()
# weekday = 0 if weekday == 7 else weekday # Move Sunday to day 0
# if weekday in self.weekdays:
# return next_run.replace(hour=self.hours[0], minute=self.minutes[0])
# raise RuntimeError('No next run found for schedule {}'.format(self.schedule))
. Output only the next line. | schedule = models.TextField(null=True, validators=[cron.cron_validator]) |
Predict the next line for this snippet: <|code_start|> version = self.current_version.version
# disable the past schedule
self.current_version.schedule_active = False
self.current_version.save()
workflow = Workflow.objects.create(name=self, version=version + 1, **kwargs)
self.current_version = workflow
self.save()
return workflow
class Workflow(models.Model):
class Meta:
unique_together = (('name', 'version'),)
name = models.ForeignKey(WorkflowName, models.PROTECT)
version = models.IntegerField(editable=False) # serializer read-only
# scheduling is completely optional:
schedule_active = models.BooleanField(default=False)
schedule = models.TextField(null=True, validators=[cron.cron_validator])
next_run = models.DateTimeField(null=True)
# parameters
parameters = fields.JSONField(default=dict)
def save(self, **kwargs):
if self.schedule_active:
if not self.next_run:
<|code_end|>
with the help of current file imports:
from django.db import models
from django.contrib.postgres import fields
from django.db.models import functions
from yawn.utilities import cron, database
from yawn.utilities.cron import Crontab
from yawn.task.models import Task
from yawn.task.models import Task
and context from other files:
# Path: yawn/utilities/cron.py
# class Crontab:
# def __init__(self, schedule: str):
# def __repr__(self):
# def next_run(self, current_time: datetime.datetime) -> datetime.datetime:
# def parse(pattern: str, max_value: int):
# def cron_validator(crontab: str):
#
# Path: yawn/utilities/database.py
# def close_on_exception(func):
# def wrapper(*args, **kwargs):
# def current_time():
#
# Path: yawn/utilities/cron.py
# class Crontab:
# """
# Simplified Crontab
#
# Support "minute hour weekday" components of a standard cron job.
# - "*/15 2,7,15 1-5" means "every fifteen minutes, on hours 2 7 15, Monday-Friday"
# - Minutes are from 0-59, hours from 0-23, and days from 0(Sunday)-6(Saturday)
# - Fields can contain multiple comma-separated values
# - Values can be an integer or repeating pattern of the '*/2' variety
# """
#
# def __init__(self, schedule: str):
# self.schedule = schedule
# components = schedule.split(' ')
# if len(components) != 3:
# raise ValueError('Crontab must be three space-delimited components')
#
# minutes, hours, weekdays = components
# self.minutes = parse(minutes, 60)
# self.hours = parse(hours, 24)
# self.weekdays = parse(weekdays, 24)
#
# def __repr__(self):
# return '<Crontab: {}>'.format(self.schedule)
#
# def next_run(self, current_time: datetime.datetime) -> datetime.datetime:
# """Given the current time, when is the next scheduled run?"""
# # if next run is next day, get smallest hour, smallest minute
# # if next run is today, future hour, get smallest minute
# # if next run is today, this hour, get next greatest minute
# next_run = datetime.datetime(current_time.year, current_time.month, current_time.day,
# tzinfo=current_time.tzinfo)
# weekday = current_time.isoweekday()
# weekday = 0 if weekday == 7 else weekday # Move Sunday to day 0
# if weekday in self.weekdays:
# # could be a run today
# if current_time.hour in self.hours:
# # could be a run this hour
# for minute in self.minutes:
# if minute > current_time.minute:
# # there is a run this hour
# return next_run.replace(hour=current_time.hour, minute=minute)
# # no run this hour, check future hours
# for hour in self.hours:
# if hour > current_time.hour:
# # there is a run today
# return next_run.replace(hour=hour, minute=self.minutes[0])
# # no run today, look for next matching weekday
# for day in range(1, 7):
# next_run += datetime.timedelta(days=1)
# weekday = next_run.isoweekday()
# weekday = 0 if weekday == 7 else weekday # Move Sunday to day 0
# if weekday in self.weekdays:
# return next_run.replace(hour=self.hours[0], minute=self.minutes[0])
# raise RuntimeError('No next run found for schedule {}'.format(self.schedule))
, which may contain function names, class names, or code. Output only the next line. | self.next_run = Crontab(self.schedule).next_run(database.current_time()) |
Given the following code snippet before the placeholder: <|code_start|> version = self.current_version.version
# disable the past schedule
self.current_version.schedule_active = False
self.current_version.save()
workflow = Workflow.objects.create(name=self, version=version + 1, **kwargs)
self.current_version = workflow
self.save()
return workflow
class Workflow(models.Model):
class Meta:
unique_together = (('name', 'version'),)
name = models.ForeignKey(WorkflowName, models.PROTECT)
version = models.IntegerField(editable=False) # serializer read-only
# scheduling is completely optional:
schedule_active = models.BooleanField(default=False)
schedule = models.TextField(null=True, validators=[cron.cron_validator])
next_run = models.DateTimeField(null=True)
# parameters
parameters = fields.JSONField(default=dict)
def save(self, **kwargs):
if self.schedule_active:
if not self.next_run:
<|code_end|>
, predict the next line using imports from the current file:
from django.db import models
from django.contrib.postgres import fields
from django.db.models import functions
from yawn.utilities import cron, database
from yawn.utilities.cron import Crontab
from yawn.task.models import Task
from yawn.task.models import Task
and context including class names, function names, and sometimes code from other files:
# Path: yawn/utilities/cron.py
# class Crontab:
# def __init__(self, schedule: str):
# def __repr__(self):
# def next_run(self, current_time: datetime.datetime) -> datetime.datetime:
# def parse(pattern: str, max_value: int):
# def cron_validator(crontab: str):
#
# Path: yawn/utilities/database.py
# def close_on_exception(func):
# def wrapper(*args, **kwargs):
# def current_time():
#
# Path: yawn/utilities/cron.py
# class Crontab:
# """
# Simplified Crontab
#
# Support "minute hour weekday" components of a standard cron job.
# - "*/15 2,7,15 1-5" means "every fifteen minutes, on hours 2 7 15, Monday-Friday"
# - Minutes are from 0-59, hours from 0-23, and days from 0(Sunday)-6(Saturday)
# - Fields can contain multiple comma-separated values
# - Values can be an integer or repeating pattern of the '*/2' variety
# """
#
# def __init__(self, schedule: str):
# self.schedule = schedule
# components = schedule.split(' ')
# if len(components) != 3:
# raise ValueError('Crontab must be three space-delimited components')
#
# minutes, hours, weekdays = components
# self.minutes = parse(minutes, 60)
# self.hours = parse(hours, 24)
# self.weekdays = parse(weekdays, 24)
#
# def __repr__(self):
# return '<Crontab: {}>'.format(self.schedule)
#
# def next_run(self, current_time: datetime.datetime) -> datetime.datetime:
# """Given the current time, when is the next scheduled run?"""
# # if next run is next day, get smallest hour, smallest minute
# # if next run is today, future hour, get smallest minute
# # if next run is today, this hour, get next greatest minute
# next_run = datetime.datetime(current_time.year, current_time.month, current_time.day,
# tzinfo=current_time.tzinfo)
# weekday = current_time.isoweekday()
# weekday = 0 if weekday == 7 else weekday # Move Sunday to day 0
# if weekday in self.weekdays:
# # could be a run today
# if current_time.hour in self.hours:
# # could be a run this hour
# for minute in self.minutes:
# if minute > current_time.minute:
# # there is a run this hour
# return next_run.replace(hour=current_time.hour, minute=minute)
# # no run this hour, check future hours
# for hour in self.hours:
# if hour > current_time.hour:
# # there is a run today
# return next_run.replace(hour=hour, minute=self.minutes[0])
# # no run today, look for next matching weekday
# for day in range(1, 7):
# next_run += datetime.timedelta(days=1)
# weekday = next_run.isoweekday()
# weekday = 0 if weekday == 7 else weekday # Move Sunday to day 0
# if weekday in self.weekdays:
# return next_run.replace(hour=self.hours[0], minute=self.minutes[0])
# raise RuntimeError('No next run found for schedule {}'.format(self.schedule))
. Output only the next line. | self.next_run = Crontab(self.schedule).next_run(database.current_time()) |
Given snippet: <|code_start|>
class WorkerViewSet(viewsets.GenericViewSet,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin):
"""
Worker endpoint, GET(list)
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.db.models import Count
from rest_framework import viewsets
from yawn.worker.models import Worker, Queue
from yawn.worker.serializers import WorkerSerializer, QueueSerializer
and context:
# Path: yawn/worker/models.py
# class Worker(models.Model):
# """Information about current and past workers"""
# #
# # NOTE: consider instead taking an advisory lock for each worker,
# # and using it to check if a worker is still connected.
# # See `pg_try_advisory_lock` and `select * from pg_locks where locktype = 'advisory'`
# # That would give more immediate feedback, but its not clear we need to be faster.
# #
# ACTIVE = 'active'
# EXITED = 'exited'
# LOST = 'lost'
# STATUS_CHOICES = [(x, x) for x in (ACTIVE, EXITED, LOST)]
#
# name = models.TextField(blank=False)
# status = models.TextField(choices=STATUS_CHOICES, default=ACTIVE)
# start_timestamp = models.DateTimeField(default=functions.Now)
# last_heartbeat = models.DateTimeField(default=functions.Now)
#
# @staticmethod
# def find_lost(timeout):
# from yawn.task.models import Execution
#
# # Make a sparse index so looking up active workers is fast:
# # CREATE INDEX yawn_worker_active ON yawn_worker (status) WHERE status = 'active'
# lost = Worker.objects.filter(
# status=Worker.ACTIVE, last_heartbeat__lt=functions.Now() - timedelta(seconds=timeout)
# )
# for worker in lost:
# logger.warning('Marking %r as lost', worker)
# worker.status = Worker.LOST
# worker.save()
#
# executions = worker.execution_set.filter(status=Execution.RUNNING)
#
# for execution in executions:
# logger.warning('Marking %r as lost', execution)
# execution.mark_finished(lost=True)
#
# def __str__(self):
# return self.name
#
# class Queue(models.Model):
# """Arbitrary tag defining where tasks run."""
#
# name = models.TextField(unique=True)
#
# _default = None
#
# def __str__(self):
# return self.name
#
# @classmethod
# def get_default_queue(cls):
# if not cls._default:
# cls._default = Queue.objects.get_or_create(name='default')[0]
# return cls._default
#
# Path: yawn/worker/serializers.py
# class WorkerSerializer(serializers.ModelSerializer):
# class Meta:
# model = Worker
# fields = '__all__'
#
# class QueueSerializer(serializers.ModelSerializer):
# message_count = serializers.IntegerField(read_only=True, source='message__count')
# purge = serializers.BooleanField(write_only=True, default=False)
#
# class Meta:
# model = Queue
# fields = '__all__'
#
# def update(self, instance, validated_data):
# if validated_data['purge'] is True:
# instance.message_set.all().delete()
# instance.message__count = 0
# return instance
which might include code, classes, or functions. Output only the next line. | queryset = Worker.objects.all().order_by('-id') |
Next line prediction: <|code_start|>
class WorkerViewSet(viewsets.GenericViewSet,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin):
"""
Worker endpoint, GET(list)
"""
queryset = Worker.objects.all().order_by('-id')
serializer_class = WorkerSerializer
class QueueViewSet(viewsets.GenericViewSet,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.mixins.UpdateModelMixin):
"""
Queue endpoint GET(list,detail), PATCH to clear all messages
"""
<|code_end|>
. Use current file imports:
(from django.db.models import Count
from rest_framework import viewsets
from yawn.worker.models import Worker, Queue
from yawn.worker.serializers import WorkerSerializer, QueueSerializer)
and context including class names, function names, or small code snippets from other files:
# Path: yawn/worker/models.py
# class Worker(models.Model):
# """Information about current and past workers"""
# #
# # NOTE: consider instead taking an advisory lock for each worker,
# # and using it to check if a worker is still connected.
# # See `pg_try_advisory_lock` and `select * from pg_locks where locktype = 'advisory'`
# # That would give more immediate feedback, but its not clear we need to be faster.
# #
# ACTIVE = 'active'
# EXITED = 'exited'
# LOST = 'lost'
# STATUS_CHOICES = [(x, x) for x in (ACTIVE, EXITED, LOST)]
#
# name = models.TextField(blank=False)
# status = models.TextField(choices=STATUS_CHOICES, default=ACTIVE)
# start_timestamp = models.DateTimeField(default=functions.Now)
# last_heartbeat = models.DateTimeField(default=functions.Now)
#
# @staticmethod
# def find_lost(timeout):
# from yawn.task.models import Execution
#
# # Make a sparse index so looking up active workers is fast:
# # CREATE INDEX yawn_worker_active ON yawn_worker (status) WHERE status = 'active'
# lost = Worker.objects.filter(
# status=Worker.ACTIVE, last_heartbeat__lt=functions.Now() - timedelta(seconds=timeout)
# )
# for worker in lost:
# logger.warning('Marking %r as lost', worker)
# worker.status = Worker.LOST
# worker.save()
#
# executions = worker.execution_set.filter(status=Execution.RUNNING)
#
# for execution in executions:
# logger.warning('Marking %r as lost', execution)
# execution.mark_finished(lost=True)
#
# def __str__(self):
# return self.name
#
# class Queue(models.Model):
# """Arbitrary tag defining where tasks run."""
#
# name = models.TextField(unique=True)
#
# _default = None
#
# def __str__(self):
# return self.name
#
# @classmethod
# def get_default_queue(cls):
# if not cls._default:
# cls._default = Queue.objects.get_or_create(name='default')[0]
# return cls._default
#
# Path: yawn/worker/serializers.py
# class WorkerSerializer(serializers.ModelSerializer):
# class Meta:
# model = Worker
# fields = '__all__'
#
# class QueueSerializer(serializers.ModelSerializer):
# message_count = serializers.IntegerField(read_only=True, source='message__count')
# purge = serializers.BooleanField(write_only=True, default=False)
#
# class Meta:
# model = Queue
# fields = '__all__'
#
# def update(self, instance, validated_data):
# if validated_data['purge'] is True:
# instance.message_set.all().delete()
# instance.message__count = 0
# return instance
. Output only the next line. | queryset = Queue.objects.all().annotate(Count('message')).order_by('id') |
Based on the snippet: <|code_start|>
class WorkerViewSet(viewsets.GenericViewSet,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin):
"""
Worker endpoint, GET(list)
"""
queryset = Worker.objects.all().order_by('-id')
<|code_end|>
, predict the immediate next line with the help of imports:
from django.db.models import Count
from rest_framework import viewsets
from yawn.worker.models import Worker, Queue
from yawn.worker.serializers import WorkerSerializer, QueueSerializer
and context (classes, functions, sometimes code) from other files:
# Path: yawn/worker/models.py
# class Worker(models.Model):
# """Information about current and past workers"""
# #
# # NOTE: consider instead taking an advisory lock for each worker,
# # and using it to check if a worker is still connected.
# # See `pg_try_advisory_lock` and `select * from pg_locks where locktype = 'advisory'`
# # That would give more immediate feedback, but its not clear we need to be faster.
# #
# ACTIVE = 'active'
# EXITED = 'exited'
# LOST = 'lost'
# STATUS_CHOICES = [(x, x) for x in (ACTIVE, EXITED, LOST)]
#
# name = models.TextField(blank=False)
# status = models.TextField(choices=STATUS_CHOICES, default=ACTIVE)
# start_timestamp = models.DateTimeField(default=functions.Now)
# last_heartbeat = models.DateTimeField(default=functions.Now)
#
# @staticmethod
# def find_lost(timeout):
# from yawn.task.models import Execution
#
# # Make a sparse index so looking up active workers is fast:
# # CREATE INDEX yawn_worker_active ON yawn_worker (status) WHERE status = 'active'
# lost = Worker.objects.filter(
# status=Worker.ACTIVE, last_heartbeat__lt=functions.Now() - timedelta(seconds=timeout)
# )
# for worker in lost:
# logger.warning('Marking %r as lost', worker)
# worker.status = Worker.LOST
# worker.save()
#
# executions = worker.execution_set.filter(status=Execution.RUNNING)
#
# for execution in executions:
# logger.warning('Marking %r as lost', execution)
# execution.mark_finished(lost=True)
#
# def __str__(self):
# return self.name
#
# class Queue(models.Model):
# """Arbitrary tag defining where tasks run."""
#
# name = models.TextField(unique=True)
#
# _default = None
#
# def __str__(self):
# return self.name
#
# @classmethod
# def get_default_queue(cls):
# if not cls._default:
# cls._default = Queue.objects.get_or_create(name='default')[0]
# return cls._default
#
# Path: yawn/worker/serializers.py
# class WorkerSerializer(serializers.ModelSerializer):
# class Meta:
# model = Worker
# fields = '__all__'
#
# class QueueSerializer(serializers.ModelSerializer):
# message_count = serializers.IntegerField(read_only=True, source='message__count')
# purge = serializers.BooleanField(write_only=True, default=False)
#
# class Meta:
# model = Queue
# fields = '__all__'
#
# def update(self, instance, validated_data):
# if validated_data['purge'] is True:
# instance.message_set.all().delete()
# instance.message__count = 0
# return instance
. Output only the next line. | serializer_class = WorkerSerializer |
Given snippet: <|code_start|>
class WorkerViewSet(viewsets.GenericViewSet,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin):
"""
Worker endpoint, GET(list)
"""
queryset = Worker.objects.all().order_by('-id')
serializer_class = WorkerSerializer
class QueueViewSet(viewsets.GenericViewSet,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.mixins.UpdateModelMixin):
"""
Queue endpoint GET(list,detail), PATCH to clear all messages
"""
queryset = Queue.objects.all().annotate(Count('message')).order_by('id')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.db.models import Count
from rest_framework import viewsets
from yawn.worker.models import Worker, Queue
from yawn.worker.serializers import WorkerSerializer, QueueSerializer
and context:
# Path: yawn/worker/models.py
# class Worker(models.Model):
# """Information about current and past workers"""
# #
# # NOTE: consider instead taking an advisory lock for each worker,
# # and using it to check if a worker is still connected.
# # See `pg_try_advisory_lock` and `select * from pg_locks where locktype = 'advisory'`
# # That would give more immediate feedback, but its not clear we need to be faster.
# #
# ACTIVE = 'active'
# EXITED = 'exited'
# LOST = 'lost'
# STATUS_CHOICES = [(x, x) for x in (ACTIVE, EXITED, LOST)]
#
# name = models.TextField(blank=False)
# status = models.TextField(choices=STATUS_CHOICES, default=ACTIVE)
# start_timestamp = models.DateTimeField(default=functions.Now)
# last_heartbeat = models.DateTimeField(default=functions.Now)
#
# @staticmethod
# def find_lost(timeout):
# from yawn.task.models import Execution
#
# # Make a sparse index so looking up active workers is fast:
# # CREATE INDEX yawn_worker_active ON yawn_worker (status) WHERE status = 'active'
# lost = Worker.objects.filter(
# status=Worker.ACTIVE, last_heartbeat__lt=functions.Now() - timedelta(seconds=timeout)
# )
# for worker in lost:
# logger.warning('Marking %r as lost', worker)
# worker.status = Worker.LOST
# worker.save()
#
# executions = worker.execution_set.filter(status=Execution.RUNNING)
#
# for execution in executions:
# logger.warning('Marking %r as lost', execution)
# execution.mark_finished(lost=True)
#
# def __str__(self):
# return self.name
#
# class Queue(models.Model):
# """Arbitrary tag defining where tasks run."""
#
# name = models.TextField(unique=True)
#
# _default = None
#
# def __str__(self):
# return self.name
#
# @classmethod
# def get_default_queue(cls):
# if not cls._default:
# cls._default = Queue.objects.get_or_create(name='default')[0]
# return cls._default
#
# Path: yawn/worker/serializers.py
# class WorkerSerializer(serializers.ModelSerializer):
# class Meta:
# model = Worker
# fields = '__all__'
#
# class QueueSerializer(serializers.ModelSerializer):
# message_count = serializers.IntegerField(read_only=True, source='message__count')
# purge = serializers.BooleanField(write_only=True, default=False)
#
# class Meta:
# model = Queue
# fields = '__all__'
#
# def update(self, instance, validated_data):
# if validated_data['purge'] is True:
# instance.message_set.all().delete()
# instance.message__count = 0
# return instance
which might include code, classes, or functions. Output only the next line. | serializer_class = QueueSerializer |
Continue the code snippet: <|code_start|>
class Command(BaseCommand):
help = 'Load sample data'
def handle(self, *args, **options):
self.stdout.write('Creating sample workflows!')
self.stdout.write('First, a simple four task workflow...')
name, _ = WorkflowName.objects.get_or_create(name='Simple Workflow Example')
workflow = name.new_version(parameters={'MY_OBJECT_ID': '1', 'SOME_SETTING': 'false'})
<|code_end|>
. Use current file imports:
import random
from django.core.management.base import BaseCommand
from yawn.task.models import Template
from yawn.workflow.models import WorkflowName
from yawn.workflow.serializers import WorkflowSerializer
from yawn.workflow.tests.utils import load_sample_workflow
and context (classes, functions, or code) from other files:
# Path: yawn/task/models.py
# class Template(models.Model):
# workflow = models.ForeignKey('Workflow', models.PROTECT, editable=False, null=True)
# queue = models.ForeignKey(Queue, models.PROTECT)
# name = models.TextField()
#
# command = models.TextField()
# max_retries = models.IntegerField(default=0)
# timeout = models.IntegerField(null=True) # seconds
#
# # self-reference for upstream tasks
# upstream = models.ManyToManyField(
# 'Template', related_name='downstream', symmetrical=False, blank=True)
#
# class Meta:
# unique_together = ('workflow', 'name')
#
# def __str__(self):
# return self.name
#
# def save(self, *args, **kwargs):
# # can't set the default above because the value isn't serializable for migrations
# if not self.queue_id:
# self.queue = Queue.get_default_queue()
# super().save(*args, **kwargs)
#
# Path: yawn/workflow/models.py
# class WorkflowName(models.Model):
# name = models.TextField(unique=True)
# current_version = models.OneToOneField('Workflow', on_delete=models.CASCADE,
# null=True, related_name='is_current')
#
# def new_version(self, **kwargs):
# """Create a new version of a workflow"""
# version = 0
# if self.current_version_id:
# version = self.current_version.version
#
# # disable the past schedule
# self.current_version.schedule_active = False
# self.current_version.save()
#
# workflow = Workflow.objects.create(name=self, version=version + 1, **kwargs)
# self.current_version = workflow
# self.save()
# return workflow
#
# Path: yawn/workflow/serializers.py
# class WorkflowSerializer(serializers.ModelSerializer):
# name = serializers.CharField(source='name.name')
# name_id = serializers.IntegerField(read_only=True)
# tasks = TemplateSerializer(many=True, allow_empty=False, source='template_set')
#
# class Meta:
# model = Workflow
# fields = '__all__'
# read_only_fields = ('next_run', 'name_id')
#
# @transaction.atomic
# def create(self, validated_data):
# workflow_name = validated_data.pop('name')['name']
# name, _ = WorkflowName.objects.get_or_create(name=workflow_name)
# # get the current version and check for changes
# if name.current_version and unchanged(name.current_version, validated_data):
# # TODO return 302 found instead of 201 created
# return name.current_version
#
# # changed or no existing version: create a new version
# tasks = validated_data.pop('template_set')
# workflow = name.new_version(**validated_data)
# for task_kwargs in tasks:
# # the awkward 'all' is so serialization gets related tasks through the manager method
# upstream = workflow.template_set.filter(name__in=task_kwargs.pop('upstream')['all'])
# task = Template.objects.create(workflow=workflow, **task_kwargs)
# task.upstream.set(upstream)
#
# return workflow
#
# def validate_parameters(self, parameters):
# """Validate variable name is a slug and value is a string"""
# if not isinstance(parameters, dict):
# raise serializers.ValidationError('Workflow parameters must be a dictionary')
# key_regex = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
# errors = []
# for key, value in parameters.items():
# if not key_regex.fullmatch(key):
# errors.append('Invalid parameter key: %s' % key)
# if not isinstance(value, str):
# errors.append('Invalid parameter value: %s' % value)
# if errors:
# raise serializers.ValidationError(errors)
# return parameters
#
# def validate_tasks(self, tasks):
# """Validate tasks only reference already defined upstream tasks"""
# seen_tasks = set()
# errors = []
# for task in tasks:
# name = task['name']
# if name in seen_tasks:
# errors.append('Task names must be unique. Tas %s is defined more than once' % name)
# missing = set(task['upstream']['all']) - seen_tasks
# if missing:
# errors.append(
# 'Task {task} specifies upstream task(s) {upstream} but they do not exist. '
# 'Hint: check the order of your tasks.'.format(
# task=name, upstream=', '.join(missing))
# )
# seen_tasks.add(name)
#
# if errors:
# raise serializers.ValidationError(errors)
# return tasks
#
# Path: yawn/workflow/tests/utils.py
# def load_sample_workflow():
# filename = os.path.join(os.path.dirname(__file__), 'workflow.yaml')
# return yaml.safe_load(open(filename).read())
. Output only the next line. | task1 = Template.objects.create(workflow=workflow, name='start', command='echo Starting...') |
Using the snippet: <|code_start|>
class Command(BaseCommand):
help = 'Load sample data'
def handle(self, *args, **options):
self.stdout.write('Creating sample workflows!')
self.stdout.write('First, a simple four task workflow...')
<|code_end|>
, determine the next line of code. You have imports:
import random
from django.core.management.base import BaseCommand
from yawn.task.models import Template
from yawn.workflow.models import WorkflowName
from yawn.workflow.serializers import WorkflowSerializer
from yawn.workflow.tests.utils import load_sample_workflow
and context (class names, function names, or code) available:
# Path: yawn/task/models.py
# class Template(models.Model):
# workflow = models.ForeignKey('Workflow', models.PROTECT, editable=False, null=True)
# queue = models.ForeignKey(Queue, models.PROTECT)
# name = models.TextField()
#
# command = models.TextField()
# max_retries = models.IntegerField(default=0)
# timeout = models.IntegerField(null=True) # seconds
#
# # self-reference for upstream tasks
# upstream = models.ManyToManyField(
# 'Template', related_name='downstream', symmetrical=False, blank=True)
#
# class Meta:
# unique_together = ('workflow', 'name')
#
# def __str__(self):
# return self.name
#
# def save(self, *args, **kwargs):
# # can't set the default above because the value isn't serializable for migrations
# if not self.queue_id:
# self.queue = Queue.get_default_queue()
# super().save(*args, **kwargs)
#
# Path: yawn/workflow/models.py
# class WorkflowName(models.Model):
# name = models.TextField(unique=True)
# current_version = models.OneToOneField('Workflow', on_delete=models.CASCADE,
# null=True, related_name='is_current')
#
# def new_version(self, **kwargs):
# """Create a new version of a workflow"""
# version = 0
# if self.current_version_id:
# version = self.current_version.version
#
# # disable the past schedule
# self.current_version.schedule_active = False
# self.current_version.save()
#
# workflow = Workflow.objects.create(name=self, version=version + 1, **kwargs)
# self.current_version = workflow
# self.save()
# return workflow
#
# Path: yawn/workflow/serializers.py
# class WorkflowSerializer(serializers.ModelSerializer):
# name = serializers.CharField(source='name.name')
# name_id = serializers.IntegerField(read_only=True)
# tasks = TemplateSerializer(many=True, allow_empty=False, source='template_set')
#
# class Meta:
# model = Workflow
# fields = '__all__'
# read_only_fields = ('next_run', 'name_id')
#
# @transaction.atomic
# def create(self, validated_data):
# workflow_name = validated_data.pop('name')['name']
# name, _ = WorkflowName.objects.get_or_create(name=workflow_name)
# # get the current version and check for changes
# if name.current_version and unchanged(name.current_version, validated_data):
# # TODO return 302 found instead of 201 created
# return name.current_version
#
# # changed or no existing version: create a new version
# tasks = validated_data.pop('template_set')
# workflow = name.new_version(**validated_data)
# for task_kwargs in tasks:
# # the awkward 'all' is so serialization gets related tasks through the manager method
# upstream = workflow.template_set.filter(name__in=task_kwargs.pop('upstream')['all'])
# task = Template.objects.create(workflow=workflow, **task_kwargs)
# task.upstream.set(upstream)
#
# return workflow
#
# def validate_parameters(self, parameters):
# """Validate variable name is a slug and value is a string"""
# if not isinstance(parameters, dict):
# raise serializers.ValidationError('Workflow parameters must be a dictionary')
# key_regex = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
# errors = []
# for key, value in parameters.items():
# if not key_regex.fullmatch(key):
# errors.append('Invalid parameter key: %s' % key)
# if not isinstance(value, str):
# errors.append('Invalid parameter value: %s' % value)
# if errors:
# raise serializers.ValidationError(errors)
# return parameters
#
# def validate_tasks(self, tasks):
# """Validate tasks only reference already defined upstream tasks"""
# seen_tasks = set()
# errors = []
# for task in tasks:
# name = task['name']
# if name in seen_tasks:
# errors.append('Task names must be unique. Tas %s is defined more than once' % name)
# missing = set(task['upstream']['all']) - seen_tasks
# if missing:
# errors.append(
# 'Task {task} specifies upstream task(s) {upstream} but they do not exist. '
# 'Hint: check the order of your tasks.'.format(
# task=name, upstream=', '.join(missing))
# )
# seen_tasks.add(name)
#
# if errors:
# raise serializers.ValidationError(errors)
# return tasks
#
# Path: yawn/workflow/tests/utils.py
# def load_sample_workflow():
# filename = os.path.join(os.path.dirname(__file__), 'workflow.yaml')
# return yaml.safe_load(open(filename).read())
. Output only the next line. | name, _ = WorkflowName.objects.get_or_create(name='Simple Workflow Example') |
Using the snippet: <|code_start|>
if __name__ == "__main__":
path = "/Users/musthero/Documents/Yura/Applications/tmva_local/BDT_score_distributions_muons.root"
hsig_path = "histo_tmva_sig"
hbkg_path = "histo_tmva_bkg"
rootfile = ROOT.TFile.Open(path)
if rootfile.IsZombie():
print "Root file is corrupt"
hSig = rootfile.Get(hsig_path)
hBkg = rootfile.Get(hbkg_path)
<|code_end|>
, determine the next line of code. You have imports:
import ROOT
from ROOT import TGraphErrors
from array import array
from mva_tools.build_roc_simple import build_roc
and context (class names, function names, or code) available:
# Path: mva_tools/build_roc_simple.py
# def build_roc(h_sig, h_bkg, verbose=0):
#
# nbins_sig = h_sig.GetXaxis().GetNbins()
# nbins_bkg = h_bkg.GetXaxis().GetNbins()
# nbins = nbins_sig if nbins_sig == nbins_bkg else -1
# #print nbins
# if nbins < 0.0:
# sys.exit("Error: nbins_sig != nbins_bkg")
#
# min_val = h_sig.GetXaxis().GetXmin()
# max_val = h_sig.GetXaxis().GetXmax()
#
# step = float(max_val - min_val)/(nbins-1)
# sig_eff = array('f', [])
# bkg_rej = array('f', [])
#
# total_sig = h_sig.Integral()
# total_bkg = h_bkg.Integral()
# print total_sig
# print total_bkg
#
# sig_rejected = 0.0
# bkg_rejected = 0.0
# for i in xrange_bins(nbins):
# sig_rejected += h_sig.GetBinContent(i)
# #print sig_rejected
# bkg_rejected += h_bkg.GetBinContent(i)
# #print bkg_rejected
#
# seff = float(total_sig-sig_rejected)/total_sig
# brej = float(bkg_rejected)/total_bkg
# #print seff, brej
# sig_eff.append(seff)
# bkg_rej.append(brej)
#
# if verbose == 1:
# bdt_score = min_val + i * step
# print "bdt score =", bdt_score, "sig_eff =", seff
#
# #bin_sig = h_sig.GetBinContent()
# print "Overflow =", h_sig.GetBinContent(nbins_sig+1)
# print "Underflow =", h_sig.GetBinContent(0)
#
# g = ROOT.TGraph(nbins, sig_eff, bkg_rej)
# g.GetXaxis().SetRangeUser(0.0,1.0)
# g.GetYaxis().SetRangeUser(0.0,1.0)
# #g.Draw("AC")
# #g.SetLineColor(ROOT.kRed)
# g.SetTitle("ROC curve")
#
#
# return g
#
# #print nbins_bkg
. Output only the next line. | g = build_roc(hSig, hBkg, 1) |
Predict the next line after this snippet: <|code_start|>
if __name__ == "__main__":
path = "/Users/musthero/Documents/Yura/Applications/tmva_local/BDT_score_distributions_electrons.root"
hsig_skTMVA_path = "histo_tmva_sig"
hbkg_skTMVA_path = "histo_tmva_bkg"
hsig_sklearn_path = "histo_sk_sig"
hbkg_sklearn_path = "histo_sk_bkg"
rootfile = ROOT.TFile.Open(path)
if rootfile.IsZombie():
print "Root file is corrupt"
hSig_skTMVA = rootfile.Get(hsig_skTMVA_path)
hBkg_skTMVA = rootfile.Get(hbkg_skTMVA_path)
hSig_sklearn = rootfile.Get(hsig_sklearn_path)
hBkg_sklearn = rootfile.Get(hbkg_sklearn_path)
# Stack for keeping plots
plots = []
# Getting ROC-curve for skTMVA
<|code_end|>
using the current file's imports:
import ROOT
from ROOT import TGraphErrors
from array import array
from mva_tools.build_roc_simple import build_roc
and any relevant context from other files:
# Path: mva_tools/build_roc_simple.py
# def build_roc(h_sig, h_bkg, verbose=0):
#
# nbins_sig = h_sig.GetXaxis().GetNbins()
# nbins_bkg = h_bkg.GetXaxis().GetNbins()
# nbins = nbins_sig if nbins_sig == nbins_bkg else -1
# #print nbins
# if nbins < 0.0:
# sys.exit("Error: nbins_sig != nbins_bkg")
#
# min_val = h_sig.GetXaxis().GetXmin()
# max_val = h_sig.GetXaxis().GetXmax()
#
# step = float(max_val - min_val)/(nbins-1)
# sig_eff = array('f', [])
# bkg_rej = array('f', [])
#
# total_sig = h_sig.Integral()
# total_bkg = h_bkg.Integral()
# print total_sig
# print total_bkg
#
# sig_rejected = 0.0
# bkg_rejected = 0.0
# for i in xrange_bins(nbins):
# sig_rejected += h_sig.GetBinContent(i)
# #print sig_rejected
# bkg_rejected += h_bkg.GetBinContent(i)
# #print bkg_rejected
#
# seff = float(total_sig-sig_rejected)/total_sig
# brej = float(bkg_rejected)/total_bkg
# #print seff, brej
# sig_eff.append(seff)
# bkg_rej.append(brej)
#
# if verbose == 1:
# bdt_score = min_val + i * step
# print "bdt score =", bdt_score, "sig_eff =", seff
#
# #bin_sig = h_sig.GetBinContent()
# print "Overflow =", h_sig.GetBinContent(nbins_sig+1)
# print "Underflow =", h_sig.GetBinContent(0)
#
# g = ROOT.TGraph(nbins, sig_eff, bkg_rej)
# g.GetXaxis().SetRangeUser(0.0,1.0)
# g.GetYaxis().SetRangeUser(0.0,1.0)
# #g.Draw("AC")
# #g.SetLineColor(ROOT.kRed)
# g.SetTitle("ROC curve")
#
#
# return g
#
# #print nbins_bkg
. Output only the next line. | g1 = build_roc(hSig_skTMVA, hBkg_skTMVA) |
Here is a snippet: <|code_start|>
# load decision tree
bdt_path = '/Users/musthero/Documents/Yura/Applications/tmva_local/electrons_v5_VeryTightLH_20per.pkl'
with open(bdt_path, 'rb') as fid:
bdt = cPickle.load(fid)
# specify input variable list
var_list = [
('m_el_pt', 'F'),
('m_el_eta', 'F'),
('m_el_sigd0PV', 'F'),
('m_el_z0SinTheta', 'F'),
('m_el_etcone20Dpt', 'F'),
('m_el_ptcone20Dpt', 'F')
]
# specify output TMVA xml-file
tmva_outfile_xml = 'SKLearn_BDT_electons.weights.xml'
# save scikit-learn trained BDT classifier to TMVA xml-file
<|code_end|>
. Write the next line using the current file imports:
import cPickle
from skTMVA import convert_bdt_sklearn_tmva
and context from other files:
# Path: skTMVA/skTMVA.py
# def convert_bdt_sklearn_tmva(sklearn_bdt_clf, input_var_list, tmva_outfile_xml):
#
# # AdaBoost
# if isinstance(sklearn_bdt_clf, AdaBoostClassifier):
# convert_bdt__AdaBoost(sklearn_bdt_clf, input_var_list, tmva_outfile_xml)
#
# # Gradient Boosting (binary classification only)
# if isinstance(sklearn_bdt_clf, GradientBoostingClassifier):
# convert_bdt__Grad(sklearn_bdt_clf, input_var_list, tmva_outfile_xml)
, which may include functions, classes, or code. Output only the next line. | convert_bdt_sklearn_tmva(bdt, var_list, tmva_outfile_xml) |