Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Predict the next line for this snippet: <|code_start|>#! /usr/bin/env python -i
""" Load some useful stuff into the console when running python interactively. """
def _set_prompt():
""" Color code the Python prompt based on environment. """
env = os.environ.get('ENV', 'dev')
color = {'dev': '32', # Green
'stage': '33', # Yellow
'prod': '31'}.get(env) # Red
sys.ps1 = '\001\033[1;%sm\002>>> \001\033[0m\002' % color
sys.ps2 = '\001\033[1;%sm\002... \001\033[0m\002' % color
<|code_end|>
with the help of current file imports:
import os
import sys
import models # pylint: disable=unused-import,wrong-import-position
from common import log
and context from other files:
# Path: common/log.py
# class RainbowLogFormatter(logging.Formatter):
# class ContextFilter(logging.Filter): # pylint: disable=too-few-public-methods
# class PapertrailHandler(logging.handlers.SysLogHandler):
# def format(self, record):
# def filter(self, record):
# def __init__(self, *args, **kwargs):
# def init_logging(level=logging.INFO):
, which may contain function names, class names, or code. Output only the next line. | log.init_logging(log.logging.DEBUG) |
Given the code snippet: <|code_start|>"""
Tests for the Groups Model
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
try:
except ImportError:
print("WARNING: Cannot Load builtins for py3 compatibility.", file=sys.stderr)
warnings.simplefilter("error") # Make All warnings errors while testing.
def test_required_fields_blank(dbsession):
"""
Groups must have a name.
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
<|code_end|>
, generate the next line using the imports in this file:
from builtins import * # pylint: disable=unused-wildcard-import,redefined-builtin,wildcard-import
from sqlalchemy.exc import IntegrityError
from models import groups, memberships, profiles
import sys
import warnings
import pytest
and context (functions, classes, or occasionally code) from other files:
# Path: models/groups.py
# class Groups(bases.BaseModel):
#
# Path: models/memberships.py
# class Memberships(bases.BaseModel):
#
# Path: models/profiles.py
# class Profiles(bases.BaseModel):
# def get_by_email(cls, email):
# def validate_email(self, key, address): # pylint: disable=unused-argument,no-self-use
. Output only the next line. | group = groups.Groups() |
Given the following code snippet before the placeholder: <|code_start|>
warnings.simplefilter("error") # Make All warnings errors while testing.
def test_required_fields_blank(dbsession):
"""
Groups must have a name.
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
group = groups.Groups()
group.save()
# name is blank
with pytest.raises(IntegrityError):
dbsession.commit()
def test_create_group(dbsession):
"""
Create a Group
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
group = groups.Groups(name='A New Group')
group.save()
dbsession.commit()
def test_memberships_relation(dbsession):
""" Groups can have Memberships relations. """
profile1 = profiles.Profiles(full_name='92e17a59 740480af51b5', email='6b7d@4c4b.b33d')
<|code_end|>
, predict the next line using imports from the current file:
from builtins import * # pylint: disable=unused-wildcard-import,redefined-builtin,wildcard-import
from sqlalchemy.exc import IntegrityError
from models import groups, memberships, profiles
import sys
import warnings
import pytest
and context including class names, function names, and sometimes code from other files:
# Path: models/groups.py
# class Groups(bases.BaseModel):
#
# Path: models/memberships.py
# class Memberships(bases.BaseModel):
#
# Path: models/profiles.py
# class Profiles(bases.BaseModel):
# def get_by_email(cls, email):
# def validate_email(self, key, address): # pylint: disable=unused-argument,no-self-use
. Output only the next line. | membership1 = memberships.Memberships(profile=profile1) |
Given the following code snippet before the placeholder: <|code_start|>
warnings.simplefilter("error") # Make All warnings errors while testing.
def test_required_fields_blank(dbsession):
"""
Groups must have a name.
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
group = groups.Groups()
group.save()
# name is blank
with pytest.raises(IntegrityError):
dbsession.commit()
def test_create_group(dbsession):
"""
Create a Group
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
group = groups.Groups(name='A New Group')
group.save()
dbsession.commit()
def test_memberships_relation(dbsession):
""" Groups can have Memberships relations. """
<|code_end|>
, predict the next line using imports from the current file:
from builtins import * # pylint: disable=unused-wildcard-import,redefined-builtin,wildcard-import
from sqlalchemy.exc import IntegrityError
from models import groups, memberships, profiles
import sys
import warnings
import pytest
and context including class names, function names, and sometimes code from other files:
# Path: models/groups.py
# class Groups(bases.BaseModel):
#
# Path: models/memberships.py
# class Memberships(bases.BaseModel):
#
# Path: models/profiles.py
# class Profiles(bases.BaseModel):
# def get_by_email(cls, email):
# def validate_email(self, key, address): # pylint: disable=unused-argument,no-self-use
. Output only the next line. | profile1 = profiles.Profiles(full_name='92e17a59 740480af51b5', email='6b7d@4c4b.b33d') |
Predict the next line after this snippet: <|code_start|>"""
Tests for the API JsonApiResource class details endpoints with identifiers. Read, Update, and Delete
all by identifier.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
try:
except ImportError:
print("WARNING: Cannot Load builtins for py3 compatibility.", file=sys.stderr)
warnings.simplefilter("error") # Make All warnings errors while testing.
<|code_end|>
using the current file's imports:
from builtins import * # pylint: disable=unused-wildcard-import,redefined-builtin,wildcard-import
from models import bases
from ourapi.exceptions import Conflict, NotFound
import sys
import datetime
import warnings
import marshmallow
import pytest
import sqlalchemy as sa
import ourapi
import ourmarshmallow
and any relevant context from other files:
# Path: models/bases.py
# NO_VALUE = sa_symbol('NO_VALUE')
# class Base(object):
# class BaseModel(Base):
# def __repr__(self):
# def __str__(self):
# def __tablename__(cls): # pylint: disable=no-self-argument
# def delete(self):
# def _prepare_conditions(cls, conditions):
# def get_all(cls, conditions=None):
# def get_by_pk(cls, the_id):
# def save(self, flush=False):
#
# Path: ourapi/exceptions.py
# class Conflict(JSONAPIError, werkzeug.exceptions.Conflict):
# """ 409 Conflict: fails server unique constraint or id issues. """
# pass
#
# class NotFound(JSONAPIError, werkzeug.exceptions.NotFound):
# """ 404 Not Found: Resource doesn't exist. """
# pass
. Output only the next line. | class Horses(bases.BaseModel): |
Here is a snippet: <|code_start|> with pytest.raises(NotFound) as excinfo:
resource.patch(999, patch_data)
assert excinfo.value.description == {'detail': '999 not found.',
'source': {'parameter': '/id'}}
def test_detail_update_id_required(dbsession): # pylint: disable=unused-argument
""" Payload id and URL id must match for patch/update """
the_model = Horses(id=30, name="Time Coin Instant Understand")
the_model.save()
# id is missing, but it is required: http://jsonapi.org/format/#crud-updating
patch_data = {'data': {'attributes': {'name': 'bad request'}, 'type': 'horses'}}
resource = HorsesResource()
with pytest.raises(marshmallow.ValidationError) as excinfo:
# This will be turned into a BadRequest by the error handler in the API.
resource.patch(30, patch_data)
assert excinfo.value.messages == {'errors': [{'detail': '`data` object must include `id` key.',
'source': {'pointer': '/data'}}]}
def test_detail_update_id_mismatch(dbsession): # pylint: disable=unused-argument
""" Payload id and URL id must match for patch/update """
the_model = Horses(id=40, name="Madden Everything Wonder Pronunciation")
the_model.save()
# id does not match the request url id
patch_data = {'data': {'attributes': {'name': 'bad request'},
'id': '999', 'type': 'horses'}}
resource = HorsesResource()
<|code_end|>
. Write the next line using the current file imports:
from builtins import * # pylint: disable=unused-wildcard-import,redefined-builtin,wildcard-import
from models import bases
from ourapi.exceptions import Conflict, NotFound
import sys
import datetime
import warnings
import marshmallow
import pytest
import sqlalchemy as sa
import ourapi
import ourmarshmallow
and context from other files:
# Path: models/bases.py
# NO_VALUE = sa_symbol('NO_VALUE')
# class Base(object):
# class BaseModel(Base):
# def __repr__(self):
# def __str__(self):
# def __tablename__(cls): # pylint: disable=no-self-argument
# def delete(self):
# def _prepare_conditions(cls, conditions):
# def get_all(cls, conditions=None):
# def get_by_pk(cls, the_id):
# def save(self, flush=False):
#
# Path: ourapi/exceptions.py
# class Conflict(JSONAPIError, werkzeug.exceptions.Conflict):
# """ 409 Conflict: fails server unique constraint or id issues. """
# pass
#
# class NotFound(JSONAPIError, werkzeug.exceptions.NotFound):
# """ 404 Not Found: Resource doesn't exist. """
# pass
, which may include functions, classes, or code. Output only the next line. | with pytest.raises(Conflict) as excinfo: |
Given the following code snippet before the placeholder: <|code_start|>class HorsesResource(ourapi.JsonApiResource):
""" JSONAPI CRUD endpoints for HorsesSchema/Horses Model. """
schema = HorsesSchema
def test_new_detail_resource():
""" Resource endpoint for Model details. """
resource = HorsesResource()
assert isinstance(resource, HorsesResource)
def test_detail_read(dbsession): # pylint: disable=unused-argument
""" Create a resource, and then Read it """
now = datetime.datetime(2017, 8, 7, 18, 37, 29)
the_model = Horses(id=10, name="foo bar baz", modified_at=now)
the_model.save()
resource = HorsesResource()
response = resource.get(10)
assert response == {'data': {'attributes': {'name': 'foo bar baz'},
'id': '10',
'links': {'self': '/horses/10'},
'meta': {'modified_at': '2017-08-07T18:37:29+00:00'},
'type': 'horses'},
'links': {'self': '/horses/10'}}
def test_detail_read_not_found(dbsession): # pylint: disable=unused-argument
""" Resource Detail Read raises NotFound when id isn't found. """
resource = HorsesResource()
<|code_end|>
, predict the next line using imports from the current file:
from builtins import * # pylint: disable=unused-wildcard-import,redefined-builtin,wildcard-import
from models import bases
from ourapi.exceptions import Conflict, NotFound
import sys
import datetime
import warnings
import marshmallow
import pytest
import sqlalchemy as sa
import ourapi
import ourmarshmallow
and context including class names, function names, and sometimes code from other files:
# Path: models/bases.py
# NO_VALUE = sa_symbol('NO_VALUE')
# class Base(object):
# class BaseModel(Base):
# def __repr__(self):
# def __str__(self):
# def __tablename__(cls): # pylint: disable=no-self-argument
# def delete(self):
# def _prepare_conditions(cls, conditions):
# def get_all(cls, conditions=None):
# def get_by_pk(cls, the_id):
# def save(self, flush=False):
#
# Path: ourapi/exceptions.py
# class Conflict(JSONAPIError, werkzeug.exceptions.Conflict):
# """ 409 Conflict: fails server unique constraint or id issues. """
# pass
#
# class NotFound(JSONAPIError, werkzeug.exceptions.NotFound):
# """ 404 Not Found: Resource doesn't exist. """
# pass
. Output only the next line. | with pytest.raises(NotFound) as excinfo: |
Next line prediction: <|code_start|>"""
Tests for the Memberships Model
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
try:
except ImportError:
print("WARNING: Cannot Load builtins for py3 compatibility.", file=sys.stderr)
warnings.simplefilter("error") # Make All warnings errors while testing.
def test_required_relations(dbsession):
"""
Memberships must have a Profile and a Group.
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
inst = memberships.Memberships()
inst.save()
with pytest.raises(IntegrityError):
dbsession.commit()
def test_required_profile_missing(dbsession):
"""
Membership Profile not existing is an error.
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
<|code_end|>
. Use current file imports:
( from builtins import * # pylint: disable=unused-wildcard-import,redefined-builtin,wildcard-import
from sqlalchemy.exc import IntegrityError
from models import groups, memberships, profiles
import sys
import warnings
import pytest)
and context including class names, function names, or small code snippets from other files:
# Path: models/groups.py
# class Groups(bases.BaseModel):
#
# Path: models/memberships.py
# class Memberships(bases.BaseModel):
#
# Path: models/profiles.py
# class Profiles(bases.BaseModel):
# def get_by_email(cls, email):
# def validate_email(self, key, address): # pylint: disable=unused-argument,no-self-use
. Output only the next line. | group = groups.Groups(name='61f7d724-d90c-4e2f-85f4-7cab51d68b61') |
Predict the next line for this snippet: <|code_start|>"""
Tests for the Memberships Model
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
try:
except ImportError:
print("WARNING: Cannot Load builtins for py3 compatibility.", file=sys.stderr)
warnings.simplefilter("error") # Make All warnings errors while testing.
def test_required_relations(dbsession):
"""
Memberships must have a Profile and a Group.
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
<|code_end|>
with the help of current file imports:
from builtins import * # pylint: disable=unused-wildcard-import,redefined-builtin,wildcard-import
from sqlalchemy.exc import IntegrityError
from models import groups, memberships, profiles
import sys
import warnings
import pytest
and context from other files:
# Path: models/groups.py
# class Groups(bases.BaseModel):
#
# Path: models/memberships.py
# class Memberships(bases.BaseModel):
#
# Path: models/profiles.py
# class Profiles(bases.BaseModel):
# def get_by_email(cls, email):
# def validate_email(self, key, address): # pylint: disable=unused-argument,no-self-use
, which may contain function names, class names, or code. Output only the next line. | inst = memberships.Memberships() |
Given the following code snippet before the placeholder: <|code_start|>warnings.simplefilter("error") # Make All warnings errors while testing.
def test_required_relations(dbsession):
"""
Memberships must have a Profile and a Group.
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
inst = memberships.Memberships()
inst.save()
with pytest.raises(IntegrityError):
dbsession.commit()
def test_required_profile_missing(dbsession):
"""
Membership Profile not existing is an error.
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
group = groups.Groups(name='61f7d724-d90c-4e2f-85f4-7cab51d68b61')
membership = memberships.Memberships(group=group)
membership.save()
with pytest.raises(IntegrityError):
dbsession.commit()
def test_required_group_missing(dbsession):
"""
Membership Group not existing is an error.
:param sqlalchemy.orm.session.Session dbsession: pytest fixture for database session
"""
<|code_end|>
, predict the next line using imports from the current file:
from builtins import * # pylint: disable=unused-wildcard-import,redefined-builtin,wildcard-import
from sqlalchemy.exc import IntegrityError
from models import groups, memberships, profiles
import sys
import warnings
import pytest
and context including class names, function names, and sometimes code from other files:
# Path: models/groups.py
# class Groups(bases.BaseModel):
#
# Path: models/memberships.py
# class Memberships(bases.BaseModel):
#
# Path: models/profiles.py
# class Profiles(bases.BaseModel):
# def get_by_email(cls, email):
# def validate_email(self, key, address): # pylint: disable=unused-argument,no-self-use
. Output only the next line. | profile = profiles.Profiles(full_name='103ae318 69f4f2471acc', email='73c5@4568.8dd9') |
Given the following code snippet before the placeholder: <|code_start|> """
def __init__(self, objs, fields):
"""
Constructor
Objs must be a list of (class, id) with optionally extra fields
"""
self.objs = objs
self.fields = fields
def brains(self):
"""
Get the raw objects from SeSQL index, aka the "brains", as dictionnaries
"""
for obj in self.objs:
yield dict(zip(self.fields, obj))
def count(self):
"""
Count results
"""
return len(self.objs)
__len__ = count
def iterator(self):
"""
Iterate on self
"""
for obj in self.objs:
try:
yield self.load(obj)
<|code_end|>
, predict the next line using imports from the current file:
import logging
from sesql import config
from sesql.typemap import typemap
and context including class names, function names, and sometimes code from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
. Output only the next line. | except config.orm.not_found: |
Predict the next line for this snippet: <|code_start|> except config.orm.not_found:
log.warning("Object %r does not exist ! Broken index ?" % (obj,))
__iter__ = iterator
def all(self):
"""
Get all the results as a list
"""
return list(self)
def get(self, index):
"""
Get the row at given index
"""
return self.load(self.objs[index])
__getitem__ = get
def __getslice__(self, i, j):
"""
Get a slice
"""
res = [ self.load(obj) for obj in self.objs[i:j] ]
return res
@staticmethod
def load(obj):
"""
Get a given object
"""
objclass, objid = obj[:2]
<|code_end|>
with the help of current file imports:
import logging
from sesql import config
from sesql.typemap import typemap
and context from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
, which may contain function names, class names, or code. Output only the next line. | objclass = typemap.get_class_by_name(objclass) |
Using the snippet: <|code_start|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This monkey patch will enable to use sesql in the Django admin
It's not fully optimal yet, but functionnal
"""
def make_add_filter(original):
def sesql_amdin_query_add_filter(self, filter_expr, *args, **kwargs):
name, value = filter_expr
if not name.startswith("sesql:"):
return original(self, filter_expr, *args, **kwargs)
# Ok, a SeSQL filter ? Hum hum
name = name.split(':', 1)[1]
if "__" in name:
name = name.split('__', 1)[0]
name += "__containswords"
query = longquery.longquery(Q(classname = self.model) &
Q(**{ name: value }))
ids = [ oid for klass, oid in query.objs ]
return original(self, ('id__in', ids), *args, **kwargs)
return sesql_amdin_query_add_filter
<|code_end|>
, determine the next line of code. You have imports:
from django.db.models.sql import Query
from django.db.models import Q
from sesql import config
from sesql import longquery
and context (class names, function names, or code) available:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
. Output only the next line. | if getattr(config, 'ENABLE_SESQL_ADMIN', False): |
Continue the code snippet: <|code_start|>
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
def sql_function(func):
"""
Decorator to execute or print SQL statements
"""
def sql_function_inner(cursor = None, execute = False, verbosity = True,
include_drop = False,
**kwargs):
sql = func(**kwargs)
if not include_drop:
sql = [ row for row in sql if not row.startswith('DROP ') ]
if verbosity:
print
for row in sql:
print row + ";"
print
if execute and cursor:
for row in sql:
cursor.execute(row)
return sql_function_inner
@sql_function
def create_dictionnary():
"""
Create the dictionnary configuration
"""
return [
<|code_end|>
. Use current file imports:
from sesql import config
from sesql.typemap import typemap
and context (classes, functions, or code) from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
. Output only the next line. | "DROP TEXT SEARCH CONFIGURATION IF EXISTS public.%s" % config.TS_CONFIG_NAME, |
Predict the next line for this snippet: <|code_start|> STOPWORDS = %s
)""" % (config.TS_CONFIG_NAME, config.STOPWORDS_FILE),
"""ALTER TEXT SEARCH CONFIGURATION %s
ALTER MAPPING FOR asciiword, asciihword, hword_asciipart WITH %s_dict""" % (config.TS_CONFIG_NAME, config.TS_CONFIG_NAME)
] + getattr(config, "ADDITIONAL_TS_CONFIG", [])
@sql_function
def create_master_table():
"""
Create the master table, that is, the one from which the others
will inherit
"""
schema = "\n ".join([ field.schema() for field in config.FIELDS ])
return [
"DROP TABLE IF EXISTS %s CASCADE" % config.MASTER_TABLE_NAME,
"""CREATE TABLE %s (
%s
PRIMARY KEY (classname, id)
)""" % (config.MASTER_TABLE_NAME, schema)
]
@sql_function
def create_table(table = None):
"""
Create given table
"""
if table is None:
return []
<|code_end|>
with the help of current file imports:
from sesql import config
from sesql.typemap import typemap
and context from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
, which may contain function names, class names, or code. Output only the next line. | condition = typemap.get_class_names_for(table) |
Given snippet: <|code_start|>"""
# Allow "with" with python2.5
from __future__ import with_statement
STEP = 1000
class Command(BaseCommand):
help = "Update some columns of all already indexed objects in SeSQL"
option_list = BaseCommand.option_list + (
make_option('--class',
dest='class',
default='',
help='Limit to given classes (comma-separated)'),
)
@transaction.commit_manually
def update(self, classnames, fields):
"""
Reindex a single class
"""
print "=> Starting reindexing columns %s." % ','.join(fields)
result = longquery(Q(classname__in = classnames))
nb = len(result)
print "=> We got %d objects." % nb
sys.stdout.flush()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
from optparse import make_option
from django.db.models import Q
from django.db import transaction
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from sesql.utils import Timer
from sesql.index import update
from sesql.typemap import typemap
from sesql.longquery import longquery
and context:
# Path: sesql/utils.py
# class Timer(object):
# """
# A timer object to be used with « with » statement
# It as a local and global timer
# """
# def __init__(self):
# self._local = self._global = 0.0
# self._start = time.time()
#
# def start(self):
# self._start = time.time()
#
# def stop(self):
# delta = time.time() - self._start
# self._local += delta
# self._global += delta
#
# def __enter__(self):
# self.start()
#
# def __exit__(self, *args, **kwargs):
# self.stop()
#
# def get_local(self):
# return self._local
#
# def get_global(self):
# return self._global
#
# def reset(self):
# self._local = 0
#
# def peek(self):
# res = self.get_local()
# self.reset()
# return res
#
# Path: sesql/index.py
# @index_log_wrap
# @config.orm.transactional
# def update(cursor, obj, message, fields):
# """
# Update only specific fields of given object
# """
# log.info("%s : entering for fields %s" % (message, ','.join(fields)))
#
# table_name = typemap.typemap.get_table_for(obj.__class__)
# if not table_name:
# log.info("%s : not table, skipping" % message)
# return
#
# fields = [ fieldmap.fieldmap.get_field(field) for field in fields ]
# keys, placeholders, results = get_values(obj, fields)
#
# pattern = [ '%s=%s' % (k,p) for k,p in zip(keys, placeholders) ]
#
# if not pattern:
# log.info("%s : nothing to update, skipping" % message)
# return
#
# pattern = ",".join(pattern)
#
# query = "UPDATE %s SET %s WHERE classname=%%s AND id=%%s" % (table_name,
# pattern)
# cursor.execute(query, results + [ obj.__class__.__name__, obj.id ])
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/longquery.py
# @utils.log_time
# def longquery(query, order=None, limit=None, queryid=None, historize=False,
# fields = ()):
# """
# Perform a long query and return a lazy Django result set
#
# If queryid is provided, then the query will be loaded from the
# cache if possible, and redone else. Be careful, if the query is
# redone, results may have changed.
#
# If fields are specified, will fetch those fields from the index
# """
# if queryid:
# _query_cache.lock.acquire()
# try:
# results = _query_cache[queryid]
# if results:
# return results
# log.warning('Cached query id %r expired, re-querying.' % queryid)
# finally:
# _query_cache.lock.release()
#
# query = SeSQLQuery(query, order, fields)
# results = query.longquery(limit)
#
# _query_cache.lock.acquire()
# try:
# # Generate a new query id, ensuring it's unique
# if not queryid:
# while True:
# letters = string.ascii_letters + string.digits
# queryid = ''.join([ random.choice(letters) for i in range(32) ])
# if queryid not in _query_cache:
# break
# _query_cache[queryid] = results
# results.queryid = queryid
# finally:
# _query_cache.lock.release()
#
# if historize: # suggest feature hook
# results.historize(query)
#
# return results
which might include code, classes, or functions. Output only the next line. | full_tmr = Timer() |
Next line prediction: <|code_start|># but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This is a variation over the reindex command, which will only reindex
selected columns. It'll be faster when you've a lot of data and only a
few columns changed.
"""
# Allow "with" with python2.5
from __future__ import with_statement
STEP = 1000
class Command(BaseCommand):
help = "Update some columns of all already indexed objects in SeSQL"
option_list = BaseCommand.option_list + (
make_option('--class',
dest='class',
default='',
help='Limit to given classes (comma-separated)'),
)
@transaction.commit_manually
<|code_end|>
. Use current file imports:
(import sys
from optparse import make_option
from django.db.models import Q
from django.db import transaction
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from sesql.utils import Timer
from sesql.index import update
from sesql.typemap import typemap
from sesql.longquery import longquery)
and context including class names, function names, or small code snippets from other files:
# Path: sesql/utils.py
# class Timer(object):
# """
# A timer object to be used with « with » statement
# It as a local and global timer
# """
# def __init__(self):
# self._local = self._global = 0.0
# self._start = time.time()
#
# def start(self):
# self._start = time.time()
#
# def stop(self):
# delta = time.time() - self._start
# self._local += delta
# self._global += delta
#
# def __enter__(self):
# self.start()
#
# def __exit__(self, *args, **kwargs):
# self.stop()
#
# def get_local(self):
# return self._local
#
# def get_global(self):
# return self._global
#
# def reset(self):
# self._local = 0
#
# def peek(self):
# res = self.get_local()
# self.reset()
# return res
#
# Path: sesql/index.py
# @index_log_wrap
# @config.orm.transactional
# def update(cursor, obj, message, fields):
# """
# Update only specific fields of given object
# """
# log.info("%s : entering for fields %s" % (message, ','.join(fields)))
#
# table_name = typemap.typemap.get_table_for(obj.__class__)
# if not table_name:
# log.info("%s : not table, skipping" % message)
# return
#
# fields = [ fieldmap.fieldmap.get_field(field) for field in fields ]
# keys, placeholders, results = get_values(obj, fields)
#
# pattern = [ '%s=%s' % (k,p) for k,p in zip(keys, placeholders) ]
#
# if not pattern:
# log.info("%s : nothing to update, skipping" % message)
# return
#
# pattern = ",".join(pattern)
#
# query = "UPDATE %s SET %s WHERE classname=%%s AND id=%%s" % (table_name,
# pattern)
# cursor.execute(query, results + [ obj.__class__.__name__, obj.id ])
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/longquery.py
# @utils.log_time
# def longquery(query, order=None, limit=None, queryid=None, historize=False,
# fields = ()):
# """
# Perform a long query and return a lazy Django result set
#
# If queryid is provided, then the query will be loaded from the
# cache if possible, and redone else. Be careful, if the query is
# redone, results may have changed.
#
# If fields are specified, will fetch those fields from the index
# """
# if queryid:
# _query_cache.lock.acquire()
# try:
# results = _query_cache[queryid]
# if results:
# return results
# log.warning('Cached query id %r expired, re-querying.' % queryid)
# finally:
# _query_cache.lock.release()
#
# query = SeSQLQuery(query, order, fields)
# results = query.longquery(limit)
#
# _query_cache.lock.acquire()
# try:
# # Generate a new query id, ensuring it's unique
# if not queryid:
# while True:
# letters = string.ascii_letters + string.digits
# queryid = ''.join([ random.choice(letters) for i in range(32) ])
# if queryid not in _query_cache:
# break
# _query_cache[queryid] = results
# results.queryid = queryid
# finally:
# _query_cache.lock.release()
#
# if historize: # suggest feature hook
# results.historize(query)
#
# return results
. Output only the next line. | def update(self, classnames, fields): |
Next line prediction: <|code_start|> obj = None
broken += 1
log.warning("Object %r does not exist ! Broken index ?" % (obj,))
except:
transaction.rollback()
raise
with index_tmr:
try:
update(obj, fields)
except:
transaction.rollback()
raise
if i % STEP == STEP - 1:
disp_stats()
del obj
disp_stats()
def handle(self, *fields, **options):
"""
Handle the command
"""
if not fields:
print "Syntax : manage.py sesqlupdate [--class <classes>] <columns>"
print " - classes is a comma-separated list of object classes"
print " - columns is a (space-seperated) list of columns to reindex"
if not options['class']:
<|code_end|>
. Use current file imports:
(import sys
from optparse import make_option
from django.db.models import Q
from django.db import transaction
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from sesql.utils import Timer
from sesql.index import update
from sesql.typemap import typemap
from sesql.longquery import longquery)
and context including class names, function names, or small code snippets from other files:
# Path: sesql/utils.py
# class Timer(object):
# """
# A timer object to be used with « with » statement
# It as a local and global timer
# """
# def __init__(self):
# self._local = self._global = 0.0
# self._start = time.time()
#
# def start(self):
# self._start = time.time()
#
# def stop(self):
# delta = time.time() - self._start
# self._local += delta
# self._global += delta
#
# def __enter__(self):
# self.start()
#
# def __exit__(self, *args, **kwargs):
# self.stop()
#
# def get_local(self):
# return self._local
#
# def get_global(self):
# return self._global
#
# def reset(self):
# self._local = 0
#
# def peek(self):
# res = self.get_local()
# self.reset()
# return res
#
# Path: sesql/index.py
# @index_log_wrap
# @config.orm.transactional
# def update(cursor, obj, message, fields):
# """
# Update only specific fields of given object
# """
# log.info("%s : entering for fields %s" % (message, ','.join(fields)))
#
# table_name = typemap.typemap.get_table_for(obj.__class__)
# if not table_name:
# log.info("%s : not table, skipping" % message)
# return
#
# fields = [ fieldmap.fieldmap.get_field(field) for field in fields ]
# keys, placeholders, results = get_values(obj, fields)
#
# pattern = [ '%s=%s' % (k,p) for k,p in zip(keys, placeholders) ]
#
# if not pattern:
# log.info("%s : nothing to update, skipping" % message)
# return
#
# pattern = ",".join(pattern)
#
# query = "UPDATE %s SET %s WHERE classname=%%s AND id=%%s" % (table_name,
# pattern)
# cursor.execute(query, results + [ obj.__class__.__name__, obj.id ])
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/longquery.py
# @utils.log_time
# def longquery(query, order=None, limit=None, queryid=None, historize=False,
# fields = ()):
# """
# Perform a long query and return a lazy Django result set
#
# If queryid is provided, then the query will be loaded from the
# cache if possible, and redone else. Be careful, if the query is
# redone, results may have changed.
#
# If fields are specified, will fetch those fields from the index
# """
# if queryid:
# _query_cache.lock.acquire()
# try:
# results = _query_cache[queryid]
# if results:
# return results
# log.warning('Cached query id %r expired, re-querying.' % queryid)
# finally:
# _query_cache.lock.release()
#
# query = SeSQLQuery(query, order, fields)
# results = query.longquery(limit)
#
# _query_cache.lock.acquire()
# try:
# # Generate a new query id, ensuring it's unique
# if not queryid:
# while True:
# letters = string.ascii_letters + string.digits
# queryid = ''.join([ random.choice(letters) for i in range(32) ])
# if queryid not in _query_cache:
# break
# _query_cache[queryid] = results
# results.queryid = queryid
# finally:
# _query_cache.lock.release()
#
# if historize: # suggest feature hook
# results.historize(query)
#
# return results
. Output only the next line. | classes = typemap.all_class_names() |
Using the snippet: <|code_start|># along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This is a variation over the reindex command, which will only reindex
selected columns. It'll be faster when you've a lot of data and only a
few columns changed.
"""
# Allow "with" with python2.5
from __future__ import with_statement
STEP = 1000
class Command(BaseCommand):
help = "Update some columns of all already indexed objects in SeSQL"
option_list = BaseCommand.option_list + (
make_option('--class',
dest='class',
default='',
help='Limit to given classes (comma-separated)'),
)
@transaction.commit_manually
def update(self, classnames, fields):
"""
Reindex a single class
"""
print "=> Starting reindexing columns %s." % ','.join(fields)
<|code_end|>
, determine the next line of code. You have imports:
import sys
from optparse import make_option
from django.db.models import Q
from django.db import transaction
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from sesql.utils import Timer
from sesql.index import update
from sesql.typemap import typemap
from sesql.longquery import longquery
and context (class names, function names, or code) available:
# Path: sesql/utils.py
# class Timer(object):
# """
# A timer object to be used with « with » statement
# It as a local and global timer
# """
# def __init__(self):
# self._local = self._global = 0.0
# self._start = time.time()
#
# def start(self):
# self._start = time.time()
#
# def stop(self):
# delta = time.time() - self._start
# self._local += delta
# self._global += delta
#
# def __enter__(self):
# self.start()
#
# def __exit__(self, *args, **kwargs):
# self.stop()
#
# def get_local(self):
# return self._local
#
# def get_global(self):
# return self._global
#
# def reset(self):
# self._local = 0
#
# def peek(self):
# res = self.get_local()
# self.reset()
# return res
#
# Path: sesql/index.py
# @index_log_wrap
# @config.orm.transactional
# def update(cursor, obj, message, fields):
# """
# Update only specific fields of given object
# """
# log.info("%s : entering for fields %s" % (message, ','.join(fields)))
#
# table_name = typemap.typemap.get_table_for(obj.__class__)
# if not table_name:
# log.info("%s : not table, skipping" % message)
# return
#
# fields = [ fieldmap.fieldmap.get_field(field) for field in fields ]
# keys, placeholders, results = get_values(obj, fields)
#
# pattern = [ '%s=%s' % (k,p) for k,p in zip(keys, placeholders) ]
#
# if not pattern:
# log.info("%s : nothing to update, skipping" % message)
# return
#
# pattern = ",".join(pattern)
#
# query = "UPDATE %s SET %s WHERE classname=%%s AND id=%%s" % (table_name,
# pattern)
# cursor.execute(query, results + [ obj.__class__.__name__, obj.id ])
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/longquery.py
# @utils.log_time
# def longquery(query, order=None, limit=None, queryid=None, historize=False,
# fields = ()):
# """
# Perform a long query and return a lazy Django result set
#
# If queryid is provided, then the query will be loaded from the
# cache if possible, and redone else. Be careful, if the query is
# redone, results may have changed.
#
# If fields are specified, will fetch those fields from the index
# """
# if queryid:
# _query_cache.lock.acquire()
# try:
# results = _query_cache[queryid]
# if results:
# return results
# log.warning('Cached query id %r expired, re-querying.' % queryid)
# finally:
# _query_cache.lock.release()
#
# query = SeSQLQuery(query, order, fields)
# results = query.longquery(limit)
#
# _query_cache.lock.acquire()
# try:
# # Generate a new query id, ensuring it's unique
# if not queryid:
# while True:
# letters = string.ascii_letters + string.digits
# queryid = ''.join([ random.choice(letters) for i in range(32) ])
# if queryid not in _query_cache:
# break
# _query_cache[queryid] = results
# results.queryid = queryid
# finally:
# _query_cache.lock.release()
#
# if historize: # suggest feature hook
# results.historize(query)
#
# return results
. Output only the next line. | result = longquery(Q(classname__in = classnames)) |
Next line prediction: <|code_start|># along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
# Maximal number of words to lemmatize at once
MAX_WORDS = 1000
# Use GenericCache for now, but will probably be moved to memcached later
_word_cache = GenericCache(maxsize = 50000, expiry = 86400)
def lemmatize_for(words, dictionnary):
"""
Lemmatize a word with given dictionnary
"""
if len(words) > MAX_WORDS:
return lemmatize_for(words[:MAX_WORDS], dictionnary) + lemmatize_for(words[MAX_WORDS:], dictionnary)
values = {}
remaining = []
for word in words:
value = _word_cache[(word, dictionnary)]
if value is not None:
values[word] = value
else:
remaining.append(word)
if remaining:
pattern = "plainto_tsquery('%s', %%s)" % dictionnary
patterns = [ pattern for word in remaining ]
<|code_end|>
. Use current file imports:
(from sesql import config
from sesql.fieldmap import fieldmap
from GenericCache import GenericCache)
and context including class names, function names, or small code snippets from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
. Output only the next line. | cursor = config.orm.cursor() |
Predict the next line after this snippet: <|code_start|>
for word in words:
value = _word_cache[(word, dictionnary)]
if value is not None:
values[word] = value
else:
remaining.append(word)
if remaining:
pattern = "plainto_tsquery('%s', %%s)" % dictionnary
patterns = [ pattern for word in remaining ]
cursor = config.orm.cursor()
cursor.execute('SELECT %s;' % (','.join(patterns)), remaining)
row = cursor.fetchone()
for word, value in zip(remaining, row):
value = value.strip("'")
values[word] = value
_word_cache[(word, dictionnary)] = value
return [ values[word] for word in words ]
def lemmatize(words, index = None):
"""
Give a lemmatized version of those words
Use the configuration for the given index, or the default one if
index is None
"""
if index is None:
<|code_end|>
using the current file's imports:
from sesql import config
from sesql.fieldmap import fieldmap
from GenericCache import GenericCache
and any relevant context from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
. Output only the next line. | index = fieldmap.primary |
Predict the next line after this snippet: <|code_start|>
def get_dictionnary(self):
if self._dictionnary is None:
self._dictionnary = "public.%s" % config.TS_CONFIG_NAME
return self._dictionnary
dictionnary = property(get_dictionnary, set_dictionnary)
def marshall(self, value, extra_letters = "", use_cleanup = True):
"""
Strip accents, escape html_entities, handle unicode, ...
"""
if not value:
return u""
if isinstance(value, unicode):
value = value.encode(config.CHARSET)
if use_cleanup:
cleanup = self.cleanup or getattr(config, 'ADDITIONAL_CLEANUP_FUNCTION', None)
if cleanup:
value = cleanup(value)
if not isinstance(value, unicode):
try:
value = value.decode(config.CHARSET)
except UnicodeDecodeError:
raise ValueError, "Can't parse %s in %s" % (value, config.CHARSET)
# Remove ligatures (oe, ae, ...)
<|code_end|>
using the current file's imports:
import logging
import unicodedata
from sesql.utils import strip_ligatures
from sesql.sources import guess_source, ClassSource
from sesql import config
from sesql import config
from sesql import config
and any relevant context from other files:
# Path: sesql/utils.py
# def strip_ligatures(value):
# """
# Convert the ligatures (œ, æ, ß, ...) into their compound value (oe, ae, ss)
# Take **unicode** as input and output, not string
# """
# charmap = {
# u'\N{Latin capital letter AE}': 'AE',
# u'\N{Latin small letter ae}': 'ae',
# u'\N{Latin capital letter Eth}': 'Dh',
# u'\N{Latin small letter eth}': 'dh',
# u'\N{Latin capital letter O with stroke}': 'Oe',
# u'\N{Latin small letter o with stroke}': 'oe',
# u'\N{Latin capital letter Thorn}': 'Th',
# u'\N{Latin small letter thorn}': 'th',
# u'\N{Latin small letter sharp s}': 'ss',
# u'\N{Latin capital letter D with stroke}': 'Dj',
# u'\N{Latin small letter d with stroke}': 'dj',
# u'\N{Latin capital letter H with stroke}': 'H',
# u'\N{Latin small letter h with stroke}': 'h',
# u'\N{Latin small letter dotless i}': 'i',
# u'\N{Latin small letter kra}': 'q',
# u'\N{Latin capital letter L with stroke}': 'L',
# u'\N{Latin small letter l with stroke}': 'l',
# u'\N{Latin capital letter Eng}': 'Ng',
# u'\N{Latin small letter eng}': 'ng',
# u'\N{Latin capital ligature OE}': 'Oe',
# u'\N{Latin small ligature oe}': 'oe',
# u'\N{Latin capital letter T with stroke}': 'Th',
# u'\N{Latin small letter t with stroke}': 'th',
# }
#
# value = ''.join([ charmap.get(c,c) for c in value ])
# return value
#
# Path: sesql/sources.py
# def guess_source(what):
# """
# Guess what is this source
#
# - AbstractSource will stay as it is
# - a list or tuple will become TextAggregate
# - a name with () in it will be a MethodCaller
# - a name with . in it will be a SubField
# - everything else will be a SimpleField
# """
# if isinstance(what, AbstractSource):
# return what
# if isinstance(what, (list, tuple)):
# return TextAggregate(*what)
# if isinstance(what, dict):
# return WeightedAggregate(what)
# if not isinstance(what, str):
# raise ValueError, "what is neither AbstractSource, list, dict nor string"
# if "." in what:
# what = what.strip(".")
# return SubField(*(what.split(".", 1)))
# if what.endswith('()'):
# return MethodCaller(what[:-2])
# return SimpleField(what)
#
# class ClassSource(AbstractSource):
# """
# Get the class of the object
# """
# def __init__(self, dereference_proxy = False):
# """
# Constructor
# If dereference_proxy is set to True, proxy models will be
# considered as their base classe - this is only for Django ORM
# """
# self.dereference_proxy = dereference_proxy
#
# def load_data(self, obj):
# """
# Load data from a Django object
# """
# klass = obj.__class__
# if self.dereference_proxy:
# if hasattr(obj, '_meta'):
# if getattr(obj._meta, 'proxy', False):
# klass = getattr(obj._meta, 'proxy_for_model', klass)
# return klass.__name__
. Output only the next line. | value = strip_ligatures(value) |
Given the following code snippet before the placeholder: <|code_start|>Contain the field types for SeSQL
We cannot reuse Django types because what we need is too specific
"""
log = logging.getLogger('sesql')
class Field(object):
"""
This represent an abstract field
"""
primary = False
slqtype = None
indexfunction = ""
placeholder = "%s"
def __init__(self, name, source = None, sql_default = None):
"""
Constructor
name = name for the field in our database
sources = list of names used in input
"""
self.name = name
self.index_column = name
self.sql_default = sql_default
self.data_column = name
if not source:
source = name
<|code_end|>
, predict the next line using imports from the current file:
import logging
import unicodedata
from sesql.utils import strip_ligatures
from sesql.sources import guess_source, ClassSource
from sesql import config
from sesql import config
from sesql import config
and context including class names, function names, and sometimes code from other files:
# Path: sesql/utils.py
# def strip_ligatures(value):
# """
# Convert the ligatures (œ, æ, ß, ...) into their compound value (oe, ae, ss)
# Take **unicode** as input and output, not string
# """
# charmap = {
# u'\N{Latin capital letter AE}': 'AE',
# u'\N{Latin small letter ae}': 'ae',
# u'\N{Latin capital letter Eth}': 'Dh',
# u'\N{Latin small letter eth}': 'dh',
# u'\N{Latin capital letter O with stroke}': 'Oe',
# u'\N{Latin small letter o with stroke}': 'oe',
# u'\N{Latin capital letter Thorn}': 'Th',
# u'\N{Latin small letter thorn}': 'th',
# u'\N{Latin small letter sharp s}': 'ss',
# u'\N{Latin capital letter D with stroke}': 'Dj',
# u'\N{Latin small letter d with stroke}': 'dj',
# u'\N{Latin capital letter H with stroke}': 'H',
# u'\N{Latin small letter h with stroke}': 'h',
# u'\N{Latin small letter dotless i}': 'i',
# u'\N{Latin small letter kra}': 'q',
# u'\N{Latin capital letter L with stroke}': 'L',
# u'\N{Latin small letter l with stroke}': 'l',
# u'\N{Latin capital letter Eng}': 'Ng',
# u'\N{Latin small letter eng}': 'ng',
# u'\N{Latin capital ligature OE}': 'Oe',
# u'\N{Latin small ligature oe}': 'oe',
# u'\N{Latin capital letter T with stroke}': 'Th',
# u'\N{Latin small letter t with stroke}': 'th',
# }
#
# value = ''.join([ charmap.get(c,c) for c in value ])
# return value
#
# Path: sesql/sources.py
# def guess_source(what):
# """
# Guess what is this source
#
# - AbstractSource will stay as it is
# - a list or tuple will become TextAggregate
# - a name with () in it will be a MethodCaller
# - a name with . in it will be a SubField
# - everything else will be a SimpleField
# """
# if isinstance(what, AbstractSource):
# return what
# if isinstance(what, (list, tuple)):
# return TextAggregate(*what)
# if isinstance(what, dict):
# return WeightedAggregate(what)
# if not isinstance(what, str):
# raise ValueError, "what is neither AbstractSource, list, dict nor string"
# if "." in what:
# what = what.strip(".")
# return SubField(*(what.split(".", 1)))
# if what.endswith('()'):
# return MethodCaller(what[:-2])
# return SimpleField(what)
#
# class ClassSource(AbstractSource):
# """
# Get the class of the object
# """
# def __init__(self, dereference_proxy = False):
# """
# Constructor
# If dereference_proxy is set to True, proxy models will be
# considered as their base classe - this is only for Django ORM
# """
# self.dereference_proxy = dereference_proxy
#
# def load_data(self, obj):
# """
# Load data from a Django object
# """
# klass = obj.__class__
# if self.dereference_proxy:
# if hasattr(obj, '_meta'):
# if getattr(obj._meta, 'proxy', False):
# klass = getattr(obj._meta, 'proxy_for_model', klass)
# return klass.__name__
. Output only the next line. | self.source = guess_source(source) |
Given the following code snippet before the placeholder: <|code_start|>class LongIntField(IntField):
"""
This is a bigint field
"""
sqltype = "bigint"
class StrField(Field):
"""
This is a simple string field, with specified length
"""
def __init__(self, name, source = None, size = 255):
"""
Constructor
Takes one extra paramater: the field size
"""
super(StrField, self).__init__(name, source)
self.size = size
self.sqltype = "varchar(%d)" % size
class ClassField(Field):
"""
This is a field storing the class of the object
"""
sqltype = "varchar(255)"
def __init__(self, name, dereference_proxy = False):
"""
Constructor
"""
super(ClassField, self).__init__(name, None)
<|code_end|>
, predict the next line using imports from the current file:
import logging
import unicodedata
from sesql.utils import strip_ligatures
from sesql.sources import guess_source, ClassSource
from sesql import config
from sesql import config
from sesql import config
and context including class names, function names, and sometimes code from other files:
# Path: sesql/utils.py
# def strip_ligatures(value):
# """
# Convert the ligatures (œ, æ, ß, ...) into their compound value (oe, ae, ss)
# Take **unicode** as input and output, not string
# """
# charmap = {
# u'\N{Latin capital letter AE}': 'AE',
# u'\N{Latin small letter ae}': 'ae',
# u'\N{Latin capital letter Eth}': 'Dh',
# u'\N{Latin small letter eth}': 'dh',
# u'\N{Latin capital letter O with stroke}': 'Oe',
# u'\N{Latin small letter o with stroke}': 'oe',
# u'\N{Latin capital letter Thorn}': 'Th',
# u'\N{Latin small letter thorn}': 'th',
# u'\N{Latin small letter sharp s}': 'ss',
# u'\N{Latin capital letter D with stroke}': 'Dj',
# u'\N{Latin small letter d with stroke}': 'dj',
# u'\N{Latin capital letter H with stroke}': 'H',
# u'\N{Latin small letter h with stroke}': 'h',
# u'\N{Latin small letter dotless i}': 'i',
# u'\N{Latin small letter kra}': 'q',
# u'\N{Latin capital letter L with stroke}': 'L',
# u'\N{Latin small letter l with stroke}': 'l',
# u'\N{Latin capital letter Eng}': 'Ng',
# u'\N{Latin small letter eng}': 'ng',
# u'\N{Latin capital ligature OE}': 'Oe',
# u'\N{Latin small ligature oe}': 'oe',
# u'\N{Latin capital letter T with stroke}': 'Th',
# u'\N{Latin small letter t with stroke}': 'th',
# }
#
# value = ''.join([ charmap.get(c,c) for c in value ])
# return value
#
# Path: sesql/sources.py
# def guess_source(what):
# """
# Guess what is this source
#
# - AbstractSource will stay as it is
# - a list or tuple will become TextAggregate
# - a name with () in it will be a MethodCaller
# - a name with . in it will be a SubField
# - everything else will be a SimpleField
# """
# if isinstance(what, AbstractSource):
# return what
# if isinstance(what, (list, tuple)):
# return TextAggregate(*what)
# if isinstance(what, dict):
# return WeightedAggregate(what)
# if not isinstance(what, str):
# raise ValueError, "what is neither AbstractSource, list, dict nor string"
# if "." in what:
# what = what.strip(".")
# return SubField(*(what.split(".", 1)))
# if what.endswith('()'):
# return MethodCaller(what[:-2])
# return SimpleField(what)
#
# class ClassSource(AbstractSource):
# """
# Get the class of the object
# """
# def __init__(self, dereference_proxy = False):
# """
# Constructor
# If dereference_proxy is set to True, proxy models will be
# considered as their base classe - this is only for Django ORM
# """
# self.dereference_proxy = dereference_proxy
#
# def load_data(self, obj):
# """
# Load data from a Django object
# """
# klass = obj.__class__
# if self.dereference_proxy:
# if hasattr(obj, '_meta'):
# if getattr(obj._meta, 'proxy', False):
# klass = getattr(obj._meta, 'proxy_for_model', klass)
# return klass.__name__
. Output only the next line. | self.source = ClassSource(dereference_proxy = dereference_proxy) |
Next line prediction: <|code_start|># (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# ashort with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This will preform a SeSQL short query and displays the results
It's mostly useful to check your SeSQL installation
"""
class Command(BaseCommand):
help = "Perform a SeSQL short query"
def handle(self, *apps, **options):
"""
Handle the command
"""
if not 1 <= len(apps) <= 2:
print "Syntax : sesqlshortquery <query> [<order>]"
sys.exit(1)
query = eval(apps[0])
order = len(apps) == 2 and eval(apps[1]) or None
<|code_end|>
. Use current file imports:
(from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.db import connection, transaction
from django.db.models import Q
from sesql.shortquery import shortquery
import settings
import sys)
and context including class names, function names, or small code snippets from other files:
# Path: sesql/shortquery.py
# @log_time
# def shortquery(query, order=None, limit=50, historize=False, fields = ()):
# """
# Perform a short query and return a lazy Django result set
#
# If fields are specified, will fetch those fields from the index
# """
# query = SeSQLQuery(query, order, fields)
# results = query.shortquery(limit)
#
# if historize: #suggest feature hook
# results.historize(query)
#
# return results
. Output only the next line. | print shortquery(query, order).objs |
Here is a snippet: <|code_start|># (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This will preform a SeSQL long query and displays the results
It's mostly useful to check your SeSQL installation
"""
class Command(BaseCommand):
help = "Perform a SeSQL long query"
def handle(self, *apps, **options):
"""
Handle the command
"""
if not 1 <= len(apps) <= 2:
print "Syntax : sesqllongquery <query> [<order>]"
sys.exit(1)
query = eval(apps[0])
order = len(apps) == 2 and eval(apps[1]) or None
<|code_end|>
. Write the next line using the current file imports:
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.db import connection, transaction
from django.db.models import Q
from sesql.longquery import longquery
import settings
import sys
and context from other files:
# Path: sesql/longquery.py
# @utils.log_time
# def longquery(query, order=None, limit=None, queryid=None, historize=False,
# fields = ()):
# """
# Perform a long query and return a lazy Django result set
#
# If queryid is provided, then the query will be loaded from the
# cache if possible, and redone else. Be careful, if the query is
# redone, results may have changed.
#
# If fields are specified, will fetch those fields from the index
# """
# if queryid:
# _query_cache.lock.acquire()
# try:
# results = _query_cache[queryid]
# if results:
# return results
# log.warning('Cached query id %r expired, re-querying.' % queryid)
# finally:
# _query_cache.lock.release()
#
# query = SeSQLQuery(query, order, fields)
# results = query.longquery(limit)
#
# _query_cache.lock.acquire()
# try:
# # Generate a new query id, ensuring it's unique
# if not queryid:
# while True:
# letters = string.ascii_letters + string.digits
# queryid = ''.join([ random.choice(letters) for i in range(32) ])
# if queryid not in _query_cache:
# break
# _query_cache[queryid] = results
# results.queryid = queryid
# finally:
# _query_cache.lock.release()
#
# if historize: # suggest feature hook
# results.historize(query)
#
# return results
, which may include functions, classes, or code. Output only the next line. | print longquery(query, order).objs |
Next line prediction: <|code_start|># it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This should be runned in a cron to process search histories and compute stats
"""
class Command(BaseCommand):
help = """Build SearchQuery index"""
option_list = BaseCommand.option_list + (
make_option('-e','--erode',
action='store_true',
dest='erode',
help = 'tell if we must erode result or not'),
make_option('-f','--filter',
dest ='filter',
type='int',
<|code_end|>
. Use current file imports:
(from optparse import make_option
from django.core.management.base import BaseCommand
from sesql import config
from sesql.lemmatize import lemmatize
from sesql.models import SearchHit
from sesql.models import SearchQuery
from sesql.models import SearchHitHistoric
from sesql.suggest import phonex)
and context including class names, function names, or small code snippets from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/lemmatize.py
# def lemmatize(words, index = None):
# """
# Give a lemmatized version of those words
#
# Use the configuration for the given index, or the default one if
# index is None
# """
# if index is None:
# index = fieldmap.primary
#
# if index is None:
# raise ValueError, "Not index given and no primary one"
#
# words = [ index.marshall(word) for word in words ]
#
# index = fieldmap.get_field(index)
# return lemmatize_for(words, index.dictionnary)
#
# Path: sesql/models.py
# class SearchHit(models.Model):
# """Used to store queries made to the search engine"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True)
#
# Path: sesql/models.py
# class SearchQuery(models.Model):
# """A table containing statistics and scores about search queries"""
# query = models.CharField(max_length=500)
# phonex = models.FloatField()
# clean_query = models.CharField(max_length=500)
# clean_phonex = models.FloatField()
#
# nb_results = models.PositiveIntegerField()
#
# nb_recent_search = models.PositiveIntegerField()
# nb_total_search = models.PositiveIntegerField()
# pondered_search_nb = models.FloatField()
#
# weight = models.FloatField()
#
# Path: sesql/models.py
# class SearchHitHistoric(models.Model):
# """Same as SearchHit used as an archive"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True, db_index=True)
#
# Path: sesql/suggest.py
# def levenshtein(a, b):
. Output only the next line. | default=config.HISTORY_DEFAULT_FILTER, |
Using the snippet: <|code_start|>
for hit in last_hits:
query = hit.query
# blacklist
if query in config.HISTORY_BLACKLIST:
continue
if hit.nb_results < filter_nb:
SearchHitHistoric(query=hit.query,
nb_results=hit.nb_results,
date=hit.date).save()
hit.delete()
continue
# manual get_or_create
try:
search_query = SearchQuery.objects.get(query=query)
created = False
except SearchQuery.DoesNotExist:
search_query = SearchQuery(query=query)
created = True
# if it's a new one, initialize it
if created:
search_query.phonex = phonex(query)
# clean the query, the '_' char cause bugy clean_query
query = query.replace('_', '')
<|code_end|>
, determine the next line of code. You have imports:
from optparse import make_option
from django.core.management.base import BaseCommand
from sesql import config
from sesql.lemmatize import lemmatize
from sesql.models import SearchHit
from sesql.models import SearchQuery
from sesql.models import SearchHitHistoric
from sesql.suggest import phonex
and context (class names, function names, or code) available:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/lemmatize.py
# def lemmatize(words, index = None):
# """
# Give a lemmatized version of those words
#
# Use the configuration for the given index, or the default one if
# index is None
# """
# if index is None:
# index = fieldmap.primary
#
# if index is None:
# raise ValueError, "Not index given and no primary one"
#
# words = [ index.marshall(word) for word in words ]
#
# index = fieldmap.get_field(index)
# return lemmatize_for(words, index.dictionnary)
#
# Path: sesql/models.py
# class SearchHit(models.Model):
# """Used to store queries made to the search engine"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True)
#
# Path: sesql/models.py
# class SearchQuery(models.Model):
# """A table containing statistics and scores about search queries"""
# query = models.CharField(max_length=500)
# phonex = models.FloatField()
# clean_query = models.CharField(max_length=500)
# clean_phonex = models.FloatField()
#
# nb_results = models.PositiveIntegerField()
#
# nb_recent_search = models.PositiveIntegerField()
# nb_total_search = models.PositiveIntegerField()
# pondered_search_nb = models.FloatField()
#
# weight = models.FloatField()
#
# Path: sesql/models.py
# class SearchHitHistoric(models.Model):
# """Same as SearchHit used as an archive"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True, db_index=True)
#
# Path: sesql/suggest.py
# def levenshtein(a, b):
. Output only the next line. | lems = lemmatize(query.split()) |
Using the snippet: <|code_start|> help = """Build SearchQuery index"""
option_list = BaseCommand.option_list + (
make_option('-e','--erode',
action='store_true',
dest='erode',
help = 'tell if we must erode result or not'),
make_option('-f','--filter',
dest ='filter',
type='int',
default=config.HISTORY_DEFAULT_FILTER,
help = 'how many time a search must occur to be treated'))
def handle(self, *apps, **options):
self.process_hits(options['filter'])
if options['erode']:
self.erode()
def erode(self):
for search_query in SearchQuery.objects.all():
search_query.pondered_search_nb = (config.HISTORY_ALPHA
* search_query.pondered_search_nb
+ (1-config.HISTORY_ALPHA)
* search_query.nb_recent_search)
search_query.nb_recent_search = 0
search_query.save()
def process_hits(self, filter_nb):
<|code_end|>
, determine the next line of code. You have imports:
from optparse import make_option
from django.core.management.base import BaseCommand
from sesql import config
from sesql.lemmatize import lemmatize
from sesql.models import SearchHit
from sesql.models import SearchQuery
from sesql.models import SearchHitHistoric
from sesql.suggest import phonex
and context (class names, function names, or code) available:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/lemmatize.py
# def lemmatize(words, index = None):
# """
# Give a lemmatized version of those words
#
# Use the configuration for the given index, or the default one if
# index is None
# """
# if index is None:
# index = fieldmap.primary
#
# if index is None:
# raise ValueError, "Not index given and no primary one"
#
# words = [ index.marshall(word) for word in words ]
#
# index = fieldmap.get_field(index)
# return lemmatize_for(words, index.dictionnary)
#
# Path: sesql/models.py
# class SearchHit(models.Model):
# """Used to store queries made to the search engine"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True)
#
# Path: sesql/models.py
# class SearchQuery(models.Model):
# """A table containing statistics and scores about search queries"""
# query = models.CharField(max_length=500)
# phonex = models.FloatField()
# clean_query = models.CharField(max_length=500)
# clean_phonex = models.FloatField()
#
# nb_results = models.PositiveIntegerField()
#
# nb_recent_search = models.PositiveIntegerField()
# nb_total_search = models.PositiveIntegerField()
# pondered_search_nb = models.FloatField()
#
# weight = models.FloatField()
#
# Path: sesql/models.py
# class SearchHitHistoric(models.Model):
# """Same as SearchHit used as an archive"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True, db_index=True)
#
# Path: sesql/suggest.py
# def levenshtein(a, b):
. Output only the next line. | last_hits = SearchHit.objects.all() |
Predict the next line after this snippet: <|code_start|># along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This should be runned in a cron to process search histories and compute stats
"""
class Command(BaseCommand):
help = """Build SearchQuery index"""
option_list = BaseCommand.option_list + (
make_option('-e','--erode',
action='store_true',
dest='erode',
help = 'tell if we must erode result or not'),
make_option('-f','--filter',
dest ='filter',
type='int',
default=config.HISTORY_DEFAULT_FILTER,
help = 'how many time a search must occur to be treated'))
def handle(self, *apps, **options):
self.process_hits(options['filter'])
if options['erode']:
self.erode()
def erode(self):
<|code_end|>
using the current file's imports:
from optparse import make_option
from django.core.management.base import BaseCommand
from sesql import config
from sesql.lemmatize import lemmatize
from sesql.models import SearchHit
from sesql.models import SearchQuery
from sesql.models import SearchHitHistoric
from sesql.suggest import phonex
and any relevant context from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/lemmatize.py
# def lemmatize(words, index = None):
# """
# Give a lemmatized version of those words
#
# Use the configuration for the given index, or the default one if
# index is None
# """
# if index is None:
# index = fieldmap.primary
#
# if index is None:
# raise ValueError, "Not index given and no primary one"
#
# words = [ index.marshall(word) for word in words ]
#
# index = fieldmap.get_field(index)
# return lemmatize_for(words, index.dictionnary)
#
# Path: sesql/models.py
# class SearchHit(models.Model):
# """Used to store queries made to the search engine"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True)
#
# Path: sesql/models.py
# class SearchQuery(models.Model):
# """A table containing statistics and scores about search queries"""
# query = models.CharField(max_length=500)
# phonex = models.FloatField()
# clean_query = models.CharField(max_length=500)
# clean_phonex = models.FloatField()
#
# nb_results = models.PositiveIntegerField()
#
# nb_recent_search = models.PositiveIntegerField()
# nb_total_search = models.PositiveIntegerField()
# pondered_search_nb = models.FloatField()
#
# weight = models.FloatField()
#
# Path: sesql/models.py
# class SearchHitHistoric(models.Model):
# """Same as SearchHit used as an archive"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True, db_index=True)
#
# Path: sesql/suggest.py
# def levenshtein(a, b):
. Output only the next line. | for search_query in SearchQuery.objects.all(): |
Given the code snippet: <|code_start|> help = 'how many time a search must occur to be treated'))
def handle(self, *apps, **options):
self.process_hits(options['filter'])
if options['erode']:
self.erode()
def erode(self):
for search_query in SearchQuery.objects.all():
search_query.pondered_search_nb = (config.HISTORY_ALPHA
* search_query.pondered_search_nb
+ (1-config.HISTORY_ALPHA)
* search_query.nb_recent_search)
search_query.nb_recent_search = 0
search_query.save()
def process_hits(self, filter_nb):
last_hits = SearchHit.objects.all()
processed_hits = []
for hit in last_hits:
query = hit.query
# blacklist
if query in config.HISTORY_BLACKLIST:
continue
if hit.nb_results < filter_nb:
<|code_end|>
, generate the next line using the imports in this file:
from optparse import make_option
from django.core.management.base import BaseCommand
from sesql import config
from sesql.lemmatize import lemmatize
from sesql.models import SearchHit
from sesql.models import SearchQuery
from sesql.models import SearchHitHistoric
from sesql.suggest import phonex
and context (functions, classes, or occasionally code) from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/lemmatize.py
# def lemmatize(words, index = None):
# """
# Give a lemmatized version of those words
#
# Use the configuration for the given index, or the default one if
# index is None
# """
# if index is None:
# index = fieldmap.primary
#
# if index is None:
# raise ValueError, "Not index given and no primary one"
#
# words = [ index.marshall(word) for word in words ]
#
# index = fieldmap.get_field(index)
# return lemmatize_for(words, index.dictionnary)
#
# Path: sesql/models.py
# class SearchHit(models.Model):
# """Used to store queries made to the search engine"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True)
#
# Path: sesql/models.py
# class SearchQuery(models.Model):
# """A table containing statistics and scores about search queries"""
# query = models.CharField(max_length=500)
# phonex = models.FloatField()
# clean_query = models.CharField(max_length=500)
# clean_phonex = models.FloatField()
#
# nb_results = models.PositiveIntegerField()
#
# nb_recent_search = models.PositiveIntegerField()
# nb_total_search = models.PositiveIntegerField()
# pondered_search_nb = models.FloatField()
#
# weight = models.FloatField()
#
# Path: sesql/models.py
# class SearchHitHistoric(models.Model):
# """Same as SearchHit used as an archive"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True, db_index=True)
#
# Path: sesql/suggest.py
# def levenshtein(a, b):
. Output only the next line. | SearchHitHistoric(query=hit.query, |
Predict the next line after this snippet: <|code_start|>
def process_hits(self, filter_nb):
last_hits = SearchHit.objects.all()
processed_hits = []
for hit in last_hits:
query = hit.query
# blacklist
if query in config.HISTORY_BLACKLIST:
continue
if hit.nb_results < filter_nb:
SearchHitHistoric(query=hit.query,
nb_results=hit.nb_results,
date=hit.date).save()
hit.delete()
continue
# manual get_or_create
try:
search_query = SearchQuery.objects.get(query=query)
created = False
except SearchQuery.DoesNotExist:
search_query = SearchQuery(query=query)
created = True
# if it's a new one, initialize it
if created:
<|code_end|>
using the current file's imports:
from optparse import make_option
from django.core.management.base import BaseCommand
from sesql import config
from sesql.lemmatize import lemmatize
from sesql.models import SearchHit
from sesql.models import SearchQuery
from sesql.models import SearchHitHistoric
from sesql.suggest import phonex
and any relevant context from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/lemmatize.py
# def lemmatize(words, index = None):
# """
# Give a lemmatized version of those words
#
# Use the configuration for the given index, or the default one if
# index is None
# """
# if index is None:
# index = fieldmap.primary
#
# if index is None:
# raise ValueError, "Not index given and no primary one"
#
# words = [ index.marshall(word) for word in words ]
#
# index = fieldmap.get_field(index)
# return lemmatize_for(words, index.dictionnary)
#
# Path: sesql/models.py
# class SearchHit(models.Model):
# """Used to store queries made to the search engine"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True)
#
# Path: sesql/models.py
# class SearchQuery(models.Model):
# """A table containing statistics and scores about search queries"""
# query = models.CharField(max_length=500)
# phonex = models.FloatField()
# clean_query = models.CharField(max_length=500)
# clean_phonex = models.FloatField()
#
# nb_results = models.PositiveIntegerField()
#
# nb_recent_search = models.PositiveIntegerField()
# nb_total_search = models.PositiveIntegerField()
# pondered_search_nb = models.FloatField()
#
# weight = models.FloatField()
#
# Path: sesql/models.py
# class SearchHitHistoric(models.Model):
# """Same as SearchHit used as an archive"""
# query = models.CharField(max_length=500)
# nb_results = models.PositiveIntegerField()
# date = models.DateField(auto_now=True, db_index=True)
#
# Path: sesql/suggest.py
# def levenshtein(a, b):
. Output only the next line. | search_query.phonex = phonex(query) |
Based on the snippet: <|code_start|># This file is part of SeSQL.
# SeSQL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This should be necessary since SQL is executed in a syncdb handler, if you need
it, this will print the SQL required to generate SeSQL tables
"""
class Command(BaseCommand):
help = "Dump the commands to create SeSQL tables"
def handle(self, *apps, **options):
"""
Handle the command
"""
print "BEGIN;"
<|code_end|>
, predict the immediate next line with the help of imports:
from django.core.management.base import BaseCommand
from django.core.management import call_command
from sesql import datamodel
from sesql.typemap import typemap
import settings
and context (classes, functions, sometimes code) from other files:
# Path: sesql/datamodel.py
# def sql_function(func):
# def sql_function_inner(cursor = None, execute = False, verbosity = True,
# include_drop = False,
# **kwargs):
# def create_dictionnary():
# def create_master_table():
# def create_table(table = None):
# def create_schedule_table():
# def sync_db(cursor, verbosity = 0):
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
. Output only the next line. | datamodel.create_dictionnary(include_drop = True) |
Given the following code snippet before the placeholder: <|code_start|># it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This should be necessary since SQL is executed in a syncdb handler, if you need
it, this will print the SQL required to generate SeSQL tables
"""
class Command(BaseCommand):
help = "Dump the commands to create SeSQL tables"
def handle(self, *apps, **options):
"""
Handle the command
"""
print "BEGIN;"
datamodel.create_dictionnary(include_drop = True)
datamodel.create_master_table(include_drop = True)
<|code_end|>
, predict the next line using imports from the current file:
from django.core.management.base import BaseCommand
from django.core.management import call_command
from sesql import datamodel
from sesql.typemap import typemap
import settings
and context including class names, function names, and sometimes code from other files:
# Path: sesql/datamodel.py
# def sql_function(func):
# def sql_function_inner(cursor = None, execute = False, verbosity = True,
# include_drop = False,
# **kwargs):
# def create_dictionnary():
# def create_master_table():
# def create_table(table = None):
# def create_schedule_table():
# def sync_db(cursor, verbosity = 0):
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
. Output only the next line. | for table in typemap.all_tables(): |
Using the snippet: <|code_start|># Copyright (c) Pilot Systems and Libération, 2010-2011
# This file is part of SeSQL.
# SeSQL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
Handle the field map
"""
class FieldMap(object):
"""
Handle the classes <=> table mapping
"""
def __init__(self):
"""
Constructor
"""
self.fields_map = {}
<|code_end|>
, determine the next line of code. You have imports:
from sesql import config
and context (class names, function names, or code) available:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
. Output only the next line. | self.fields = config.FIELDS |
Using the snippet: <|code_start|># it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This command will index or reindex a single object into SeSQL
Can be used as a test, or to fix a single problem
"""
class Command(BaseCommand):
help = "Index a single object into SeSQL"
@transaction.commit_manually
def handle(self, *apps, **options):
"""
Handle the command
"""
if len(apps) != 2:
print "Syntax : sesqlindex <classname> <objid>"
sys.exit(1)
<|code_end|>
, determine the next line of code. You have imports:
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.db import connection, transaction
from django.db.models import Q
from sesql.results import SeSQLResultSet
from sesql.index import index
import settings
import sys
and context (class names, function names, or code) available:
# Path: sesql/results.py
# class SeSQLResultSet(object):
# """
# A lazy SeSQL result set
# It mimicks a bit the Django QuerySet, but doesn't work the same way,
# and doesn't provide exactly the same methods
# """
# def __init__(self, objs, fields):
# """
# Constructor
# Objs must be a list of (class, id) with optionally extra fields
# """
# self.objs = objs
# self.fields = fields
#
# def brains(self):
# """
# Get the raw objects from SeSQL index, aka the "brains", as dictionnaries
# """
# for obj in self.objs:
# yield dict(zip(self.fields, obj))
#
# def count(self):
# """
# Count results
# """
# return len(self.objs)
# __len__ = count
#
# def iterator(self):
# """
# Iterate on self
# """
# for obj in self.objs:
# try:
# yield self.load(obj)
# except config.orm.not_found:
# log.warning("Object %r does not exist ! Broken index ?" % (obj,))
# __iter__ = iterator
#
# def all(self):
# """
# Get all the results as a list
# """
# return list(self)
#
# def get(self, index):
# """
# Get the row at given index
# """
# return self.load(self.objs[index])
# __getitem__ = get
#
# def __getslice__(self, i, j):
# """
# Get a slice
# """
# res = [ self.load(obj) for obj in self.objs[i:j] ]
# return res
#
# @staticmethod
# def load(obj):
# """
# Get a given object
# """
# objclass, objid = obj[:2]
# objclass = typemap.get_class_by_name(objclass)
# if not objclass:
# return config.orm.not_found
# entry = "%s:%s" % (objclass.__name__, objid)
# log.debug("Fetching %s" % entry)
# return config.orm.load_object(objclass, objid)
#
# def historize(self, query):
# """save in the database the query for future processing"""
# nb_results = self.count()
# query_text = query.get_fulltext_query()[2][0]
# config.orm.historize(query=query_text, nb_results=nb_results)
#
# Path: sesql/index.py
# @index_log_wrap
# @config.orm.transactional
# def index(cursor, obj, message, noindex = False, index_related = True):
# """
# Index a Django object into SeSQL, do the real work
# """
# log.info("%s : entering" % message)
# try:
# classname, objid = get_sesql_id(obj)
# except (TypeError, AttributeError, ValueError):
# log.info("%r: can't get classname/id, skipping" % obj)
# return
#
# # Handle dependancies
# gro = getattr(obj, "get_related_objects_for_indexation", None)
# if index_related and gro:
# related = gro()
# nbrelated = len(related)
# for item in related:
# schedule_reindex(item)
# else:
# nbrelated = 0
#
# log.info("%s : %d dependancies found" % (message, nbrelated))
#
# table_name = typemap.typemap.get_table_for(classname)
# if not table_name:
# log.info("%s: no table found, skipping" % message)
# return
#
# query = "DELETE FROM %s WHERE id=%%s AND classname=%%s" % table_name
# cursor.execute(query, (objid, classname))
#
# if noindex:
# log.info("%s : running in 'noindex' mode, only deleteing" % message)
# return
#
# if config.SKIP_CONDITION and config.SKIP_CONDITION(obj):
# log.info("%s : not indexing because of skip_condition" % message)
# return
#
# log.info("%s : indexing entry in table %s" % (message, table_name))
#
# keys, placeholders, results = get_values(obj, config.FIELDS)
#
# query = "INSERT INTO %s (%s) VALUES (%s)" % (table_name,
# ",".join(keys),
# ",".join(placeholders))
# cursor.execute(query, results)
. Output only the next line. | obj = SeSQLResultSet.load(apps) |
Based on the snippet: <|code_start|># (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This command will index or reindex a single object into SeSQL
Can be used as a test, or to fix a single problem
"""
class Command(BaseCommand):
help = "Index a single object into SeSQL"
@transaction.commit_manually
def handle(self, *apps, **options):
"""
Handle the command
"""
if len(apps) != 2:
print "Syntax : sesqlindex <classname> <objid>"
sys.exit(1)
obj = SeSQLResultSet.load(apps)
try:
<|code_end|>
, predict the immediate next line with the help of imports:
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.db import connection, transaction
from django.db.models import Q
from sesql.results import SeSQLResultSet
from sesql.index import index
import settings
import sys
and context (classes, functions, sometimes code) from other files:
# Path: sesql/results.py
# class SeSQLResultSet(object):
# """
# A lazy SeSQL result set
# It mimicks a bit the Django QuerySet, but doesn't work the same way,
# and doesn't provide exactly the same methods
# """
# def __init__(self, objs, fields):
# """
# Constructor
# Objs must be a list of (class, id) with optionally extra fields
# """
# self.objs = objs
# self.fields = fields
#
# def brains(self):
# """
# Get the raw objects from SeSQL index, aka the "brains", as dictionnaries
# """
# for obj in self.objs:
# yield dict(zip(self.fields, obj))
#
# def count(self):
# """
# Count results
# """
# return len(self.objs)
# __len__ = count
#
# def iterator(self):
# """
# Iterate on self
# """
# for obj in self.objs:
# try:
# yield self.load(obj)
# except config.orm.not_found:
# log.warning("Object %r does not exist ! Broken index ?" % (obj,))
# __iter__ = iterator
#
# def all(self):
# """
# Get all the results as a list
# """
# return list(self)
#
# def get(self, index):
# """
# Get the row at given index
# """
# return self.load(self.objs[index])
# __getitem__ = get
#
# def __getslice__(self, i, j):
# """
# Get a slice
# """
# res = [ self.load(obj) for obj in self.objs[i:j] ]
# return res
#
# @staticmethod
# def load(obj):
# """
# Get a given object
# """
# objclass, objid = obj[:2]
# objclass = typemap.get_class_by_name(objclass)
# if not objclass:
# return config.orm.not_found
# entry = "%s:%s" % (objclass.__name__, objid)
# log.debug("Fetching %s" % entry)
# return config.orm.load_object(objclass, objid)
#
# def historize(self, query):
# """save in the database the query for future processing"""
# nb_results = self.count()
# query_text = query.get_fulltext_query()[2][0]
# config.orm.historize(query=query_text, nb_results=nb_results)
#
# Path: sesql/index.py
# @index_log_wrap
# @config.orm.transactional
# def index(cursor, obj, message, noindex = False, index_related = True):
# """
# Index a Django object into SeSQL, do the real work
# """
# log.info("%s : entering" % message)
# try:
# classname, objid = get_sesql_id(obj)
# except (TypeError, AttributeError, ValueError):
# log.info("%r: can't get classname/id, skipping" % obj)
# return
#
# # Handle dependancies
# gro = getattr(obj, "get_related_objects_for_indexation", None)
# if index_related and gro:
# related = gro()
# nbrelated = len(related)
# for item in related:
# schedule_reindex(item)
# else:
# nbrelated = 0
#
# log.info("%s : %d dependancies found" % (message, nbrelated))
#
# table_name = typemap.typemap.get_table_for(classname)
# if not table_name:
# log.info("%s: no table found, skipping" % message)
# return
#
# query = "DELETE FROM %s WHERE id=%%s AND classname=%%s" % table_name
# cursor.execute(query, (objid, classname))
#
# if noindex:
# log.info("%s : running in 'noindex' mode, only deleteing" % message)
# return
#
# if config.SKIP_CONDITION and config.SKIP_CONDITION(obj):
# log.info("%s : not indexing because of skip_condition" % message)
# return
#
# log.info("%s : indexing entry in table %s" % (message, table_name))
#
# keys, placeholders, results = get_values(obj, config.FIELDS)
#
# query = "INSERT INTO %s (%s) VALUES (%s)" % (table_name,
# ",".join(keys),
# ",".join(placeholders))
# cursor.execute(query, results)
. Output only the next line. | index(obj) |
Predict the next line after this snippet: <|code_start|>
log = logging.getLogger('sesql')
def cached(method):
"""
Decorator to make a method without argument to store result
in the object itself
"""
cache_name = "_cache_" + method.__name__
def cached_inner(self, *args, **kwargs):
if args or kwargs:
return method(self, *args, **kwargs)
if not hasattr(self, cache_name):
value = method(self)
setattr(self, cache_name, value)
else:
value = getattr(self, cache_name)
return value
return cached_inner
class SeSQLQuery(object):
"""
SeSQL Query handler
"""
def __init__(self, query, order, fields = ()):
"""
Constructor
"""
self.query = query
<|code_end|>
using the current file's imports:
import logging
from sesql import config
from sesql.typemap import typemap
from sesql.fieldmap import fieldmap
from sesql.results import SeSQLResultSet
and any relevant context from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
#
# Path: sesql/results.py
# class SeSQLResultSet(object):
# """
# A lazy SeSQL result set
# It mimicks a bit the Django QuerySet, but doesn't work the same way,
# and doesn't provide exactly the same methods
# """
# def __init__(self, objs, fields):
# """
# Constructor
# Objs must be a list of (class, id) with optionally extra fields
# """
# self.objs = objs
# self.fields = fields
#
# def brains(self):
# """
# Get the raw objects from SeSQL index, aka the "brains", as dictionnaries
# """
# for obj in self.objs:
# yield dict(zip(self.fields, obj))
#
# def count(self):
# """
# Count results
# """
# return len(self.objs)
# __len__ = count
#
# def iterator(self):
# """
# Iterate on self
# """
# for obj in self.objs:
# try:
# yield self.load(obj)
# except config.orm.not_found:
# log.warning("Object %r does not exist ! Broken index ?" % (obj,))
# __iter__ = iterator
#
# def all(self):
# """
# Get all the results as a list
# """
# return list(self)
#
# def get(self, index):
# """
# Get the row at given index
# """
# return self.load(self.objs[index])
# __getitem__ = get
#
# def __getslice__(self, i, j):
# """
# Get a slice
# """
# res = [ self.load(obj) for obj in self.objs[i:j] ]
# return res
#
# @staticmethod
# def load(obj):
# """
# Get a given object
# """
# objclass, objid = obj[:2]
# objclass = typemap.get_class_by_name(objclass)
# if not objclass:
# return config.orm.not_found
# entry = "%s:%s" % (objclass.__name__, objid)
# log.debug("Fetching %s" % entry)
# return config.orm.load_object(objclass, objid)
#
# def historize(self, query):
# """save in the database the query for future processing"""
# nb_results = self.count()
# query_text = query.get_fulltext_query()[2][0]
# config.orm.historize(query=query_text, nb_results=nb_results)
. Output only the next line. | order = order or config.DEFAULT_ORDER |
Given the following code snippet before the placeholder: <|code_start|> def shortquery(self, limit = 50):
"""
Perform a long query and return a lazy Django result set
"""
table = self.get_table_name()
if table == config.MASTER_TABLE_NAME:
# Multitable or unprecise query ? Falling back to longquery
log.warning("Query on master table will not be optimized on %s" % self.query)
return self.longquery(limit)
if "sesql_relevance" in self.order or "-sesql_relevance" in self.order:
# Order on relevance ? Falling back to longquery
log.info("Query sorting on relevance will not be optimized on %s" % self.query)
return self.longquery(limit)
log.debug("Trying short query for %s" % self.query)
cursor = self._do_smart_query(limit)
return SeSQLResultSet(list(cursor), self.fields)
@cached
def get_table_name(self):
"""
Get the name of table to use for the query
For now, if we are accross more than one, use the master table
"""
classes = self.get_classes()
tables = set()
for k in classes:
<|code_end|>
, predict the next line using imports from the current file:
import logging
from sesql import config
from sesql.typemap import typemap
from sesql.fieldmap import fieldmap
from sesql.results import SeSQLResultSet
and context including class names, function names, and sometimes code from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
#
# Path: sesql/results.py
# class SeSQLResultSet(object):
# """
# A lazy SeSQL result set
# It mimicks a bit the Django QuerySet, but doesn't work the same way,
# and doesn't provide exactly the same methods
# """
# def __init__(self, objs, fields):
# """
# Constructor
# Objs must be a list of (class, id) with optionally extra fields
# """
# self.objs = objs
# self.fields = fields
#
# def brains(self):
# """
# Get the raw objects from SeSQL index, aka the "brains", as dictionnaries
# """
# for obj in self.objs:
# yield dict(zip(self.fields, obj))
#
# def count(self):
# """
# Count results
# """
# return len(self.objs)
# __len__ = count
#
# def iterator(self):
# """
# Iterate on self
# """
# for obj in self.objs:
# try:
# yield self.load(obj)
# except config.orm.not_found:
# log.warning("Object %r does not exist ! Broken index ?" % (obj,))
# __iter__ = iterator
#
# def all(self):
# """
# Get all the results as a list
# """
# return list(self)
#
# def get(self, index):
# """
# Get the row at given index
# """
# return self.load(self.objs[index])
# __getitem__ = get
#
# def __getslice__(self, i, j):
# """
# Get a slice
# """
# res = [ self.load(obj) for obj in self.objs[i:j] ]
# return res
#
# @staticmethod
# def load(obj):
# """
# Get a given object
# """
# objclass, objid = obj[:2]
# objclass = typemap.get_class_by_name(objclass)
# if not objclass:
# return config.orm.not_found
# entry = "%s:%s" % (objclass.__name__, objid)
# log.debug("Fetching %s" % entry)
# return config.orm.load_object(objclass, objid)
#
# def historize(self, query):
# """save in the database the query for future processing"""
# nb_results = self.count()
# query_text = query.get_fulltext_query()[2][0]
# config.orm.historize(query=query_text, nb_results=nb_results)
. Output only the next line. | tables.add(typemap.get_table_for(k)) |
Next line prediction: <|code_start|> return SeSQLResultSet(list(query), self.fields)
def _do_longquery(self, limit = None):
"""
Perform a long query and return a cursor
"""
table = self.get_table_name()
pattern, values = self.get_pattern()
o_pattern, o_values = self.get_order()
query = """SELECT %s
FROM %s
WHERE %s
ORDER BY %s""" % (', '.join(self.fields), table, pattern, o_pattern)
if limit:
query += """
LIMIT %d""" % limit
return self.execute(query, values + o_values)
@cached
def _get_smart_query(self):
"""
Get the template for performing smart queries
"""
table = self.get_table_name()
pattern, values = self.get_pattern()
o_pattern, o_values = self.get_order()
classes = self.get_classes()
<|code_end|>
. Use current file imports:
(import logging
from sesql import config
from sesql.typemap import typemap
from sesql.fieldmap import fieldmap
from sesql.results import SeSQLResultSet)
and context including class names, function names, or small code snippets from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
#
# Path: sesql/results.py
# class SeSQLResultSet(object):
# """
# A lazy SeSQL result set
# It mimicks a bit the Django QuerySet, but doesn't work the same way,
# and doesn't provide exactly the same methods
# """
# def __init__(self, objs, fields):
# """
# Constructor
# Objs must be a list of (class, id) with optionally extra fields
# """
# self.objs = objs
# self.fields = fields
#
# def brains(self):
# """
# Get the raw objects from SeSQL index, aka the "brains", as dictionnaries
# """
# for obj in self.objs:
# yield dict(zip(self.fields, obj))
#
# def count(self):
# """
# Count results
# """
# return len(self.objs)
# __len__ = count
#
# def iterator(self):
# """
# Iterate on self
# """
# for obj in self.objs:
# try:
# yield self.load(obj)
# except config.orm.not_found:
# log.warning("Object %r does not exist ! Broken index ?" % (obj,))
# __iter__ = iterator
#
# def all(self):
# """
# Get all the results as a list
# """
# return list(self)
#
# def get(self, index):
# """
# Get the row at given index
# """
# return self.load(self.objs[index])
# __getitem__ = get
#
# def __getslice__(self, i, j):
# """
# Get a slice
# """
# res = [ self.load(obj) for obj in self.objs[i:j] ]
# return res
#
# @staticmethod
# def load(obj):
# """
# Get a given object
# """
# objclass, objid = obj[:2]
# objclass = typemap.get_class_by_name(objclass)
# if not objclass:
# return config.orm.not_found
# entry = "%s:%s" % (objclass.__name__, objid)
# log.debug("Fetching %s" % entry)
# return config.orm.load_object(objclass, objid)
#
# def historize(self, query):
# """save in the database the query for future processing"""
# nb_results = self.count()
# query_text = query.get_fulltext_query()[2][0]
# config.orm.historize(query=query_text, nb_results=nb_results)
. Output only the next line. | l_pattern, l_values = fieldmap.get_field("classname").get_in(classes) |
Next line prediction: <|code_start|> """
SeSQL Query handler
"""
def __init__(self, query, order, fields = ()):
"""
Constructor
"""
self.query = query
order = order or config.DEFAULT_ORDER
if isinstance(order, (str, unicode)):
order = order.split(',')
self.order = order
self.fields = ('classname', 'id') + tuple(fields)
def execute(self, query, values):
"""
Execute and log query
"""
cursor = config.orm.cursor()
log.debug("Query %r with values %r" % (query, values))
cursor.execute(query, values)
return cursor
def longquery(self, limit = None):
"""
Perform a long query and return a lazy Django result set
"""
query = self._do_longquery(limit)
if limit:
query = query.fetchmany(limit)
<|code_end|>
. Use current file imports:
(import logging
from sesql import config
from sesql.typemap import typemap
from sesql.fieldmap import fieldmap
from sesql.results import SeSQLResultSet)
and context including class names, function names, or small code snippets from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
#
# Path: sesql/results.py
# class SeSQLResultSet(object):
# """
# A lazy SeSQL result set
# It mimicks a bit the Django QuerySet, but doesn't work the same way,
# and doesn't provide exactly the same methods
# """
# def __init__(self, objs, fields):
# """
# Constructor
# Objs must be a list of (class, id) with optionally extra fields
# """
# self.objs = objs
# self.fields = fields
#
# def brains(self):
# """
# Get the raw objects from SeSQL index, aka the "brains", as dictionnaries
# """
# for obj in self.objs:
# yield dict(zip(self.fields, obj))
#
# def count(self):
# """
# Count results
# """
# return len(self.objs)
# __len__ = count
#
# def iterator(self):
# """
# Iterate on self
# """
# for obj in self.objs:
# try:
# yield self.load(obj)
# except config.orm.not_found:
# log.warning("Object %r does not exist ! Broken index ?" % (obj,))
# __iter__ = iterator
#
# def all(self):
# """
# Get all the results as a list
# """
# return list(self)
#
# def get(self, index):
# """
# Get the row at given index
# """
# return self.load(self.objs[index])
# __getitem__ = get
#
# def __getslice__(self, i, j):
# """
# Get a slice
# """
# res = [ self.load(obj) for obj in self.objs[i:j] ]
# return res
#
# @staticmethod
# def load(obj):
# """
# Get a given object
# """
# objclass, objid = obj[:2]
# objclass = typemap.get_class_by_name(objclass)
# if not objclass:
# return config.orm.not_found
# entry = "%s:%s" % (objclass.__name__, objid)
# log.debug("Fetching %s" % entry)
# return config.orm.load_object(objclass, objid)
#
# def historize(self, query):
# """save in the database the query for future processing"""
# nb_results = self.count()
# query_text = query.get_fulltext_query()[2][0]
# config.orm.historize(query=query_text, nb_results=nb_results)
. Output only the next line. | return SeSQLResultSet(list(query), self.fields) |
Given the code snippet: <|code_start|>
class Command(BaseCommand):
help = "Fixate a date field by copying empty values from another"
option_list = BaseCommand.option_list + (
make_option('-s', '--step',
dest='step',
default=1000,
type='int',
help='Size of a step (default: 1000 items)'),
make_option('-d', '--delay',
dest='delay',
type='float',
default=0.5,
help='Delay between two steps (default: 0.1 s) '),
make_option('--source',
dest='source',
default=None,
help='Name of the source field'),
make_option('--target',
dest='target',
default=None,
help='Name of the target field'),
)
@transaction.commit_on_success
def iteration(self, table, idmin, idmax):
"""
Perform one iteration : reindex everything
"""
<|code_end|>
, generate the next line using the imports in this file:
import sys
import time
from optparse import make_option
from django.db import transaction
from django.core.management.base import BaseCommand
from sesql import config
from sesql import typemap
from sesql.utils import print_eta
and context (functions, classes, or occasionally code) from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/utils.py
# def print_eta(percent, timedelta):
# """
# Print an ETA line, given we did percent of total in timedelta
# """
# if percent == 100.0 or percent == 0.0:
# eta = 0
# else:
# eta = timedelta / percent * (100.0 - percent)
#
# print "%05.2f %% done in %s; ETA : %s" % (percent,
# format_time(timedelta),
# format_time(eta))
. Output only the next line. | cursor = config.orm.cursor() |
Given the following code snippet before the placeholder: <|code_start|> start_time = time.time()
idmin, idmax = cursor.fetchone()
if idmin and idmax:
print "Processing table %s from id %d to %d" % (table, idmin, idmax)
start = idmin
while start <= idmax:
end = min(start + self.options['step'], idmax) + 1
self.iteration(table, start, end)
start = end
time.sleep(self.options['delay'])
timedelta = time.time() - start_time
percent = float(start - idmin - 1) / float(idmax - idmin) * 100.0
print_eta(percent, timedelta)
else:
print "Table %s is good, nothing to do" % table
def handle(self, **options):
"""
Really handle the command
"""
self.options = options
# Ensure we have a okish configuration file
if not self.options['source'] or not self.options['target']:
print "--source and --target are mandatory"
sys.exit(1)
self.source = self.options['source']
self.target = self.options['target']
<|code_end|>
, predict the next line using imports from the current file:
import sys
import time
from optparse import make_option
from django.db import transaction
from django.core.management.base import BaseCommand
from sesql import config
from sesql import typemap
from sesql.utils import print_eta
and context including class names, function names, and sometimes code from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/utils.py
# def print_eta(percent, timedelta):
# """
# Print an ETA line, given we did percent of total in timedelta
# """
# if percent == 100.0 or percent == 0.0:
# eta = 0
# else:
# eta = timedelta / percent * (100.0 - percent)
#
# print "%05.2f %% done in %s; ETA : %s" % (percent,
# format_time(timedelta),
# format_time(eta))
. Output only the next line. | for table in typemap.typemap.tables.keys(): |
Given the code snippet: <|code_start|> cursor = config.orm.cursor()
query = '''
UPDATE %s
SET %s = %s
WHERE %s IS NULL
AND (id >= %d) AND (id <= %d)
''' % (table, self.target, self.source, self.target, idmin, idmax)
cursor.execute(query)
def process_table(self, table):
"""
Process on given table
"""
cursor = config.orm.cursor()
query = 'SELECT min(id), max(id) FROM %s WHERE %s IS NULL' % (table,
self.target)
cursor.execute(query)
start_time = time.time()
idmin, idmax = cursor.fetchone()
if idmin and idmax:
print "Processing table %s from id %d to %d" % (table, idmin, idmax)
start = idmin
while start <= idmax:
end = min(start + self.options['step'], idmax) + 1
self.iteration(table, start, end)
start = end
time.sleep(self.options['delay'])
timedelta = time.time() - start_time
percent = float(start - idmin - 1) / float(idmax - idmin) * 100.0
<|code_end|>
, generate the next line using the imports in this file:
import sys
import time
from optparse import make_option
from django.db import transaction
from django.core.management.base import BaseCommand
from sesql import config
from sesql import typemap
from sesql.utils import print_eta
and context (functions, classes, or occasionally code) from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/utils.py
# def print_eta(percent, timedelta):
# """
# Print an ETA line, given we did percent of total in timedelta
# """
# if percent == 100.0 or percent == 0.0:
# eta = 0
# else:
# eta = timedelta / percent * (100.0 - percent)
#
# print "%05.2f %% done in %s; ETA : %s" % (percent,
# format_time(timedelta),
# format_time(eta))
. Output only the next line. | print_eta(percent, timedelta) |
Given snippet: <|code_start|># Copyright (c) Pilot Systems and Libération, 2010-2011
# This file is part of SeSQL.
# SeSQL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
log = logging.getLogger('sesql')
def index_log_wrap(function):
"""
Log wrap the method, giving it a name and logging its time
"""
def inner(obj, *args, **kwargs):
try:
classname, objid = get_sesql_id(obj)
message = "%s (%s:%s)" % (function.__name__, classname, objid)
except (TypeError, AttributeError, ValueError):
message = "%s (invalid object %r)" % (function.__name__, obj)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
from sesql import utils
from sesql import config
from sesql import typemap
from sesql import fieldmap
and context:
# Path: sesql/utils.py
# class Timer(object):
# def __init__(self):
# def start(self):
# def stop(self):
# def __enter__(self):
# def __exit__(self, *args, **kwargs):
# def get_local(self):
# def get_global(self):
# def reset(self):
# def peek(self):
# def safe_str(what):
# def log_time(function, message = None):
# def log_time_inner(*args, **kwargs):
# def strip_ligatures(value):
# def format_time(timedelta, keep = 3):
# def print_eta(percent, timedelta):
#
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
which might include code, classes, or functions. Output only the next line. | return utils.log_time(function, message)(obj, message, *args, **kwargs) |
Continue the code snippet: <|code_start|> inner.__name__ = function.__name__
return inner
def get_values(obj, fields):
"""
Get SQL keys, placeholders and results for this object and those fields
"""
keys = [ ]
placeholders = [ ]
results = [ ]
for field in fields:
keys.extend(field.index_columns)
placeholders.extend(field.index_placeholders)
results.extend(field.get_values(obj))
return keys, placeholders, results
def get_sesql_id(obj):
"""
Get classname and id, the SeSQL identifiers
"""
if isinstance(obj, (tuple, list)) and len(obj) == 2:
return tuple(obj)
def get_val(field):
vals = fieldmap.fieldmap[field].get_values(obj)
return vals and vals[0] or None
return (get_val('classname'), get_val('id'))
<|code_end|>
. Use current file imports:
import logging
from sesql import utils
from sesql import config
from sesql import typemap
from sesql import fieldmap
and context (classes, functions, or code) from other files:
# Path: sesql/utils.py
# class Timer(object):
# def __init__(self):
# def start(self):
# def stop(self):
# def __enter__(self):
# def __exit__(self, *args, **kwargs):
# def get_local(self):
# def get_global(self):
# def reset(self):
# def peek(self):
# def safe_str(what):
# def log_time(function, message = None):
# def log_time_inner(*args, **kwargs):
# def strip_ligatures(value):
# def format_time(timedelta, keep = 3):
# def print_eta(percent, timedelta):
#
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
. Output only the next line. | @config.orm.transactional |
Based on the snippet: <|code_start|> results = [ ]
for field in fields:
keys.extend(field.index_columns)
placeholders.extend(field.index_placeholders)
results.extend(field.get_values(obj))
return keys, placeholders, results
def get_sesql_id(obj):
"""
Get classname and id, the SeSQL identifiers
"""
if isinstance(obj, (tuple, list)) and len(obj) == 2:
return tuple(obj)
def get_val(field):
vals = fieldmap.fieldmap[field].get_values(obj)
return vals and vals[0] or None
return (get_val('classname'), get_val('id'))
@config.orm.transactional
def schedule_reindex(cursor, item):
try:
item = get_sesql_id(item)
except (TypeError, AttributeError, ValueError):
log.info("%r: can't get classname/id, skipping" % item)
return
classname, objid = item
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
from sesql import utils
from sesql import config
from sesql import typemap
from sesql import fieldmap
and context (classes, functions, sometimes code) from other files:
# Path: sesql/utils.py
# class Timer(object):
# def __init__(self):
# def start(self):
# def stop(self):
# def __enter__(self):
# def __exit__(self, *args, **kwargs):
# def get_local(self):
# def get_global(self):
# def reset(self):
# def peek(self):
# def safe_str(what):
# def log_time(function, message = None):
# def log_time_inner(*args, **kwargs):
# def strip_ligatures(value):
# def format_time(timedelta, keep = 3):
# def print_eta(percent, timedelta):
#
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
. Output only the next line. | table_name = typemap.typemap.get_table_for(classname) |
Given the following code snippet before the placeholder: <|code_start|> message = "%s (%s:%s)" % (function.__name__, classname, objid)
except (TypeError, AttributeError, ValueError):
message = "%s (invalid object %r)" % (function.__name__, obj)
return utils.log_time(function, message)(obj, message, *args, **kwargs)
inner.__name__ = function.__name__
return inner
def get_values(obj, fields):
"""
Get SQL keys, placeholders and results for this object and those fields
"""
keys = [ ]
placeholders = [ ]
results = [ ]
for field in fields:
keys.extend(field.index_columns)
placeholders.extend(field.index_placeholders)
results.extend(field.get_values(obj))
return keys, placeholders, results
def get_sesql_id(obj):
"""
Get classname and id, the SeSQL identifiers
"""
if isinstance(obj, (tuple, list)) and len(obj) == 2:
return tuple(obj)
def get_val(field):
<|code_end|>
, predict the next line using imports from the current file:
import logging
from sesql import utils
from sesql import config
from sesql import typemap
from sesql import fieldmap
and context including class names, function names, and sometimes code from other files:
# Path: sesql/utils.py
# class Timer(object):
# def __init__(self):
# def start(self):
# def stop(self):
# def __enter__(self):
# def __exit__(self, *args, **kwargs):
# def get_local(self):
# def get_global(self):
# def reset(self):
# def peek(self):
# def safe_str(what):
# def log_time(function, message = None):
# def log_time_inner(*args, **kwargs):
# def strip_ligatures(value):
# def format_time(timedelta, keep = 3):
# def print_eta(percent, timedelta):
#
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
#
# Path: sesql/typemap.py
# class TypeMap(object):
# def __init__(self):
# def all_subclasses(klass, done = None):
# def all_tables(self):
# def all_classes(self):
# def all_class_names(self):
# def get_class_names_for(self, table):
# def get_classes_for(self, table):
# def get_table_for(self, klass):
# def get_class_by_name(self, klass):
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
. Output only the next line. | vals = fieldmap.fieldmap[field].get_values(obj) |
Using the snippet: <|code_start|># (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
def highlight(text, words, index = None):
"""
Give the position of words in a text, cleaning everything as sesql does
That can be used to highlight the words, for example
The index will be use to lemmatize, if none, it'll use the default one
"""
if not text:
return []
if index is None:
index = fieldmap.primary
if index is None:
raise ValueError, "Not index given and no primary one"
size = len(text)
letters = set(string.ascii_letters)
# Lemmatize the words
<|code_end|>
, determine the next line of code. You have imports:
from sesql.lemmatize import lemmatize
from sesql.fieldmap import fieldmap
import string
and context (class names, function names, or code) available:
# Path: sesql/lemmatize.py
# def lemmatize(words, index = None):
# """
# Give a lemmatized version of those words
#
# Use the configuration for the given index, or the default one if
# index is None
# """
# if index is None:
# index = fieldmap.primary
#
# if index is None:
# raise ValueError, "Not index given and no primary one"
#
# words = [ index.marshall(word) for word in words ]
#
# index = fieldmap.get_field(index)
# return lemmatize_for(words, index.dictionnary)
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
. Output only the next line. | lems = lemmatize(words, index) |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright (c) Pilot Systems and Libération, 2010-2011
# This file is part of SeSQL.
# SeSQL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
def highlight(text, words, index = None):
"""
Give the position of words in a text, cleaning everything as sesql does
That can be used to highlight the words, for example
The index will be use to lemmatize, if none, it'll use the default one
"""
if not text:
return []
if index is None:
<|code_end|>
. Write the next line using the current file imports:
from sesql.lemmatize import lemmatize
from sesql.fieldmap import fieldmap
import string
and context from other files:
# Path: sesql/lemmatize.py
# def lemmatize(words, index = None):
# """
# Give a lemmatized version of those words
#
# Use the configuration for the given index, or the default one if
# index is None
# """
# if index is None:
# index = fieldmap.primary
#
# if index is None:
# raise ValueError, "Not index given and no primary one"
#
# words = [ index.marshall(word) for word in words ]
#
# index = fieldmap.get_field(index)
# return lemmatize_for(words, index.dictionnary)
#
# Path: sesql/fieldmap.py
# class FieldMap(object):
# def __init__(self):
# def all_fields(self):
# def get_field(self, field):
# def get_primary(self):
, which may include functions, classes, or code. Output only the next line. | index = fieldmap.primary |
Predict the next line for this snippet: <|code_start|># it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
Handle the type map, that is, the map between Django models and SQL tables
"""
class TypeMap(object):
"""
Handle the classes <=> table mapping
"""
def __init__(self):
"""
Constructor
"""
self.tables = {}
self.classes = {}
self.class_names = {}
self.valid_classes = []
self.valid_class_names = []
<|code_end|>
with the help of current file imports:
from sesql import config
and context from other files:
# Path: sesql/config.py
# CONFIG_PATH_FALLBACK = 'sesql_config'
# CONFIG_PATH = getattr(settings, 'SESQL_CONFIG_PATH', CONFIG_PATH_FALLBACK)
# CONFIG_PATH = CONFIG_PATH_FALLBACK
, which may contain function names, class names, or code. Output only the next line. | type_map = [ len(t) == 3 and t or t + (True,) for t in config.TYPE_MAP ] |
Predict the next line for this snippet: <|code_start|>
class current_question(service_func):
def __init__(self):
service_func.__init__(self, "/question/current")
self.name = "Get the current question"
self.description = "Return the current question or nothing if there is ongoing question"
<|code_end|>
with the help of current file imports:
from ..service_func import service_func, meta_arg, func_error
and context from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
, which may contain function names, class names, or code. Output only the next line. | self.args.append(meta_arg("key", "Protection key", "none")) |
Predict the next line for this snippet: <|code_start|>#this will answer the current answer in game_data.current_question
class answer_question(service_func):
def __init__(self):
service_func.__init__(self, "/question/answer")
self.name = "Answer question"
self.description = "Answer the current question"
<|code_end|>
with the help of current file imports:
from ..service_func import service_func, meta_arg, func_error
and context from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
, which may contain function names, class names, or code. Output only the next line. | self.args.append(meta_arg("key", "Protection key", "none")) |
Based on the snippet: <|code_start|>
class answer_question(service_func):
def __init__(self):
service_func.__init__(self, "/question/answer")
self.name = "Answer question"
self.description = "Answer the current question"
self.args.append(meta_arg("key", "Protection key", "none"))
self.args.append(meta_arg("valid", "Is the answer is good or not (true or false)", "none"))
self.args.append(meta_arg("next_team", "If the answer is invalid, next team to got a chance to answer, if it -1, the question is removed from current_question", "none"))
def execute(self, args, server):
key = args["key"]
valid = bool(args["valid"])
if server.key == key:
if server.game_data.current_question is not None:
if valid:
server.game_data.valid_answer()
else:
next_team = int(args["next_team"])
if next_team == -1:
# question is ended, nobody gets the points
server.game_data.current_question = None
else:
# this team got a chance to answer this question
server.game_data.current_question.team = next_team
else:
<|code_end|>
, predict the immediate next line with the help of imports:
from ..service_func import service_func, meta_arg, func_error
and context (classes, functions, sometimes code) from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | raise func_error("No question waiting for an answer") |
Given the following code snippet before the placeholder: <|code_start|>
class add_point_to_team(service_func):
def __init__(self):
service_func.__init__(self, "/team/addpoint")
self.name = "Add point to team"
self.description = "Add point to a team corresponding by it team id"
self.args.append(meta_arg("key", "Protection key", "none"))
self.args.append(meta_arg("team", "Team Id", "none"))
self.args.append(meta_arg("point", "Point to add", "none"))
def execute(self, args, server):
key = args["key"]
point = int(args["point"])
team = int(args["team"])
if server.key == key:
t = server.game_data.get_team(team)
if t is not None:
t.points += point
else:
<|code_end|>
, predict the next line using imports from the current file:
from ..service_func import service_func, func_error, meta_arg
and context including class names, function names, and sometimes code from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | raise func_error("Invalid key") |
Predict the next line after this snippet: <|code_start|>
class add_point_to_team(service_func):
def __init__(self):
service_func.__init__(self, "/team/addpoint")
self.name = "Add point to team"
self.description = "Add point to a team corresponding by it team id"
<|code_end|>
using the current file's imports:
from ..service_func import service_func, func_error, meta_arg
and any relevant context from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | self.args.append(meta_arg("key", "Protection key", "none")) |
Using the snippet: <|code_start|>
def init(self):
self.question = None
self.points = 0
def execute(self, args, server):
key = args["key"]
category = int(args["category"])
rank = int(args["rank"])
team = int(args["team"])
if server.key == key:
cat = server.game_data.get_category(category)
if cat is not None:
if cat.ranks_available[rank]:
if server.game_data.current_question is None:
pool = []
for q in cat.questions:
if q.rank == rank and not q.asked:
pool.append(q)
if len(pool) > 0:
q_i = random.randint(0, len(pool)-1)
self.question = pool[q_i]
self.question.asked = True
self.points = server.game_data.points_table.points[self.question.rank]
server.game_data.ask_question(self.question, team)
cat.ranks_available[rank] = False
else:
<|code_end|>
, determine the next line of code. You have imports:
from ..service_func import service_func, func_error, meta_arg
import random
and context (class names, function names, or code) available:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | raise func_error("No more question in this category with this rank") |
Here is a snippet: <|code_start|>
class ask_question(service_func):
def __init__(self):
service_func.__init__(self, '/question/ask')
self.name = "Ask Question"
self.description = "Ask a question to a team, with a category id and a rank"
self.question = None
self.points = 0
<|code_end|>
. Write the next line using the current file imports:
from ..service_func import service_func, func_error, meta_arg
import random
and context from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
, which may include functions, classes, or code. Output only the next line. | self.args.append(meta_arg("key", "Protection Key", "none")) |
Next line prediction: <|code_start|>
class remove_team(service_func):
def __init__(self):
service_func.__init__(self, "/team/remove")
self.name = "Remove team"
self.description = "Remove a team from the Jeopardy Game"
self.args.append(meta_arg("key", "Protection key", "none"))
self.args.append(meta_arg("team", "Team id to remove", "none"))
def execute(self, args, server):
key = args["key"]
team = int(args["team"])
if server.key == key:
teams = server.game_data.teams
server.game_data.teams = []
for t in teams:
if not t.id == team:
server.game_data.teams.append(t)
else:
<|code_end|>
. Use current file imports:
(from ..service_func import service_func, func_error, meta_arg)
and context including class names, function names, or small code snippets from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | raise func_error("Key is invalid") |
Based on the snippet: <|code_start|>
class remove_team(service_func):
def __init__(self):
service_func.__init__(self, "/team/remove")
self.name = "Remove team"
self.description = "Remove a team from the Jeopardy Game"
<|code_end|>
, predict the immediate next line with the help of imports:
from ..service_func import service_func, func_error, meta_arg
and context (classes, functions, sometimes code) from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | self.args.append(meta_arg("key", "Protection key", "none")) |
Continue the code snippet: <|code_start|>
class start_game(service_func):
def __init__(self):
service_func.__init__(self, "/game/start")
self.name = "Start Game"
self.description = "Start a new game, need correct number of teams (4)"
self.args.append(meta_arg("key", "Protection Key", "none"))
def execute(self, args, server):
key = args["key"]
# tired of doing this check, maybe implement a classic check_key into service_func
if server.key == key:
if not server.game_data.game_on:
if len(server.game_data.teams) == 4: # remove this hardcoded team count ? but jeopardy need 4 teams ?
server.game_data.start_game()
else:
<|code_end|>
. Use current file imports:
from ..service_func import service_func, func_error, meta_arg
and context (classes, functions, or code) from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | raise func_error("Only %d teams are registered, need 4" % len(server.game_data.teams)) |
Using the snippet: <|code_start|>
class start_game(service_func):
def __init__(self):
service_func.__init__(self, "/game/start")
self.name = "Start Game"
self.description = "Start a new game, need correct number of teams (4)"
<|code_end|>
, determine the next line of code. You have imports:
from ..service_func import service_func, func_error, meta_arg
and context (class names, function names, or code) available:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | self.args.append(meta_arg("key", "Protection Key", "none")) |
Given the following code snippet before the placeholder: <|code_start|>
class add_team(service_func):
def __init__(self):
service_func.__init__(self, "/team/add")
self.name = "Add team"
self.description = "Add a new team to the jeopardy"
self.args.append(meta_arg("key", "Protection Key", "none"))
self.args.append(meta_arg("name", "New Team Name", "none"))
self.team = None
def init(self):
self.team = None
def execute(self, args, server):
key = args["key"]
name = args["name"]
if key != server.key:
<|code_end|>
, predict the next line using imports from the current file:
from ..service_func import service_func, func_error, meta_arg
and context including class names, function names, and sometimes code from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | raise func_error("Invalid key") |
Using the snippet: <|code_start|>
class add_team(service_func):
def __init__(self):
service_func.__init__(self, "/team/add")
self.name = "Add team"
self.description = "Add a new team to the jeopardy"
<|code_end|>
, determine the next line of code. You have imports:
from ..service_func import service_func, func_error, meta_arg
and context (class names, function names, or code) available:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | self.args.append(meta_arg("key", "Protection Key", "none")) |
Given the code snippet: <|code_start|>
class test_key(service_func):
def __init__(self):
service_func.__init__(self, "/key")
self.name = "Test Key"
self.description = "Test the protection key if it's valid or not"
<|code_end|>
, generate the next line using the imports in this file:
from ..service_func import service_func, func_error, meta_arg
and context (functions, classes, or occasionally code) from other files:
# Path: webservice/service_func.py
# class func_error(Exception):
# class service_func:
# class default_func(service_func):
# def __init__(self, message):
# def __str__(self):
# def __init__(self, func_path):
# def check_server_key(self, args, server):
# def init(self):
# def execute(self, args, server):
# def answer(self):
# def get_meta(self):
# def __init__(self, functions):
# def answer(self):
. Output only the next line. | self.args.append(meta_arg("key", "Protection key", "none")) |
Predict the next line after this snippet: <|code_start|>from __future__ import division
from __future__ import print_function
def test_join_overlapping():
<|code_end|>
using the current file's imports:
import numpy as np
import numpy.testing as npt
import pandas as pd
from deepcpg.data import annotations as annos
and any relevant context from other files:
# Path: deepcpg/data/annotations.py
# def read_bed(filename, sort=False, usecols=[0, 1, 2], *args, **kwargs):
# def in_which(x, ys, ye):
# def is_in(pos, start, end):
# def distance(pos, start, end):
# def join_overlapping(s, e):
# def join_overlapping_frame(d):
# def group_overlapping(s, e):
# def extend_len(start, end, min_len, min_pos=1):
# def extend_len_frame(d, min_len):
. Output only the next line. | f = annos.join_overlapping |
Given snippet: <|code_start|> filename:
Path of HDF5 file.
group:
HDF5 group to be explored.
recursive: bool
If `True`, list records recursively.
groups: bool
If `True`, only list group names but not name of datasets.
regex: str
Regex to filter listed records.
nb_key: int
Maximum number of records to be listed.
must_exist: bool
If `False`, return `None` if file or group does not exist.
Returns
-------
list
`list` with name of records in `filename`.
"""
if not group.startswith('/'):
group = '/%s' % group
h5_file = h5.File(filename, 'r')
if not must_exist and group not in h5_file:
return None
keys = _ls(h5_file[group], recursive, groups)
for i, key in enumerate(keys):
keys[i] = re.sub('^%s/' % group, '', key)
h5_file.close()
if regex:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
import h5py as h5
import numpy as np
import six
from six.moves import range
from ..utils import filter_regex, to_list
from .utils import stack_dict
and context:
# Path: deepcpg/utils.py
# def filter_regex(values, regexs):
# """Filters list of `values` by list of `regexs`.
#
# Paramters
# ---------
# values: list
# list of `str` values.
# regexs: list
# list of `str` regexs.
#
# Returns
# -------
# list
# Sorted `list` of values in `values` that match any regex in `regexs`.
# """
# if not isinstance(values, list):
# values = [values]
# if not isinstance(regexs, list):
# regexs = [regexs]
# filtered = set()
# for value in values:
# for regex in regexs:
# if re.search(regex, value):
# filtered.add(value)
# return sorted(list(filtered))
#
# def to_list(value):
# """Convert `value` to a list."""
# if not isinstance(value, list) and value is not None:
# value = [value]
# return value
which might include code, classes, or functions. Output only the next line. | keys = filter_regex(keys, regex) |
Given the following code snippet before the placeholder: <|code_start|> if is_root:
group.close()
def hnames_to_names(hnames):
"""Flattens `dict` `hnames` of hierarchical names.
Converts hierarchical `dict`, e.g. hnames={'a': ['a1', 'a2'], 'b'}, to flat
list of keys for accessing HDF5 file, e.g. ['a/a1', 'a/a2', 'b']
"""
names = []
for key, value in six.iteritems(hnames):
if isinstance(value, dict):
for name in hnames_to_names(value):
names.append('%s/%s' % (key, name))
elif isinstance(value, list):
for name in value:
names.append('%s/%s' % (key, name))
elif isinstance(value, str):
names.append('%s/%s' % (key, value))
else:
names.append(key)
return names
def reader(data_files, names, batch_size=128, nb_sample=None, shuffle=False,
loop=False):
if isinstance(names, dict):
names = hnames_to_names(names)
else:
<|code_end|>
, predict the next line using imports from the current file:
import re
import h5py as h5
import numpy as np
import six
from six.moves import range
from ..utils import filter_regex, to_list
from .utils import stack_dict
and context including class names, function names, and sometimes code from other files:
# Path: deepcpg/utils.py
# def filter_regex(values, regexs):
# """Filters list of `values` by list of `regexs`.
#
# Paramters
# ---------
# values: list
# list of `str` values.
# regexs: list
# list of `str` regexs.
#
# Returns
# -------
# list
# Sorted `list` of values in `values` that match any regex in `regexs`.
# """
# if not isinstance(values, list):
# values = [values]
# if not isinstance(regexs, list):
# regexs = [regexs]
# filtered = set()
# for value in values:
# for regex in regexs:
# if re.search(regex, value):
# filtered.add(value)
# return sorted(list(filtered))
#
# def to_list(value):
# """Convert `value` to a list."""
# if not isinstance(value, list) and value is not None:
# value = [value]
# return value
. Output only the next line. | names = to_list(names) |
Predict the next line after this snippet: <|code_start|>
def main(self, name, opts):
logging.basicConfig(filename=opts.log_file,
format='%(levelname)s (%(asctime)s): %(message)s')
log = logging.getLogger(name)
if opts.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
log.debug(opts)
if opts.dna_wlen and opts.dna_wlen % 2 == 0:
raise ValueError('DNA window length must be odd!')
if opts.cpg_wlen and opts.cpg_wlen % 2 == 1:
raise ValueError('CpG window length must be even!')
data = []
for filename in opts.data_files:
data_file = h5.File(filename, 'r')
data_chunk = OrderedDict()
loc = pd.DataFrame({'chromo': data_file['chromo'].value,
'pos': data_file['pos'].value},
columns=['chromo', 'pos'])
data_chunk['loc'] = loc
if opts.outputs is not None:
group = data_file['outputs']
output_names = opts.outputs
if not len(output_names):
<|code_end|>
using the current file's imports:
from collections import OrderedDict
from deepcpg.data import hdf
import os
import sys
import argparse
import h5py as h5
import logging
import numpy.random
import pandas as pd
and any relevant context from other files:
# Path: deepcpg/data/hdf.py
# def _ls(item, recursive=False, groups=False, level=0):
# def ls(filename, group='/', recursive=False, groups=False,
# regex=None, nb_key=None, must_exist=True):
# def write_data(data, filename):
# def hnames_to_names(hnames):
# def reader(data_files, names, batch_size=128, nb_sample=None, shuffle=False,
# loop=False):
# def _to_dict(data):
# def read_from(reader, nb_sample=None):
# def read(data_files, names, nb_sample=None, batch_size=1024, *args, **kwargs):
. Output only the next line. | output_names = hdf.ls(filename, 'outputs', recursive=True) |
Here is a snippet: <|code_start|>from __future__ import division
from __future__ import print_function
def test_hnames_to_names():
hnames = OrderedDict.fromkeys(['a', 'b'])
<|code_end|>
. Write the next line using the current file imports:
from collections import OrderedDict
from numpy import testing as npt
from six.moves import range
from deepcpg.data import hdf
import os
import h5py as h5
import numpy as np
import six
and context from other files:
# Path: deepcpg/data/hdf.py
# def _ls(item, recursive=False, groups=False, level=0):
# def ls(filename, group='/', recursive=False, groups=False,
# regex=None, nb_key=None, must_exist=True):
# def write_data(data, filename):
# def hnames_to_names(hnames):
# def reader(data_files, names, batch_size=128, nb_sample=None, shuffle=False,
# loop=False):
# def _to_dict(data):
# def read_from(reader, nb_sample=None):
# def read(data_files, names, nb_sample=None, batch_size=1024, *args, **kwargs):
, which may include functions, classes, or code. Output only the next line. | names = hdf.hnames_to_names(hnames) |
Given snippet: <|code_start|> Returns
-------
List of :class:`FastaSeq` objects.
"""
list
if gzip is None:
gzip = filename.endswith('.gz')
if gzip:
lines = gz.open(filename, 'r').read().decode()
else:
lines = open(filename, 'r').read()
lines = lines.splitlines()
return parse_lines(lines)
def select_file_by_chromo(filenames, chromo):
"""Select file of chromosome `chromo`.
Parameters
----------
filenames: list
List of file names or directory with FASTA files.
chromo: str
Chromosome that is selected.
Returns
-------
str
Filename in `filenames` that contains chromosome `chromo`.
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import gzip as gz
from glob import glob
from six.moves import range
from ..utils import to_list
and context:
# Path: deepcpg/utils.py
# def to_list(value):
# """Convert `value` to a list."""
# if not isinstance(value, list) and value is not None:
# value = [value]
# return value
which might include code, classes, or functions. Output only the next line. | filenames = to_list(filenames) |
Predict the next line after this snippet: <|code_start|>
# Add new validation epoch logs to logs table
for metric, metric_logs in six.iteritems(self.val_epoch_logs):
metric_val = 'val_' + metric
if metric_val in logs:
metric_logs.append(logs[metric_val])
else:
metric_logs.append(None)
self._update_means(self.val_epoch_logs, self._val_epoch_metrics)
# Show table
table = OrderedDict()
table['split'] = ['train']
# Show mean logs first
for mean_name in self._epoch_metrics:
table[mean_name] = []
# Show output logs
if self.verbose:
for mean_name, names in six.iteritems(self._epoch_metrics):
for name in names:
table[name] = []
for name, logs in six.iteritems(self.epoch_logs):
if name in table:
table[name].append(logs[-1])
if self.val_epoch_logs:
table['split'].append('val')
for name, logs in six.iteritems(self.val_epoch_logs):
if name in table:
table[name].append(logs[-1])
self._log('')
<|code_end|>
using the current file's imports:
from collections import OrderedDict
from time import time
from keras.callbacks import Callback
from .utils import format_table
import os
import numpy as np
import six
and any relevant context from other files:
# Path: deepcpg/utils.py
# def format_table(table, colwidth=None, precision=2, header=True, sep=' | '):
# """Format a table of values as string.
#
# Formats a table represented as a `dict` with keys as column headers and
# values as a lists of values in each column.
#
# Parameters
# ----------
# table: `dict` or `OrderedDict`
# `dict` or `OrderedDict` with keys as column headers and values as lists
# of values in each column.
# precision: int or list of ints
# Precision of floating point values in each column. If `int`, uses same
# precision for all columns, otherwise formats columns with different
# precisions.
# header: bool
# If `True`, print column names.
# sep: str
# Column separator.
#
# Returns
# -------
# str
# String of formatted table values.
# """
#
# col_names = list(table.keys())
# if not isinstance(precision, list):
# precision = [precision] * len(col_names)
# col_widths = []
# tot_width = 0
# nb_row = None
# ftable = OrderedDict()
# for col_idx, col_name in enumerate(col_names):
# width = max(len(col_name), precision[col_idx] + 2)
# values = []
# for value in table[col_name]:
# if value is None:
# value = ''
# elif isinstance(value, float):
# value = '{0:.{1}f}'.format(value, precision[col_idx])
# else:
# value = str(value)
# width = max(width, len(value))
# values.append(value)
# ftable[col_name] = values
# col_widths.append(width)
# if not nb_row:
# nb_row = len(values)
# else:
# nb_row = max(nb_row, len(values))
# tot_width += width
# tot_width += len(sep) * (len(col_widths) - 1)
# rows = []
# if header:
# rows.append(format_table_row(col_names, col_widths, sep=sep))
# rows.append('-' * tot_width)
# for row in range(nb_row):
# values = []
# for col_values in six.itervalues(ftable):
# if row < len(col_values):
# values.append(col_values[row])
# else:
# values.append(None)
# rows.append(format_table_row(values, col_widths, sep=sep))
# return '\n'.join(rows)
. Output only the next line. | self._log(format_table(table, precision=self.precision)) |
Here is a snippet: <|code_start|>
class TestMake(object):
def setup_class(self):
self.data_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data')
self.data_files = glob(os.path.join(self.data_path, 'c*.h5'))
names = ['chromo', 'pos',
'/inputs/dna',
'/inputs/cpg/BS27_4_SER/dist',
'/inputs/cpg/BS27_4_SER/state',
'/inputs/cpg/BS28_2_SER/dist',
'/inputs/cpg/BS28_2_SER/state',
'/inputs/annos/exons',
'/inputs/annos/CGI',
'/outputs/cpg/BS27_4_SER',
'/outputs/cpg/BS28_2_SER',
'/outputs/cpg_stats/mean',
'/outputs/cpg_stats/var',
'/outputs/cpg_stats/cat_var',
'/outputs/cpg_stats/cat2_var',
'/outputs/cpg_stats/diff',
'/outputs/cpg_stats/mode',
]
<|code_end|>
. Write the next line using the current file imports:
from glob import glob
from numpy import testing as npt
from deepcpg.data import hdf, CPG_NAN
from deepcpg.data.fasta import read_chromo
from deepcpg.data.dna import CHAR_TO_INT
import os
import numpy as np
and context from other files:
# Path: deepcpg/data/hdf.py
# def _ls(item, recursive=False, groups=False, level=0):
# def ls(filename, group='/', recursive=False, groups=False,
# regex=None, nb_key=None, must_exist=True):
# def write_data(data, filename):
# def hnames_to_names(hnames):
# def reader(data_files, names, batch_size=128, nb_sample=None, shuffle=False,
# loop=False):
# def _to_dict(data):
# def read_from(reader, nb_sample=None):
# def read(data_files, names, nb_sample=None, batch_size=1024, *args, **kwargs):
#
# Path: deepcpg/data/fasta.py
# def read_chromo(filenames, chromo):
# """Read DNA sequence of chromosome `chromo`.
#
# Parameters
# ----------
# filenames: list
# List of FASTA files.
# chromo: str
# Chromosome that is read.
#
# Returns
# -------
# str
# DNA sequence of chromosome `chromo`.
# """
# filename = select_file_by_chromo(filenames, chromo)
# if not filename:
# raise ValueError('DNA file for chromosome "%s" not found!' % chromo)
#
# fasta_seqs = read_file(filename)
# if len(fasta_seqs) != 1:
# raise ValueError('Single sequence expected in file "%s"!' % filename)
# return fasta_seqs[0].seq
#
# Path: deepcpg/data/dna.py
# CHAR_TO_INT = OrderedDict([('A', 0), ('T', 1), ('G', 2), ('C', 3), ('N', 4)])
, which may include functions, classes, or code. Output only the next line. | self.data = hdf.read(self.data_files, names) |
Given the code snippet: <|code_start|> assert actual[idx] == e[2]
def test_outputs(self):
expected = [('18', 3000023, 1.0),
('18', 3000086, 1.0),
('18', 3012584, 0.0),
('19', 4398070, 0.0),
('19', 4428709, 1.0),
('19', 4442494, 0.0),
('19', 4447847, 1.0)
]
self._test_outputs('cpg/BS27_4_SER', expected)
expected = [('18', 3000092, 1.0),
('18', 3010064, 0.0),
('18', 3140338, 1.0),
('18', 3143169, 0.0),
('19', 4187854, 1.0),
('19', 4190571, 0.0),
('19', 4192788, 0.0),
('19', 4202077, 0.0)
]
self._test_outputs('cpg/BS28_2_SER', expected)
def _test_dna(self, chromo):
pos = self.pos[self.chromo == chromo.encode()]
dna = self.data['/inputs/dna'][self.chromo == chromo.encode()]
dna_wlen = dna.shape[1]
center = dna_wlen // 2
<|code_end|>
, generate the next line using the imports in this file:
from glob import glob
from numpy import testing as npt
from deepcpg.data import hdf, CPG_NAN
from deepcpg.data.fasta import read_chromo
from deepcpg.data.dna import CHAR_TO_INT
import os
import numpy as np
and context (functions, classes, or occasionally code) from other files:
# Path: deepcpg/data/hdf.py
# def _ls(item, recursive=False, groups=False, level=0):
# def ls(filename, group='/', recursive=False, groups=False,
# regex=None, nb_key=None, must_exist=True):
# def write_data(data, filename):
# def hnames_to_names(hnames):
# def reader(data_files, names, batch_size=128, nb_sample=None, shuffle=False,
# loop=False):
# def _to_dict(data):
# def read_from(reader, nb_sample=None):
# def read(data_files, names, nb_sample=None, batch_size=1024, *args, **kwargs):
#
# Path: deepcpg/data/fasta.py
# def read_chromo(filenames, chromo):
# """Read DNA sequence of chromosome `chromo`.
#
# Parameters
# ----------
# filenames: list
# List of FASTA files.
# chromo: str
# Chromosome that is read.
#
# Returns
# -------
# str
# DNA sequence of chromosome `chromo`.
# """
# filename = select_file_by_chromo(filenames, chromo)
# if not filename:
# raise ValueError('DNA file for chromosome "%s" not found!' % chromo)
#
# fasta_seqs = read_file(filename)
# if len(fasta_seqs) != 1:
# raise ValueError('Single sequence expected in file "%s"!' % filename)
# return fasta_seqs[0].seq
#
# Path: deepcpg/data/dna.py
# CHAR_TO_INT = OrderedDict([('A', 0), ('T', 1), ('G', 2), ('C', 3), ('N', 4)])
. Output only the next line. | dna_seq = read_chromo(os.path.join(self.data_path, '../dna_db'), |
Predict the next line for this snippet: <|code_start|> ('19', 4447847, 1.0)
]
self._test_outputs('cpg/BS27_4_SER', expected)
expected = [('18', 3000092, 1.0),
('18', 3010064, 0.0),
('18', 3140338, 1.0),
('18', 3143169, 0.0),
('19', 4187854, 1.0),
('19', 4190571, 0.0),
('19', 4192788, 0.0),
('19', 4202077, 0.0)
]
self._test_outputs('cpg/BS28_2_SER', expected)
def _test_dna(self, chromo):
pos = self.pos[self.chromo == chromo.encode()]
dna = self.data['/inputs/dna'][self.chromo == chromo.encode()]
dna_wlen = dna.shape[1]
center = dna_wlen // 2
dna_seq = read_chromo(os.path.join(self.data_path, '../dna_db'),
chromo)
idxs = np.linspace(0, len(pos) - 1, 100).astype(np.int32)
for idx in idxs:
p = pos[idx] - 1
assert dna_seq[p:(p + 2)] == 'CG'
assert dna[idx, center] == 3
assert dna[idx, center + 1] == 2
<|code_end|>
with the help of current file imports:
from glob import glob
from numpy import testing as npt
from deepcpg.data import hdf, CPG_NAN
from deepcpg.data.fasta import read_chromo
from deepcpg.data.dna import CHAR_TO_INT
import os
import numpy as np
and context from other files:
# Path: deepcpg/data/hdf.py
# def _ls(item, recursive=False, groups=False, level=0):
# def ls(filename, group='/', recursive=False, groups=False,
# regex=None, nb_key=None, must_exist=True):
# def write_data(data, filename):
# def hnames_to_names(hnames):
# def reader(data_files, names, batch_size=128, nb_sample=None, shuffle=False,
# loop=False):
# def _to_dict(data):
# def read_from(reader, nb_sample=None):
# def read(data_files, names, nb_sample=None, batch_size=1024, *args, **kwargs):
#
# Path: deepcpg/data/fasta.py
# def read_chromo(filenames, chromo):
# """Read DNA sequence of chromosome `chromo`.
#
# Parameters
# ----------
# filenames: list
# List of FASTA files.
# chromo: str
# Chromosome that is read.
#
# Returns
# -------
# str
# DNA sequence of chromosome `chromo`.
# """
# filename = select_file_by_chromo(filenames, chromo)
# if not filename:
# raise ValueError('DNA file for chromosome "%s" not found!' % chromo)
#
# fasta_seqs = read_file(filename)
# if len(fasta_seqs) != 1:
# raise ValueError('Single sequence expected in file "%s"!' % filename)
# return fasta_seqs[0].seq
#
# Path: deepcpg/data/dna.py
# CHAR_TO_INT = OrderedDict([('A', 0), ('T', 1), ('G', 2), ('C', 3), ('N', 4)])
, which may contain function names, class names, or code. Output only the next line. | assert dna[idx, center + 10] == CHAR_TO_INT[dna_seq[p + 10]] |
Based on the snippet: <|code_start|>def _cat_sample_weights(y, mask=None):
return 1 - K.cast(K.equal(K.sum(y, axis=-1), 0), K.floatx())
def cat_acc(y, z):
"""Compute categorical accuracy given one-hot matrices."""
weights = _cat_sample_weights(y)
_acc = K.cast(K.equal(K.argmax(y, axis=-1),
K.argmax(z, axis=-1)),
K.floatx())
_acc = K.sum(_acc * weights) / K.sum(weights)
return _acc
def mse(y, z, mask=CPG_NAN):
"""Compute mean squared error."""
weights = _sample_weights(y, mask)
_mse = K.sum(K.square(y - z) * weights) / K.sum(weights)
return _mse
def mae(y, z, mask=CPG_NAN):
"""Compute mean absolute deviation."""
weights = _sample_weights(y, mask)
_mae = K.sum(K.abs(y - z) * weights) / K.sum(weights)
return _mae
def get(name):
"""Return object from module by its name."""
<|code_end|>
, predict the immediate next line with the help of imports:
from keras import backend as K
from .utils import get_from_module
from .data import CPG_NAN
and context (classes, functions, sometimes code) from other files:
# Path: deepcpg/utils.py
# def get_from_module(identifier, module_params, ignore_case=True):
# """Return object from module.
#
# Return object with name `identifier` from module with items `module_params`.
#
# Parameters
# ----------
# identifier: str
# Name of object, e.g. a function, in module.
# module_params: dict
# `dict` of items in module, e.g. `globals()`
# ignore_case: bool
# If `True`, ignore case of `identifier`.
#
# Returns
# -------
# object
# Object with name `identifier` in module, e.g. a function or class.
# """
# if ignore_case:
# _module_params = dict()
# for key, value in six.iteritems(module_params):
# _module_params[key.lower()] = value
# _identifier = identifier.lower()
# else:
# _module_params = module_params
# _identifier = identifier
# item = _module_params.get(_identifier)
# if not item:
# raise ValueError('Invalid identifier "%s"!' % identifier)
# return item
. Output only the next line. | return get_from_module(name, globals()) |
Given the following code snippet before the placeholder: <|code_start|>"""DNA models.
Provides models trained with DNA sequence windows.
"""
from __future__ import division
from __future__ import print_function
<|code_end|>
, predict the next line using imports from the current file:
import inspect
from keras import layers as kl
from keras import regularizers as kr
from .utils import Model
from ..utils import get_from_module
and context including class names, function names, and sometimes code from other files:
# Path: deepcpg/models/utils.py
# class Model(object):
# """Abstract model call.
#
# Abstract class of DNA, CpG, and Joint models.
#
# Parameters
# ----------
# dropout: float
# Dropout rate.
# l1_decay: float
# L1 weight decay.
# l2_decay: float
# L2 weight decay.
# init: str
# Name of Keras initialization.
# """
#
# def __init__(self, dropout=0.0, l1_decay=0.0, l2_decay=0.0,
# init='glorot_uniform'):
# self.dropout = dropout
# self.l1_decay = l1_decay
# self.l2_decay = l2_decay
# self.init = init
# self.name = self.__class__.__name__
# self.scope = None
#
# def inputs(self, *args, **kwargs):
# """Return list of Keras model inputs."""
# pass
#
# def _build(self, input, output):
# """Build final model at the end of `__call__`."""
# model = km.Model(input, output, name=self.name)
# if self.scope:
# for layer in model.layers:
# if not is_input_layer(layer):
# layer.name = '%s/%s' % (self.scope, layer.name)
# return model
#
# def __call__(self, inputs=None):
# """Build model.
#
# Parameters
# ----------
# inputs: list
# Keras model inputs
# """
# pass
#
# Path: deepcpg/utils.py
# def get_from_module(identifier, module_params, ignore_case=True):
# """Return object from module.
#
# Return object with name `identifier` from module with items `module_params`.
#
# Parameters
# ----------
# identifier: str
# Name of object, e.g. a function, in module.
# module_params: dict
# `dict` of items in module, e.g. `globals()`
# ignore_case: bool
# If `True`, ignore case of `identifier`.
#
# Returns
# -------
# object
# Object with name `identifier` in module, e.g. a function or class.
# """
# if ignore_case:
# _module_params = dict()
# for key, value in six.iteritems(module_params):
# _module_params[key.lower()] = value
# _identifier = identifier.lower()
# else:
# _module_params = module_params
# _identifier = identifier
# item = _module_params.get(_identifier)
# if not item:
# raise ValueError('Invalid identifier "%s"!' % identifier)
# return item
. Output only the next line. | class DnaModel(Model): |
Predict the next line after this snippet: <|code_start|> x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
x = self._res_unit(x, [64, 64, 256], atrous=2, stage=2, block=2)
x = self._res_unit(x, [64, 64, 256], atrous=4, stage=2, block=3)
# 32
x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
x = self._res_unit(x, [128, 128, 512], atrous=2, stage=3, block=2)
x = self._res_unit(x, [128, 128, 512], atrous=4, stage=3, block=3)
# 16
x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)
x = kl.GlobalAveragePooling1D()(x)
x = kl.Dropout(self.dropout)(x)
return self._build(inputs, x)
def list_models():
"""Return the name of models in the module."""
models = dict()
for name, value in globals().items():
if inspect.isclass(value) and name.lower().find('model') == -1:
models[name] = value
return models
def get(name):
"""Return object from module by its name."""
<|code_end|>
using the current file's imports:
import inspect
from keras import layers as kl
from keras import regularizers as kr
from .utils import Model
from ..utils import get_from_module
and any relevant context from other files:
# Path: deepcpg/models/utils.py
# class Model(object):
# """Abstract model call.
#
# Abstract class of DNA, CpG, and Joint models.
#
# Parameters
# ----------
# dropout: float
# Dropout rate.
# l1_decay: float
# L1 weight decay.
# l2_decay: float
# L2 weight decay.
# init: str
# Name of Keras initialization.
# """
#
# def __init__(self, dropout=0.0, l1_decay=0.0, l2_decay=0.0,
# init='glorot_uniform'):
# self.dropout = dropout
# self.l1_decay = l1_decay
# self.l2_decay = l2_decay
# self.init = init
# self.name = self.__class__.__name__
# self.scope = None
#
# def inputs(self, *args, **kwargs):
# """Return list of Keras model inputs."""
# pass
#
# def _build(self, input, output):
# """Build final model at the end of `__call__`."""
# model = km.Model(input, output, name=self.name)
# if self.scope:
# for layer in model.layers:
# if not is_input_layer(layer):
# layer.name = '%s/%s' % (self.scope, layer.name)
# return model
#
# def __call__(self, inputs=None):
# """Build model.
#
# Parameters
# ----------
# inputs: list
# Keras model inputs
# """
# pass
#
# Path: deepcpg/utils.py
# def get_from_module(identifier, module_params, ignore_case=True):
# """Return object from module.
#
# Return object with name `identifier` from module with items `module_params`.
#
# Parameters
# ----------
# identifier: str
# Name of object, e.g. a function, in module.
# module_params: dict
# `dict` of items in module, e.g. `globals()`
# ignore_case: bool
# If `True`, ignore case of `identifier`.
#
# Returns
# -------
# object
# Object with name `identifier` in module, e.g. a function or class.
# """
# if ignore_case:
# _module_params = dict()
# for key, value in six.iteritems(module_params):
# _module_params[key.lower()] = value
# _identifier = identifier.lower()
# else:
# _module_params = module_params
# _identifier = identifier
# item = _module_params.get(_identifier)
# if not item:
# raise ValueError('Invalid identifier "%s"!' % identifier)
# return item
. Output only the next line. | return get_from_module(name, globals()) |
Continue the code snippet: <|code_start|> p.add_argument(
'--log_file',
help='Write log messages to file')
return p
def main(self, name, opts):
logging.basicConfig(filename=opts.log_file,
format='%(levelname)s (%(asctime)s): %(message)s')
log = logging.getLogger(name)
if opts.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
log.debug(opts)
if opts.show:
print('Available models: %s' % MODEL_ZOO)
for name in sorted(list(MODELS.keys())):
print(name)
return 0
if not opts.model_id:
raise ValueError('Model ID required!')
if opts.model_id not in MODELS:
raise ValueError('Invalid model ID "%s"!' % opts.model_id)
log.info('Downloading model ...')
model_url = "%s/%s" % (DATA_HOST, MODELS[opts.model_id])
log.info('Model URL: %s' % model_url)
<|code_end|>
. Use current file imports:
import os
import sys
import argparse
import logging
from deepcpg.utils import make_dir
and context (classes, functions, or code) from other files:
# Path: deepcpg/utils.py
# def make_dir(dirname):
# """Create directory `dirname` if non-existing.
#
# Parameters
# ----------
# dirname: str
# Path of directory to be created.
#
# Returns
# -------
# bool
# `True`, if directory did not exist and was created.
# """
# if os.path.exists(dirname):
# return False
# else:
# os.makedirs(dirname)
# return True
. Output only the next line. | make_dir(opts.out_dir) |
Next line prediction: <|code_start|>
def mean(x):
"""Mean methylation rate."""
if x.ndim > 2:
x = x.mean(axis=2)
return np.mean(x, 1)
def mode(x):
"""Mode of methylation rate."""
if x.ndim > 2:
x = x.mean(axis=2)
return x.mean(axis=1).round().astype(np.int8)
def var(x, *args, **kwargs):
"""Variance between cells."""
if x.ndim > 2:
x = x.mean(axis=2)
return x.var(axis=1)
def cat_var(x, nb_bin=3, *args, **kwargs):
"""Categorical variance between cells.
Discretizes variance from :func:`var` into `nb_bin` equally-spaced bins.
"""
v = var(x, *args, **kwargs)
<|code_end|>
. Use current file imports:
(import numpy as np
from ..utils import EPS, get_from_module)
and context including class names, function names, or small code snippets from other files:
# Path: deepcpg/utils.py
# EPS = 10e-8
#
# def get_from_module(identifier, module_params, ignore_case=True):
# """Return object from module.
#
# Return object with name `identifier` from module with items `module_params`.
#
# Parameters
# ----------
# identifier: str
# Name of object, e.g. a function, in module.
# module_params: dict
# `dict` of items in module, e.g. `globals()`
# ignore_case: bool
# If `True`, ignore case of `identifier`.
#
# Returns
# -------
# object
# Object with name `identifier` in module, e.g. a function or class.
# """
# if ignore_case:
# _module_params = dict()
# for key, value in six.iteritems(module_params):
# _module_params[key.lower()] = value
# _identifier = identifier.lower()
# else:
# _module_params = module_params
# _identifier = identifier
# item = _module_params.get(_identifier)
# if not item:
# raise ValueError('Invalid identifier "%s"!' % identifier)
# return item
. Output only the next line. | bins = np.linspace(-EPS, 0.25, nb_bin + 1) |
Next line prediction: <|code_start|> cv = np.digitize(v, bins, right=True) - 1
return np.ma.masked_array(cv, v.mask)
def cat2_var(*args, **kwargs):
"""Binary variance between cells."""
cv = cat_var(*args, **kwargs)
cv[cv > 0] = 1
return cv
def entropy(x):
"""Entropy of single CpG sites between cells."""
if x.ndim > 2:
x = x.mean(axis=2)
p1 = x.mean(axis=1)
p1 = np.minimum(1 - EPS, np.maximum(EPS, p1))
p0 = 1 - p1
return -(p1 * np.log(p1) + p0 * np.log(p0))
def diff(x):
"""Test if CpG site is differentially methylated."""
if x.ndim > 2:
x = x.mean(axis=2)
return x.min(axis=1) != x.max(axis=1).astype(np.int8)
def get(name):
"""Return object from module by its name."""
<|code_end|>
. Use current file imports:
(import numpy as np
from ..utils import EPS, get_from_module)
and context including class names, function names, or small code snippets from other files:
# Path: deepcpg/utils.py
# EPS = 10e-8
#
# def get_from_module(identifier, module_params, ignore_case=True):
# """Return object from module.
#
# Return object with name `identifier` from module with items `module_params`.
#
# Parameters
# ----------
# identifier: str
# Name of object, e.g. a function, in module.
# module_params: dict
# `dict` of items in module, e.g. `globals()`
# ignore_case: bool
# If `True`, ignore case of `identifier`.
#
# Returns
# -------
# object
# Object with name `identifier` in module, e.g. a function or class.
# """
# if ignore_case:
# _module_params = dict()
# for key, value in six.iteritems(module_params):
# _module_params[key.lower()] = value
# _identifier = identifier.lower()
# else:
# _module_params = module_params
# _identifier = identifier
# item = _module_params.get(_identifier)
# if not item:
# raise ValueError('Invalid identifier "%s"!' % identifier)
# return item
. Output only the next line. | return get_from_module(name, globals()) |
Predict the next line for this snippet: <|code_start|>"""Joint models.
Provides models for joining features from DNA and CpG model.
"""
from __future__ import division
from __future__ import print_function
<|code_end|>
with the help of current file imports:
import inspect
from keras import layers as kl
from keras import models as km
from keras import regularizers as kr
from keras.layers.merge import concatenate
from .utils import Model
from ..utils import get_from_module
and context from other files:
# Path: deepcpg/models/utils.py
# class Model(object):
# """Abstract model call.
#
# Abstract class of DNA, CpG, and Joint models.
#
# Parameters
# ----------
# dropout: float
# Dropout rate.
# l1_decay: float
# L1 weight decay.
# l2_decay: float
# L2 weight decay.
# init: str
# Name of Keras initialization.
# """
#
# def __init__(self, dropout=0.0, l1_decay=0.0, l2_decay=0.0,
# init='glorot_uniform'):
# self.dropout = dropout
# self.l1_decay = l1_decay
# self.l2_decay = l2_decay
# self.init = init
# self.name = self.__class__.__name__
# self.scope = None
#
# def inputs(self, *args, **kwargs):
# """Return list of Keras model inputs."""
# pass
#
# def _build(self, input, output):
# """Build final model at the end of `__call__`."""
# model = km.Model(input, output, name=self.name)
# if self.scope:
# for layer in model.layers:
# if not is_input_layer(layer):
# layer.name = '%s/%s' % (self.scope, layer.name)
# return model
#
# def __call__(self, inputs=None):
# """Build model.
#
# Parameters
# ----------
# inputs: list
# Keras model inputs
# """
# pass
#
# Path: deepcpg/utils.py
# def get_from_module(identifier, module_params, ignore_case=True):
# """Return object from module.
#
# Return object with name `identifier` from module with items `module_params`.
#
# Parameters
# ----------
# identifier: str
# Name of object, e.g. a function, in module.
# module_params: dict
# `dict` of items in module, e.g. `globals()`
# ignore_case: bool
# If `True`, ignore case of `identifier`.
#
# Returns
# -------
# object
# Object with name `identifier` in module, e.g. a function or class.
# """
# if ignore_case:
# _module_params = dict()
# for key, value in six.iteritems(module_params):
# _module_params[key.lower()] = value
# _identifier = identifier.lower()
# else:
# _module_params = module_params
# _identifier = identifier
# item = _module_params.get(_identifier)
# if not item:
# raise ValueError('Invalid identifier "%s"!' % identifier)
# return item
, which may contain function names, class names, or code. Output only the next line. | class JointModel(Model): |
Using the snippet: <|code_start|> def __init__(self, *args, **kwargs):
super(JointL2h512, self).__init__(*args, **kwargs)
self.nb_layer = 2
class JointL3h512(JointL1h512):
"""Three fully-connected layers with 512 units.
.. code::
Parameters: 1,000,000
Specification: fc[512]_fc[512]_fc[512]
"""
def __init__(self, *args, **kwargs):
super(JointL3h512, self).__init__(*args, **kwargs)
self.nb_layer = 3
def list_models():
"""Return the name of models in the module."""
models = dict()
for name, value in globals().items():
if inspect.isclass(value) and name.lower().find('model') == -1:
models[name] = value
return models
def get(name):
"""Return object from module by its name."""
<|code_end|>
, determine the next line of code. You have imports:
import inspect
from keras import layers as kl
from keras import models as km
from keras import regularizers as kr
from keras.layers.merge import concatenate
from .utils import Model
from ..utils import get_from_module
and context (class names, function names, or code) available:
# Path: deepcpg/models/utils.py
# class Model(object):
# """Abstract model call.
#
# Abstract class of DNA, CpG, and Joint models.
#
# Parameters
# ----------
# dropout: float
# Dropout rate.
# l1_decay: float
# L1 weight decay.
# l2_decay: float
# L2 weight decay.
# init: str
# Name of Keras initialization.
# """
#
# def __init__(self, dropout=0.0, l1_decay=0.0, l2_decay=0.0,
# init='glorot_uniform'):
# self.dropout = dropout
# self.l1_decay = l1_decay
# self.l2_decay = l2_decay
# self.init = init
# self.name = self.__class__.__name__
# self.scope = None
#
# def inputs(self, *args, **kwargs):
# """Return list of Keras model inputs."""
# pass
#
# def _build(self, input, output):
# """Build final model at the end of `__call__`."""
# model = km.Model(input, output, name=self.name)
# if self.scope:
# for layer in model.layers:
# if not is_input_layer(layer):
# layer.name = '%s/%s' % (self.scope, layer.name)
# return model
#
# def __call__(self, inputs=None):
# """Build model.
#
# Parameters
# ----------
# inputs: list
# Keras model inputs
# """
# pass
#
# Path: deepcpg/utils.py
# def get_from_module(identifier, module_params, ignore_case=True):
# """Return object from module.
#
# Return object with name `identifier` from module with items `module_params`.
#
# Parameters
# ----------
# identifier: str
# Name of object, e.g. a function, in module.
# module_params: dict
# `dict` of items in module, e.g. `globals()`
# ignore_case: bool
# If `True`, ignore case of `identifier`.
#
# Returns
# -------
# object
# Object with name `identifier` in module, e.g. a function or class.
# """
# if ignore_case:
# _module_params = dict()
# for key, value in six.iteritems(module_params):
# _module_params[key.lower()] = value
# _identifier = identifier.lower()
# else:
# _module_params = module_params
# _identifier = identifier
# item = _module_params.get(_identifier)
# if not item:
# raise ValueError('Invalid identifier "%s"!' % identifier)
# return item
. Output only the next line. | return get_from_module(name, globals()) |
Given snippet: <|code_start|>
# Create output directories
make_dir(opts.out_dir)
sub_dirs = dict()
names = ['logos', 'fa']
if opts.plot_dens:
names.append('dens')
if opts.plot_heat:
names.append('heat')
if opts.motif_dbs:
names.append('tomtom')
for name in names:
dirname = pt.join(opts.out_dir, name)
sub_dirs[name] = dirname
make_dir(dirname)
meme_filename = pt.join(opts.out_dir, 'meme.txt')
meme_file = open_meme(meme_filename, seqs)
if opts.plot_pca:
tmp = min(len(filters_act), opts.nb_sample_pca)
log.info('Performing PCA on activations using %d samples' % tmp)
# Down-sample activations to at most nb_sample_pca samples to reduce
# memory usage and run-time.
pca_act = filters_act[:tmp, :, filters_idx]
act = pca_act.mean(axis=1)
tmp = self.plot_filename(opts.out_dir, 'pca_mean')
plot_pca(act, labels=filters_idx, filename=tmp)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from collections import OrderedDict
from sklearn.decomposition import PCA
from six.moves import range
from matplotlib import pyplot as plt
from deepcpg.utils import EPS, linear_weights, make_dir
from deepcpg.data import dna
from deepcpg.motifs import read_meme_db, get_report
import sys
import os
import os.path as pt
import argparse
import h5py as h5
import logging
import numpy as np
import pandas as pd
import six
import subprocess
import matplotlib as mpl
import seaborn as sns
and context:
# Path: deepcpg/utils.py
# EPS = 10e-8
#
# def linear_weights(length, start=0.1):
# """Create linear-triangle weights.
#
# Create array `x` of length `length` with linear weights, where the weight is
# highest (one) for the center x[length//2] and lowest (`start` ) at the ends
# x[0] and x[-1].
#
# Parameters
# ----------
# length: int
# Length of the weight array.
# start: float
# Minimum weights.
#
# Returns
# -------
# :class:`np.ndarray`
# Array of length `length` with weight.
# """
# weights = np.linspace(start, 1, np.ceil(length / 2))
# tmp = weights
# if length % 2:
# tmp = tmp[:-1]
# weights = np.hstack((weights, tmp[::-1]))
# return weights
#
# def make_dir(dirname):
# """Create directory `dirname` if non-existing.
#
# Parameters
# ----------
# dirname: str
# Path of directory to be created.
#
# Returns
# -------
# bool
# `True`, if directory did not exist and was created.
# """
# if os.path.exists(dirname):
# return False
# else:
# os.makedirs(dirname)
# return True
#
# Path: deepcpg/data/dna.py
# CHAR_TO_INT = OrderedDict([('A', 0), ('T', 1), ('G', 2), ('C', 3), ('N', 4)])
# INT_TO_CHAR = {v: k for k, v in CHAR_TO_INT.items()}
# def get_alphabet(special=False, reverse=False):
# def char_to_int(seq):
# def int_to_char(seq, join=True):
# def int_to_onehot(seqs, dim=4):
# def onehot_to_int(seqs, axis=-1):
#
# Path: deepcpg/motifs.py
# def read_meme_db(meme_db_file):
# """Read MEME database as Pandas DataFrame.
#
# Parameters
# ----------
# meme_db_file: str
# File name of MEME database.
#
# Returns
# -------
# :class:`pandas.DataFrame`
# :class:`pandas.DataFrame` with columns 'id', 'protein', 'url'.
# """
# motifs = []
# motif = None
# for line in open(meme_db_file):
# if line.startswith('MOTIF'):
# if motif:
# motifs.append(motif)
# motif = None
# tmp = line.split()[1:]
# if len(tmp) < 2:
# continue
# motif = OrderedDict()
# motif['id'] = tmp[0]
# protein = re.sub(r'\(([^)]+)\)', r'\1', tmp[1])
# motif['protein'] = protein.split('_')[0]
# motif['url'] = ''
# elif motif and line.startswith('URL'):
# motif['url'] = line.split()[1]
# if motif:
# motifs.append(motif)
# for i, motif in enumerate(motifs):
# motifs[i] = pd.DataFrame(motif, index=[0])
# motifs = pd.concat(motifs)
# return motifs
#
# def get_report(filter_stats_file, tomtom_file, meme_motifs):
# """Read and join `filter_stats_file` and `tomtom_file`.
#
# Used by `dcpg_filter_motifs.py` to read and join output files.
#
# Paramters
# ---------
# filter_stats_file: str
# Path of stats file created with `dcpg_filter_motifs.py`.
# tomtom_file: str
# Path of Tomtom output file.
# meme_motifs: :class:`pandas.DataFrame`
# :class:`pandas.DataFrame` from `read_meme_db`.
#
# Returns
# -------
# :class:`pandas.DataFrame`
# :class:`pandas.DataFrame` with columns from Tomtom and statistic file.
# """
# filter_stats = pd.read_table(filter_stats_file)
# tomtom = read_tomtom(tomtom_file)
# tomtom = tomtom.sort_values(['idx', 'q-value', 'e-value'])
# tomtom = tomtom.loc[:, ~tomtom.columns.isin(['query id', 'optimal offset'])]
# d = pd.merge(filter_stats, tomtom, on='idx', how='outer')
# meme_motifs = meme_motifs.rename(columns={'id': 'target id'})
# d = pd.merge(d, meme_motifs, on='target id', how='left')
# d.index.name = None
# return d
which might include code, classes, or functions. Output only the next line. | weights = linear_weights(pca_act.shape[1]) |
Predict the next line after this snippet: <|code_start|>.. code:: bash
dcpg_filter_motifs.py
./activations.h5
--out_dir ./motifs
--motif_db ./motif_databases/CIS-BP/Mus_musculus.meme
--plot_heat
--plot_dens
--plot_pca
"""
from __future__ import print_function
from __future__ import division
mpl.use('agg')
sns.set_style('darkgrid')
WEBLOGO_OPTS = '-X NO -Y NO --errorbars NO --fineprint ""'
WEBLOGO_OPTS += ' --logo-font Arial-BoldMT'
WEBLOGO_OPTS += ' -C "#CB2026" A A'
WEBLOGO_OPTS += ' -C "#34459C" C C'
WEBLOGO_OPTS += ' -C "#FBB116" G G'
WEBLOGO_OPTS += ' -C "#0C8040" T T'
<|code_end|>
using the current file's imports:
from collections import OrderedDict
from sklearn.decomposition import PCA
from six.moves import range
from matplotlib import pyplot as plt
from deepcpg.utils import EPS, linear_weights, make_dir
from deepcpg.data import dna
from deepcpg.motifs import read_meme_db, get_report
import sys
import os
import os.path as pt
import argparse
import h5py as h5
import logging
import numpy as np
import pandas as pd
import six
import subprocess
import matplotlib as mpl
import seaborn as sns
and any relevant context from other files:
# Path: deepcpg/utils.py
# EPS = 10e-8
#
# def linear_weights(length, start=0.1):
# """Create linear-triangle weights.
#
# Create array `x` of length `length` with linear weights, where the weight is
# highest (one) for the center x[length//2] and lowest (`start` ) at the ends
# x[0] and x[-1].
#
# Parameters
# ----------
# length: int
# Length of the weight array.
# start: float
# Minimum weights.
#
# Returns
# -------
# :class:`np.ndarray`
# Array of length `length` with weight.
# """
# weights = np.linspace(start, 1, np.ceil(length / 2))
# tmp = weights
# if length % 2:
# tmp = tmp[:-1]
# weights = np.hstack((weights, tmp[::-1]))
# return weights
#
# def make_dir(dirname):
# """Create directory `dirname` if non-existing.
#
# Parameters
# ----------
# dirname: str
# Path of directory to be created.
#
# Returns
# -------
# bool
# `True`, if directory did not exist and was created.
# """
# if os.path.exists(dirname):
# return False
# else:
# os.makedirs(dirname)
# return True
#
# Path: deepcpg/data/dna.py
# CHAR_TO_INT = OrderedDict([('A', 0), ('T', 1), ('G', 2), ('C', 3), ('N', 4)])
# INT_TO_CHAR = {v: k for k, v in CHAR_TO_INT.items()}
# def get_alphabet(special=False, reverse=False):
# def char_to_int(seq):
# def int_to_char(seq, join=True):
# def int_to_onehot(seqs, dim=4):
# def onehot_to_int(seqs, axis=-1):
#
# Path: deepcpg/motifs.py
# def read_meme_db(meme_db_file):
# """Read MEME database as Pandas DataFrame.
#
# Parameters
# ----------
# meme_db_file: str
# File name of MEME database.
#
# Returns
# -------
# :class:`pandas.DataFrame`
# :class:`pandas.DataFrame` with columns 'id', 'protein', 'url'.
# """
# motifs = []
# motif = None
# for line in open(meme_db_file):
# if line.startswith('MOTIF'):
# if motif:
# motifs.append(motif)
# motif = None
# tmp = line.split()[1:]
# if len(tmp) < 2:
# continue
# motif = OrderedDict()
# motif['id'] = tmp[0]
# protein = re.sub(r'\(([^)]+)\)', r'\1', tmp[1])
# motif['protein'] = protein.split('_')[0]
# motif['url'] = ''
# elif motif and line.startswith('URL'):
# motif['url'] = line.split()[1]
# if motif:
# motifs.append(motif)
# for i, motif in enumerate(motifs):
# motifs[i] = pd.DataFrame(motif, index=[0])
# motifs = pd.concat(motifs)
# return motifs
#
# def get_report(filter_stats_file, tomtom_file, meme_motifs):
# """Read and join `filter_stats_file` and `tomtom_file`.
#
# Used by `dcpg_filter_motifs.py` to read and join output files.
#
# Parameters
# ---------
# filter_stats_file: str
# Path of stats file created with `dcpg_filter_motifs.py`.
# tomtom_file: str
# Path of Tomtom output file.
# meme_motifs: :class:`pandas.DataFrame`
# :class:`pandas.DataFrame` from `read_meme_db`.
#
# Returns
# -------
# :class:`pandas.DataFrame`
# :class:`pandas.DataFrame` with columns from Tomtom and statistic file.
# """
# filter_stats = pd.read_table(filter_stats_file)
# tomtom = read_tomtom(tomtom_file)
# tomtom = tomtom.sort_values(['idx', 'q-value', 'e-value'])
# tomtom = tomtom.loc[:, ~tomtom.columns.isin(['query id', 'optimal offset'])]
# d = pd.merge(filter_stats, tomtom, on='idx', how='outer')
# meme_motifs = meme_motifs.rename(columns={'id': 'target id'})
# d = pd.merge(d, meme_motifs, on='target id', how='left')
# d.index.name = None
# return d
. Output only the next line. | ALPHABET = dna.get_alphabet(False) |
Next line prediction: <|code_start|>"""CpG models.
Provides models trained with observed neighboring methylation states of
multiple cells.
"""
from __future__ import division
from __future__ import print_function
<|code_end|>
. Use current file imports:
(import inspect
from keras import layers as kl
from keras import regularizers as kr
from keras import models as km
from keras.layers.merge import concatenate
from .utils import Model
from ..utils import get_from_module)
and context including class names, function names, or small code snippets from other files:
# Path: deepcpg/models/utils.py
# class Model(object):
# """Abstract model call.
#
# Abstract class of DNA, CpG, and Joint models.
#
# Parameters
# ----------
# dropout: float
# Dropout rate.
# l1_decay: float
# L1 weight decay.
# l2_decay: float
# L2 weight decay.
# init: str
# Name of Keras initialization.
# """
#
# def __init__(self, dropout=0.0, l1_decay=0.0, l2_decay=0.0,
# init='glorot_uniform'):
# self.dropout = dropout
# self.l1_decay = l1_decay
# self.l2_decay = l2_decay
# self.init = init
# self.name = self.__class__.__name__
# self.scope = None
#
# def inputs(self, *args, **kwargs):
# """Return list of Keras model inputs."""
# pass
#
# def _build(self, input, output):
# """Build final model at the end of `__call__`."""
# model = km.Model(input, output, name=self.name)
# if self.scope:
# for layer in model.layers:
# if not is_input_layer(layer):
# layer.name = '%s/%s' % (self.scope, layer.name)
# return model
#
# def __call__(self, inputs=None):
# """Build model.
#
# Parameters
# ----------
# inputs: list
# Keras model inputs
# """
# pass
#
# Path: deepcpg/utils.py
# def get_from_module(identifier, module_params, ignore_case=True):
# """Return object from module.
#
# Return object with name `identifier` from module with items `module_params`.
#
# Parameters
# ----------
# identifier: str
# Name of object, e.g. a function, in module.
# module_params: dict
# `dict` of items in module, e.g. `globals()`
# ignore_case: bool
# If `True`, ignore case of `identifier`.
#
# Returns
# -------
# object
# Object with name `identifier` in module, e.g. a function or class.
# """
# if ignore_case:
# _module_params = dict()
# for key, value in six.iteritems(module_params):
# _module_params[key.lower()] = value
# _identifier = identifier.lower()
# else:
# _module_params = module_params
# _identifier = identifier
# item = _module_params.get(_identifier)
# if not item:
# raise ValueError('Invalid identifier "%s"!' % identifier)
# return item
. Output only the next line. | class CpgModel(Model): |
Based on the snippet: <|code_start|>
shape = getattr(x, '_keras_shape')
replicate_model = self._replicate_model(kl.Input(shape=shape[2:]))
x = kl.TimeDistributed(replicate_model)(x)
kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
x = kl.Bidirectional(kl.GRU(128, kernel_regularizer=kernel_regularizer,
return_sequences=True),
merge_mode='concat')(x)
kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
gru = kl.GRU(256, kernel_regularizer=kernel_regularizer)
x = kl.Bidirectional(gru)(x)
x = kl.Dropout(self.dropout)(x)
return self._build(inputs, x)
def list_models():
"""Return the name of models in the module."""
models = dict()
for name, value in globals().items():
if inspect.isclass(value) and name.lower().find('model') == -1:
models[name] = value
return models
def get(name):
"""Return object from module by its name."""
<|code_end|>
, predict the immediate next line with the help of imports:
import inspect
from keras import layers as kl
from keras import regularizers as kr
from keras import models as km
from keras.layers.merge import concatenate
from .utils import Model
from ..utils import get_from_module
and context (classes, functions, sometimes code) from other files:
# Path: deepcpg/models/utils.py
# class Model(object):
# """Abstract model call.
#
# Abstract class of DNA, CpG, and Joint models.
#
# Parameters
# ----------
# dropout: float
# Dropout rate.
# l1_decay: float
# L1 weight decay.
# l2_decay: float
# L2 weight decay.
# init: str
# Name of Keras initialization.
# """
#
# def __init__(self, dropout=0.0, l1_decay=0.0, l2_decay=0.0,
# init='glorot_uniform'):
# self.dropout = dropout
# self.l1_decay = l1_decay
# self.l2_decay = l2_decay
# self.init = init
# self.name = self.__class__.__name__
# self.scope = None
#
# def inputs(self, *args, **kwargs):
# """Return list of Keras model inputs."""
# pass
#
# def _build(self, input, output):
# """Build final model at the end of `__call__`."""
# model = km.Model(input, output, name=self.name)
# if self.scope:
# for layer in model.layers:
# if not is_input_layer(layer):
# layer.name = '%s/%s' % (self.scope, layer.name)
# return model
#
# def __call__(self, inputs=None):
# """Build model.
#
# Parameters
# ----------
# inputs: list
# Keras model inputs
# """
# pass
#
# Path: deepcpg/utils.py
# def get_from_module(identifier, module_params, ignore_case=True):
# """Return object from module.
#
# Return object with name `identifier` from module with items `module_params`.
#
# Parameters
# ----------
# identifier: str
# Name of object, e.g. a function, in module.
# module_params: dict
# `dict` of items in module, e.g. `globals()`
# ignore_case: bool
# If `True`, ignore case of `identifier`.
#
# Returns
# -------
# object
# Object with name `identifier` in module, e.g. a function or class.
# """
# if ignore_case:
# _module_params = dict()
# for key, value in six.iteritems(module_params):
# _module_params[key.lower()] = value
# _identifier = identifier.lower()
# else:
# _module_params = module_params
# _identifier = identifier
# item = _module_params.get(_identifier)
# if not item:
# raise ValueError('Invalid identifier "%s"!' % identifier)
# return item
. Output only the next line. | return get_from_module(name, globals()) |
Based on the snippet: <|code_start|>from __future__ import division
from __future__ import print_function
class TestKnnCpgFeatureExtractor(object):
def test_larger_equal(self):
# y: 1 5 8 15
<|code_end|>
, predict the immediate next line with the help of imports:
import numpy as np
import numpy.testing as npt
import six
from deepcpg.data import feature_extractor as fe
from deepcpg.data import dna
and context (classes, functions, sometimes code) from other files:
# Path: deepcpg/data/feature_extractor.py
# class KnnCpgFeatureExtractor(object):
# class IntervalFeatureExtractor(object):
# class KmersFeatureExtractor(object):
# def __init__(self, k=1):
# def extract(self, x, y, ys):
# def __larger_equal(self, x, y):
# def join_intervals(s, e):
# def index_intervals(x, ys, ye):
# def extract(self, x, ys, ye):
# def __init__(self, kmer_len, nb_char=4):
# def __call__(self, seqs):
#
# Path: deepcpg/data/dna.py
# CHAR_TO_INT = OrderedDict([('A', 0), ('T', 1), ('G', 2), ('C', 3), ('N', 4)])
# INT_TO_CHAR = {v: k for k, v in CHAR_TO_INT.items()}
# def get_alphabet(special=False, reverse=False):
# def char_to_int(seq):
# def int_to_char(seq, join=True):
# def int_to_onehot(seqs, dim=4):
# def onehot_to_int(seqs, axis=-1):
. Output only the next line. | e = fe.KnnCpgFeatureExtractor() |
Here is a snippet: <|code_start|> expect = []
result = f(x, ys, ye)
npt.assert_array_equal(result, expect)
x = [-1, 3, 9, 19]
expect = [-1, -1, -1, -1]
result = f(x, ys, ye)
npt.assert_array_equal(result, expect)
x = [-1, 2, 2, 3, 4, 8, 15, 16]
expect = [-1, 0, 0, -1, 1, 1, 2, -1]
result = f(x, ys, ye)
npt.assert_array_equal(result, expect)
def test_extract(self):
ys = [2, 4, 12, 17]
ye = [2, 8, 15, 18]
e = fe.IntervalFeatureExtractor()
x = [-1, 2, 2, 3, 4, 8, 15, 16]
expect = [False, True, True, False, True, True, True, False]
result = e.extract(x, ys, ye)
npt.assert_array_equal(result, expect)
class TestKmersFeatureExtractor(object):
def _translate_seqs(self, seqs):
if not isinstance(seqs, list):
seqs = [seqs]
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import numpy.testing as npt
import six
from deepcpg.data import feature_extractor as fe
from deepcpg.data import dna
and context from other files:
# Path: deepcpg/data/feature_extractor.py
# class KnnCpgFeatureExtractor(object):
# class IntervalFeatureExtractor(object):
# class KmersFeatureExtractor(object):
# def __init__(self, k=1):
# def extract(self, x, y, ys):
# def __larger_equal(self, x, y):
# def join_intervals(s, e):
# def index_intervals(x, ys, ye):
# def extract(self, x, ys, ye):
# def __init__(self, kmer_len, nb_char=4):
# def __call__(self, seqs):
#
# Path: deepcpg/data/dna.py
# CHAR_TO_INT = OrderedDict([('A', 0), ('T', 1), ('G', 2), ('C', 3), ('N', 4)])
# INT_TO_CHAR = {v: k for k, v in CHAR_TO_INT.items()}
# def get_alphabet(special=False, reverse=False):
# def char_to_int(seq):
# def int_to_char(seq, join=True):
# def int_to_onehot(seqs, dim=4):
# def onehot_to_int(seqs, axis=-1):
, which may include functions, classes, or code. Output only the next line. | _seqs = np.array([dna.char_to_int(seq) for seq in seqs], dtype=np.int32) |
Given the code snippet: <|code_start|> report = pd.pivot_table(report, index=index, columns='metric',
values='value')
report.reset_index(index, inplace=True)
report.columns.name = None
# Sort columns
columns = list(report.columns)
sorted_columns = []
for fun in CAT_METRICS + CLA_METRICS + REG_METRICS:
for i, column in enumerate(columns):
if column.startswith(fun.__name__):
sorted_columns.append(column)
sorted_columns = index + sorted_columns
sorted_columns += [col for col in columns if col not in sorted_columns]
report = report[sorted_columns]
order = []
if 'auc' in report.columns:
order.append(('auc', False))
elif 'mse' in report.columns:
order.append(('mse', True))
elif 'acc' in report.columns:
order.append(('acc', False))
report.sort_values([x[0] for x in order],
ascending=[x[1] for x in order],
inplace=True)
return report
def get(name):
"""Return object from module by its name."""
<|code_end|>
, generate the next line using the imports in this file:
from collections import OrderedDict
from scipy.stats import kendalltau
from six.moves import range
from .data import CPG_NAN, OUTPUT_SEP
from .utils import get_from_module
import numpy as np
import pandas as pd
import sklearn.metrics as skm
and context (functions, classes, or occasionally code) from other files:
# Path: deepcpg/utils.py
# def get_from_module(identifier, module_params, ignore_case=True):
# """Return object from module.
#
# Return object with name `identifier` from module with items `module_params`.
#
# Parameters
# ----------
# identifier: str
# Name of object, e.g. a function, in module.
# module_params: dict
# `dict` of items in module, e.g. `globals()`
# ignore_case: bool
# If `True`, ignore case of `identifier`.
#
# Returns
# -------
# object
# Object with name `identifier` in module, e.g. a function or class.
# """
# if ignore_case:
# _module_params = dict()
# for key, value in six.iteritems(module_params):
# _module_params[key.lower()] = value
# _identifier = identifier.lower()
# else:
# _module_params = module_params
# _identifier = identifier
# item = _module_params.get(_identifier)
# if not item:
# raise ValueError('Invalid identifier "%s"!' % identifier)
# return item
. Output only the next line. | return get_from_module(name, globals()) |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
<|code_end|>
. Use current file imports:
(from oauthlib.common import Request, log
from .base import BaseEndpoint, catch_errors_and_unavailability)
and context including class names, function names, or small code snippets from other files:
# Path: twitter-winner/oauthlib/oauth2/rfc6749/endpoints/base.py
# class BaseEndpoint(object):
# def __init__(self):
# self._available = True
# self._catch_errors = False
#
# @property
# def available(self):
# return self._available
#
# @available.setter
# def available(self, available):
# self._available = available
#
# @property
# def catch_errors(self):
# return self._catch_errors
#
# @catch_errors.setter
# def catch_errors(self, catch_errors):
# self._catch_errors = catch_errors
#
# def catch_errors_and_unavailability(f):
# @functools.wraps(f)
# def wrapper(endpoint, uri, *args, **kwargs):
# if not endpoint.available:
# e = TemporarilyUnavailableError()
# log.info('Endpoint unavailable, ignoring request %s.' % uri)
# return {}, e.json, 503
#
# if endpoint.catch_errors:
# try:
# return f(endpoint, uri, *args, **kwargs)
# except OAuth2Error:
# raise
# except FatalClientError:
# raise
# except Exception as e:
# error = ServerError()
# log.warning('Exception caught while processing request, %s.' % e)
# return {}, error.json, 500
# else:
# return f(endpoint, uri, *args, **kwargs)
# return wrapper
. Output only the next line. | class AuthorizationEndpoint(BaseEndpoint): |
Continue the code snippet: <|code_start|> MUST NOT be included more than once::
# Enforced through the design of oauthlib.common.Request
.. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
"""
def __init__(self, default_response_type, default_token_type,
response_types):
BaseEndpoint.__init__(self)
self._response_types = response_types
self._default_response_type = default_response_type
self._default_token_type = default_token_type
@property
def response_types(self):
return self._response_types
@property
def default_response_type(self):
return self._default_response_type
@property
def default_response_type_handler(self):
return self.response_types.get(self.default_response_type)
@property
def default_token_type(self):
return self._default_token_type
<|code_end|>
. Use current file imports:
from oauthlib.common import Request, log
from .base import BaseEndpoint, catch_errors_and_unavailability
and context (classes, functions, or code) from other files:
# Path: twitter-winner/oauthlib/oauth2/rfc6749/endpoints/base.py
# class BaseEndpoint(object):
# def __init__(self):
# self._available = True
# self._catch_errors = False
#
# @property
# def available(self):
# return self._available
#
# @available.setter
# def available(self, available):
# self._available = available
#
# @property
# def catch_errors(self):
# return self._catch_errors
#
# @catch_errors.setter
# def catch_errors(self, catch_errors):
# self._catch_errors = catch_errors
#
# def catch_errors_and_unavailability(f):
# @functools.wraps(f)
# def wrapper(endpoint, uri, *args, **kwargs):
# if not endpoint.available:
# e = TemporarilyUnavailableError()
# log.info('Endpoint unavailable, ignoring request %s.' % uri)
# return {}, e.json, 503
#
# if endpoint.catch_errors:
# try:
# return f(endpoint, uri, *args, **kwargs)
# except OAuth2Error:
# raise
# except FatalClientError:
# raise
# except Exception as e:
# error = ServerError()
# log.warning('Exception caught while processing request, %s.' % e)
# return {}, error.json, 500
# else:
# return f(endpoint, uri, *args, **kwargs)
# return wrapper
. Output only the next line. | @catch_errors_and_unavailability |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.endpoint.revocation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the OAuth 2 `Token Revocation`_ spec (draft 11).
.. _`Token Revocation`: http://tools.ietf.org/html/draft-ietf-oauth-revocation-11
"""
from __future__ import absolute_import, unicode_literals
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from oauthlib.common import Request, log
from .base import BaseEndpoint, catch_errors_and_unavailability
from ..errors import InvalidClientError, UnsupportedTokenTypeError
from ..errors import InvalidRequestError, OAuth2Error
and context:
# Path: twitter-winner/oauthlib/oauth2/rfc6749/endpoints/base.py
# class BaseEndpoint(object):
# def __init__(self):
# self._available = True
# self._catch_errors = False
#
# @property
# def available(self):
# return self._available
#
# @available.setter
# def available(self, available):
# self._available = available
#
# @property
# def catch_errors(self):
# return self._catch_errors
#
# @catch_errors.setter
# def catch_errors(self, catch_errors):
# self._catch_errors = catch_errors
#
# def catch_errors_and_unavailability(f):
# @functools.wraps(f)
# def wrapper(endpoint, uri, *args, **kwargs):
# if not endpoint.available:
# e = TemporarilyUnavailableError()
# log.info('Endpoint unavailable, ignoring request %s.' % uri)
# return {}, e.json, 503
#
# if endpoint.catch_errors:
# try:
# return f(endpoint, uri, *args, **kwargs)
# except OAuth2Error:
# raise
# except FatalClientError:
# raise
# except Exception as e:
# error = ServerError()
# log.warning('Exception caught while processing request, %s.' % e)
# return {}, error.json, 500
# else:
# return f(endpoint, uri, *args, **kwargs)
# return wrapper
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InvalidClientError(OAuth2Error):
# """Client authentication failed (e.g. unknown client, no client
# authentication included, or unsupported authentication method).
# The authorization server MAY return an HTTP 401 (Unauthorized) status
# code to indicate which HTTP authentication schemes are supported.
# If the client attempted to authenticate via the "Authorization" request
# header field, the authorization server MUST respond with an
# HTTP 401 (Unauthorized) status code, and include the "WWW-Authenticate"
# response header field matching the authentication scheme used by the
# client.
# """
# error = 'invalid_client'
# status_code = 401
#
# class UnsupportedTokenTypeError(OAuth2Error):
# """The authorization server does not support the revocation of the
# presented token type. I.e. the client tried to revoke an access token
# on a server not supporting this feature.
# """
# error = 'unsupported_token_type'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InvalidRequestError(OAuth2Error):
# """The request is missing a required parameter, includes an invalid
# parameter value, includes a parameter more than once, or is
# otherwise malformed.
# """
# error = 'invalid_request'
#
# class OAuth2Error(Exception):
# error = None
# status_code = 400
#
# def __init__(self, description=None, uri=None, state=None, status_code=None,
# request=None):
# """
# description: A human-readable ASCII [USASCII] text providing
# additional information, used to assist the client
# developer in understanding the error that occurred.
# Values for the "error_description" parameter MUST NOT
# include characters outside the set
# x20-21 / x23-5B / x5D-7E.
#
# uri: A URI identifying a human-readable web page with information
# about the error, used to provide the client developer with
# additional information about the error. Values for the
# "error_uri" parameter MUST conform to the URI- Reference
# syntax, and thus MUST NOT include characters outside the set
# x21 / x23-5B / x5D-7E.
#
# state: A CSRF protection value received from the client.
#
# request: Oauthlib Request object
# """
# self.description = description
# self.uri = uri
# self.state = state
#
# if status_code:
# self.status_code = status_code
#
# if request:
# self.redirect_uri = request.redirect_uri
# self.client_id = request.client_id
# self.scopes = request.scopes
# self.response_type = request.response_type
# self.grant_type = request.grant_type
#
# def in_uri(self, uri):
# return add_params_to_uri(uri, self.twotuples)
#
# @property
# def twotuples(self):
# error = [('error', self.error)]
# if self.description:
# error.append(('error_description', self.description))
# if self.uri:
# error.append(('error_uri', self.uri))
# if self.state:
# error.append(('state', self.state))
# return error
#
# @property
# def urlencoded(self):
# return urlencode(self.twotuples)
#
# @property
# def json(self):
# return json.dumps(dict(self.twotuples))
which might include code, classes, or functions. Output only the next line. | class RevocationEndpoint(BaseEndpoint): |
Using the snippet: <|code_start|>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the OAuth 2 `Token Revocation`_ spec (draft 11).
.. _`Token Revocation`: http://tools.ietf.org/html/draft-ietf-oauth-revocation-11
"""
from __future__ import absolute_import, unicode_literals
class RevocationEndpoint(BaseEndpoint):
"""Token revocation endpoint.
Endpoint used by authenticated clients to revoke access and refresh tokens.
Commonly this will be part of the Authorization Endpoint.
"""
valid_token_types = ('access_token', 'refresh_token')
@property
def supported_token_types(self):
return self._supported_token_types
def __init__(self, request_validator, supported_token_types=None):
BaseEndpoint.__init__(self)
self.request_validator = request_validator
self._supported_token_types = (
supported_token_types or self.valid_token_types)
<|code_end|>
, determine the next line of code. You have imports:
from oauthlib.common import Request, log
from .base import BaseEndpoint, catch_errors_and_unavailability
from ..errors import InvalidClientError, UnsupportedTokenTypeError
from ..errors import InvalidRequestError, OAuth2Error
and context (class names, function names, or code) available:
# Path: twitter-winner/oauthlib/oauth2/rfc6749/endpoints/base.py
# class BaseEndpoint(object):
# def __init__(self):
# self._available = True
# self._catch_errors = False
#
# @property
# def available(self):
# return self._available
#
# @available.setter
# def available(self, available):
# self._available = available
#
# @property
# def catch_errors(self):
# return self._catch_errors
#
# @catch_errors.setter
# def catch_errors(self, catch_errors):
# self._catch_errors = catch_errors
#
# def catch_errors_and_unavailability(f):
# @functools.wraps(f)
# def wrapper(endpoint, uri, *args, **kwargs):
# if not endpoint.available:
# e = TemporarilyUnavailableError()
# log.info('Endpoint unavailable, ignoring request %s.' % uri)
# return {}, e.json, 503
#
# if endpoint.catch_errors:
# try:
# return f(endpoint, uri, *args, **kwargs)
# except OAuth2Error:
# raise
# except FatalClientError:
# raise
# except Exception as e:
# error = ServerError()
# log.warning('Exception caught while processing request, %s.' % e)
# return {}, error.json, 500
# else:
# return f(endpoint, uri, *args, **kwargs)
# return wrapper
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InvalidClientError(OAuth2Error):
# """Client authentication failed (e.g. unknown client, no client
# authentication included, or unsupported authentication method).
# The authorization server MAY return an HTTP 401 (Unauthorized) status
# code to indicate which HTTP authentication schemes are supported.
# If the client attempted to authenticate via the "Authorization" request
# header field, the authorization server MUST respond with an
# HTTP 401 (Unauthorized) status code, and include the "WWW-Authenticate"
# response header field matching the authentication scheme used by the
# client.
# """
# error = 'invalid_client'
# status_code = 401
#
# class UnsupportedTokenTypeError(OAuth2Error):
# """The authorization server does not support the revocation of the
# presented token type. I.e. the client tried to revoke an access token
# on a server not supporting this feature.
# """
# error = 'unsupported_token_type'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InvalidRequestError(OAuth2Error):
# """The request is missing a required parameter, includes an invalid
# parameter value, includes a parameter more than once, or is
# otherwise malformed.
# """
# error = 'invalid_request'
#
# class OAuth2Error(Exception):
# error = None
# status_code = 400
#
# def __init__(self, description=None, uri=None, state=None, status_code=None,
# request=None):
# """
# description: A human-readable ASCII [USASCII] text providing
# additional information, used to assist the client
# developer in understanding the error that occurred.
# Values for the "error_description" parameter MUST NOT
# include characters outside the set
# x20-21 / x23-5B / x5D-7E.
#
# uri: A URI identifying a human-readable web page with information
# about the error, used to provide the client developer with
# additional information about the error. Values for the
# "error_uri" parameter MUST conform to the URI- Reference
# syntax, and thus MUST NOT include characters outside the set
# x21 / x23-5B / x5D-7E.
#
# state: A CSRF protection value received from the client.
#
# request: Oauthlib Request object
# """
# self.description = description
# self.uri = uri
# self.state = state
#
# if status_code:
# self.status_code = status_code
#
# if request:
# self.redirect_uri = request.redirect_uri
# self.client_id = request.client_id
# self.scopes = request.scopes
# self.response_type = request.response_type
# self.grant_type = request.grant_type
#
# def in_uri(self, uri):
# return add_params_to_uri(uri, self.twotuples)
#
# @property
# def twotuples(self):
# error = [('error', self.error)]
# if self.description:
# error.append(('error_description', self.description))
# if self.uri:
# error.append(('error_uri', self.uri))
# if self.state:
# error.append(('state', self.state))
# return error
#
# @property
# def urlencoded(self):
# return urlencode(self.twotuples)
#
# @property
# def json(self):
# return json.dumps(dict(self.twotuples))
. Output only the next line. | @catch_errors_and_unavailability |
Based on the snippet: <|code_start|> server is unable to locate the token using the given hint, it MUST
extend its search accross all of its supported token types. An
authorization server MAY ignore this parameter, particularly if it is
able to detect the token type automatically. This specification
defines two such values:
* access_token: An Access Token as defined in [RFC6749],
`section 1.4`_
* refresh_token: A Refresh Token as defined in [RFC6749],
`section 1.5`_
Specific implementations, profiles, and extensions of this
specification MAY define other values for this parameter using
the registry defined in `Section 4.1.2`_.
The client also includes its authentication credentials as described in
`Section 2.3`_. of [`RFC6749`_].
.. _`section 1.4`: http://tools.ietf.org/html/rfc6749#section-1.4
.. _`section 1.5`: http://tools.ietf.org/html/rfc6749#section-1.5
.. _`section 2.3`: http://tools.ietf.org/html/rfc6749#section-2.3
.. _`Section 4.1.2`: http://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2
.. _`RFC6749`: http://tools.ietf.org/html/rfc6749
"""
if not request.token:
raise InvalidRequestError(request=request,
description='Missing token parameter.')
if not self.request_validator.authenticate_client(request):
<|code_end|>
, predict the immediate next line with the help of imports:
from oauthlib.common import Request, log
from .base import BaseEndpoint, catch_errors_and_unavailability
from ..errors import InvalidClientError, UnsupportedTokenTypeError
from ..errors import InvalidRequestError, OAuth2Error
and context (classes, functions, sometimes code) from other files:
# Path: twitter-winner/oauthlib/oauth2/rfc6749/endpoints/base.py
# class BaseEndpoint(object):
# def __init__(self):
# self._available = True
# self._catch_errors = False
#
# @property
# def available(self):
# return self._available
#
# @available.setter
# def available(self, available):
# self._available = available
#
# @property
# def catch_errors(self):
# return self._catch_errors
#
# @catch_errors.setter
# def catch_errors(self, catch_errors):
# self._catch_errors = catch_errors
#
# def catch_errors_and_unavailability(f):
# @functools.wraps(f)
# def wrapper(endpoint, uri, *args, **kwargs):
# if not endpoint.available:
# e = TemporarilyUnavailableError()
# log.info('Endpoint unavailable, ignoring request %s.' % uri)
# return {}, e.json, 503
#
# if endpoint.catch_errors:
# try:
# return f(endpoint, uri, *args, **kwargs)
# except OAuth2Error:
# raise
# except FatalClientError:
# raise
# except Exception as e:
# error = ServerError()
# log.warning('Exception caught while processing request, %s.' % e)
# return {}, error.json, 500
# else:
# return f(endpoint, uri, *args, **kwargs)
# return wrapper
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InvalidClientError(OAuth2Error):
# """Client authentication failed (e.g. unknown client, no client
# authentication included, or unsupported authentication method).
# The authorization server MAY return an HTTP 401 (Unauthorized) status
# code to indicate which HTTP authentication schemes are supported.
# If the client attempted to authenticate via the "Authorization" request
# header field, the authorization server MUST respond with an
# HTTP 401 (Unauthorized) status code, and include the "WWW-Authenticate"
# response header field matching the authentication scheme used by the
# client.
# """
# error = 'invalid_client'
# status_code = 401
#
# class UnsupportedTokenTypeError(OAuth2Error):
# """The authorization server does not support the revocation of the
# presented token type. I.e. the client tried to revoke an access token
# on a server not supporting this feature.
# """
# error = 'unsupported_token_type'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InvalidRequestError(OAuth2Error):
# """The request is missing a required parameter, includes an invalid
# parameter value, includes a parameter more than once, or is
# otherwise malformed.
# """
# error = 'invalid_request'
#
# class OAuth2Error(Exception):
# error = None
# status_code = 400
#
# def __init__(self, description=None, uri=None, state=None, status_code=None,
# request=None):
# """
# description: A human-readable ASCII [USASCII] text providing
# additional information, used to assist the client
# developer in understanding the error that occurred.
# Values for the "error_description" parameter MUST NOT
# include characters outside the set
# x20-21 / x23-5B / x5D-7E.
#
# uri: A URI identifying a human-readable web page with information
# about the error, used to provide the client developer with
# additional information about the error. Values for the
# "error_uri" parameter MUST conform to the URI- Reference
# syntax, and thus MUST NOT include characters outside the set
# x21 / x23-5B / x5D-7E.
#
# state: A CSRF protection value received from the client.
#
# request: Oauthlib Request object
# """
# self.description = description
# self.uri = uri
# self.state = state
#
# if status_code:
# self.status_code = status_code
#
# if request:
# self.redirect_uri = request.redirect_uri
# self.client_id = request.client_id
# self.scopes = request.scopes
# self.response_type = request.response_type
# self.grant_type = request.grant_type
#
# def in_uri(self, uri):
# return add_params_to_uri(uri, self.twotuples)
#
# @property
# def twotuples(self):
# error = [('error', self.error)]
# if self.description:
# error.append(('error_description', self.description))
# if self.uri:
# error.append(('error_uri', self.uri))
# if self.state:
# error.append(('state', self.state))
# return error
#
# @property
# def urlencoded(self):
# return urlencode(self.twotuples)
#
# @property
# def json(self):
# return json.dumps(dict(self.twotuples))
. Output only the next line. | raise InvalidClientError(request=request) |
Given snippet: <|code_start|> defines two such values:
* access_token: An Access Token as defined in [RFC6749],
`section 1.4`_
* refresh_token: A Refresh Token as defined in [RFC6749],
`section 1.5`_
Specific implementations, profiles, and extensions of this
specification MAY define other values for this parameter using
the registry defined in `Section 4.1.2`_.
The client also includes its authentication credentials as described in
`Section 2.3`_. of [`RFC6749`_].
.. _`section 1.4`: http://tools.ietf.org/html/rfc6749#section-1.4
.. _`section 1.5`: http://tools.ietf.org/html/rfc6749#section-1.5
.. _`section 2.3`: http://tools.ietf.org/html/rfc6749#section-2.3
.. _`Section 4.1.2`: http://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2
.. _`RFC6749`: http://tools.ietf.org/html/rfc6749
"""
if not request.token:
raise InvalidRequestError(request=request,
description='Missing token parameter.')
if not self.request_validator.authenticate_client(request):
raise InvalidClientError(request=request)
if (request.token_type_hint in self.valid_token_types and
request.token_type_hint not in self.supported_token_types):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from oauthlib.common import Request, log
from .base import BaseEndpoint, catch_errors_and_unavailability
from ..errors import InvalidClientError, UnsupportedTokenTypeError
from ..errors import InvalidRequestError, OAuth2Error
and context:
# Path: twitter-winner/oauthlib/oauth2/rfc6749/endpoints/base.py
# class BaseEndpoint(object):
# def __init__(self):
# self._available = True
# self._catch_errors = False
#
# @property
# def available(self):
# return self._available
#
# @available.setter
# def available(self, available):
# self._available = available
#
# @property
# def catch_errors(self):
# return self._catch_errors
#
# @catch_errors.setter
# def catch_errors(self, catch_errors):
# self._catch_errors = catch_errors
#
# def catch_errors_and_unavailability(f):
# @functools.wraps(f)
# def wrapper(endpoint, uri, *args, **kwargs):
# if not endpoint.available:
# e = TemporarilyUnavailableError()
# log.info('Endpoint unavailable, ignoring request %s.' % uri)
# return {}, e.json, 503
#
# if endpoint.catch_errors:
# try:
# return f(endpoint, uri, *args, **kwargs)
# except OAuth2Error:
# raise
# except FatalClientError:
# raise
# except Exception as e:
# error = ServerError()
# log.warning('Exception caught while processing request, %s.' % e)
# return {}, error.json, 500
# else:
# return f(endpoint, uri, *args, **kwargs)
# return wrapper
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InvalidClientError(OAuth2Error):
# """Client authentication failed (e.g. unknown client, no client
# authentication included, or unsupported authentication method).
# The authorization server MAY return an HTTP 401 (Unauthorized) status
# code to indicate which HTTP authentication schemes are supported.
# If the client attempted to authenticate via the "Authorization" request
# header field, the authorization server MUST respond with an
# HTTP 401 (Unauthorized) status code, and include the "WWW-Authenticate"
# response header field matching the authentication scheme used by the
# client.
# """
# error = 'invalid_client'
# status_code = 401
#
# class UnsupportedTokenTypeError(OAuth2Error):
# """The authorization server does not support the revocation of the
# presented token type. I.e. the client tried to revoke an access token
# on a server not supporting this feature.
# """
# error = 'unsupported_token_type'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InvalidRequestError(OAuth2Error):
# """The request is missing a required parameter, includes an invalid
# parameter value, includes a parameter more than once, or is
# otherwise malformed.
# """
# error = 'invalid_request'
#
# class OAuth2Error(Exception):
# error = None
# status_code = 400
#
# def __init__(self, description=None, uri=None, state=None, status_code=None,
# request=None):
# """
# description: A human-readable ASCII [USASCII] text providing
# additional information, used to assist the client
# developer in understanding the error that occurred.
# Values for the "error_description" parameter MUST NOT
# include characters outside the set
# x20-21 / x23-5B / x5D-7E.
#
# uri: A URI identifying a human-readable web page with information
# about the error, used to provide the client developer with
# additional information about the error. Values for the
# "error_uri" parameter MUST conform to the URI- Reference
# syntax, and thus MUST NOT include characters outside the set
# x21 / x23-5B / x5D-7E.
#
# state: A CSRF protection value received from the client.
#
# request: Oauthlib Request object
# """
# self.description = description
# self.uri = uri
# self.state = state
#
# if status_code:
# self.status_code = status_code
#
# if request:
# self.redirect_uri = request.redirect_uri
# self.client_id = request.client_id
# self.scopes = request.scopes
# self.response_type = request.response_type
# self.grant_type = request.grant_type
#
# def in_uri(self, uri):
# return add_params_to_uri(uri, self.twotuples)
#
# @property
# def twotuples(self):
# error = [('error', self.error)]
# if self.description:
# error.append(('error_description', self.description))
# if self.uri:
# error.append(('error_uri', self.uri))
# if self.state:
# error.append(('state', self.state))
# return error
#
# @property
# def urlencoded(self):
# return urlencode(self.twotuples)
#
# @property
# def json(self):
# return json.dumps(dict(self.twotuples))
which might include code, classes, or functions. Output only the next line. | raise UnsupportedTokenTypeError(request=request) |
Based on the snippet: <|code_start|> BaseEndpoint.__init__(self)
self.request_validator = request_validator
self._supported_token_types = (
supported_token_types or self.valid_token_types)
@catch_errors_and_unavailability
def create_revocation_response(self, uri, http_method='POST', body=None,
headers=None):
"""Revoke supplied access or refresh token.
The authorization server responds with HTTP status code 200 if the
token has been revoked sucessfully or if the client submitted an
invalid token.
Note: invalid tokens do not cause an error response since the client
cannot handle such an error in a reasonable way. Moreover, the purpose
of the revocation request, invalidating the particular token, is
already achieved.
The content of the response body is ignored by the client as all
necessary information is conveyed in the response code.
An invalid token type hint value is ignored by the authorization server
and does not influence the revocation response.
"""
request = Request(uri, http_method=http_method, body=body, headers=headers)
try:
self.validate_revocation_request(request)
log.debug('Token revocation valid for %r.', request)
<|code_end|>
, predict the immediate next line with the help of imports:
from oauthlib.common import Request, log
from .base import BaseEndpoint, catch_errors_and_unavailability
from ..errors import InvalidClientError, UnsupportedTokenTypeError
from ..errors import InvalidRequestError, OAuth2Error
and context (classes, functions, sometimes code) from other files:
# Path: twitter-winner/oauthlib/oauth2/rfc6749/endpoints/base.py
# class BaseEndpoint(object):
# def __init__(self):
# self._available = True
# self._catch_errors = False
#
# @property
# def available(self):
# return self._available
#
# @available.setter
# def available(self, available):
# self._available = available
#
# @property
# def catch_errors(self):
# return self._catch_errors
#
# @catch_errors.setter
# def catch_errors(self, catch_errors):
# self._catch_errors = catch_errors
#
# def catch_errors_and_unavailability(f):
# @functools.wraps(f)
# def wrapper(endpoint, uri, *args, **kwargs):
# if not endpoint.available:
# e = TemporarilyUnavailableError()
# log.info('Endpoint unavailable, ignoring request %s.' % uri)
# return {}, e.json, 503
#
# if endpoint.catch_errors:
# try:
# return f(endpoint, uri, *args, **kwargs)
# except OAuth2Error:
# raise
# except FatalClientError:
# raise
# except Exception as e:
# error = ServerError()
# log.warning('Exception caught while processing request, %s.' % e)
# return {}, error.json, 500
# else:
# return f(endpoint, uri, *args, **kwargs)
# return wrapper
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InvalidClientError(OAuth2Error):
# """Client authentication failed (e.g. unknown client, no client
# authentication included, or unsupported authentication method).
# The authorization server MAY return an HTTP 401 (Unauthorized) status
# code to indicate which HTTP authentication schemes are supported.
# If the client attempted to authenticate via the "Authorization" request
# header field, the authorization server MUST respond with an
# HTTP 401 (Unauthorized) status code, and include the "WWW-Authenticate"
# response header field matching the authentication scheme used by the
# client.
# """
# error = 'invalid_client'
# status_code = 401
#
# class UnsupportedTokenTypeError(OAuth2Error):
# """The authorization server does not support the revocation of the
# presented token type. I.e. the client tried to revoke an access token
# on a server not supporting this feature.
# """
# error = 'unsupported_token_type'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InvalidRequestError(OAuth2Error):
# """The request is missing a required parameter, includes an invalid
# parameter value, includes a parameter more than once, or is
# otherwise malformed.
# """
# error = 'invalid_request'
#
# class OAuth2Error(Exception):
# error = None
# status_code = 400
#
# def __init__(self, description=None, uri=None, state=None, status_code=None,
# request=None):
# """
# description: A human-readable ASCII [USASCII] text providing
# additional information, used to assist the client
# developer in understanding the error that occurred.
# Values for the "error_description" parameter MUST NOT
# include characters outside the set
# x20-21 / x23-5B / x5D-7E.
#
# uri: A URI identifying a human-readable web page with information
# about the error, used to provide the client developer with
# additional information about the error. Values for the
# "error_uri" parameter MUST conform to the URI- Reference
# syntax, and thus MUST NOT include characters outside the set
# x21 / x23-5B / x5D-7E.
#
# state: A CSRF protection value received from the client.
#
# request: Oauthlib Request object
# """
# self.description = description
# self.uri = uri
# self.state = state
#
# if status_code:
# self.status_code = status_code
#
# if request:
# self.redirect_uri = request.redirect_uri
# self.client_id = request.client_id
# self.scopes = request.scopes
# self.response_type = request.response_type
# self.grant_type = request.grant_type
#
# def in_uri(self, uri):
# return add_params_to_uri(uri, self.twotuples)
#
# @property
# def twotuples(self):
# error = [('error', self.error)]
# if self.description:
# error.append(('error_description', self.description))
# if self.uri:
# error.append(('error_uri', self.uri))
# if self.state:
# error.append(('state', self.state))
# return error
#
# @property
# def urlencoded(self):
# return urlencode(self.twotuples)
#
# @property
# def json(self):
# return json.dumps(dict(self.twotuples))
. Output only the next line. | except OAuth2Error as e: |
Given the code snippet: <|code_start|> Pragma: no-cache
{
"access_token":"2YotnFZFEjr1zCsicMWpAA",
"token_type":"example",
"expires_in":3600,
"refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter":"example_value"
}
.. _`Section 7.1`: http://tools.ietf.org/html/rfc6749#section-7.1
.. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`RFC4627`: http://tools.ietf.org/html/rfc4627
"""
params = json.loads(body)
if 'scope' in params:
params['scope'] = scope_to_list(params['scope'])
if 'expires_in' in params:
params['expires_at'] = time.time() + int(params['expires_in'])
validate_token_parameters(params, scope)
return params
def validate_token_parameters(params, scope=None):
"""Ensures token precence, token type, expiration and scope in params."""
if 'error' in params:
<|code_end|>
, generate the next line using the imports in this file:
import json
import time
import urlparse
import urllib.parse as urlparse
from oauthlib.common import add_params_to_uri, add_params_to_qs, unicode_type
from .errors import raise_from_error, MissingTokenError, MissingTokenTypeError
from .errors import MismatchingStateError, MissingCodeError
from .errors import InsecureTransportError
from .utils import list_to_scope, scope_to_list, is_secure_transport
and context (functions, classes, or occasionally code) from other files:
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# def raise_from_error(error, params=None):
# import inspect
# import sys
# kwargs = {
# 'description': params.get('error_description'),
# 'uri': params.get('error_uri'),
# 'state': params.get('state')
# }
# for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
# if cls.error == error:
# raise cls(**kwargs)
#
# class MissingTokenError(OAuth2Error):
# error = 'missing_token'
#
# class MissingTokenTypeError(OAuth2Error):
# error = 'missing_token_type'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class MismatchingStateError(OAuth2Error):
# error = 'mismatching_state'
# description = 'CSRF Warning! State not equal in request and response.'
#
# class MissingCodeError(OAuth2Error):
# error = 'missing_code'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InsecureTransportError(OAuth2Error):
# error = 'insecure_transport'
# description = 'OAuth 2 MUST utilize https.'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/utils.py
# def list_to_scope(scope):
# """Convert a list of scopes to a space separated string."""
# if isinstance(scope, unicode_type) or scope is None:
# return scope
# elif isinstance(scope, list):
# return " ".join([unicode_type(s) for s in scope])
# else:
# raise ValueError("Invalid scope, must be string or list.")
#
# def scope_to_list(scope):
# """Convert a space separated string to a list of scopes."""
# if isinstance(scope, list):
# return [unicode_type(s) for s in scope]
# elif scope is None:
# return None
# else:
# return scope.split(" ")
#
# def is_secure_transport(uri):
# """Check if the uri is over ssl."""
# if os.environ.get('OAUTHLIB_INSECURE_TRANSPORT'):
# return True
# return uri.lower().startswith('https://')
. Output only the next line. | raise_from_error(params.get('error'), params) |
Based on the snippet: <|code_start|> "access_token":"2YotnFZFEjr1zCsicMWpAA",
"token_type":"example",
"expires_in":3600,
"refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter":"example_value"
}
.. _`Section 7.1`: http://tools.ietf.org/html/rfc6749#section-7.1
.. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`RFC4627`: http://tools.ietf.org/html/rfc4627
"""
params = json.loads(body)
if 'scope' in params:
params['scope'] = scope_to_list(params['scope'])
if 'expires_in' in params:
params['expires_at'] = time.time() + int(params['expires_in'])
validate_token_parameters(params, scope)
return params
def validate_token_parameters(params, scope=None):
"""Ensures token precence, token type, expiration and scope in params."""
if 'error' in params:
raise_from_error(params.get('error'), params)
if not 'access_token' in params:
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import time
import urlparse
import urllib.parse as urlparse
from oauthlib.common import add_params_to_uri, add_params_to_qs, unicode_type
from .errors import raise_from_error, MissingTokenError, MissingTokenTypeError
from .errors import MismatchingStateError, MissingCodeError
from .errors import InsecureTransportError
from .utils import list_to_scope, scope_to_list, is_secure_transport
and context (classes, functions, sometimes code) from other files:
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# def raise_from_error(error, params=None):
# import inspect
# import sys
# kwargs = {
# 'description': params.get('error_description'),
# 'uri': params.get('error_uri'),
# 'state': params.get('state')
# }
# for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
# if cls.error == error:
# raise cls(**kwargs)
#
# class MissingTokenError(OAuth2Error):
# error = 'missing_token'
#
# class MissingTokenTypeError(OAuth2Error):
# error = 'missing_token_type'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class MismatchingStateError(OAuth2Error):
# error = 'mismatching_state'
# description = 'CSRF Warning! State not equal in request and response.'
#
# class MissingCodeError(OAuth2Error):
# error = 'missing_code'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InsecureTransportError(OAuth2Error):
# error = 'insecure_transport'
# description = 'OAuth 2 MUST utilize https.'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/utils.py
# def list_to_scope(scope):
# """Convert a list of scopes to a space separated string."""
# if isinstance(scope, unicode_type) or scope is None:
# return scope
# elif isinstance(scope, list):
# return " ".join([unicode_type(s) for s in scope])
# else:
# raise ValueError("Invalid scope, must be string or list.")
#
# def scope_to_list(scope):
# """Convert a space separated string to a list of scopes."""
# if isinstance(scope, list):
# return [unicode_type(s) for s in scope]
# elif scope is None:
# return None
# else:
# return scope.split(" ")
#
# def is_secure_transport(uri):
# """Check if the uri is over ssl."""
# if os.environ.get('OAUTHLIB_INSECURE_TRANSPORT'):
# return True
# return uri.lower().startswith('https://')
. Output only the next line. | raise MissingTokenError(description="Missing access token parameter.") |
Continue the code snippet: <|code_start|> "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter":"example_value"
}
.. _`Section 7.1`: http://tools.ietf.org/html/rfc6749#section-7.1
.. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`RFC4627`: http://tools.ietf.org/html/rfc4627
"""
params = json.loads(body)
if 'scope' in params:
params['scope'] = scope_to_list(params['scope'])
if 'expires_in' in params:
params['expires_at'] = time.time() + int(params['expires_in'])
validate_token_parameters(params, scope)
return params
def validate_token_parameters(params, scope=None):
"""Ensures token precence, token type, expiration and scope in params."""
if 'error' in params:
raise_from_error(params.get('error'), params)
if not 'access_token' in params:
raise MissingTokenError(description="Missing access token parameter.")
if not 'token_type' in params:
<|code_end|>
. Use current file imports:
import json
import time
import urlparse
import urllib.parse as urlparse
from oauthlib.common import add_params_to_uri, add_params_to_qs, unicode_type
from .errors import raise_from_error, MissingTokenError, MissingTokenTypeError
from .errors import MismatchingStateError, MissingCodeError
from .errors import InsecureTransportError
from .utils import list_to_scope, scope_to_list, is_secure_transport
and context (classes, functions, or code) from other files:
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# def raise_from_error(error, params=None):
# import inspect
# import sys
# kwargs = {
# 'description': params.get('error_description'),
# 'uri': params.get('error_uri'),
# 'state': params.get('state')
# }
# for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
# if cls.error == error:
# raise cls(**kwargs)
#
# class MissingTokenError(OAuth2Error):
# error = 'missing_token'
#
# class MissingTokenTypeError(OAuth2Error):
# error = 'missing_token_type'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class MismatchingStateError(OAuth2Error):
# error = 'mismatching_state'
# description = 'CSRF Warning! State not equal in request and response.'
#
# class MissingCodeError(OAuth2Error):
# error = 'missing_code'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/errors.py
# class InsecureTransportError(OAuth2Error):
# error = 'insecure_transport'
# description = 'OAuth 2 MUST utilize https.'
#
# Path: twitter-winner/oauthlib/oauth2/rfc6749/utils.py
# def list_to_scope(scope):
# """Convert a list of scopes to a space separated string."""
# if isinstance(scope, unicode_type) or scope is None:
# return scope
# elif isinstance(scope, list):
# return " ".join([unicode_type(s) for s in scope])
# else:
# raise ValueError("Invalid scope, must be string or list.")
#
# def scope_to_list(scope):
# """Convert a space separated string to a list of scopes."""
# if isinstance(scope, list):
# return [unicode_type(s) for s in scope]
# elif scope is None:
# return None
# else:
# return scope.split(" ")
#
# def is_secure_transport(uri):
# """Check if the uri is over ssl."""
# if os.environ.get('OAUTHLIB_INSECURE_TRANSPORT'):
# return True
# return uri.lower().startswith('https://')
. Output only the next line. | raise MissingTokenTypeError() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.