Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given the code snippet: <|code_start|># pylint: disable=g-direct-tensorflow-import
# pylint: enable=g-direct-tensorflow-import
__all__ = ['annotate_asset', 'make_and_track_object']
_ASSET_KEY_COLLECTION = 'tft_asset_key_collection'
_ASSET_FILENAME_COLLECTION = 'tft_asset_filename_collection'
# Thread-Hostile
_OBJECT_TRACKER = None
VOCABULARY_SIZE_BY_NAME_COLLECTION = 'tft_vocabulary_size_by_name_collection'
class ObjectTracker:
"""A class that tracks a list of trackable objects."""
__slots__ = ['_trackable_objects']
def __init__(self):
self._trackable_objects = []
@property
def trackable_objects(self) -> List[base.Trackable]:
return self._trackable_objects
def add_trackable_object(self, trackable_object: base.Trackable,
name: Optional[str]):
"""Add `trackable_object` to list of objects tracked."""
if name is None:
self._trackable_objects.append(trackable_object)
else:
<|code_end|>
, generate the next line using the imports in this file:
import contextlib
import os
import tensorflow as tf
from typing import Callable, List, Optional
from tensorflow_transform.graph_context import TFGraphContext
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.training.tracking import base
and context (functions, classes, or occasionally code) from other files:
# Path: tensorflow_transform/graph_context.py
# class TFGraphContext:
# """A context manager to pass global state to a TF graph when it is traced.
#
# All the attributes in this context are kept on a thread local state.
#
# Attributes:
# module_to_export: A tf.Module object that can be exported to a SavedModel
# and will be used to track objects created within this TF graph.
# temp_dir: The base path of the directory to write out any temporary files
# in this context block. If None, the TF graph in this context will be
# traced with placeholders for asset filepaths and is not serializable to a
# SavedModel.
# evaluated_replacements: A subset of placeholders/temporary asset files in
# `analyzer_nodes.TENSOR_REPLACEMENTS` that have been evaluated in
# previous TFT phases.
#
# Note that the temp dir should be accessible to worker jobs, e.g. if running
# with the Cloud Dataflow runner, the temp dir should be on GCS and should have
# permissions that allow both launcher and workers to access it.
# """
#
# class _State(
# tfx_namedtuple.namedtuple('_State', [
# 'module_to_export',
# 'temp_dir',
# 'evaluated_replacements',
# ])):
# """A named tuple storing state passed to this context manager."""
#
# @classmethod
# def make_empty(cls):
# """Return `_State` object with all fields set to `None`."""
# return cls(*(None,) * len(cls._fields))
#
# _TEMP_SUBDIR = 'analyzer_temporary_assets'
#
# _thread_local = threading.local()
#
# def __init__(self,
# module_to_export: tf.Module,
# temp_dir: Optional[str] = None,
# evaluated_replacements: Optional[Dict[str, Any]] = None):
# self._module_to_export = module_to_export
# self._temp_dir = temp_dir
# self._evaluated_replacements = evaluated_replacements
#
# def __enter__(self):
# assert getattr(self._thread_local, 'current_state', None) is None
# self._thread_local.current_state = self._State(
# module_to_export=self._module_to_export,
# temp_dir=self._temp_dir,
# evaluated_replacements=self._evaluated_replacements)
#
# def __exit__(self, *exn_info):
# self._thread_local.current_state = None
#
# @property
# def module_to_export(self):
# return self._module_to_export
#
# @classmethod
# def _get_current_state(cls) -> 'TFGraphContext._State':
# if hasattr(cls._thread_local, 'current_state'):
# return cls._thread_local.current_state
# return cls._State.make_empty()
#
# @classmethod
# def get_or_create_temp_dir(cls) -> Optional[str]:
# """Generate a temporary location."""
# current_state = cls._get_current_state()
# if current_state.temp_dir is None:
# return None
# if not current_state.temp_dir:
# raise ValueError('A temp dir was requested, but empty temp_dir was set. '
# 'Use the TFGraphContext context manager.')
# result = os.path.join(current_state.temp_dir, cls._TEMP_SUBDIR)
# tf.io.gfile.makedirs(result)
# return result
#
# @classmethod
# def get_evaluated_replacements(cls) -> Optional[Dict[str, Any]]:
# """Retrieves the value of evaluated_replacements if set.
#
# None otherwise.
#
# Returns:
# A dictionary from graph tensor names to evaluated values for these
# tensors. The keys are a subset of placeholders/temporary asset files in
# `analyzer_nodes.TENSOR_REPLACEMENTS` that have been evaluated in
# previous TFT phases.
# """
# return cls._get_current_state().evaluated_replacements
#
# @classmethod
# def get_module_to_export(cls) -> Optional[tf.Module]:
# """Retrieves the value of module_to_export.
#
# None if called outside a TFGraphContext scope.
#
# Returns:
# A tf.Module object
# """
# return cls._get_current_state().module_to_export
. Output only the next line. | module = TFGraphContext.get_module_to_export() |
Using the snippet: <|code_start|> ... x_int = tft.compute_and_apply_vocabulary(
... inputs['x'], vocab_filename='my_vocab',
... num_oov_buckets=num_oov_buckets)
... depth = (
... tft.experimental.get_vocabulary_size_by_name('my_vocab') +
... num_oov_buckets)
... x_encoded = tf.one_hot(
... x_int, depth=tf.cast(depth, tf.int32), dtype=tf.int64)
... return {'x_encoded': x_encoded}
>>> raw_data = [dict(x='foo'), dict(x='foo'), dict(x='bar')]
>>> feature_spec = dict(x=tf.io.FixedLenFeature([], tf.string))
>>> raw_data_metadata = tft.DatasetMetadata.from_feature_spec(feature_spec)
>>> with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
... transformed_dataset, transform_fn = (
... (raw_data, raw_data_metadata)
... | tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))
>>> transformed_data, transformed_metadata = transformed_dataset
>>> transformed_data
[{'x_encoded': array([1, 0, 0])}, {'x_encoded': array([1, 0, 0])},
{'x_encoded': array([0, 1, 0])}]
Returns:
An integer tensor containing the size of the requested vocabulary.
Raises:
ValueError: if no vocabulary size found for the given `vocab_filename`.
"""
# pyformat: enable
vocabulary_sizes_coll = ops.get_default_graph().get_collection(
<|code_end|>
, determine the next line of code. You have imports:
import tensorflow as tf
from tensorflow_transform import annotators
from tensorflow.python.framework import ops # pylint: disable=g-direct-tensorflow-import
and context (class names, function names, or code) available:
# Path: tensorflow_transform/annotators.py
# _ASSET_KEY_COLLECTION = 'tft_asset_key_collection'
# _ASSET_FILENAME_COLLECTION = 'tft_asset_filename_collection'
# _OBJECT_TRACKER = None
# VOCABULARY_SIZE_BY_NAME_COLLECTION = 'tft_vocabulary_size_by_name_collection'
# _OBJECT_TRACKER = object_tracker
# _OBJECT_TRACKER = None
# class ObjectTracker:
# def __init__(self):
# def trackable_objects(self) -> List[base.Trackable]:
# def add_trackable_object(self, trackable_object: base.Trackable,
# name: Optional[str]):
# def object_tracker_scope(object_tracker: ObjectTracker):
# def _get_object(name: str) -> Optional[base.Trackable]:
# def track_object(trackable: base.Trackable, name: Optional[str]):
# def make_and_track_object(trackable_factory_callable: Callable[[],
# base.Trackable],
# name: Optional[str] = None) -> base.Trackable:
# def get_asset_annotations(graph: tf.Graph):
# def clear_asset_annotations(graph: tf.Graph):
# def annotate_asset(asset_key: str, asset_filename: str):
# def annotate_vocab_size(vocab_filename: str, vocab_size: tf.Tensor):
. Output only the next line. | annotators.VOCABULARY_SIZE_BY_NAME_COLLECTION) |
Predict the next line after this snippet: <|code_start|>
class Command(NoArgsCommand):
help = "reset all keys, triggering a request for all keys from the hub"
def handle_noargs(self, **options):
print 'resetting all keys for %s' % settings.LOCKSMITH_API_NAME
endpoint = urljoin(settings.LOCKSMITH_HUB_URL, 'reset_keys/')
<|code_end|>
using the current file's imports:
import datetime
from urlparse import urljoin
from django.core.management.base import NoArgsCommand, CommandError
from django.conf import settings
from locksmith.common import apicall
and any relevant context from other files:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
. Output only the next line. | apicall(endpoint, settings.LOCKSMITH_SIGNING_KEY, |
Given the code snippet: <|code_start|>
# loop over the rows
for row in file:
match = log_re.match(row)
if match:
record = match.groupdict()
day = datetime.datetime.strptime(record['date'], log_date_format).date()
if day == log_date and record['status'] == '200' and record['apikey'] and record['apikey'] != '-':
# normalize the endpoint
endpoint = log_custom_transform(record['endpoint']) if log_custom_transform else record['endpoint']
# add it to the tally
if record['apikey'] not in totals:
totals[record['apikey']] = {}
if endpoint not in totals[record['apikey']]:
totals[record['apikey']][endpoint] = 1
else:
totals[record['apikey']][endpoint] += 1
elif day < log_date:
# this is the last log we need to parse
last_loop = True
if last_loop:
break
# submit totals to hub
submit_date = log_date.strftime('%Y-%m-%d')
total_submitted = 0
for api_key in totals:
for endpoint in totals[api_key]:
<|code_end|>
, generate the next line using the imports in this file:
import re, os, datetime, gzip
from locksmith.common import apicall
and context (functions, classes, or occasionally code) from other files:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
. Output only the next line. | apicall( |
Predict the next line for this snippet: <|code_start|>
class Command(NoArgsCommand):
help = 'push keys that are marked as dirty to the hub'
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity', 1))
endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
actions = {UNPUBLISHED: 0, NEEDS_UPDATE: 0}
failures = []
# get all non-published keys belonging to APIs with push_enabled
dirty = KeyPublicationStatus.objects.exclude(status=PUBLISHED).filter(
api__push_enabled=True).select_related()
for kps in dirty:
endpoint = urljoin(kps.api.url, endpoints[kps.status])
try:
<|code_end|>
with the help of current file imports:
import datetime
import urllib2
from urlparse import urljoin
from django.core.management.base import NoArgsCommand
from django.core.mail import mail_managers
from django.conf import settings
from locksmith.common import apicall
from locksmith.hub.models import KeyPublicationStatus, UNPUBLISHED, PUBLISHED, NEEDS_UPDATE
and context from other files:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
#
# Path: locksmith/hub/models.py
# def resolve_model(model, fields):
# def __unicode__(self):
# def __unicode__(self):
# def mark_for_update(self):
# def __unicode__(self):
# def kps_callback(sender, instance, created, raw, **kwargs):
# def clean_email(self):
# def clean(self):
# class Api(models.Model):
# class Meta:
# class Key(models.Model):
# class Meta:
# class KeyPublicationStatus(models.Model):
# class Meta:
# class Report(models.Model):
# class Meta:
# class KeyForm(forms.ModelForm):
# class Meta:
# class ResendForm(forms.Form):
, which may contain function names, class names, or code. Output only the next line. | apicall(endpoint, kps.api.signing_key, api=kps.api.name, |
Using the snippet: <|code_start|>
class Command(NoArgsCommand):
help = 'push keys that are marked as dirty to the hub'
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity', 1))
endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
actions = {UNPUBLISHED: 0, NEEDS_UPDATE: 0}
failures = []
# get all non-published keys belonging to APIs with push_enabled
<|code_end|>
, determine the next line of code. You have imports:
import datetime
import urllib2
from urlparse import urljoin
from django.core.management.base import NoArgsCommand
from django.core.mail import mail_managers
from django.conf import settings
from locksmith.common import apicall
from locksmith.hub.models import KeyPublicationStatus, UNPUBLISHED, PUBLISHED, NEEDS_UPDATE
and context (class names, function names, or code) available:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
#
# Path: locksmith/hub/models.py
# def resolve_model(model, fields):
# def __unicode__(self):
# def __unicode__(self):
# def mark_for_update(self):
# def __unicode__(self):
# def kps_callback(sender, instance, created, raw, **kwargs):
# def clean_email(self):
# def clean(self):
# class Api(models.Model):
# class Meta:
# class Key(models.Model):
# class Meta:
# class KeyPublicationStatus(models.Model):
# class Meta:
# class Report(models.Model):
# class Meta:
# class KeyForm(forms.ModelForm):
# class Meta:
# class ResendForm(forms.Form):
. Output only the next line. | dirty = KeyPublicationStatus.objects.exclude(status=PUBLISHED).filter( |
Next line prediction: <|code_start|>
class Command(NoArgsCommand):
help = 'push keys that are marked as dirty to the hub'
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity', 1))
<|code_end|>
. Use current file imports:
(import datetime
import urllib2
from urlparse import urljoin
from django.core.management.base import NoArgsCommand
from django.core.mail import mail_managers
from django.conf import settings
from locksmith.common import apicall
from locksmith.hub.models import KeyPublicationStatus, UNPUBLISHED, PUBLISHED, NEEDS_UPDATE)
and context including class names, function names, or small code snippets from other files:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
#
# Path: locksmith/hub/models.py
# def resolve_model(model, fields):
# def __unicode__(self):
# def __unicode__(self):
# def mark_for_update(self):
# def __unicode__(self):
# def kps_callback(sender, instance, created, raw, **kwargs):
# def clean_email(self):
# def clean(self):
# class Api(models.Model):
# class Meta:
# class Key(models.Model):
# class Meta:
# class KeyPublicationStatus(models.Model):
# class Meta:
# class Report(models.Model):
# class Meta:
# class KeyForm(forms.ModelForm):
# class Meta:
# class ResendForm(forms.Form):
. Output only the next line. | endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'} |
Predict the next line after this snippet: <|code_start|>
class Command(NoArgsCommand):
help = 'push keys that are marked as dirty to the hub'
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity', 1))
endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
actions = {UNPUBLISHED: 0, NEEDS_UPDATE: 0}
failures = []
# get all non-published keys belonging to APIs with push_enabled
<|code_end|>
using the current file's imports:
import datetime
import urllib2
from urlparse import urljoin
from django.core.management.base import NoArgsCommand
from django.core.mail import mail_managers
from django.conf import settings
from locksmith.common import apicall
from locksmith.hub.models import KeyPublicationStatus, UNPUBLISHED, PUBLISHED, NEEDS_UPDATE
and any relevant context from other files:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
#
# Path: locksmith/hub/models.py
# def resolve_model(model, fields):
# def __unicode__(self):
# def __unicode__(self):
# def mark_for_update(self):
# def __unicode__(self):
# def kps_callback(sender, instance, created, raw, **kwargs):
# def clean_email(self):
# def clean(self):
# class Api(models.Model):
# class Meta:
# class Key(models.Model):
# class Meta:
# class KeyPublicationStatus(models.Model):
# class Meta:
# class Report(models.Model):
# class Meta:
# class KeyForm(forms.ModelForm):
# class Meta:
# class ResendForm(forms.Form):
. Output only the next line. | dirty = KeyPublicationStatus.objects.exclude(status=PUBLISHED).filter( |
Given snippet: <|code_start|>
class Command(NoArgsCommand):
help = 'push keys that are marked as dirty to the hub'
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity', 1))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import datetime
import urllib2
from urlparse import urljoin
from django.core.management.base import NoArgsCommand
from django.core.mail import mail_managers
from django.conf import settings
from locksmith.common import apicall
from locksmith.hub.models import KeyPublicationStatus, UNPUBLISHED, PUBLISHED, NEEDS_UPDATE
and context:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
#
# Path: locksmith/hub/models.py
# def resolve_model(model, fields):
# def __unicode__(self):
# def __unicode__(self):
# def mark_for_update(self):
# def __unicode__(self):
# def kps_callback(sender, instance, created, raw, **kwargs):
# def clean_email(self):
# def clean(self):
# class Api(models.Model):
# class Meta:
# class Key(models.Model):
# class Meta:
# class KeyPublicationStatus(models.Model):
# class Meta:
# class Report(models.Model):
# class Meta:
# class KeyForm(forms.ModelForm):
# class Meta:
# class ResendForm(forms.Form):
which might include code, classes, or functions. Output only the next line. | endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'} |
Using the snippet: <|code_start|>
class LocksmithBackend:
"""
Authenticate with email address/key.
* Try to log in with email+password
* If no user exists try and create one with key as password
"""
def authenticate(self, username=None, password=None):
try:
# if user exists with this email try and log in
user = User.objects.get(email=username)
if user.check_password(password):
return user
except User.DoesNotExist:
# if no user exists try and create one based on the key
try:
<|code_end|>
, determine the next line of code. You have imports:
from django.contrib.auth.models import User
from locksmith.hub.models import Key
and context (class names, function names, or code) available:
# Path: locksmith/hub/models.py
# class Key(models.Model):
# '''
# API key to be handed out to Apis
# '''
# user = models.OneToOneField(User, null=True, blank=True, related_name='api_key')
#
# key = models.CharField(max_length=32, db_index=True)
# email = models.EmailField(unique=True)
# alternate_email = models.EmailField(blank=True, null=True) #
# status = models.CharField(max_length=1, choices=KEY_STATUSES, default='U')
#
# name = models.CharField('Name', max_length=100, blank=True, null=True,
# db_index=True)
# org_name = models.CharField('Organization Name', max_length=100,
# blank=True, null=True, db_index=True)
# org_url = models.CharField('Organization URL', blank=True, null=True,
# max_length=200, db_index=True)
# usage = models.TextField('Intended Usage', blank=True, null=True)
#
# promotable = models.BooleanField(default=False)
# issued_on = models.DateTimeField(default=datetime.datetime.now, editable=False)
#
# def __unicode__(self):
# return '%s %s [%s]' % (self.key, self.email, self.status)
#
# def mark_for_update(self):
# '''
# Note that a change has been made so all Statuses need update
# '''
# self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE)
# push_key.delay(self)
#
# class Meta:
# db_table = 'locksmith_hub_key'
. Output only the next line. | key = Key.objects.get(email=username) |
Given snippet: <|code_start|> '''
name = models.CharField(max_length=30)
signing_key = models.CharField(max_length=32)
url = models.URLField()
push_enabled = models.BooleanField(default=True)
description = models.TextField('Description', blank=True)
status = models.IntegerField(choices=API_OPERATING_STATUSES, default=1)
mode = models.IntegerField(choices=list(API_STATUSES), default=1)
status_message = models.TextField('A more detailed status message', null=True, blank=True)
display_name = models.TextField('Display name of the API', blank=False, null=True)
documentation_link = models.TextField('Link to this API\'s documentation', null=True, blank=True)
tools_text = models.TextField('Tools this API powers', null=True, blank=True)
tags = TaggableManager(blank=True)
querybuilder_link = models.TextField('Link to this API\'s query builder page', null=True, blank=True)
def __unicode__(self):
return self.name
class Meta:
db_table = 'locksmith_hub_api'
class Key(models.Model):
'''
API key to be handed out to Apis
'''
user = models.OneToOneField(User, null=True, blank=True, related_name='api_key')
key = models.CharField(max_length=32, db_index=True)
email = models.EmailField(unique=True)
alternate_email = models.EmailField(blank=True, null=True) #
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import datetime
from django.db import models
from django.db.models.signals import post_save
from django import forms
from django.contrib.auth.models import User
from locksmith.common import (KEY_STATUSES,
PUB_STATUSES,
UNPUBLISHED,
NEEDS_UPDATE,
API_OPERATING_STATUSES,
API_STATUSES)
from locksmith.hub.tasks import push_key
from taggit.managers import TaggableManager
and context:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
#
# Path: locksmith/hub/tasks.py
# @task(max_retries=5)
# def push_key(key, replicate_too=True):
# if replicate_too:
# for kps in key.pub_statuses.filter(api__push_enabled=True):
# if kps.api.name in ReplicatedApiNames:
# replicate_key.delay(key, kps.api)
#
# endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
# dirty = key.pub_statuses.exclude(status=PUBLISHED).filter(
# api__push_enabled=True).select_related()
# if not dirty:
# print u"Skipping push_key for {k} because all KeyPublicationStatus objects are PUBLISHED.".format(k=key.key)
#
# # Retrying immediately on failure would allow a broken or unresponsive
# # api to prevent other, properly functioning apis from receiving the key.
# # Thus we use retry_flag to delay the task retry until after attempting
# # to push to all apis.
# retry_flag = False
# for kps in dirty:
# if kps.api.name in ReplicatedApiNames:
# # Skip this API because we've queued a replicate_key task above
# print u"push_key for {k} ignoring {a} because it uses replicate_key.".format(k=key.key, a=kps.api.name)
# continue
#
# endpoint = urljoin(kps.api.url, endpoints[kps.status])
# try:
# apicall(endpoint, kps.api.signing_key, api=kps.api.name,
# key=kps.key.key, email=kps.key.email, status=kps.key.status)
# print 'sent key {k} to {a}'.format(k=key.key, a=kps.api.name)
# kps.status = PUBLISHED
# kps.save()
#
# except Exception as e:
# ctx = {
# 'a': str(kps.api.name),
# 'k': str(key.key),
# 'e': str(e.read()) if isinstance(e, urllib2.HTTPError) else str(e)
# }
# print 'Caught exception while pushing key {k} to {a}: {e}'.format(**ctx)
# print 'Will retry'
# retry_flag = True
#
# if retry_flag:
# push_key.retry()
which might include code, classes, or functions. Output only the next line. | status = models.CharField(max_length=1, choices=KEY_STATUSES, default='U') |
Next line prediction: <|code_start|> name = models.CharField('Name', max_length=100, blank=True, null=True,
db_index=True)
org_name = models.CharField('Organization Name', max_length=100,
blank=True, null=True, db_index=True)
org_url = models.CharField('Organization URL', blank=True, null=True,
max_length=200, db_index=True)
usage = models.TextField('Intended Usage', blank=True, null=True)
promotable = models.BooleanField(default=False)
issued_on = models.DateTimeField(default=datetime.datetime.now, editable=False)
def __unicode__(self):
return '%s %s [%s]' % (self.key, self.email, self.status)
def mark_for_update(self):
'''
Note that a change has been made so all Statuses need update
'''
self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE)
push_key.delay(self)
class Meta:
db_table = 'locksmith_hub_key'
class KeyPublicationStatus(models.Model):
'''
Status of Key with respect to an API
'''
key = models.ForeignKey(Key, related_name='pub_statuses')
api = models.ForeignKey(Api, related_name='pub_statuses')
<|code_end|>
. Use current file imports:
(import datetime
from django.db import models
from django.db.models.signals import post_save
from django import forms
from django.contrib.auth.models import User
from locksmith.common import (KEY_STATUSES,
PUB_STATUSES,
UNPUBLISHED,
NEEDS_UPDATE,
API_OPERATING_STATUSES,
API_STATUSES)
from locksmith.hub.tasks import push_key
from taggit.managers import TaggableManager)
and context including class names, function names, or small code snippets from other files:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
#
# Path: locksmith/hub/tasks.py
# @task(max_retries=5)
# def push_key(key, replicate_too=True):
# if replicate_too:
# for kps in key.pub_statuses.filter(api__push_enabled=True):
# if kps.api.name in ReplicatedApiNames:
# replicate_key.delay(key, kps.api)
#
# endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
# dirty = key.pub_statuses.exclude(status=PUBLISHED).filter(
# api__push_enabled=True).select_related()
# if not dirty:
# print u"Skipping push_key for {k} because all KeyPublicationStatus objects are PUBLISHED.".format(k=key.key)
#
# # Retrying immediately on failure would allow a broken or unresponsive
# # api to prevent other, properly functioning apis from receiving the key.
# # Thus we use retry_flag to delay the task retry until after attempting
# # to push to all apis.
# retry_flag = False
# for kps in dirty:
# if kps.api.name in ReplicatedApiNames:
# # Skip this API because we've queued a replicate_key task above
# print u"push_key for {k} ignoring {a} because it uses replicate_key.".format(k=key.key, a=kps.api.name)
# continue
#
# endpoint = urljoin(kps.api.url, endpoints[kps.status])
# try:
# apicall(endpoint, kps.api.signing_key, api=kps.api.name,
# key=kps.key.key, email=kps.key.email, status=kps.key.status)
# print 'sent key {k} to {a}'.format(k=key.key, a=kps.api.name)
# kps.status = PUBLISHED
# kps.save()
#
# except Exception as e:
# ctx = {
# 'a': str(kps.api.name),
# 'k': str(key.key),
# 'e': str(e.read()) if isinstance(e, urllib2.HTTPError) else str(e)
# }
# print 'Caught exception while pushing key {k} to {a}: {e}'.format(**ctx)
# print 'Will retry'
# retry_flag = True
#
# if retry_flag:
# push_key.retry()
. Output only the next line. | status = models.IntegerField(default=UNPUBLISHED, choices=PUB_STATUSES) |
Predict the next line after this snippet: <|code_start|>
class Key(models.Model):
'''
API key to be handed out to Apis
'''
user = models.OneToOneField(User, null=True, blank=True, related_name='api_key')
key = models.CharField(max_length=32, db_index=True)
email = models.EmailField(unique=True)
alternate_email = models.EmailField(blank=True, null=True) #
status = models.CharField(max_length=1, choices=KEY_STATUSES, default='U')
name = models.CharField('Name', max_length=100, blank=True, null=True,
db_index=True)
org_name = models.CharField('Organization Name', max_length=100,
blank=True, null=True, db_index=True)
org_url = models.CharField('Organization URL', blank=True, null=True,
max_length=200, db_index=True)
usage = models.TextField('Intended Usage', blank=True, null=True)
promotable = models.BooleanField(default=False)
issued_on = models.DateTimeField(default=datetime.datetime.now, editable=False)
def __unicode__(self):
return '%s %s [%s]' % (self.key, self.email, self.status)
def mark_for_update(self):
'''
Note that a change has been made so all Statuses need update
'''
<|code_end|>
using the current file's imports:
import datetime
from django.db import models
from django.db.models.signals import post_save
from django import forms
from django.contrib.auth.models import User
from locksmith.common import (KEY_STATUSES,
PUB_STATUSES,
UNPUBLISHED,
NEEDS_UPDATE,
API_OPERATING_STATUSES,
API_STATUSES)
from locksmith.hub.tasks import push_key
from taggit.managers import TaggableManager
and any relevant context from other files:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
#
# Path: locksmith/hub/tasks.py
# @task(max_retries=5)
# def push_key(key, replicate_too=True):
# if replicate_too:
# for kps in key.pub_statuses.filter(api__push_enabled=True):
# if kps.api.name in ReplicatedApiNames:
# replicate_key.delay(key, kps.api)
#
# endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
# dirty = key.pub_statuses.exclude(status=PUBLISHED).filter(
# api__push_enabled=True).select_related()
# if not dirty:
# print u"Skipping push_key for {k} because all KeyPublicationStatus objects are PUBLISHED.".format(k=key.key)
#
# # Retrying immediately on failure would allow a broken or unresponsive
# # api to prevent other, properly functioning apis from receiving the key.
# # Thus we use retry_flag to delay the task retry until after attempting
# # to push to all apis.
# retry_flag = False
# for kps in dirty:
# if kps.api.name in ReplicatedApiNames:
# # Skip this API because we've queued a replicate_key task above
# print u"push_key for {k} ignoring {a} because it uses replicate_key.".format(k=key.key, a=kps.api.name)
# continue
#
# endpoint = urljoin(kps.api.url, endpoints[kps.status])
# try:
# apicall(endpoint, kps.api.signing_key, api=kps.api.name,
# key=kps.key.key, email=kps.key.email, status=kps.key.status)
# print 'sent key {k} to {a}'.format(k=key.key, a=kps.api.name)
# kps.status = PUBLISHED
# kps.save()
#
# except Exception as e:
# ctx = {
# 'a': str(kps.api.name),
# 'k': str(key.key),
# 'e': str(e.read()) if isinstance(e, urllib2.HTTPError) else str(e)
# }
# print 'Caught exception while pushing key {k} to {a}: {e}'.format(**ctx)
# print 'Will retry'
# retry_flag = True
#
# if retry_flag:
# push_key.retry()
. Output only the next line. | self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE) |
Using the snippet: <|code_start|>
class Key(models.Model):
'''
API key to be handed out to Apis
'''
user = models.OneToOneField(User, null=True, blank=True, related_name='api_key')
key = models.CharField(max_length=32, db_index=True)
email = models.EmailField(unique=True)
alternate_email = models.EmailField(blank=True, null=True) #
status = models.CharField(max_length=1, choices=KEY_STATUSES, default='U')
name = models.CharField('Name', max_length=100, blank=True, null=True,
db_index=True)
org_name = models.CharField('Organization Name', max_length=100,
blank=True, null=True, db_index=True)
org_url = models.CharField('Organization URL', blank=True, null=True,
max_length=200, db_index=True)
usage = models.TextField('Intended Usage', blank=True, null=True)
promotable = models.BooleanField(default=False)
issued_on = models.DateTimeField(default=datetime.datetime.now, editable=False)
def __unicode__(self):
return '%s %s [%s]' % (self.key, self.email, self.status)
def mark_for_update(self):
'''
Note that a change has been made so all Statuses need update
'''
<|code_end|>
, determine the next line of code. You have imports:
import datetime
from django.db import models
from django.db.models.signals import post_save
from django import forms
from django.contrib.auth.models import User
from locksmith.common import (KEY_STATUSES,
PUB_STATUSES,
UNPUBLISHED,
NEEDS_UPDATE,
API_OPERATING_STATUSES,
API_STATUSES)
from locksmith.hub.tasks import push_key
from taggit.managers import TaggableManager
and context (class names, function names, or code) available:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
#
# Path: locksmith/hub/tasks.py
# @task(max_retries=5)
# def push_key(key, replicate_too=True):
# if replicate_too:
# for kps in key.pub_statuses.filter(api__push_enabled=True):
# if kps.api.name in ReplicatedApiNames:
# replicate_key.delay(key, kps.api)
#
# endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
# dirty = key.pub_statuses.exclude(status=PUBLISHED).filter(
# api__push_enabled=True).select_related()
# if not dirty:
# print u"Skipping push_key for {k} because all KeyPublicationStatus objects are PUBLISHED.".format(k=key.key)
#
# # Retrying immediately on failure would allow a broken or unresponsive
# # api to prevent other, properly functioning apis from receiving the key.
# # Thus we use retry_flag to delay the task retry until after attempting
# # to push to all apis.
# retry_flag = False
# for kps in dirty:
# if kps.api.name in ReplicatedApiNames:
# # Skip this API because we've queued a replicate_key task above
# print u"push_key for {k} ignoring {a} because it uses replicate_key.".format(k=key.key, a=kps.api.name)
# continue
#
# endpoint = urljoin(kps.api.url, endpoints[kps.status])
# try:
# apicall(endpoint, kps.api.signing_key, api=kps.api.name,
# key=kps.key.key, email=kps.key.email, status=kps.key.status)
# print 'sent key {k} to {a}'.format(k=key.key, a=kps.api.name)
# kps.status = PUBLISHED
# kps.save()
#
# except Exception as e:
# ctx = {
# 'a': str(kps.api.name),
# 'k': str(key.key),
# 'e': str(e.read()) if isinstance(e, urllib2.HTTPError) else str(e)
# }
# print 'Caught exception while pushing key {k} to {a}: {e}'.format(**ctx)
# print 'Will retry'
# retry_flag = True
#
# if retry_flag:
# push_key.retry()
. Output only the next line. | self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE) |
Using the snippet: <|code_start|>#from django.forms import Form, ModelForm, ValidationError, BooleanField, EmailField
def resolve_model(model, fields):
"""
model: Model class
fields: List of 2-tuples of the form (field, value) in order of descending priority
"""
for (f, v) in fields:
if v is not None:
try:
kwargs = {f: v}
obj = model.objects.get(**kwargs)
return obj
except model.DoesNotExist:
pass
except model.MultipleObjectsReturned:
pass
raise model.DoesNotExist()
class Api(models.Model):
'''
API that Keys are issued to and Reports come from
'''
name = models.CharField(max_length=30)
signing_key = models.CharField(max_length=32)
url = models.URLField()
push_enabled = models.BooleanField(default=True)
description = models.TextField('Description', blank=True)
<|code_end|>
, determine the next line of code. You have imports:
import datetime
from django.db import models
from django.db.models.signals import post_save
from django import forms
from django.contrib.auth.models import User
from locksmith.common import (KEY_STATUSES,
PUB_STATUSES,
UNPUBLISHED,
NEEDS_UPDATE,
API_OPERATING_STATUSES,
API_STATUSES)
from locksmith.hub.tasks import push_key
from taggit.managers import TaggableManager
and context (class names, function names, or code) available:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
#
# Path: locksmith/hub/tasks.py
# @task(max_retries=5)
# def push_key(key, replicate_too=True):
# if replicate_too:
# for kps in key.pub_statuses.filter(api__push_enabled=True):
# if kps.api.name in ReplicatedApiNames:
# replicate_key.delay(key, kps.api)
#
# endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
# dirty = key.pub_statuses.exclude(status=PUBLISHED).filter(
# api__push_enabled=True).select_related()
# if not dirty:
# print u"Skipping push_key for {k} because all KeyPublicationStatus objects are PUBLISHED.".format(k=key.key)
#
# # Retrying immediately on failure would allow a broken or unresponsive
# # api to prevent other, properly functioning apis from receiving the key.
# # Thus we use retry_flag to delay the task retry until after attempting
# # to push to all apis.
# retry_flag = False
# for kps in dirty:
# if kps.api.name in ReplicatedApiNames:
# # Skip this API because we've queued a replicate_key task above
# print u"push_key for {k} ignoring {a} because it uses replicate_key.".format(k=key.key, a=kps.api.name)
# continue
#
# endpoint = urljoin(kps.api.url, endpoints[kps.status])
# try:
# apicall(endpoint, kps.api.signing_key, api=kps.api.name,
# key=kps.key.key, email=kps.key.email, status=kps.key.status)
# print 'sent key {k} to {a}'.format(k=key.key, a=kps.api.name)
# kps.status = PUBLISHED
# kps.save()
#
# except Exception as e:
# ctx = {
# 'a': str(kps.api.name),
# 'k': str(key.key),
# 'e': str(e.read()) if isinstance(e, urllib2.HTTPError) else str(e)
# }
# print 'Caught exception while pushing key {k} to {a}: {e}'.format(**ctx)
# print 'Will retry'
# retry_flag = True
#
# if retry_flag:
# push_key.retry()
. Output only the next line. | status = models.IntegerField(choices=API_OPERATING_STATUSES, default=1) |
Given the code snippet: <|code_start|>#from django.forms import Form, ModelForm, ValidationError, BooleanField, EmailField
def resolve_model(model, fields):
"""
model: Model class
fields: List of 2-tuples of the form (field, value) in order of descending priority
"""
for (f, v) in fields:
if v is not None:
try:
kwargs = {f: v}
obj = model.objects.get(**kwargs)
return obj
except model.DoesNotExist:
pass
except model.MultipleObjectsReturned:
pass
raise model.DoesNotExist()
class Api(models.Model):
'''
API that Keys are issued to and Reports come from
'''
name = models.CharField(max_length=30)
signing_key = models.CharField(max_length=32)
url = models.URLField()
push_enabled = models.BooleanField(default=True)
description = models.TextField('Description', blank=True)
status = models.IntegerField(choices=API_OPERATING_STATUSES, default=1)
<|code_end|>
, generate the next line using the imports in this file:
import datetime
from django.db import models
from django.db.models.signals import post_save
from django import forms
from django.contrib.auth.models import User
from locksmith.common import (KEY_STATUSES,
PUB_STATUSES,
UNPUBLISHED,
NEEDS_UPDATE,
API_OPERATING_STATUSES,
API_STATUSES)
from locksmith.hub.tasks import push_key
from taggit.managers import TaggableManager
and context (functions, classes, or occasionally code) from other files:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
#
# Path: locksmith/hub/tasks.py
# @task(max_retries=5)
# def push_key(key, replicate_too=True):
# if replicate_too:
# for kps in key.pub_statuses.filter(api__push_enabled=True):
# if kps.api.name in ReplicatedApiNames:
# replicate_key.delay(key, kps.api)
#
# endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
# dirty = key.pub_statuses.exclude(status=PUBLISHED).filter(
# api__push_enabled=True).select_related()
# if not dirty:
# print u"Skipping push_key for {k} because all KeyPublicationStatus objects are PUBLISHED.".format(k=key.key)
#
# # Retrying immediately on failure would allow a broken or unresponsive
# # api to prevent other, properly functioning apis from receiving the key.
# # Thus we use retry_flag to delay the task retry until after attempting
# # to push to all apis.
# retry_flag = False
# for kps in dirty:
# if kps.api.name in ReplicatedApiNames:
# # Skip this API because we've queued a replicate_key task above
# print u"push_key for {k} ignoring {a} because it uses replicate_key.".format(k=key.key, a=kps.api.name)
# continue
#
# endpoint = urljoin(kps.api.url, endpoints[kps.status])
# try:
# apicall(endpoint, kps.api.signing_key, api=kps.api.name,
# key=kps.key.key, email=kps.key.email, status=kps.key.status)
# print 'sent key {k} to {a}'.format(k=key.key, a=kps.api.name)
# kps.status = PUBLISHED
# kps.save()
#
# except Exception as e:
# ctx = {
# 'a': str(kps.api.name),
# 'k': str(key.key),
# 'e': str(e.read()) if isinstance(e, urllib2.HTTPError) else str(e)
# }
# print 'Caught exception while pushing key {k} to {a}: {e}'.format(**ctx)
# print 'Will retry'
# retry_flag = True
#
# if retry_flag:
# push_key.retry()
. Output only the next line. | mode = models.IntegerField(choices=list(API_STATUSES), default=1) |
Given snippet: <|code_start|>class Key(models.Model):
'''
API key to be handed out to Apis
'''
user = models.OneToOneField(User, null=True, blank=True, related_name='api_key')
key = models.CharField(max_length=32, db_index=True)
email = models.EmailField(unique=True)
alternate_email = models.EmailField(blank=True, null=True) #
status = models.CharField(max_length=1, choices=KEY_STATUSES, default='U')
name = models.CharField('Name', max_length=100, blank=True, null=True,
db_index=True)
org_name = models.CharField('Organization Name', max_length=100,
blank=True, null=True, db_index=True)
org_url = models.CharField('Organization URL', blank=True, null=True,
max_length=200, db_index=True)
usage = models.TextField('Intended Usage', blank=True, null=True)
promotable = models.BooleanField(default=False)
issued_on = models.DateTimeField(default=datetime.datetime.now, editable=False)
def __unicode__(self):
return '%s %s [%s]' % (self.key, self.email, self.status)
def mark_for_update(self):
'''
Note that a change has been made so all Statuses need update
'''
self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import datetime
from django.db import models
from django.db.models.signals import post_save
from django import forms
from django.contrib.auth.models import User
from locksmith.common import (KEY_STATUSES,
PUB_STATUSES,
UNPUBLISHED,
NEEDS_UPDATE,
API_OPERATING_STATUSES,
API_STATUSES)
from locksmith.hub.tasks import push_key
from taggit.managers import TaggableManager
and context:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
#
# Path: locksmith/hub/tasks.py
# @task(max_retries=5)
# def push_key(key, replicate_too=True):
# if replicate_too:
# for kps in key.pub_statuses.filter(api__push_enabled=True):
# if kps.api.name in ReplicatedApiNames:
# replicate_key.delay(key, kps.api)
#
# endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
# dirty = key.pub_statuses.exclude(status=PUBLISHED).filter(
# api__push_enabled=True).select_related()
# if not dirty:
# print u"Skipping push_key for {k} because all KeyPublicationStatus objects are PUBLISHED.".format(k=key.key)
#
# # Retrying immediately on failure would allow a broken or unresponsive
# # api to prevent other, properly functioning apis from receiving the key.
# # Thus we use retry_flag to delay the task retry until after attempting
# # to push to all apis.
# retry_flag = False
# for kps in dirty:
# if kps.api.name in ReplicatedApiNames:
# # Skip this API because we've queued a replicate_key task above
# print u"push_key for {k} ignoring {a} because it uses replicate_key.".format(k=key.key, a=kps.api.name)
# continue
#
# endpoint = urljoin(kps.api.url, endpoints[kps.status])
# try:
# apicall(endpoint, kps.api.signing_key, api=kps.api.name,
# key=kps.key.key, email=kps.key.email, status=kps.key.status)
# print 'sent key {k} to {a}'.format(k=key.key, a=kps.api.name)
# kps.status = PUBLISHED
# kps.save()
#
# except Exception as e:
# ctx = {
# 'a': str(kps.api.name),
# 'k': str(key.key),
# 'e': str(e.read()) if isinstance(e, urllib2.HTTPError) else str(e)
# }
# print 'Caught exception while pushing key {k} to {a}: {e}'.format(**ctx)
# print 'Will retry'
# retry_flag = True
#
# if retry_flag:
# push_key.retry()
which might include code, classes, or functions. Output only the next line. | push_key.delay(self) |
Predict the next line for this snippet: <|code_start|>
LOG_PATH = getattr(settings, 'LOCKSMITH_LOG_PATH', '/var/log/nginx/access.log')
LOG_REGEX = getattr(
settings,
'LOCKSMITH_LOG_REGEX',
r'.*\[(?P<date>\d{2}/\w{3}/\d{4}):\d{2}:\d{2}:\d{2} \+\d{4}\]\s*"(GET|POST) (?P<endpoint>[/\w\.]*)\?[^"]*apikey=(?P<apikey>[\w\-]*)[^"]*" (?P<status>\d{3}).*'
)
LOG_DATE_FORMAT = getattr(settings, 'LOCKSMITH_DATE_FORMAT', '%d/%b/%Y')
LOG_CUSTOM_TRANSFORM = getattr(settings, 'LOCKSMITH_LOG_CUSTOM_TRANSFORM', None)
class Command(BaseCommand):
help = "Push a given day's logs to the analytics hub by parsing webserver logs."
args = '[date:YYYY-MM-DD]'
requires_model_validation = False
def handle(self, date=None, *args, **options):
if date:
parsed_date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
else:
parsed_date = (datetime.datetime.now() - datetime.timedelta(days=1)).date()
print 'pushing logs for %s' % parsed_date.strftime('%Y-%m-%d')
<|code_end|>
with the help of current file imports:
from django.core.management.base import BaseCommand, CommandError
from locksmith.logparse.report import submit_report
from django.conf import settings
from urlparse import urljoin
import datetime
and context from other files:
# Path: locksmith/logparse/report.py
# def submit_report(log_path, log_regex, log_date_format, log_date, log_custom_transform, locksmith_api_name, locksmith_signing_key, locksmith_endpoint):
# log_re = re.compile(log_regex)
#
# log_directory = os.path.dirname(log_path)
# log_file_re = re.compile(re.escape(os.path.basename(log_path)).replace(r'\*', '.*'))
#
# # only include the ones that match our wildcard pattern
# unsorted_log_files = [file for file in os.listdir(log_directory) if log_file_re.match(file)]
#
# # do some voodoo to make sure they're in the right order, since the numbers may be lexicographically sorted in an odd way
# number_re = re.compile(r'\d+')
# log_files = sorted(unsorted_log_files, key=lambda f: int(number_re.findall(f)[0]) if number_re.search(f) else -1)
#
# totals = {}
#
# # loop over the files
# last_loop = False
# for log_file in log_files:
# if log_file.endswith('.gz'):
# file = gzip.open(os.path.join(log_directory, log_file), 'rb')
# else:
# file = open(os.path.join(log_directory, log_file), 'r')
#
# # loop over the rows
# for row in file:
# match = log_re.match(row)
# if match:
# record = match.groupdict()
# day = datetime.datetime.strptime(record['date'], log_date_format).date()
# if day == log_date and record['status'] == '200' and record['apikey'] and record['apikey'] != '-':
# # normalize the endpoint
# endpoint = log_custom_transform(record['endpoint']) if log_custom_transform else record['endpoint']
#
# # add it to the tally
# if record['apikey'] not in totals:
# totals[record['apikey']] = {}
#
# if endpoint not in totals[record['apikey']]:
# totals[record['apikey']][endpoint] = 1
# else:
# totals[record['apikey']][endpoint] += 1
# elif day < log_date:
# # this is the last log we need to parse
# last_loop = True
# if last_loop:
# break
#
# # submit totals to hub
# submit_date = log_date.strftime('%Y-%m-%d')
# total_submitted = 0
# for api_key in totals:
# for endpoint in totals[api_key]:
# apicall(
# locksmith_endpoint,
# locksmith_signing_key,
# api = locksmith_api_name,
# date = submit_date,
# endpoint = endpoint,
# key = api_key,
# calls = totals[api_key][endpoint]
# )
# total_submitted += totals[api_key][endpoint]
# return total_submitted
, which may contain function names, class names, or code. Output only the next line. | total_submitted = submit_report( |
Next line prediction: <|code_start|>
ReplicatedApiNames = getattr(settings, 'LOCKSMITH_REPLICATED_APIS', [])
@task(max_retries=5)
def push_key(key, replicate_too=True):
if replicate_too:
for kps in key.pub_statuses.filter(api__push_enabled=True):
if kps.api.name in ReplicatedApiNames:
replicate_key.delay(key, kps.api)
endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
dirty = key.pub_statuses.exclude(status=PUBLISHED).filter(
api__push_enabled=True).select_related()
if not dirty:
print u"Skipping push_key for {k} because all KeyPublicationStatus objects are PUBLISHED.".format(k=key.key)
# Retrying immediately on failure would allow a broken or unresponsive
# api to prevent other, properly functioning apis from receiving the key.
# Thus we use retry_flag to delay the task retry until after attempting
# to push to all apis.
retry_flag = False
for kps in dirty:
if kps.api.name in ReplicatedApiNames:
# Skip this API because we've queued a replicate_key task above
print u"push_key for {k} ignoring {a} because it uses replicate_key.".format(k=key.key, a=kps.api.name)
continue
endpoint = urljoin(kps.api.url, endpoints[kps.status])
try:
<|code_end|>
. Use current file imports:
(import urllib2
from urlparse import urljoin
from locksmith.common import apicall, UNPUBLISHED, PUBLISHED, NEEDS_UPDATE
from celery.task import task
from django.conf import settings)
and context including class names, function names, or small code snippets from other files:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
. Output only the next line. | apicall(endpoint, kps.api.signing_key, api=kps.api.name, |
Here is a snippet: <|code_start|>
ReplicatedApiNames = getattr(settings, 'LOCKSMITH_REPLICATED_APIS', [])
@task(max_retries=5)
def push_key(key, replicate_too=True):
if replicate_too:
for kps in key.pub_statuses.filter(api__push_enabled=True):
if kps.api.name in ReplicatedApiNames:
replicate_key.delay(key, kps.api)
<|code_end|>
. Write the next line using the current file imports:
import urllib2
from urlparse import urljoin
from locksmith.common import apicall, UNPUBLISHED, PUBLISHED, NEEDS_UPDATE
from celery.task import task
from django.conf import settings
and context from other files:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
, which may include functions, classes, or code. Output only the next line. | endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'} |
Using the snippet: <|code_start|>
ReplicatedApiNames = getattr(settings, 'LOCKSMITH_REPLICATED_APIS', [])
@task(max_retries=5)
def push_key(key, replicate_too=True):
if replicate_too:
for kps in key.pub_statuses.filter(api__push_enabled=True):
if kps.api.name in ReplicatedApiNames:
replicate_key.delay(key, kps.api)
endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'}
<|code_end|>
, determine the next line of code. You have imports:
import urllib2
from urlparse import urljoin
from locksmith.common import apicall, UNPUBLISHED, PUBLISHED, NEEDS_UPDATE
from celery.task import task
from django.conf import settings
and context (class names, function names, or code) available:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
. Output only the next line. | dirty = key.pub_statuses.exclude(status=PUBLISHED).filter( |
Here is a snippet: <|code_start|>
ReplicatedApiNames = getattr(settings, 'LOCKSMITH_REPLICATED_APIS', [])
@task(max_retries=5)
def push_key(key, replicate_too=True):
if replicate_too:
for kps in key.pub_statuses.filter(api__push_enabled=True):
if kps.api.name in ReplicatedApiNames:
replicate_key.delay(key, kps.api)
<|code_end|>
. Write the next line using the current file imports:
import urllib2
from urlparse import urljoin
from locksmith.common import apicall, UNPUBLISHED, PUBLISHED, NEEDS_UPDATE
from celery.task import task
from django.conf import settings
and context from other files:
# Path: locksmith/common.py
# def enum(name, **enums):
# def __iter__(self):
# def get_signature(params, signkey):
# def apicall(url, signkey, **params):
# def cache(seconds = 900):
# def doCache(f):
# def x(*args, **kwargs):
# E = type(name or 'Enum', (), enums)
# API_OPERATING_STATUSES = (
# (1, 'Normal'),
# (2, 'Degraded Service'),
# (3, 'Service Disruption'),
# (4, 'Undergoing Maintenance')
# )
# API_STATUSES = enum('ApiStatuses',
# Stealth=0,
# Active=1,
# Deprecated=2,
# Disabled=3)
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
# UNPUBLISHED, PUBLISHED, NEEDS_UPDATE = range(3)
# PUB_STATUSES = (
# (UNPUBLISHED, 'Unpublished'),
# (PUBLISHED, 'Published'),
# (NEEDS_UPDATE, 'Needs Update'),
# )
, which may include functions, classes, or code. Output only the next line. | endpoints = {UNPUBLISHED: 'create_key/', NEEDS_UPDATE: 'update_key/'} |
Based on the snippet: <|code_start|>
class Command(BaseCommand):
help = "Push a given day's logs up to the analytics hub"
args = '[date:YYYY-MM-DD]'
requires_model_validation = False
def handle(self, date=None, *args, **options):
if not date:
# set date to yesterday if not passed in
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
date = yesterday.strftime('%Y-%m-%d')
print 'pushing logs for %s' % date
dt_begin = datetime.datetime.strptime(date, '%Y-%m-%d')
dt_end = dt_begin + datetime.timedelta(days=1)
# construct database query
results = db.logs.group(['key', 'method'],
{"timestamp": {"$gte": dt_begin, "$lt": dt_end}},
{"count": 0},
"function (obj, prev) {prev.count += 1;}")
endpoint = urljoin(settings.LOCKSMITH_HUB_URL, 'report_calls/')
# report results
for item in results:
<|code_end|>
, predict the immediate next line with the help of imports:
import datetime
from urlparse import urljoin
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from locksmith.common import apicall
from locksmith.mongoauth.db import db
and context (classes, functions, sometimes code) from other files:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
#
# Path: locksmith/mongoauth/db.py
# MONGO_HOST = getattr(settings, 'LOCKSMITH_MONGO_HOST', 'localhost')
# MONGO_PORT = getattr(settings, 'LOCKSMITH_MONGO_PORT', 27017)
# MONGO_DATABASE = getattr(settings, 'LOCKSMITH_MONGO_DATABASE', 'locksmith')
. Output only the next line. | apicall(endpoint, settings.LOCKSMITH_SIGNING_KEY, |
Given snippet: <|code_start|>
class Command(BaseCommand):
help = "Push a given day's logs up to the analytics hub"
args = '[date:YYYY-MM-DD]'
requires_model_validation = False
def handle(self, date=None, *args, **options):
if not date:
# set date to yesterday if not passed in
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
date = yesterday.strftime('%Y-%m-%d')
print 'pushing logs for %s' % date
dt_begin = datetime.datetime.strptime(date, '%Y-%m-%d')
dt_end = dt_begin + datetime.timedelta(days=1)
# construct database query
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import datetime
from urlparse import urljoin
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from locksmith.common import apicall
from locksmith.mongoauth.db import db
and context:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
#
# Path: locksmith/mongoauth/db.py
# MONGO_HOST = getattr(settings, 'LOCKSMITH_MONGO_HOST', 'localhost')
# MONGO_PORT = getattr(settings, 'LOCKSMITH_MONGO_PORT', 27017)
# MONGO_DATABASE = getattr(settings, 'LOCKSMITH_MONGO_DATABASE', 'locksmith')
which might include code, classes, or functions. Output only the next line. | results = db.logs.group(['key', 'method'], |
Next line prediction: <|code_start|>
QS_PARAM = getattr(settings, 'LOCKSMITH_QS_PARAM', 'apikey')
HTTP_HEADER = getattr(settings, 'LOCKSMITH_HTTP_HEADER', 'HTTP_X_APIKEY')
class APIKeyMiddleware(object):
def process_request(self, request):
key = request.GET.get(QS_PARAM, None) or request.META.get(HTTP_HEADER, None)
if key is not None:
try:
<|code_end|>
. Use current file imports:
(from django.conf import settings
from locksmith.auth.models import ApiKey
from django.http import HttpResponse)
and context including class names, function names, or small code snippets from other files:
# Path: locksmith/auth/models.py
# class ApiKey(models.Model):
# key = models.CharField(max_length=32, primary_key=True)
# email = models.EmailField('Email Address')
# status = models.CharField(max_length=1, choices=KEY_STATUSES, default='U')
#
# issued_on = models.DateTimeField(default=datetime.datetime.now, editable=False)
#
# def active(self):
# return status == 'A'
#
# def __unicode__(self):
# return '%s (%s) [%s]' % (self.email, self.key, self.get_status_display())
#
# class Meta:
# db_table = 'locksmith_auth_apikey'
. Output only the next line. | request.apikey = ApiKey.objects.get(key=key) |
Using the snippet: <|code_start|>MODEL = getattr(settings, 'LOCKSMITH_STATS_MODEL', 'LogEntry')
DATE_FIELD = getattr(settings, 'LOCKSMITH_STATS_DATE_FIELD', 'timestamp')
ENDPOINT_FIELD = getattr(settings, 'LOCKSMITH_STATS_ENDPOINT_FIELD', 'method')
USER_FIELD = getattr(settings, 'LOCKSMITH_STATS_USER_FIELD', 'caller_key')
LogModel = get_model(APP, MODEL)
class Command(BaseCommand):
help = "Push a given day's logs up to the analytics hub"
args = '[date:YYYY-MM-DD]'
requires_model_validation = False
def handle(self, date=None, *args, **options):
if date:
# ensure that date entered can be parsed
entered_date = datetime.datetime.strptime(date, '%Y-%m-%d')
else:
# set date to yesterday if not passed in
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
date = yesterday.strftime('%Y-%m-%d')
print 'pushing logs for %s' % date
# construct database query
qs = LogModel.objects.extra(where=["date_trunc('day', {0}) = '{1}'".format(DATE_FIELD, date)]).order_by()
results = qs.values(ENDPOINT_FIELD, USER_FIELD).annotate(calls=Count('id'))
endpoint = urljoin(settings.LOCKSMITH_HUB_URL, 'report_calls/')
# report results
for item in results:
<|code_end|>
, determine the next line of code. You have imports:
import datetime
from urlparse import urljoin
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db.models import get_model, Count
from locksmith.common import apicall
and context (class names, function names, or code) available:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
. Output only the next line. | apicall(endpoint, settings.LOCKSMITH_SIGNING_KEY, |
Given snippet: <|code_start|>
def verify_signature(post):
return get_signature(post, settings.LOCKSMITH_SIGNING_KEY) == post['signature']
@require_POST
@csrf_exempt
def create_key(request):
if not verify_signature(request.POST):
return HttpResponseBadRequest('bad signature')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from uuid import UUID
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from locksmith.common import get_signature
from locksmith.mongoauth.db import db
and context:
# Path: locksmith/common.py
# def get_signature(params, signkey):
# # sorted k,v pairs of everything but signature
# data = sorted([(k,unicode(v).encode('utf-8'))
# for k,v in params.iteritems()
# if k != 'signature'])
# qs = urllib.urlencode(data)
# return hmac.new(str(signkey), qs, hashlib.sha1).hexdigest()
#
# Path: locksmith/mongoauth/db.py
# MONGO_HOST = getattr(settings, 'LOCKSMITH_MONGO_HOST', 'localhost')
# MONGO_PORT = getattr(settings, 'LOCKSMITH_MONGO_PORT', 27017)
# MONGO_DATABASE = getattr(settings, 'LOCKSMITH_MONGO_DATABASE', 'locksmith')
which might include code, classes, or functions. Output only the next line. | db.keys.insert({'_id':request.POST['key'], |
Continue the code snippet: <|code_start|>
class ApiKey(models.Model):
key = models.CharField(max_length=32, primary_key=True)
email = models.EmailField('Email Address')
<|code_end|>
. Use current file imports:
import datetime
from django.db import models
from locksmith.common import KEY_STATUSES
and context (classes, functions, or code) from other files:
# Path: locksmith/common.py
# KEY_STATUSES = (
# ('U', 'Unactivated'),
# ('A', 'Active'),
# ('S', 'Suspended')
# )
. Output only the next line. | status = models.CharField(max_length=1, choices=KEY_STATUSES, default='U') |
Predict the next line for this snippet: <|code_start|>
class ApiAdmin(admin.ModelAdmin):
list_display = ('name', 'url', 'push_enabled')
admin.site.register(Api, ApiAdmin)
class KeyAdmin(admin.ModelAdmin):
list_display = ('key', 'status', 'email', 'name', 'org_name', 'org_url')
search_fields = ('key', 'email', 'name', 'org_name')
def save_model(self, request, obj, form, change):
obj.save()
obj.mark_for_update()
<|code_end|>
with the help of current file imports:
from django.contrib import admin
from locksmith.hub.models import Api, Key
and context from other files:
# Path: locksmith/hub/models.py
# class Api(models.Model):
# '''
# API that Keys are issued to and Reports come from
# '''
# name = models.CharField(max_length=30)
# signing_key = models.CharField(max_length=32)
# url = models.URLField()
# push_enabled = models.BooleanField(default=True)
# description = models.TextField('Description', blank=True)
# status = models.IntegerField(choices=API_OPERATING_STATUSES, default=1)
# mode = models.IntegerField(choices=list(API_STATUSES), default=1)
# status_message = models.TextField('A more detailed status message', null=True, blank=True)
# display_name = models.TextField('Display name of the API', blank=False, null=True)
# documentation_link = models.TextField('Link to this API\'s documentation', null=True, blank=True)
# tools_text = models.TextField('Tools this API powers', null=True, blank=True)
# tags = TaggableManager(blank=True)
# querybuilder_link = models.TextField('Link to this API\'s query builder page', null=True, blank=True)
#
# def __unicode__(self):
# return self.name
#
# class Meta:
# db_table = 'locksmith_hub_api'
#
# class Key(models.Model):
# '''
# API key to be handed out to Apis
# '''
# user = models.OneToOneField(User, null=True, blank=True, related_name='api_key')
#
# key = models.CharField(max_length=32, db_index=True)
# email = models.EmailField(unique=True)
# alternate_email = models.EmailField(blank=True, null=True) #
# status = models.CharField(max_length=1, choices=KEY_STATUSES, default='U')
#
# name = models.CharField('Name', max_length=100, blank=True, null=True,
# db_index=True)
# org_name = models.CharField('Organization Name', max_length=100,
# blank=True, null=True, db_index=True)
# org_url = models.CharField('Organization URL', blank=True, null=True,
# max_length=200, db_index=True)
# usage = models.TextField('Intended Usage', blank=True, null=True)
#
# promotable = models.BooleanField(default=False)
# issued_on = models.DateTimeField(default=datetime.datetime.now, editable=False)
#
# def __unicode__(self):
# return '%s %s [%s]' % (self.key, self.email, self.status)
#
# def mark_for_update(self):
# '''
# Note that a change has been made so all Statuses need update
# '''
# self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE)
# push_key.delay(self)
#
# class Meta:
# db_table = 'locksmith_hub_key'
, which may contain function names, class names, or code. Output only the next line. | admin.site.register(Key, KeyAdmin) |
Next line prediction: <|code_start|>
try:
SIGNING_KEY = settings.LOCKSMITH_SIGNING_KEY
API_NAME = settings.LOCKSMITH_API_NAME
ENDPOINT = settings.LOCKSMITH_HUB_URL.replace('analytics', 'accounts') + 'checkkey/'
except:
SIGNING_KEY = ""
API_NAME = ""
ENDPOINT = ""
def check_key(key, signing_key=SIGNING_KEY, api=API_NAME, endpoint=ENDPOINT):
try:
<|code_end|>
. Use current file imports:
(from locksmith.common import apicall
from django.conf import settings
import urllib2)
and context including class names, function names, or small code snippets from other files:
# Path: locksmith/common.py
# def apicall(url, signkey, **params):
# params['signature'] = get_signature(params, signkey)
# data = sorted([(k,v) for k,v in params.iteritems()])
# body = urllib.urlencode(data)
# urllib2.urlopen(url, body)
. Output only the next line. | apicall(endpoint, signing_key, |
Continue the code snippet: <|code_start|>
def verify_signature(post):
return get_signature(post, settings.LOCKSMITH_SIGNING_KEY) == post['signature']
@csrf_exempt
@require_POST
def create_key(request):
if not verify_signature(request.POST):
return HttpResponseBadRequest('bad signature')
<|code_end|>
. Use current file imports:
from uuid import UUID
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from locksmith.common import get_signature
from locksmith.auth.models import ApiKey
and context (classes, functions, or code) from other files:
# Path: locksmith/common.py
# def get_signature(params, signkey):
# # sorted k,v pairs of everything but signature
# data = sorted([(k,unicode(v).encode('utf-8'))
# for k,v in params.iteritems()
# if k != 'signature'])
# qs = urllib.urlencode(data)
# return hmac.new(str(signkey), qs, hashlib.sha1).hexdigest()
#
# Path: locksmith/auth/models.py
# class ApiKey(models.Model):
# key = models.CharField(max_length=32, primary_key=True)
# email = models.EmailField('Email Address')
# status = models.CharField(max_length=1, choices=KEY_STATUSES, default='U')
#
# issued_on = models.DateTimeField(default=datetime.datetime.now, editable=False)
#
# def active(self):
# return status == 'A'
#
# def __unicode__(self):
# return '%s (%s) [%s]' % (self.email, self.key, self.get_status_display())
#
# class Meta:
# db_table = 'locksmith_auth_apikey'
. Output only the next line. | ApiKey.objects.create(key=request.POST['key'], |
Predict the next line for this snippet: <|code_start|>
QS_PARAM = getattr(settings, 'LOCKSMITH_QS_PARAM', 'apikey')
HTTP_HEADER = getattr(settings, 'LOCKSMITH_HTTP_HEADER', 'HTTP_X_APIKEY')
class APIKeyMiddleware(object):
def process_request(self, request):
key = request.GET.get(QS_PARAM, None) or request.META.get(HTTP_HEADER,
None)
if key is not None:
<|code_end|>
with the help of current file imports:
import datetime
from django.conf import settings
from locksmith.mongoauth.db import db
from django.http import HttpResponse
and context from other files:
# Path: locksmith/mongoauth/db.py
# MONGO_HOST = getattr(settings, 'LOCKSMITH_MONGO_HOST', 'localhost')
# MONGO_PORT = getattr(settings, 'LOCKSMITH_MONGO_PORT', 27017)
# MONGO_DATABASE = getattr(settings, 'LOCKSMITH_MONGO_DATABASE', 'locksmith')
, which may contain function names, class names, or code. Output only the next line. | apikey = db.keys.find_one({'_id':key}) |
Given the following code snippet before the placeholder: <|code_start|> """Rescale radius of gyration of structure to 1"""
rg = radius_of_gyration(self)
for i, point in enumerate(self.points):
if point != 0:
x, y, z = point.pos
self.points[i].pos = (x/rg, y/rg, z/rg)
class Point(object):
"""Point in 3-D space"""
def __init__(self, pos, chrom, absolute_index, relative_index):
self.pos = pos #3D coordinates
self.chrom = chrom #chromosome parameters
self.absolute_index = absolute_index #index relative to all points in structure (including null/zero points)
self.relative_index = relative_index #index relative to only non-zero points
def structureFromBed(path, size=None, chrom=None, start=None, end=None, offset=0):
"""Initializes structure from intrachromosomal BED file."""
if chrom is None:
chrom = chromFromBed(path)
if start is None:
start = chrom.minPos
if end is None:
end = chrom.maxPos
structure = Structure([], [], chrom, offset)
structure.points = np.zeros(int((end - start)/chrom.res) + 1, dtype=object) #true if locus should be added
if size is not None:
<|code_end|>
, predict the next line using imports from the current file:
import sys
import numpy as np
import array_tools as at
from tools import Tracker
from linear_algebra import *
from tad import *
from hic_oe import get_expected
and context including class names, function names, and sometimes code from other files:
# Path: tools.py
# class Tracker(object):
# """Tracks progress of task"""
# def __init__(self, name, size, currPercentage=0, count=0):
# self.name = name #name of task
# self.size = size #total size of task (e.g. number of lines in file)
# self.currPercentage = currPercentage #current percentage of task complete
# self.count = count #absolute amount of task complete (e.g. number of lines of file read)
# def increment(self):
# if self.size !=0 and self.size is not None:
# self.count += 1
# newPercentage = self.currPercentage + 1
# if float(self.count)/self.size >= float(newPercentage)/100: #if at least X% of the file has been read, print percentage
# self.currPercentage = newPercentage
# print("{} {}% complete".format(self.name, self.currPercentage))
#
# Path: hic_oe.py
# def get_expected(mat):
# n = len(mat)
#
# tots = np.zeros(n-1)
# counts = np.zeros(n-1)
#
# for i in range(n):
# for j in range(i):
# observed = mat[i,j]
# if observed != 0:
# s = i - j
# tots[s - 1] += observed
# counts[s - 1] += 1
#
# avgs = np.zeros(n-1)
# for i, count in enumerate(counts):
# if count != 0:
# avgs[i] = tots[i]/count
#
# return avgs
. Output only the next line. | tracker = Tracker("Identifying loci", size) |
Here is a snippet: <|code_start|> for structure in structures:
new_points = np.zeros(structure.chrom.getLength(), dtype=object)
for i, gen_coord in enumerate(consensus):
abs_index = structure.chrom.getAbsoluteIndex(gen_coord)
pos = structure.points[abs_index - structure.offset].pos
new_points[abs_index - structure.offset] = Point(pos, structure.chrom, abs_index, i)
structure.points = new_points
def transform(trueLow, highSubstructure, res_ratio):
#approximate as low resolution
inferredLow = highToLow(highSubstructure, res_ratio)
scaling_factor = radius_of_gyration(trueLow)/radius_of_gyration(inferredLow)
for i, point in enumerate(inferredLow.points):
if point != 0:
x, y, z = point.pos
inferredLow.points[i].pos = (x*scaling_factor, y*scaling_factor, z*scaling_factor)
#recover the transformation for inferred from true low structure
r, t = getTransformation(inferredLow, trueLow)
t /= scaling_factor
#transform high structure
highSubstructure.transform(r, t)
def distmat(path, structure, size=None, alpha=4, weight=0.05):
contactMat = matFromBed(path, size, structure)
assert len(structure.nonzero_abs_indices()) == len(contactMat)
<|code_end|>
. Write the next line using the current file imports:
import sys
import numpy as np
import array_tools as at
from tools import Tracker
from linear_algebra import *
from tad import *
from hic_oe import get_expected
and context from other files:
# Path: tools.py
# class Tracker(object):
# """Tracks progress of task"""
# def __init__(self, name, size, currPercentage=0, count=0):
# self.name = name #name of task
# self.size = size #total size of task (e.g. number of lines in file)
# self.currPercentage = currPercentage #current percentage of task complete
# self.count = count #absolute amount of task complete (e.g. number of lines of file read)
# def increment(self):
# if self.size !=0 and self.size is not None:
# self.count += 1
# newPercentage = self.currPercentage + 1
# if float(self.count)/self.size >= float(newPercentage)/100: #if at least X% of the file has been read, print percentage
# self.currPercentage = newPercentage
# print("{} {}% complete".format(self.name, self.currPercentage))
#
# Path: hic_oe.py
# def get_expected(mat):
# n = len(mat)
#
# tots = np.zeros(n-1)
# counts = np.zeros(n-1)
#
# for i in range(n):
# for j in range(i):
# observed = mat[i,j]
# if observed != 0:
# s = i - j
# tots[s - 1] += observed
# counts[s - 1] += 1
#
# avgs = np.zeros(n-1)
# for i, count in enumerate(counts):
# if count != 0:
# avgs[i] = tots[i]/count
#
# return avgs
, which may include functions, classes, or code. Output only the next line. | expected = get_expected(contactMat) |
Predict the next line for this snippet: <|code_start|>sys.path.append("..")
def matFromDixon(path, chrom):
"""Creates contact matrix from Dixon tsv data"""
numBins = chrom.getLength()
mat = np.zeros((numBins, numBins))
<|code_end|>
with the help of current file imports:
import sys
import data_tools as dt
import simple_tad as tad
import heatmap as hm
import numpy as np
from tools import Tracker
and context from other files:
# Path: tools.py
# class Tracker(object):
# """Tracks progress of task"""
# def __init__(self, name, size, currPercentage=0, count=0):
# self.name = name #name of task
# self.size = size #total size of task (e.g. number of lines in file)
# self.currPercentage = currPercentage #current percentage of task complete
# self.count = count #absolute amount of task complete (e.g. number of lines of file read)
# def increment(self):
# if self.size !=0 and self.size is not None:
# self.count += 1
# newPercentage = self.currPercentage + 1
# if float(self.count)/self.size >= float(newPercentage)/100: #if at least X% of the file has been read, print percentage
# self.currPercentage = newPercentage
# print("{} {}% complete".format(self.name, self.currPercentage))
, which may contain function names, class names, or code. Output only the next line. | tracker = Tracker("Reading " + path, chrom.size) |
Based on the snippet: <|code_start|> if bin1 > bin2:
row = bin1
col = bin2
else:
row = bin1
col = bin2
mat[row, col] += 1
tracker.increment()
infile.close()
return mat
def plotLevels(mat):
smoothingFactors = [1, 2, 3, 8, 33] #these smoothing factors were selected to best demonstrate TAD levels
domainsToInclude = [range(1, 15), [2,3,4,5], [7], [1,6], [3]] #selected domains from these smoothing factors to maximize prettiness
all_tads = []
for i in range(len(smoothingFactors)):
smoothingFactor = smoothingFactors[i]
indices = domainsToInclude[i]
tads = tad.getDomains(mat, smoothingFactor, 0)
for index in indices:
all_tads.append(tads[index])
hm.heatMapFromMat(mat, 100, all_tads, "Fig2") #all levels combined
minPos = 49000000 #from Dixon
maxPos = 54066692 #from Dixon
res = 40000 #from Dixon
name = "chr22"
size = 30949158
path = "mESC_chr6.tsv"
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import heatmap as hm
import simple_tad as tad
import numpy as np
from data_tools import ChromParameters
from tools import Tracker
and context (classes, functions, sometimes code) from other files:
# Path: data_tools.py
# class ChromParameters(object):
# """Basic information on chromosome, inferred from input file"""
# def __init__(self, minPos, maxPos, res, name):
# self.minPos = minPos #minimum genomic coordinate
# self.maxPos = maxPos #maximum genomic coordinate
# self.res = res #resolution (bp)
# self.name = name #e.g. "chr22"
#
# def getLength(self):
# """Number of possible loci"""
# return int((self.maxPos - self.minPos)/self.res) + 1
#
# def getAbsoluteIndex(self, genCoord):
# """Converts genomic coordinate into absolute index. Absolute indexing includes empty (zero) points."""
# if genCoord < self.minPos or genCoord > self.maxPos + self.res:
# return None
# else:
# return int((genCoord - self.minPos)/self.res)
#
# def getGenCoord(self, abs_index):
# """Converts absolute index into genomic coordinate"""
# return self.minPos + self.res * abs_index
#
# def reduceRes(self, resRatio):
# """Creates low-res version of this chromosome"""
# lowRes = self.res * resRatio
# lowMinPos = (self.minPos/lowRes)*lowRes #approximate at low resolution
# lowMaxPos = (self.maxPos/lowRes)*lowRes
# return ChromParameters(lowMinPos, lowMaxPos, lowRes, self.name)
#
# Path: tools.py
# class Tracker(object):
# """Tracks progress of task"""
# def __init__(self, name, size, currPercentage=0, count=0):
# self.name = name #name of task
# self.size = size #total size of task (e.g. number of lines in file)
# self.currPercentage = currPercentage #current percentage of task complete
# self.count = count #absolute amount of task complete (e.g. number of lines of file read)
# def increment(self):
# if self.size !=0 and self.size is not None:
# self.count += 1
# newPercentage = self.currPercentage + 1
# if float(self.count)/self.size >= float(newPercentage)/100: #if at least X% of the file has been read, print percentage
# self.currPercentage = newPercentage
# print("{} {}% complete".format(self.name, self.currPercentage))
. Output only the next line. | chrom = ChromParameters(minPos, maxPos, res, name, size) |
Next line prediction: <|code_start|>sys.path.append("..")
def matFromDixon(path, chrom):
"""Creates contact matrix from Dixon tsv data"""
numBins = chrom.getLength()
mat = np.zeros((numBins, numBins))
<|code_end|>
. Use current file imports:
(import sys
import heatmap as hm
import simple_tad as tad
import numpy as np
from data_tools import ChromParameters
from tools import Tracker)
and context including class names, function names, or small code snippets from other files:
# Path: data_tools.py
# class ChromParameters(object):
# """Basic information on chromosome, inferred from input file"""
# def __init__(self, minPos, maxPos, res, name):
# self.minPos = minPos #minimum genomic coordinate
# self.maxPos = maxPos #maximum genomic coordinate
# self.res = res #resolution (bp)
# self.name = name #e.g. "chr22"
#
# def getLength(self):
# """Number of possible loci"""
# return int((self.maxPos - self.minPos)/self.res) + 1
#
# def getAbsoluteIndex(self, genCoord):
# """Converts genomic coordinate into absolute index. Absolute indexing includes empty (zero) points."""
# if genCoord < self.minPos or genCoord > self.maxPos + self.res:
# return None
# else:
# return int((genCoord - self.minPos)/self.res)
#
# def getGenCoord(self, abs_index):
# """Converts absolute index into genomic coordinate"""
# return self.minPos + self.res * abs_index
#
# def reduceRes(self, resRatio):
# """Creates low-res version of this chromosome"""
# lowRes = self.res * resRatio
# lowMinPos = (self.minPos/lowRes)*lowRes #approximate at low resolution
# lowMaxPos = (self.maxPos/lowRes)*lowRes
# return ChromParameters(lowMinPos, lowMaxPos, lowRes, self.name)
#
# Path: tools.py
# class Tracker(object):
# """Tracks progress of task"""
# def __init__(self, name, size, currPercentage=0, count=0):
# self.name = name #name of task
# self.size = size #total size of task (e.g. number of lines in file)
# self.currPercentage = currPercentage #current percentage of task complete
# self.count = count #absolute amount of task complete (e.g. number of lines of file read)
# def increment(self):
# if self.size !=0 and self.size is not None:
# self.count += 1
# newPercentage = self.currPercentage + 1
# if float(self.count)/self.size >= float(newPercentage)/100: #if at least X% of the file has been read, print percentage
# self.currPercentage = newPercentage
# print("{} {}% complete".format(self.name, self.currPercentage))
. Output only the next line. | tracker = Tracker("Reading " + path, chrom.size) |
Predict the next line for this snippet: <|code_start|>from __future__ import print_function
batch_size = 128
nb_epoch = 100
nb_classes = 19 * 19 # One class for each position on the board
go_board_rows, go_board_cols = 19, 19 # input dimensions of go board
nb_filters = 32 # number of convolutional filters to use
nb_pool = 2 # size of pooling area for max pooling
nb_conv = 3 # convolution kernel size
# SevenPlaneProcessor loads seven planes (doh!) of 19*19 data points, so we need 7 input channels
<|code_end|>
with the help of current file imports:
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils import np_utils
from betago.processor import SevenPlaneProcessor
and context from other files:
# Path: betago/processor.py
# class SevenPlaneProcessor(GoDataProcessor):
# '''
# Implementation of a Go data processor, using seven planes of 19x19 values to represent the position of
# a go board, as explained below.
#
# This closely reflects the representation suggested in Clark, Storkey:
# http://arxiv.org/abs/1412.3409
# '''
#
# def __init__(self, data_directory='data', num_planes=7, consolidate=True, use_generator=False):
# super(SevenPlaneProcessor, self).__init__(data_directory=data_directory,
# num_planes=num_planes,
# consolidate=consolidate,
# use_generator=use_generator)
#
# def feature_and_label(self, color, move, go_board, num_planes):
# '''
# Parameters
# ----------
# color: color of the next person to move
# move: move they decided to make
# go_board: represents the state of the board before they moved
#
# Planes we write:
# 0: our stones with 1 liberty
# 1: our stones with 2 liberty
# 2: our stones with 3 or more liberties
# 3: their stones with 1 liberty
# 4: their stones with 2 liberty
# 5: their stones with 3 or more liberties
# 6: simple ko
# '''
# row, col = move
# enemy_color = go_board.other_color(color)
# label = row * 19 + col
# move_array = np.zeros((num_planes, go_board.board_size, go_board.board_size))
# for row in range(0, go_board.board_size):
# for col in range(0, go_board.board_size):
# pos = (row, col)
# if go_board.board.get(pos) == color:
# if go_board.go_strings[pos].liberties.size() == 1:
# move_array[0, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() == 2:
# move_array[1, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() >= 3:
# move_array[2, row, col] = 1
# if go_board.board.get(pos) == enemy_color:
# if go_board.go_strings[pos].liberties.size() == 1:
# move_array[3, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() == 2:
# move_array[4, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() >= 3:
# move_array[5, row, col] = 1
# if go_board.is_simple_ko(color, pos):
# move_array[6, row, col] = 1
# return move_array, label
, which may contain function names, class names, or code. Output only the next line. | processor = SevenPlaneProcessor() |
Here is a snippet: <|code_start|>from __future__ import print_function
parser = argparse.ArgumentParser()
parser.add_argument('--bot-name', default='new_bot')
parser.add_argument('--epochs', type=int, default=5)
parser.add_argument('--sample-size', type=int, default=1000)
args = parser.parse_args()
here = os.path.dirname(os.path.abspath(__file__))
model_zoo = os.path.join(here, '..', 'model_zoo')
weight_file = os.path.join(model_zoo, args.bot_name + '_weights.hd5')
checkpoint_file_pattern = os.path.join(model_zoo, args.bot_name + '_epoch_{epoch}.hd5')
model_file = os.path.join(model_zoo, args.bot_name + '_model.yml')
batch_size = 128
nb_classes = 19 * 19 # One class for each position on the board
go_board_rows, go_board_cols = 19, 19 # input dimensions of go board
nb_filters = 32 # number of convolutional filters to use
nb_pool = 2 # size of pooling area for max pooling
nb_conv = 3 # convolution kernel size
# SevenPlaneProcessor loads seven planes (doh!) of 19*19 data points, so we need 7 input channels
<|code_end|>
. Write the next line using the current file imports:
import argparse
import os
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from betago.processor import SevenPlaneProcessor
and context from other files:
# Path: betago/processor.py
# class SevenPlaneProcessor(GoDataProcessor):
# '''
# Implementation of a Go data processor, using seven planes of 19x19 values to represent the position of
# a go board, as explained below.
#
# This closely reflects the representation suggested in Clark, Storkey:
# http://arxiv.org/abs/1412.3409
# '''
#
# def __init__(self, data_directory='data', num_planes=7, consolidate=True, use_generator=False):
# super(SevenPlaneProcessor, self).__init__(data_directory=data_directory,
# num_planes=num_planes,
# consolidate=consolidate,
# use_generator=use_generator)
#
# def feature_and_label(self, color, move, go_board, num_planes):
# '''
# Parameters
# ----------
# color: color of the next person to move
# move: move they decided to make
# go_board: represents the state of the board before they moved
#
# Planes we write:
# 0: our stones with 1 liberty
# 1: our stones with 2 liberty
# 2: our stones with 3 or more liberties
# 3: their stones with 1 liberty
# 4: their stones with 2 liberty
# 5: their stones with 3 or more liberties
# 6: simple ko
# '''
# row, col = move
# enemy_color = go_board.other_color(color)
# label = row * 19 + col
# move_array = np.zeros((num_planes, go_board.board_size, go_board.board_size))
# for row in range(0, go_board.board_size):
# for col in range(0, go_board.board_size):
# pos = (row, col)
# if go_board.board.get(pos) == color:
# if go_board.go_strings[pos].liberties.size() == 1:
# move_array[0, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() == 2:
# move_array[1, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() >= 3:
# move_array[2, row, col] = 1
# if go_board.board.get(pos) == enemy_color:
# if go_board.go_strings[pos].liberties.size() == 1:
# move_array[3, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() == 2:
# move_array[4, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() >= 3:
# move_array[5, row, col] = 1
# if go_board.is_simple_ko(color, pos):
# move_array[6, row, col] = 1
# return move_array, label
, which may include functions, classes, or code. Output only the next line. | processor = SevenPlaneProcessor(use_generator=True) |
Predict the next line after this snippet: <|code_start|>
class ModelTestCase(unittest.TestCase):
def test_all_empty_points(self):
board = goboard.from_string('''
.b.
bb.
.ww
''')
<|code_end|>
using the current file's imports:
import unittest
import six
from betago import model
from betago.dataloader import goboard
and any relevant context from other files:
# Path: betago/model.py
# class HTTPFrontend(object):
# class GoModel(object):
# class KerasBot(GoModel):
# class RandomizedKerasBot(GoModel):
# class IdiotBot(GoModel):
# def __init__(self, bot, graph, port=8080):
# def start_server(self):
# def stop_server(self):
# def run(self):
# def static_file_dist(path):
# def static_file_large(path):
# def home():
# def exportJSON():
# def next_move():
# def __init__(self, model, processor):
# def set_board(self, board):
# def apply_move(self, color, move):
# def select_move(self, bot_color):
# def __init__(self, model, processor, top_n=10):
# def apply_move(self, color, move):
# def select_move(self, bot_color):
# def _move_generator(self, bot_color):
# def _model_moves(self, bot_color):
# def __init__(self, model, processor):
# def apply_move(self, color, move):
# def select_move(self, bot_color):
# def _move_generator(self, bot_color):
# def _model_moves(self, bot_color):
# def __init__(self, model=None, processor=ThreePlaneProcessor()):
# def apply_move(self, color, move):
# def select_move(self, bot_color):
# def get_first_valid_move(board, color, move_generator):
# def generate_in_random_order(point_list):
# def all_empty_points(board):
# def fill_dame(board):
# X = X.reshape((1, X.shape[0], X.shape[1], X.shape[2]))
# X = X.reshape((1, X.shape[0], X.shape[1], X.shape[2]))
#
# Path: betago/dataloader/goboard.py
# class GoBoard(object):
# class BoardSequence(object):
# class GoString(object):
# def __init__(self, board_size=19):
# def fold_go_strings(self, target, source, join_position):
# def add_adjacent_liberty(self, pos, go_string):
# def is_move_on_board(self, move):
# def is_move_suicide(self, color, pos):
# def is_move_legal(self, color, pos):
# def create_go_string(self, color, pos):
# def other_color(self, color):
# def is_simple_ko(self, play_color, pos):
# def check_enemy_liberty(self, play_color, enemy_pos, our_pos):
# def apply_move(self, play_color, pos):
# def add_liberty_to_adjacent_string(self, string_pos, liberty_pos, color):
# def fold_our_moves(self, first_string, color, pos, join_position):
# def __str__(self):
# def __init__(self, board_size=19):
# def insert(self, combo):
# def erase(self, combo):
# def exists(self, combo):
# def size(self):
# def __getitem__(self, iid):
# def __str__(self):
# def __init__(self, board_size, color):
# def get_stone(self, index):
# def get_liberty(self, index):
# def insert_stone(self, combo):
# def get_num_stones(self):
# def remove_liberty(self, combo):
# def get_num_liberties(self):
# def insert_liberty(self, combo):
# def copy_liberties_from(self, source):
# def __str__(self):
# def from_string(board_string):
# def to_string(board):
. Output only the next line. | empty_points = model.all_empty_points(board) |
Given the following code snippet before the placeholder: <|code_start|>
class CommandTestCase(unittest.TestCase):
def test_parse(self):
command_string = 'play white D4'
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from betago.gtp import command
and context including class names, function names, and sometimes code from other files:
# Path: betago/gtp/command.py
# class Command(object):
# def __init__(self, sequence, name, args):
# def __eq__(self, other):
# def __repr__(self):
# def __str__(self):
# def parse(command_string):
. Output only the next line. | expected = command.Command( |
Next line prediction: <|code_start|>from __future__ import print_function
argparser = argparse.ArgumentParser()
argparser.add_argument('handicap', type=int, nargs=1)
argparser.add_argument('output_sgf', nargs='?', default='output.sgf')
args = argparser.parse_args()
<|code_end|>
. Use current file imports:
(import yaml
import subprocess
import re
import argparse
from keras.models import model_from_yaml
from betago.model import KerasBot
from betago.processor import SevenPlaneProcessor
from betago.gtp.board import gtp_position_to_coords, coords_to_gtp_position)
and context including class names, function names, or small code snippets from other files:
# Path: betago/model.py
# class KerasBot(GoModel):
# '''
# KerasBot takes top_n predictions of a keras model and tries to apply the best move. If that move is illegal,
# choose the next best, until the list is exhausted. If no more moves are left to play, continue with random
# moves until a legal move is found.
# '''
#
# def __init__(self, model, processor, top_n=10):
# super(KerasBot, self).__init__(model=model, processor=processor)
# self.top_n = top_n
#
# def apply_move(self, color, move):
# # Apply human move
# self.go_board.apply_move(color, move)
#
# def select_move(self, bot_color):
# move = get_first_valid_move(self.go_board, bot_color,
# self._move_generator(bot_color))
# if move is not None:
# self.go_board.apply_move(bot_color, move)
# return move
#
# def _move_generator(self, bot_color):
# return chain(
# # First try the model.
# self._model_moves(bot_color),
# # If none of the model moves are valid, fill in a random
# # dame point. This is probably not a very good move, but
# # it's better than randomly filling in our own eyes.
# fill_dame(self.go_board),
# # Lastly just try any open space.
# generate_in_random_order(all_empty_points(self.go_board)),
# )
#
# def _model_moves(self, bot_color):
# # Turn the board into a feature vector.
# # The (0, 0) is for generating the label, which we ignore.
# X, label = self.processor.feature_and_label(
# bot_color, (0, 0), self.go_board, self.num_planes)
# X = X.reshape((1, X.shape[0], X.shape[1], X.shape[2]))
#
# # Generate bot move.
# pred = np.squeeze(self.model.predict(X))
# top_n_pred_idx = pred.argsort()[-self.top_n:][::-1]
# for idx in top_n_pred_idx:
# prediction = int(idx)
# pred_row = prediction // 19
# pred_col = prediction % 19
# pred_move = (pred_row, pred_col)
# yield pred_move
#
# Path: betago/processor.py
# class SevenPlaneProcessor(GoDataProcessor):
# '''
# Implementation of a Go data processor, using seven planes of 19x19 values to represent the position of
# a go board, as explained below.
#
# This closely reflects the representation suggested in Clark, Storkey:
# http://arxiv.org/abs/1412.3409
# '''
#
# def __init__(self, data_directory='data', num_planes=7, consolidate=True, use_generator=False):
# super(SevenPlaneProcessor, self).__init__(data_directory=data_directory,
# num_planes=num_planes,
# consolidate=consolidate,
# use_generator=use_generator)
#
# def feature_and_label(self, color, move, go_board, num_planes):
# '''
# Parameters
# ----------
# color: color of the next person to move
# move: move they decided to make
# go_board: represents the state of the board before they moved
#
# Planes we write:
# 0: our stones with 1 liberty
# 1: our stones with 2 liberty
# 2: our stones with 3 or more liberties
# 3: their stones with 1 liberty
# 4: their stones with 2 liberty
# 5: their stones with 3 or more liberties
# 6: simple ko
# '''
# row, col = move
# enemy_color = go_board.other_color(color)
# label = row * 19 + col
# move_array = np.zeros((num_planes, go_board.board_size, go_board.board_size))
# for row in range(0, go_board.board_size):
# for col in range(0, go_board.board_size):
# pos = (row, col)
# if go_board.board.get(pos) == color:
# if go_board.go_strings[pos].liberties.size() == 1:
# move_array[0, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() == 2:
# move_array[1, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() >= 3:
# move_array[2, row, col] = 1
# if go_board.board.get(pos) == enemy_color:
# if go_board.go_strings[pos].liberties.size() == 1:
# move_array[3, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() == 2:
# move_array[4, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() >= 3:
# move_array[5, row, col] = 1
# if go_board.is_simple_ko(color, pos):
# move_array[6, row, col] = 1
# return move_array, label
#
# Path: betago/gtp/board.py
# def gtp_position_to_coords(gtp_position):
# """Convert a GTP board location to a (row, col) tuple.
#
# Example:
# >>> gtp_position_to_coords('A1')
# (0, 0)
# """
# col_str, row_str = gtp_position[0], gtp_position[1:]
# return (int(row_str) - 1, COLS.find(col_str))
#
# def coords_to_gtp_position(coords):
# """Convert (row, col) tuple to GTP board locations.
#
# Example:
# >>> coords_to_gtp_position((0, 0))
# 'A1'
# """
# row, col = coords
# # coords are zero-indexed, GTP is 1-indexed.
# return COLS[col] + str(row + 1)
. Output only the next line. | processor = SevenPlaneProcessor() |
Continue the code snippet: <|code_start|># This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
from __future__ import absolute_import
class Sampler(object):
'''
Sample training and test data from zipped sgf files such that test data is kept stable.
'''
def __init__(self, data_dir='data', num_test_games=100, cap_year=2015, seed=1337):
self.data_dir = data_dir
self.num_test_games = num_test_games
self.test_games = []
self.train_games = []
self.test_folder = 'test_samples.py'
self.cap_year = cap_year
random.seed(seed)
self.compute_test_samples()
def draw_samples(self, num_sample_games):
'''
Draw num_sample_games many training games from index.
'''
available_games = []
<|code_end|>
. Use current file imports:
import os
import random
from .index_processor import KGSIndex
from six.moves import range
and context (classes, functions, or code) from other files:
# Path: betago/dataloader/index_processor.py
# class KGSIndex(object):
#
# def __init__(self,
# kgs_url='http://u-go.net/gamerecords/',
# index_page='kgs_index.html',
# data_directory='data'):
# '''
# Create an index of zip files containing SGF data of actual Go Games on KGS.
#
# Parameters:
# -----------
# kgs_url: URL with links to zip files of games
# index_page: Name of local html file of kgs_url
# data_directory: name of directory relative to current path to store SGF data
# '''
# self.kgs_url = kgs_url
# self.index_page = index_page
# self.data_directory = data_directory
# self.file_info = []
# self.urls = []
# self.load_index() # Load index on creation
#
# def download_files(self):
# '''
# Download zip files by distributing work on all available CPUs
# '''
# if not os.path.isdir(self.data_directory):
# os.makedirs(self.data_directory)
#
# urls_to_download = []
# for file_info in self.file_info:
# url = file_info['url']
# file_name = file_info['filename']
# if not os.path.isfile(self.data_directory + '/' + file_name):
# urls_to_download.append((url, self.data_directory + '/' + file_name))
# cores = multiprocessing.cpu_count()
# pool = multiprocessing.Pool(processes=cores)
# try:
# it = pool.imap(worker, urls_to_download)
# for i in it:
# pass
# pool.close()
# pool.join()
# except KeyboardInterrupt:
# print(">>> Caught KeyboardInterrupt, terminating workers")
# pool.terminate()
# pool.join()
# sys.exit(-1)
#
# def create_index_page(self):
# '''
# If there is no local html containing links to files, create one.
# '''
# if os.path.isfile(self.index_page):
# print('>>> Reading cached index page')
# index_file = open(self.index_page, 'r')
# index_contents = index_file.read()
# index_file.close()
# else:
# print('>>> Downloading index page')
# fp = urlopen(self.kgs_url)
# data = six.text_type(fp.read())
# fp.close()
# index_contents = data
# index_file = open(self.index_page, 'w')
# index_file.write(index_contents)
# index_file.close()
# return index_contents
#
# def load_index(self):
# '''
# Create the actual index representation from the previously downloaded or cached html.
# '''
# index_contents = self.create_index_page()
# split_page = [item for item in index_contents.split('<a href="') if item.startswith("https://")]
# for item in split_page:
# download_url = item.split('">Download')[0]
# if download_url.endswith('.tar.gz'):
# self.urls.append(download_url)
# for url in self.urls:
# filename = os.path.basename(url)
# split_file_name = filename.split('-')
# num_games = int(split_file_name[len(split_file_name) - 2])
# print(filename + ' ' + str(num_games))
# self.file_info.append({'url': url, 'filename': filename, 'num_games': num_games})
. Output only the next line. | index = KGSIndex(data_directory=self.data_dir) |
Based on the snippet: <|code_start|>
class ResponseTestCase(unittest.TestCase):
def setUp(self):
self.cmd = command.Command(None, 'genmove', ('black',))
self.cmd_with_sequence = command.Command(99, 'genmove', ('black',))
def test_serialize(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
from betago.gtp import command, response
and context (classes, functions, sometimes code) from other files:
# Path: betago/gtp/command.py
# class Command(object):
# def __init__(self, sequence, name, args):
# def __eq__(self, other):
# def __repr__(self):
# def __str__(self):
# def parse(command_string):
#
# Path: betago/gtp/response.py
# class Response(object):
# def __init__(self, status, body):
# def success(body=''):
# def error(body=''):
# def serialize(gtp_command, gtp_response):
. Output only the next line. | resp = response.success('D4') |
Predict the next line for this snippet: <|code_start|>"""Tests for sgf_properties.py."""
class SgfPropertiesTestCase(unittest.TestCase):
def test_interpret_simpletext(self):
def interpret(s, encoding):
<|code_end|>
with the help of current file imports:
import unittest
from textwrap import dedent
from betago.gosgf import sgf_properties
and context from other files:
# Path: betago/gosgf/sgf_properties.py
# def identity(x):
# def normalise_charset_name(s):
# def interpret_go_point(s, size):
# def serialise_go_point(move, size):
# def __init__(self, size, encoding):
# def interpret_none(s, context=None):
# def serialise_none(b, context=None):
# def interpret_number(s, context=None):
# def serialise_number(i, context=None):
# def interpret_real(s, context=None):
# def serialise_real(f, context=None):
# def interpret_double(s, context=None):
# def serialise_double(i, context=None):
# def interpret_colour(s, context=None):
# def serialise_colour(colour, context=None):
# def _transcode(s, encoding):
# def interpret_simpletext(s, context):
# def serialise_simpletext(s, context):
# def interpret_text(s, context):
# def serialise_text(s, context):
# def interpret_point(s, context):
# def serialise_point(point, context):
# def interpret_move(s, context):
# def serialise_move(move, context):
# def interpret_point_list(values, context):
# def serialise_point_list(points, context):
# def interpret_AP(s, context):
# def serialise_AP(value, context):
# def interpret_ARLN_list(values, context):
# def serialise_ARLN_list(values, context):
# def interpret_FG(s, context):
# def serialise_FG(value, context):
# def interpret_LB_list(values, context):
# def serialise_LB_list(values, context):
# def __init__(self, interpreter, serialiser, uses_list,
# allows_empty_list=False):
# def _make_property_type(type_name, allows_empty_list=False):
# def __init__(self, size, encoding):
# def get_property_type(self, identifier):
# def register_property(self, identifier, property_type):
# def deregister_property(self, identifier):
# def set_private_property_type(self, property_type):
# def _get_effective_property_type(self, identifier):
# def interpret_as_type(self, property_type, raw_values):
# def interpret(self, identifier, raw_values):
# def serialise_as_type(self, property_type, value):
# def serialise(self, identifier, value):
# class _Context(object):
# class Property_type(object):
# class Presenter(_Context):
# P = _property_types_by_name
, which may contain function names, class names, or code. Output only the next line. | context = sgf_properties._Context(19, encoding) |
Using the snippet: <|code_start|>from __future__ import print_function
batch_size = 128
nb_epoch = 20
nb_classes = 19 * 19 # One class for each position on the board
go_board_rows, go_board_cols = 19, 19 # input dimensions of go board
nb_filters = 32 # number of convolutional filters to use
nb_pool = 2 # size of pooling area for max pooling
nb_conv = 3 # convolution kernel size
# SevenPlaneProcessor loads seven planes (doh!) of 19*19 data points, so we need 7 input channels
<|code_end|>
, determine the next line of code. You have imports:
import os
import webbrowser
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from betago.model import KerasBot
from betago.processor import SevenPlaneProcessor
and context (class names, function names, or code) available:
# Path: betago/model.py
# class KerasBot(GoModel):
# '''
# KerasBot takes top_n predictions of a keras model and tries to apply the best move. If that move is illegal,
# choose the next best, until the list is exhausted. If no more moves are left to play, continue with random
# moves until a legal move is found.
# '''
#
# def __init__(self, model, processor, top_n=10):
# super(KerasBot, self).__init__(model=model, processor=processor)
# self.top_n = top_n
#
# def apply_move(self, color, move):
# # Apply human move
# self.go_board.apply_move(color, move)
#
# def select_move(self, bot_color):
# move = get_first_valid_move(self.go_board, bot_color,
# self._move_generator(bot_color))
# if move is not None:
# self.go_board.apply_move(bot_color, move)
# return move
#
# def _move_generator(self, bot_color):
# return chain(
# # First try the model.
# self._model_moves(bot_color),
# # If none of the model moves are valid, fill in a random
# # dame point. This is probably not a very good move, but
# # it's better than randomly filling in our own eyes.
# fill_dame(self.go_board),
# # Lastly just try any open space.
# generate_in_random_order(all_empty_points(self.go_board)),
# )
#
# def _model_moves(self, bot_color):
# # Turn the board into a feature vector.
# # The (0, 0) is for generating the label, which we ignore.
# X, label = self.processor.feature_and_label(
# bot_color, (0, 0), self.go_board, self.num_planes)
# X = X.reshape((1, X.shape[0], X.shape[1], X.shape[2]))
#
# # Generate bot move.
# pred = np.squeeze(self.model.predict(X))
# top_n_pred_idx = pred.argsort()[-self.top_n:][::-1]
# for idx in top_n_pred_idx:
# prediction = int(idx)
# pred_row = prediction // 19
# pred_col = prediction % 19
# pred_move = (pred_row, pred_col)
# yield pred_move
#
# Path: betago/processor.py
# class SevenPlaneProcessor(GoDataProcessor):
# '''
# Implementation of a Go data processor, using seven planes of 19x19 values to represent the position of
# a go board, as explained below.
#
# This closely reflects the representation suggested in Clark, Storkey:
# http://arxiv.org/abs/1412.3409
# '''
#
# def __init__(self, data_directory='data', num_planes=7, consolidate=True, use_generator=False):
# super(SevenPlaneProcessor, self).__init__(data_directory=data_directory,
# num_planes=num_planes,
# consolidate=consolidate,
# use_generator=use_generator)
#
# def feature_and_label(self, color, move, go_board, num_planes):
# '''
# Parameters
# ----------
# color: color of the next person to move
# move: move they decided to make
# go_board: represents the state of the board before they moved
#
# Planes we write:
# 0: our stones with 1 liberty
# 1: our stones with 2 liberty
# 2: our stones with 3 or more liberties
# 3: their stones with 1 liberty
# 4: their stones with 2 liberty
# 5: their stones with 3 or more liberties
# 6: simple ko
# '''
# row, col = move
# enemy_color = go_board.other_color(color)
# label = row * 19 + col
# move_array = np.zeros((num_planes, go_board.board_size, go_board.board_size))
# for row in range(0, go_board.board_size):
# for col in range(0, go_board.board_size):
# pos = (row, col)
# if go_board.board.get(pos) == color:
# if go_board.go_strings[pos].liberties.size() == 1:
# move_array[0, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() == 2:
# move_array[1, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() >= 3:
# move_array[2, row, col] = 1
# if go_board.board.get(pos) == enemy_color:
# if go_board.go_strings[pos].liberties.size() == 1:
# move_array[3, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() == 2:
# move_array[4, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() >= 3:
# move_array[5, row, col] = 1
# if go_board.is_simple_ko(color, pos):
# move_array[6, row, col] = 1
# return move_array, label
. Output only the next line. | processor = SevenPlaneProcessor() |
Predict the next line after this snippet: <|code_start|>
class GTPCoordinateTest(unittest.TestCase):
def test_coords_to_gtp_position(self):
self.assertEqual('A1', coords_to_gtp_position((0, 0)))
self.assertEqual('J3', coords_to_gtp_position((2, 8)))
self.assertEqual('B15', coords_to_gtp_position((14, 1)))
def test_gtp_position_to_coords(self):
<|code_end|>
using the current file's imports:
import unittest
from betago.gtp.board import coords_to_gtp_position, gtp_position_to_coords
and any relevant context from other files:
# Path: betago/gtp/board.py
# def coords_to_gtp_position(coords):
# """Convert (row, col) tuple to GTP board locations.
#
# Example:
# >>> coords_to_gtp_position((0, 0))
# 'A1'
# """
# row, col = coords
# # coords are zero-indexed, GTP is 1-indexed.
# return COLS[col] + str(row + 1)
#
# def gtp_position_to_coords(gtp_position):
# """Convert a GTP board location to a (row, col) tuple.
#
# Example:
# >>> gtp_position_to_coords('A1')
# (0, 0)
# """
# col_str, row_str = gtp_position[0], gtp_position[1:]
# return (int(row_str) - 1, COLS.find(col_str))
. Output only the next line. | self.assertEqual((0, 0), gtp_position_to_coords('A1')) |
Next line prediction: <|code_start|>from __future__ import print_function
argparser = argparse.ArgumentParser()
argparser.add_argument('handicap', type=int, nargs=1)
argparser.add_argument('output_sgf', nargs='?', default='output.sgf')
args = argparser.parse_args()
<|code_end|>
. Use current file imports:
(import yaml
import subprocess
import re
import argparse
from keras.models import model_from_yaml
from betago.model import KerasBot
from betago.processor import SevenPlaneProcessor
from betago.gtp.board import gtp_position_to_coords, coords_to_gtp_position)
and context including class names, function names, or small code snippets from other files:
# Path: betago/model.py
# class KerasBot(GoModel):
# '''
# KerasBot takes top_n predictions of a keras model and tries to apply the best move. If that move is illegal,
# choose the next best, until the list is exhausted. If no more moves are left to play, continue with random
# moves until a legal move is found.
# '''
#
# def __init__(self, model, processor, top_n=10):
# super(KerasBot, self).__init__(model=model, processor=processor)
# self.top_n = top_n
#
# def apply_move(self, color, move):
# # Apply human move
# self.go_board.apply_move(color, move)
#
# def select_move(self, bot_color):
# move = get_first_valid_move(self.go_board, bot_color,
# self._move_generator(bot_color))
# if move is not None:
# self.go_board.apply_move(bot_color, move)
# return move
#
# def _move_generator(self, bot_color):
# return chain(
# # First try the model.
# self._model_moves(bot_color),
# # If none of the model moves are valid, fill in a random
# # dame point. This is probably not a very good move, but
# # it's better than randomly filling in our own eyes.
# fill_dame(self.go_board),
# # Lastly just try any open space.
# generate_in_random_order(all_empty_points(self.go_board)),
# )
#
# def _model_moves(self, bot_color):
# # Turn the board into a feature vector.
# # The (0, 0) is for generating the label, which we ignore.
# X, label = self.processor.feature_and_label(
# bot_color, (0, 0), self.go_board, self.num_planes)
# X = X.reshape((1, X.shape[0], X.shape[1], X.shape[2]))
#
# # Generate bot move.
# pred = np.squeeze(self.model.predict(X))
# top_n_pred_idx = pred.argsort()[-self.top_n:][::-1]
# for idx in top_n_pred_idx:
# prediction = int(idx)
# pred_row = prediction // 19
# pred_col = prediction % 19
# pred_move = (pred_row, pred_col)
# yield pred_move
#
# Path: betago/processor.py
# class SevenPlaneProcessor(GoDataProcessor):
# '''
# Implementation of a Go data processor, using seven planes of 19x19 values to represent the position of
# a go board, as explained below.
#
# This closely reflects the representation suggested in Clark, Storkey:
# http://arxiv.org/abs/1412.3409
# '''
#
# def __init__(self, data_directory='data', num_planes=7, consolidate=True, use_generator=False):
# super(SevenPlaneProcessor, self).__init__(data_directory=data_directory,
# num_planes=num_planes,
# consolidate=consolidate,
# use_generator=use_generator)
#
# def feature_and_label(self, color, move, go_board, num_planes):
# '''
# Parameters
# ----------
# color: color of the next person to move
# move: move they decided to make
# go_board: represents the state of the board before they moved
#
# Planes we write:
# 0: our stones with 1 liberty
# 1: our stones with 2 liberty
# 2: our stones with 3 or more liberties
# 3: their stones with 1 liberty
# 4: their stones with 2 liberty
# 5: their stones with 3 or more liberties
# 6: simple ko
# '''
# row, col = move
# enemy_color = go_board.other_color(color)
# label = row * 19 + col
# move_array = np.zeros((num_planes, go_board.board_size, go_board.board_size))
# for row in range(0, go_board.board_size):
# for col in range(0, go_board.board_size):
# pos = (row, col)
# if go_board.board.get(pos) == color:
# if go_board.go_strings[pos].liberties.size() == 1:
# move_array[0, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() == 2:
# move_array[1, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() >= 3:
# move_array[2, row, col] = 1
# if go_board.board.get(pos) == enemy_color:
# if go_board.go_strings[pos].liberties.size() == 1:
# move_array[3, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() == 2:
# move_array[4, row, col] = 1
# elif go_board.go_strings[pos].liberties.size() >= 3:
# move_array[5, row, col] = 1
# if go_board.is_simple_ko(color, pos):
# move_array[6, row, col] = 1
# return move_array, label
#
# Path: betago/gtp/board.py
# def gtp_position_to_coords(gtp_position):
# """Convert a GTP board location to a (row, col) tuple.
#
# Example:
# >>> gtp_position_to_coords('A1')
# (0, 0)
# """
# col_str, row_str = gtp_position[0], gtp_position[1:]
# return (int(row_str) - 1, COLS.find(col_str))
#
# def coords_to_gtp_position(coords):
# """Convert (row, col) tuple to GTP board locations.
#
# Example:
# >>> coords_to_gtp_position((0, 0))
# 'A1'
# """
# row, col = coords
# # coords are zero-indexed, GTP is 1-indexed.
# return COLS[col] + str(row + 1)
. Output only the next line. | processor = SevenPlaneProcessor() |
Given the code snippet: <|code_start|>
class ScoringTestCase(unittest.TestCase):
def test_identify_territory(self):
board = goboard.from_string('''
...b..w..
...b..w..
bbbb..w..
wwwww.www
wwbbw..w.
wb.bbwww.
wbbbb....
..b.b....
..bbb....
''')
<|code_end|>
, generate the next line using the imports in this file:
import unittest
from betago import scoring
from betago.dataloader import goboard
and context (functions, classes, or occasionally code) from other files:
# Path: betago/scoring.py
# class Territory(object):
# def __init__(self, territory_map):
# def evaluate_territory(board):
# def _collect_region(start_pos, board, visited=None):
#
# Path: betago/dataloader/goboard.py
# class GoBoard(object):
# class BoardSequence(object):
# class GoString(object):
# def __init__(self, board_size=19):
# def fold_go_strings(self, target, source, join_position):
# def add_adjacent_liberty(self, pos, go_string):
# def is_move_on_board(self, move):
# def is_move_suicide(self, color, pos):
# def is_move_legal(self, color, pos):
# def create_go_string(self, color, pos):
# def other_color(self, color):
# def is_simple_ko(self, play_color, pos):
# def check_enemy_liberty(self, play_color, enemy_pos, our_pos):
# def apply_move(self, play_color, pos):
# def add_liberty_to_adjacent_string(self, string_pos, liberty_pos, color):
# def fold_our_moves(self, first_string, color, pos, join_position):
# def __str__(self):
# def __init__(self, board_size=19):
# def insert(self, combo):
# def erase(self, combo):
# def exists(self, combo):
# def size(self):
# def __getitem__(self, iid):
# def __str__(self):
# def __init__(self, board_size, color):
# def get_stone(self, index):
# def get_liberty(self, index):
# def insert_stone(self, combo):
# def get_num_stones(self):
# def remove_liberty(self, combo):
# def get_num_liberties(self):
# def insert_liberty(self, combo):
# def copy_liberties_from(self, source):
# def __str__(self):
# def from_string(board_string):
# def to_string(board):
. Output only the next line. | territory = scoring.evaluate_territory(board) |
Continue the code snippet: <|code_start|>
"""
Selects the best `elite_size` results according to `fitness()` from
the current experiment. Uniformly samples one of them as a parent, and
perturbs it using the experiment's samplers.
Note that in order to change parent, you should call experiment sample_parent()
Parameters:
* experiment - (Experiment) experiment to wrap.
* elite_size - (int) number of results to consider as parents.
* fitness - (function) function to determine fitness of result.
Return type: n/a
Example:
experiment = Experiment('name', params={'lr': Gaussian()})
evo = Evolutionary(experiment)
evo.sample_parent()
evo.sample_all_params()
evo.add_result(loss(evo.params))
"""
def __init__(self, experiment, elite_size=10, fitness=None):
self.__dict__.update(experiment.__dict__)
self.experiment = experiment
self.elite_size = 10
if fitness is None:
<|code_end|>
. Use current file imports:
from random import sample
from .experiment import Experiment, leq
and context (classes, functions, or code) from other files:
# Path: randopt/experiment/experiment.py
# ATTACHMENT_DIR = '_attachments'
# ATTACHMENT_EXT = '.pk'
# class SummaryList(list):
# class JSONSummary(dict):
# class Experiment(object):
# def __init__(self, results):
# def __getitem__(self, key):
# def __getslice__(self, i, j):
# def __str__(self):
# def count(self):
# def filter(self, fn):
# def values(self, key='result'):
# def map(self, fn, key='result'):
# def min(self, key='result'):
# def max(self, key='result'):
# def mean(self, key='result'):
# def variance(self, key='result'):
# def std(self, key='result'):
# def median(self, key='result'):
# def __init__(self, path):
# def __getattr__(self, attr):
# def __str__(self):
# def _load_attachment(self):
# def __getstate__(self):
# def __setstate__(self, state):
# def __init__(self, name, params={}, directory='randopt_results'):
# def current(self):
# def _search(self, fn=leq):
# def top(self, count, fn=leq):
# def maximum(self):
# def minimum(self):
# def all(self):
# def list(self):
# def count(self):
# def seed(self, seed):
# def set(self, key, value):
# def sample(self, key):
# def sample_all_params(self):
# def add_result(self, result, data=None, attachment=None):
# def all_results(self):
# def save_state(self, path):
# def set_state(self, path):
. Output only the next line. | fitness = leq |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python3
try:
except ImportError:
try:
except ImportError:
<|code_end|>
with the help of current file imports:
import os
import random
import ujson as json
import json
import cPickle as pk
import pickle as pk
from time import time
from math import log, ceil
from collections import namedtuple
from randopt.samplers import Uniform
from .experiment import Experiment, leq, geq
and context from other files:
# Path: randopt/samplers.py
# class Uniform(Sampler):
# '''
# Generates a randomly sampled value from low to high with equal probability.
#
# Parameters:
#
# * low - (float) minimum value.
# * high - (float) maximum value.
# * dtype - (string) data type. Default: float
#
# Return type: n/a
#
# Example:
#
# randopt.Uniform(low=-1.0, high=1.0, dtype='float')
# '''
# def __init__(self, low=0.0, high=1.0, dtype='float'):
# super(Uniform, self).__init__()
# self.low = low
# self.high = high
# self.dtype = dtype
#
# def sample(self):
# res = self.rng.uniform(self.low, self.high)
# if 'fl' in self.dtype:
# return res
# return int(res)
#
# Path: randopt/experiment/experiment.py
# ATTACHMENT_DIR = '_attachments'
# ATTACHMENT_EXT = '.pk'
# class SummaryList(list):
# class JSONSummary(dict):
# class Experiment(object):
# def __init__(self, results):
# def __getitem__(self, key):
# def __getslice__(self, i, j):
# def __str__(self):
# def count(self):
# def filter(self, fn):
# def values(self, key='result'):
# def map(self, fn, key='result'):
# def min(self, key='result'):
# def max(self, key='result'):
# def mean(self, key='result'):
# def variance(self, key='result'):
# def std(self, key='result'):
# def median(self, key='result'):
# def __init__(self, path):
# def __getattr__(self, attr):
# def __str__(self):
# def _load_attachment(self):
# def __getstate__(self):
# def __setstate__(self, state):
# def __init__(self, name, params={}, directory='randopt_results'):
# def current(self):
# def _search(self, fn=leq):
# def top(self, count, fn=leq):
# def maximum(self):
# def minimum(self):
# def all(self):
# def list(self):
# def count(self):
# def seed(self, seed):
# def set(self, key, value):
# def sample(self, key):
# def sample_all_params(self):
# def add_result(self, result, data=None, attachment=None):
# def all_results(self):
# def save_state(self, path):
# def set_state(self, path):
, which may contain function names, class names, or code. Output only the next line. | class HyperBand(Experiment): |
Using the snippet: <|code_start|>#!/usr/bin/env python3
try:
except ImportError:
try:
except ImportError:
class HyperBand(Experiment):
"""
HyperBand implementation, based on
http://people.eecs.berkeley.edu/~kjamieson/hyperband.html
"""
def __init__(self, name, params, num_iter, eta=None, comparator=None):
super(HyperBand, self).__init__(name, params)
if eta is None:
eta = 2.718281828
self.eta = eta
if comparator is None:
<|code_end|>
, determine the next line of code. You have imports:
import os
import random
import ujson as json
import json
import cPickle as pk
import pickle as pk
from time import time
from math import log, ceil
from collections import namedtuple
from randopt.samplers import Uniform
from .experiment import Experiment, leq, geq
and context (class names, function names, or code) available:
# Path: randopt/samplers.py
# class Uniform(Sampler):
# '''
# Generates a randomly sampled value from low to high with equal probability.
#
# Parameters:
#
# * low - (float) minimum value.
# * high - (float) maximum value.
# * dtype - (string) data type. Default: float
#
# Return type: n/a
#
# Example:
#
# randopt.Uniform(low=-1.0, high=1.0, dtype='float')
# '''
# def __init__(self, low=0.0, high=1.0, dtype='float'):
# super(Uniform, self).__init__()
# self.low = low
# self.high = high
# self.dtype = dtype
#
# def sample(self):
# res = self.rng.uniform(self.low, self.high)
# if 'fl' in self.dtype:
# return res
# return int(res)
#
# Path: randopt/experiment/experiment.py
# ATTACHMENT_DIR = '_attachments'
# ATTACHMENT_EXT = '.pk'
# class SummaryList(list):
# class JSONSummary(dict):
# class Experiment(object):
# def __init__(self, results):
# def __getitem__(self, key):
# def __getslice__(self, i, j):
# def __str__(self):
# def count(self):
# def filter(self, fn):
# def values(self, key='result'):
# def map(self, fn, key='result'):
# def min(self, key='result'):
# def max(self, key='result'):
# def mean(self, key='result'):
# def variance(self, key='result'):
# def std(self, key='result'):
# def median(self, key='result'):
# def __init__(self, path):
# def __getattr__(self, attr):
# def __str__(self):
# def _load_attachment(self):
# def __getstate__(self):
# def __setstate__(self, state):
# def __init__(self, name, params={}, directory='randopt_results'):
# def current(self):
# def _search(self, fn=leq):
# def top(self, count, fn=leq):
# def maximum(self):
# def minimum(self):
# def all(self):
# def list(self):
# def count(self):
# def seed(self, seed):
# def set(self, key, value):
# def sample(self, key):
# def sample_all_params(self):
# def add_result(self, result, data=None, attachment=None):
# def all_results(self):
# def save_state(self, path):
# def set_state(self, path):
. Output only the next line. | comparator = leq |
Given the code snippet: <|code_start|> self.s0 = val[:, 1]
self._initial_state = val
def get_solution(
self, t: ndarray, stacked: bool = True, with_keys: bool = False
) -> Union[Dict, ndarray]:
"""Calculate solution of dynamics.
Arguments
---------
t
Time steps at which to evaluate solution.
stacked
Whether to stack states or return them individually. Defaults to `True`.
with_keys
Whether to return solution labelled by variables in form of a dictionary.
Defaults to `False`.
Returns
-------
Union[Dict, ndarray]
Solution of system. If `with_keys=True`, the solution is returned in form of
a dictionary with variables as keys. Otherwise, the solution is given as
a `numpy.ndarray` of form `(n_steps, 2)`.
"""
expu = np.exp(-self.beta * t)
exps = np.exp(-self.gamma * t)
unspliced = self.u0 * expu + self.alpha / self.beta * (1 - expu)
<|code_end|>
, generate the next line using the imports in this file:
from typing import Dict, List, Tuple, Union
from numpy import ndarray
from ._arithmetic import invert
from ._base import DynamicsBase
import numpy as np
and context (functions, classes, or occasionally code) from other files:
# Path: scvelo/core/_arithmetic.py
# def invert(x: ndarray) -> ndarray:
# """Invert array and set infinity to NaN.
#
# Arguments
# ---------
# x
# Array to invert.
#
# Returns
# -------
# ndarray
# Inverted array.
# """
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# x_inv = 1 / x * (x != 0)
# return x_inv
#
# Path: scvelo/core/_base.py
# class DynamicsBase(ABC):
# @abstractmethod
# def get_solution(
# self, t: ndarray, stacked: True, with_keys: bool = False
# ) -> Union[Dict, Tuple[ndarray], ndarray]:
# """Calculate solution of dynamics.
#
# Arguments
# ---------
# t
# Time steps at which to evaluate solution.
# stacked
# Whether to stack states or return them individually. Defaults to `True`.
# with_keys
# Whether to return solution labelled by variables in form of a dictionary.
# Defaults to `False`.
#
# Returns
# -------
# Union[Dict, Tuple[ndarray], ndarray]
# Solution of system. If `with_keys=True`, the solution is returned in form of
# a dictionary with variables as keys. Otherwise, the solution is given as
# a `numpy.ndarray` of form `(n_steps, n_vars)`.
# """
#
# return
#
# @abstractmethod
# def get_steady_states(
# self, stacked: True, with_keys: False
# ) -> Union[Dict[str, ndarray], Tuple[ndarray], ndarray]:
# """Return steady state of system.
#
# Arguments
# ---------
# stacked
# Whether to stack states or return them individually. Defaults to `True`.
# with_keys
# Whether to return solution labelled by variables in form of a dictionary.
# Defaults to `False`.
#
# Returns
# -------
# Union[Dict[str, ndarray], Tuple[ndarray], ndarray]
# Steady state of system.
# """
#
# return
. Output only the next line. | c = (self.alpha - self.u0 * self.beta) * invert(self.gamma - self.beta) |
Given the following code snippet before the placeholder: <|code_start|> data.obs.keys(),
data.var.keys(),
data.obsm.keys(),
data.varm.keys(),
data.uns.keys(),
data.layers.keys(),
]
if hasattr(data, "obsp") and hasattr(data, "varp"):
s_keys.extend(["obsp", "varp"])
d_keys.extend([data.obsp.keys(), data.varp.keys()])
if keys is None:
df = data.to_df()
elif key in data.var_names:
df = obs_df(data, keys, layer=layer)
elif key in data.obs_names:
df = var_df(data, keys, layer=layer)
else:
if keys_split is not None:
keys = [
k
for k in list(data.obs.keys()) + list(data.var.keys())
if key in k and keys_split in k
]
key = keys[0]
s_key = [s for (s, d_key) in zip(s_keys, d_keys) if key in d_key]
if len(s_key) == 0:
raise ValueError(f"'{key}' not found in any of {', '.join(s_keys)}.")
if len(s_key) > 1:
<|code_end|>
, predict the next line using imports from the current file:
import re
import numpy as np
import pandas as pd
from typing import List, Optional, Union
from typing_extensions import Literal
from numpy import ndarray
from pandas import DataFrame
from pandas.api.types import is_categorical_dtype
from scipy.sparse import csr_matrix, issparse, spmatrix
from anndata import AnnData
from scvelo import logging as logg
from ._arithmetic import sum
and context including class names, function names, and sometimes code from other files:
# Path: scvelo/logging.py
# _VERBOSITY_LEVELS_FROM_STRINGS = {"error": 0, "warn": 1, "info": 2, "hint": 3}
# def info(*args, **kwargs):
# def error(*args, **kwargs):
# def warn(*args, **kwargs):
# def hint(*args, **kwargs):
# def _settings_verbosity_greater_or_equal_than(v):
# def msg(
# *msg,
# v=None,
# time=False,
# memory=False,
# reset=False,
# end="\n",
# no_indent=False,
# t=None,
# m=None,
# r=None,
# ):
# def _write_log(*msg, end="\n"):
# def _sec_to_str(t, show_microseconds=False):
# def get_passed_time():
# def print_passed_time():
# def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):
# def __init__(self):
# def run(self):
# def get_latest_pypi_version():
# def check_if_latest_version():
# def print_version():
# def print_versions():
# def get_date_string():
# def switch_verbosity(mode="on", module=None):
# def __init__(self, total, interval=3):
# def update(self):
# def finish(self):
# def profiler(command, filename="profile.stats", n_stats=10):
# class InterruptableThread(threading.Thread):
# class ProgressReporter:
#
# Path: scvelo/core/_arithmetic.py
# def sum(a: Union[ndarray, spmatrix], axis: Optional[int] = None) -> ndarray:
# """Sum array elements over a given axis.
#
# Arguments
# ---------
# a
# Elements to sum.
# axis
# Axis along which to sum elements. If `None`, all elements will be summed.
# Defaults to `None`.
#
# Returns
# -------
# ndarray
# Sum of array along given axis.
# """
#
# if a.ndim == 1:
# axis = 0
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# return a.sum(axis=axis).A1 if issparse(a) else a.sum(axis=axis)
. Output only the next line. | logg.warn(f"'{key}' found multiple times in {', '.join(s_key)}.") |
Using the snippet: <|code_start|> s_key = [s for (s, d_key) in zip(s_keys, d_keys) if key in d_key]
if len(s_key) == 0:
raise ValueError(f"'{key}' not found in any of {', '.join(s_keys)}.")
if len(s_key) > 1:
logg.warn(f"'{key}' found multiple times in {', '.join(s_key)}.")
s_key = s_key[-1]
df = getattr(data, s_key)[keys if len(keys) > 1 else key]
if key_add is not None:
df = df[key_add]
if index is None:
index = (
data.var_names
if s_key == "varm"
else data.obs_names
if s_key in {"obsm", "layers"}
else None
)
if index is None and s_key == "uns" and hasattr(df, "shape"):
key_cats = np.array(
[
key
for key in data.obs.keys()
if is_categorical_dtype(data.obs[key])
]
)
num_cats = [
len(data.obs[key].cat.categories) == df.shape[0]
for key in key_cats
]
<|code_end|>
, determine the next line of code. You have imports:
import re
import numpy as np
import pandas as pd
from typing import List, Optional, Union
from typing_extensions import Literal
from numpy import ndarray
from pandas import DataFrame
from pandas.api.types import is_categorical_dtype
from scipy.sparse import csr_matrix, issparse, spmatrix
from anndata import AnnData
from scvelo import logging as logg
from ._arithmetic import sum
and context (class names, function names, or code) available:
# Path: scvelo/logging.py
# _VERBOSITY_LEVELS_FROM_STRINGS = {"error": 0, "warn": 1, "info": 2, "hint": 3}
# def info(*args, **kwargs):
# def error(*args, **kwargs):
# def warn(*args, **kwargs):
# def hint(*args, **kwargs):
# def _settings_verbosity_greater_or_equal_than(v):
# def msg(
# *msg,
# v=None,
# time=False,
# memory=False,
# reset=False,
# end="\n",
# no_indent=False,
# t=None,
# m=None,
# r=None,
# ):
# def _write_log(*msg, end="\n"):
# def _sec_to_str(t, show_microseconds=False):
# def get_passed_time():
# def print_passed_time():
# def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):
# def __init__(self):
# def run(self):
# def get_latest_pypi_version():
# def check_if_latest_version():
# def print_version():
# def print_versions():
# def get_date_string():
# def switch_verbosity(mode="on", module=None):
# def __init__(self, total, interval=3):
# def update(self):
# def finish(self):
# def profiler(command, filename="profile.stats", n_stats=10):
# class InterruptableThread(threading.Thread):
# class ProgressReporter:
#
# Path: scvelo/core/_arithmetic.py
# def sum(a: Union[ndarray, spmatrix], axis: Optional[int] = None) -> ndarray:
# """Sum array elements over a given axis.
#
# Arguments
# ---------
# a
# Elements to sum.
# axis
# Axis along which to sum elements. If `None`, all elements will be summed.
# Defaults to `None`.
#
# Returns
# -------
# ndarray
# Sum of array along given axis.
# """
#
# if a.ndim == 1:
# axis = 0
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# return a.sum(axis=axis).A1 if issparse(a) else a.sum(axis=axis)
. Output only the next line. | if np.sum(num_cats) == 1: |
Based on the snippet: <|code_start|> """\
Score cell cycle genes.
Calculates scores and assigns a cell cycle phase (G1, S, G2M) using the list of cell
cycle genes defined in Tirosh et al, 2015 (https://doi.org/10.1126/science.aad0501).
Parameters
----------
adata
The annotated data matrix.
s_genes
List of genes associated with S phase.
g2m_genes
List of genes associated with G2M phase.
copy
Copy `adata` or modify it inplace.
**kwargs
Are passed to :func:`~scanpy.tl.score_genes`. `ctrl_size` is not
possible, as it's set as `min(len(s_genes), len(g2m_genes))`.
Returns
-------
S_score: `adata.obs`, dtype `object`
The score for S phase for each cell.
G2M_score: `adata.obs`, dtype `object`
The score for G2M phase for each cell.
phase: `adata.obs`, dtype `object`
The cell cycle phase (`S`, `G2M` or `G1`) for each cell.
"""
<|code_end|>
, predict the immediate next line with the help of imports:
import numpy as np
import pandas as pd
from scvelo import logging as logg
from scanpy.tools._score_genes import score_genes
and context (classes, functions, sometimes code) from other files:
# Path: scvelo/logging.py
# _VERBOSITY_LEVELS_FROM_STRINGS = {"error": 0, "warn": 1, "info": 2, "hint": 3}
# def info(*args, **kwargs):
# def error(*args, **kwargs):
# def warn(*args, **kwargs):
# def hint(*args, **kwargs):
# def _settings_verbosity_greater_or_equal_than(v):
# def msg(
# *msg,
# v=None,
# time=False,
# memory=False,
# reset=False,
# end="\n",
# no_indent=False,
# t=None,
# m=None,
# r=None,
# ):
# def _write_log(*msg, end="\n"):
# def _sec_to_str(t, show_microseconds=False):
# def get_passed_time():
# def print_passed_time():
# def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):
# def __init__(self):
# def run(self):
# def get_latest_pypi_version():
# def check_if_latest_version():
# def print_version():
# def print_versions():
# def get_date_string():
# def switch_verbosity(mode="on", module=None):
# def __init__(self, total, interval=3):
# def update(self):
# def finish(self):
# def profiler(command, filename="profile.stats", n_stats=10):
# class InterruptableThread(threading.Thread):
# class ProgressReporter:
. Output only the next line. | logg.info("calculating cell cycle phase") |
Predict the next line for this snippet: <|code_start|>
neighs = NearestNeighbors(n_neighbors=100)
neighs.fit(adata.obsm[f"X_{basis_constraint}"])
basis_graph = neighs.kneighbors_graph(mode="connectivity") > 0
graph = graph.multiply(basis_graph)
if self_transitions:
confidence = graph.max(1).A.flatten()
ub = np.percentile(confidence, 98)
self_prob = np.clip(ub - confidence, 0, 1)
graph.setdiag(self_prob)
T = np.expm1(graph * scale) # equivalent to np.exp(graph.A * scale) - 1
if graph_neg is not None:
graph_neg = adata.uns[f"{vkey}_graph_neg"]
if use_negative_cosines:
T -= np.expm1(-graph_neg * scale)
else:
T += np.expm1(graph_neg * scale)
T.data += 1
# weight direct and indirect (recursed) neighbors
if weight_indirect_neighbors is not None and weight_indirect_neighbors < 1:
direct_neighbors = get_neighs(adata, "distances") > 0
direct_neighbors.setdiag(1)
w = weight_indirect_neighbors
T = w * T + (1 - w) * direct_neighbors.multiply(T)
if n_neighbors is not None:
T = T.multiply(
<|code_end|>
with the help of current file imports:
import warnings
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, SparseEfficiencyWarning
from scipy.spatial.distance import pdist, squareform
from scvelo.preprocessing.neighbors import get_connectivities, get_neighs
from .utils import normalize
from sklearn.neighbors import NearestNeighbors
and context from other files:
# Path: scvelo/preprocessing/neighbors.py
# def get_connectivities(
# adata, mode="connectivities", n_neighbors=None, recurse_neighbors=False
# ):
# if "neighbors" in adata.uns.keys():
# C = get_neighs(adata, mode)
# if n_neighbors is not None and n_neighbors < get_n_neighs(adata):
# if mode == "connectivities":
# C = select_connectivities(C, n_neighbors)
# else:
# C = select_distances(C, n_neighbors)
# connectivities = C > 0
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# connectivities.setdiag(1)
# if recurse_neighbors:
# connectivities += connectivities.dot(connectivities * 0.5)
# connectivities.data = np.clip(connectivities.data, 0, 1)
# connectivities = connectivities.multiply(1.0 / connectivities.sum(1))
# return connectivities.tocsr().astype(np.float32)
# else:
# return None
#
# def get_neighs(adata, mode="distances"):
# if hasattr(adata, "obsp") and mode in adata.obsp.keys():
# return adata.obsp[mode]
# elif "neighbors" in adata.uns.keys() and mode in adata.uns["neighbors"]:
# return adata.uns["neighbors"][mode]
# else:
# raise ValueError("The selected mode is not valid.")
#
# Path: scvelo/tools/utils.py
# def normalize(X):
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# if issparse(X):
# return X.multiply(csr_matrix(1.0 / np.abs(X).sum(1)))
# else:
# return X / X.sum(1)
, which may contain function names, class names, or code. Output only the next line. | get_connectivities( |
Predict the next line for this snippet: <|code_start|> graph_neg = adata.obsp[f"{vkey}_graph_neg"]
else:
graph = csr_matrix(adata.uns[f"{vkey}_graph"]).copy()
if f"{vkey}_graph_neg" in adata.uns.keys():
graph_neg = adata.uns[f"{vkey}_graph_neg"]
if basis_constraint is not None and f"X_{basis_constraint}" in adata.obsm.keys():
neighs = NearestNeighbors(n_neighbors=100)
neighs.fit(adata.obsm[f"X_{basis_constraint}"])
basis_graph = neighs.kneighbors_graph(mode="connectivity") > 0
graph = graph.multiply(basis_graph)
if self_transitions:
confidence = graph.max(1).A.flatten()
ub = np.percentile(confidence, 98)
self_prob = np.clip(ub - confidence, 0, 1)
graph.setdiag(self_prob)
T = np.expm1(graph * scale) # equivalent to np.exp(graph.A * scale) - 1
if graph_neg is not None:
graph_neg = adata.uns[f"{vkey}_graph_neg"]
if use_negative_cosines:
T -= np.expm1(-graph_neg * scale)
else:
T += np.expm1(graph_neg * scale)
T.data += 1
# weight direct and indirect (recursed) neighbors
if weight_indirect_neighbors is not None and weight_indirect_neighbors < 1:
<|code_end|>
with the help of current file imports:
import warnings
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, SparseEfficiencyWarning
from scipy.spatial.distance import pdist, squareform
from scvelo.preprocessing.neighbors import get_connectivities, get_neighs
from .utils import normalize
from sklearn.neighbors import NearestNeighbors
and context from other files:
# Path: scvelo/preprocessing/neighbors.py
# def get_connectivities(
# adata, mode="connectivities", n_neighbors=None, recurse_neighbors=False
# ):
# if "neighbors" in adata.uns.keys():
# C = get_neighs(adata, mode)
# if n_neighbors is not None and n_neighbors < get_n_neighs(adata):
# if mode == "connectivities":
# C = select_connectivities(C, n_neighbors)
# else:
# C = select_distances(C, n_neighbors)
# connectivities = C > 0
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# connectivities.setdiag(1)
# if recurse_neighbors:
# connectivities += connectivities.dot(connectivities * 0.5)
# connectivities.data = np.clip(connectivities.data, 0, 1)
# connectivities = connectivities.multiply(1.0 / connectivities.sum(1))
# return connectivities.tocsr().astype(np.float32)
# else:
# return None
#
# def get_neighs(adata, mode="distances"):
# if hasattr(adata, "obsp") and mode in adata.obsp.keys():
# return adata.obsp[mode]
# elif "neighbors" in adata.uns.keys() and mode in adata.uns["neighbors"]:
# return adata.uns["neighbors"][mode]
# else:
# raise ValueError("The selected mode is not valid.")
#
# Path: scvelo/tools/utils.py
# def normalize(X):
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# if issparse(X):
# return X.multiply(csr_matrix(1.0 / np.abs(X).sum(1)))
# else:
# return X / X.sum(1)
, which may contain function names, class names, or code. Output only the next line. | direct_neighbors = get_neighs(adata, "distances") > 0 |
Given the following code snippet before the placeholder: <|code_start|> if graph_neg is not None:
graph_neg = adata.uns[f"{vkey}_graph_neg"]
if use_negative_cosines:
T -= np.expm1(-graph_neg * scale)
else:
T += np.expm1(graph_neg * scale)
T.data += 1
# weight direct and indirect (recursed) neighbors
if weight_indirect_neighbors is not None and weight_indirect_neighbors < 1:
direct_neighbors = get_neighs(adata, "distances") > 0
direct_neighbors.setdiag(1)
w = weight_indirect_neighbors
T = w * T + (1 - w) * direct_neighbors.multiply(T)
if n_neighbors is not None:
T = T.multiply(
get_connectivities(
adata, mode="distances", n_neighbors=n_neighbors, recurse_neighbors=True
)
)
if perc is not None or threshold is not None:
if threshold is None:
threshold = np.percentile(T.data, perc)
T.data[T.data < threshold] = 0
T.eliminate_zeros()
if backward:
T = T.T
<|code_end|>
, predict the next line using imports from the current file:
import warnings
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, SparseEfficiencyWarning
from scipy.spatial.distance import pdist, squareform
from scvelo.preprocessing.neighbors import get_connectivities, get_neighs
from .utils import normalize
from sklearn.neighbors import NearestNeighbors
and context including class names, function names, and sometimes code from other files:
# Path: scvelo/preprocessing/neighbors.py
# def get_connectivities(
# adata, mode="connectivities", n_neighbors=None, recurse_neighbors=False
# ):
# if "neighbors" in adata.uns.keys():
# C = get_neighs(adata, mode)
# if n_neighbors is not None and n_neighbors < get_n_neighs(adata):
# if mode == "connectivities":
# C = select_connectivities(C, n_neighbors)
# else:
# C = select_distances(C, n_neighbors)
# connectivities = C > 0
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# connectivities.setdiag(1)
# if recurse_neighbors:
# connectivities += connectivities.dot(connectivities * 0.5)
# connectivities.data = np.clip(connectivities.data, 0, 1)
# connectivities = connectivities.multiply(1.0 / connectivities.sum(1))
# return connectivities.tocsr().astype(np.float32)
# else:
# return None
#
# def get_neighs(adata, mode="distances"):
# if hasattr(adata, "obsp") and mode in adata.obsp.keys():
# return adata.obsp[mode]
# elif "neighbors" in adata.uns.keys() and mode in adata.uns["neighbors"]:
# return adata.uns["neighbors"][mode]
# else:
# raise ValueError("The selected mode is not valid.")
#
# Path: scvelo/tools/utils.py
# def normalize(X):
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# if issparse(X):
# return X.multiply(csr_matrix(1.0 / np.abs(X).sum(1)))
# else:
# return X / X.sum(1)
. Output only the next line. | T = normalize(T) |
Predict the next line after this snippet: <|code_start|> If `None`, split into :paramref:`n_jobs` chunks.
unit
Unit of the progress bar.
as_array
Whether to convert the results not :class:`numpy.ndarray`.
use_ixs
Whether to pass indices to the callback.
backend
Which backend to use for multiprocessing. See :class:`joblib.Parallel` for valid
options.
extractor
Function to apply to the result after all jobs have finished.
show_progress_bar
Whether to show a progress bar.
Returns
-------
:class:`numpy.ndarray`
Result depending on :paramref:`extractor` and :paramref:`as_array`.
"""
if show_progress_bar:
try:
try:
except ImportError:
except ImportError:
global _msg_shown
tqdm = None
if not _msg_shown:
<|code_end|>
using the current file's imports:
import os
import numpy as np
import ipywidgets # noqa
from multiprocessing import Manager
from threading import Thread
from typing import Any, Callable, Optional, Sequence, Union
from joblib import delayed, Parallel
from scipy.sparse import issparse, spmatrix
from scvelo import logging as logg
from tqdm.notebook import tqdm
from tqdm import tqdm_notebook as tqdm
and any relevant context from other files:
# Path: scvelo/logging.py
# _VERBOSITY_LEVELS_FROM_STRINGS = {"error": 0, "warn": 1, "info": 2, "hint": 3}
# def info(*args, **kwargs):
# def error(*args, **kwargs):
# def warn(*args, **kwargs):
# def hint(*args, **kwargs):
# def _settings_verbosity_greater_or_equal_than(v):
# def msg(
# *msg,
# v=None,
# time=False,
# memory=False,
# reset=False,
# end="\n",
# no_indent=False,
# t=None,
# m=None,
# r=None,
# ):
# def _write_log(*msg, end="\n"):
# def _sec_to_str(t, show_microseconds=False):
# def get_passed_time():
# def print_passed_time():
# def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):
# def __init__(self):
# def run(self):
# def get_latest_pypi_version():
# def check_if_latest_version():
# def print_version():
# def print_versions():
# def get_date_string():
# def switch_verbosity(mode="on", module=None):
# def __init__(self, total, interval=3):
# def update(self):
# def finish(self):
# def profiler(command, filename="profile.stats", n_stats=10):
# class InterruptableThread(threading.Thread):
# class ProgressReporter:
. Output only the next line. | logg.warn( |
Given the code snippet: <|code_start|> else:
trimmer = csr_matrix(
(normalized_data <= bound[0]) | (normalized_data >= bound[1])
).astype(bool)
return [trimmer.getnnz(axis=0)] + [
trimmer.multiply(data_mat).tocsr() for data_mat in data
]
def fit(self, x: ndarray, y: ndarray):
"""Fit linear model per column.
Arguments
---------
x
Training data of shape `(n_obs, n_vars)`.
y
Target values of shape `(n_obs, n_vars)`.
Returns
-------
self
Returns an instance of self.
"""
n_obs = x.shape[0]
if self.percentile is not None:
n_obs, x, y = self._trim_data(data=[x, y])
<|code_end|>
, generate the next line using the imports in this file:
from typing import List, Optional, Tuple, Union
from numpy import ndarray
from scipy.sparse import csr_matrix, issparse
from ._arithmetic import prod_sum, sum
import numpy as np
and context (functions, classes, or occasionally code) from other files:
# Path: scvelo/core/_arithmetic.py
# def prod_sum(
# a1: Union[ndarray, spmatrix], a2: Union[ndarray, spmatrix], axis: Optional[int]
# ) -> ndarray:
# """Take sum of product of two arrays along given axis.
#
# Arguments
# ---------
# a1
# First array.
# a2
# Second array.
# axis
# Axis along which to sum elements. If `None`, all elements will be summed.
# Defaults to `None`.
#
# Returns
# -------
# ndarray
# Sum of product of arrays along given axis.
# """
#
# if issparse(a1):
# return a1.multiply(a2).sum(axis=axis).A1
# elif axis == 0:
# return np.einsum("ij, ij -> j", a1, a2) if a1.ndim > 1 else (a1 * a2).sum()
# elif axis == 1:
# return np.einsum("ij, ij -> i", a1, a2) if a1.ndim > 1 else (a1 * a2).sum()
#
# def sum(a: Union[ndarray, spmatrix], axis: Optional[int] = None) -> ndarray:
# """Sum array elements over a given axis.
#
# Arguments
# ---------
# a
# Elements to sum.
# axis
# Axis along which to sum elements. If `None`, all elements will be summed.
# Defaults to `None`.
#
# Returns
# -------
# ndarray
# Sum of array along given axis.
# """
#
# if a.ndim == 1:
# axis = 0
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# return a.sum(axis=axis).A1 if issparse(a) else a.sum(axis=axis)
. Output only the next line. | _xx = prod_sum(x, x, axis=0) |
Predict the next line after this snippet: <|code_start|>
if constrain_ratio is None:
self.constrain_ratio = [-np.inf, np.inf]
elif len(constrain_ratio) == 1:
self.constrain_ratio = [-np.inf, constrain_ratio]
else:
self.constrain_ratio = constrain_ratio
def _trim_data(self, data: List) -> List:
"""Trim data to extreme values.
Arguments
---------
data
Data to be trimmed to extreme quantiles.
Returns
-------
List
Number of non-trivial entries per column and trimmed data.
"""
if not isinstance(data, List):
data = [data]
data = np.array(
[data_mat.A if issparse(data_mat) else data_mat for data_mat in data]
)
# TODO: Add explanatory comment
<|code_end|>
using the current file's imports:
from typing import List, Optional, Tuple, Union
from numpy import ndarray
from scipy.sparse import csr_matrix, issparse
from ._arithmetic import prod_sum, sum
import numpy as np
and any relevant context from other files:
# Path: scvelo/core/_arithmetic.py
# def prod_sum(
# a1: Union[ndarray, spmatrix], a2: Union[ndarray, spmatrix], axis: Optional[int]
# ) -> ndarray:
# """Take sum of product of two arrays along given axis.
#
# Arguments
# ---------
# a1
# First array.
# a2
# Second array.
# axis
# Axis along which to sum elements. If `None`, all elements will be summed.
# Defaults to `None`.
#
# Returns
# -------
# ndarray
# Sum of product of arrays along given axis.
# """
#
# if issparse(a1):
# return a1.multiply(a2).sum(axis=axis).A1
# elif axis == 0:
# return np.einsum("ij, ij -> j", a1, a2) if a1.ndim > 1 else (a1 * a2).sum()
# elif axis == 1:
# return np.einsum("ij, ij -> i", a1, a2) if a1.ndim > 1 else (a1 * a2).sum()
#
# def sum(a: Union[ndarray, spmatrix], axis: Optional[int] = None) -> ndarray:
# """Sum array elements over a given axis.
#
# Arguments
# ---------
# a
# Elements to sum.
# axis
# Axis along which to sum elements. If `None`, all elements will be summed.
# Defaults to `None`.
#
# Returns
# -------
# ndarray
# Sum of array along given axis.
# """
#
# if a.ndim == 1:
# axis = 0
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# return a.sum(axis=axis).A1 if issparse(a) else a.sum(axis=axis)
. Output only the next line. | normalized_data = np.sum( |
Predict the next line after this snippet: <|code_start|> if isinstance(groups, str) and groups == "all":
if color is None:
color = default_color(adata)
if is_categorical(adata, color):
vc = adata.obs[color].value_counts()
groups = [[c] for c in vc[vc > 0].index]
if isinstance(add_text, (list, tuple, np.ndarray, np.record)):
add_text = list(np.array(add_text, dtype=str))
# create list of each mkey and check if all bases are valid.
color = to_list(color, max_len=None)
layer, components = to_list(layer), to_list(components)
x, y, basis = to_list(x), to_list(y), to_valid_bases_list(adata, basis)
# get multikey (with more than one element)
multikeys = eval(f"[{','.join(mkeys)}]")
if is_list_of_list(groups):
multikeys.append(groups)
key_lengths = np.array([len(key) if is_list(key) else 1 for key in multikeys])
multikey = (
multikeys[np.where(key_lengths > 1)[0][0]] if np.max(key_lengths) > 1 else None
)
# gridspec frame for plotting multiple keys (mkeys: list or tuple)
if multikey is not None:
if np.sum(key_lengths > 1) == 1 and is_list_of_str(multikey):
multikey = unique(multikey) # take unique set if no more than one multikey
if len(multikey) > 20:
raise ValueError("Please restrict the passed list to max 20 elements.")
if ax is not None:
<|code_end|>
using the current file's imports:
from inspect import signature
from pandas import unique
from matplotlib.colors import is_color_like
from anndata import AnnData
from scvelo import logging as logg
from scvelo import settings
from scvelo.preprocessing.neighbors import get_connectivities
from .docs import doc_params, doc_scatter
from .utils import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
and any relevant context from other files:
# Path: scvelo/logging.py
# _VERBOSITY_LEVELS_FROM_STRINGS = {"error": 0, "warn": 1, "info": 2, "hint": 3}
# def info(*args, **kwargs):
# def error(*args, **kwargs):
# def warn(*args, **kwargs):
# def hint(*args, **kwargs):
# def _settings_verbosity_greater_or_equal_than(v):
# def msg(
# *msg,
# v=None,
# time=False,
# memory=False,
# reset=False,
# end="\n",
# no_indent=False,
# t=None,
# m=None,
# r=None,
# ):
# def _write_log(*msg, end="\n"):
# def _sec_to_str(t, show_microseconds=False):
# def get_passed_time():
# def print_passed_time():
# def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):
# def __init__(self):
# def run(self):
# def get_latest_pypi_version():
# def check_if_latest_version():
# def print_version():
# def print_versions():
# def get_date_string():
# def switch_verbosity(mode="on", module=None):
# def __init__(self, total, interval=3):
# def update(self):
# def finish(self):
# def profiler(command, filename="profile.stats", n_stats=10):
# class InterruptableThread(threading.Thread):
# class ProgressReporter:
#
# Path: scvelo/settings.py
# def set_rcParams_scvelo(fontsize=12, color_map=None, frameon=None):
# def set_rcParams_scanpy(fontsize=12, color_map=None, frameon=None):
# def set_figure_params(
# style="scvelo",
# dpi=100,
# dpi_save=150,
# frameon=None,
# vector_friendly=True,
# transparent=True,
# fontsize=12,
# figsize=None,
# color_map=None,
# facecolor=None,
# format="pdf",
# ipython_format="png2x",
# ):
# def set_rcParams_defaults():
# def _set_ipython(ipython_format="png2x"):
# def _set_start_time():
#
# Path: scvelo/preprocessing/neighbors.py
# def get_connectivities(
# adata, mode="connectivities", n_neighbors=None, recurse_neighbors=False
# ):
# if "neighbors" in adata.uns.keys():
# C = get_neighs(adata, mode)
# if n_neighbors is not None and n_neighbors < get_n_neighs(adata):
# if mode == "connectivities":
# C = select_connectivities(C, n_neighbors)
# else:
# C = select_distances(C, n_neighbors)
# connectivities = C > 0
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# connectivities.setdiag(1)
# if recurse_neighbors:
# connectivities += connectivities.dot(connectivities * 0.5)
# connectivities.data = np.clip(connectivities.data, 0, 1)
# connectivities = connectivities.multiply(1.0 / connectivities.sum(1))
# return connectivities.tocsr().astype(np.float32)
# else:
# return None
#
# Path: scvelo/plotting/docs.py
# def doc_params(**kwds):
# def dec(obj):
. Output only the next line. | logg.warn("Cannot specify `ax` when plotting multiple panels.") |
Continue the code snippet: <|code_start|> adata = AnnData(np.stack([x, y]).T)
# restore old conventions
add_assignments = kwargs.pop("show_assignments", add_assignments)
add_linfit = kwargs.pop("show_linear_fit", add_linfit)
add_polyfit = kwargs.pop("show_polyfit", add_polyfit)
add_density = kwargs.pop("show_density", add_density)
add_rug = kwargs.pop("rug", add_rug)
basis = kwargs.pop("var_names", basis)
# keys for figures (fkeys) and multiple plots (mkeys)
fkeys = ["adata", "show", "save", "groups", "ncols", "nrows", "wspace", "hspace"]
fkeys += ["add_margin", "ax", "kwargs"]
mkeys = ["color", "layer", "basis", "components", "x", "y", "xlabel", "ylabel"]
mkeys += ["title", "color_map", "add_text"]
scatter_kwargs = {"show": False, "save": False}
for key in signature(scatter).parameters:
if key not in mkeys + fkeys:
scatter_kwargs[key] = eval(key)
mkwargs = {}
for key in mkeys: # mkwargs[key] = key for key in mkeys
mkwargs[key] = eval("{0}[0] if is_list({0}) else {0}".format(key))
# use c & color and cmap & color_map interchangeably,
# and plot each group separately if groups is 'all'
if "c" in kwargs:
color = kwargs.pop("c")
if "cmap" in kwargs:
color_map = kwargs.pop("cmap")
if "rasterized" not in kwargs:
<|code_end|>
. Use current file imports:
from inspect import signature
from pandas import unique
from matplotlib.colors import is_color_like
from anndata import AnnData
from scvelo import logging as logg
from scvelo import settings
from scvelo.preprocessing.neighbors import get_connectivities
from .docs import doc_params, doc_scatter
from .utils import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
and context (classes, functions, or code) from other files:
# Path: scvelo/logging.py
# _VERBOSITY_LEVELS_FROM_STRINGS = {"error": 0, "warn": 1, "info": 2, "hint": 3}
# def info(*args, **kwargs):
# def error(*args, **kwargs):
# def warn(*args, **kwargs):
# def hint(*args, **kwargs):
# def _settings_verbosity_greater_or_equal_than(v):
# def msg(
# *msg,
# v=None,
# time=False,
# memory=False,
# reset=False,
# end="\n",
# no_indent=False,
# t=None,
# m=None,
# r=None,
# ):
# def _write_log(*msg, end="\n"):
# def _sec_to_str(t, show_microseconds=False):
# def get_passed_time():
# def print_passed_time():
# def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):
# def __init__(self):
# def run(self):
# def get_latest_pypi_version():
# def check_if_latest_version():
# def print_version():
# def print_versions():
# def get_date_string():
# def switch_verbosity(mode="on", module=None):
# def __init__(self, total, interval=3):
# def update(self):
# def finish(self):
# def profiler(command, filename="profile.stats", n_stats=10):
# class InterruptableThread(threading.Thread):
# class ProgressReporter:
#
# Path: scvelo/settings.py
# def set_rcParams_scvelo(fontsize=12, color_map=None, frameon=None):
# def set_rcParams_scanpy(fontsize=12, color_map=None, frameon=None):
# def set_figure_params(
# style="scvelo",
# dpi=100,
# dpi_save=150,
# frameon=None,
# vector_friendly=True,
# transparent=True,
# fontsize=12,
# figsize=None,
# color_map=None,
# facecolor=None,
# format="pdf",
# ipython_format="png2x",
# ):
# def set_rcParams_defaults():
# def _set_ipython(ipython_format="png2x"):
# def _set_start_time():
#
# Path: scvelo/preprocessing/neighbors.py
# def get_connectivities(
# adata, mode="connectivities", n_neighbors=None, recurse_neighbors=False
# ):
# if "neighbors" in adata.uns.keys():
# C = get_neighs(adata, mode)
# if n_neighbors is not None and n_neighbors < get_n_neighs(adata):
# if mode == "connectivities":
# C = select_connectivities(C, n_neighbors)
# else:
# C = select_distances(C, n_neighbors)
# connectivities = C > 0
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# connectivities.setdiag(1)
# if recurse_neighbors:
# connectivities += connectivities.dot(connectivities * 0.5)
# connectivities.data = np.clip(connectivities.data, 0, 1)
# connectivities = connectivities.multiply(1.0 / connectivities.sum(1))
# return connectivities.tocsr().astype(np.float32)
# else:
# return None
#
# Path: scvelo/plotting/docs.py
# def doc_params(**kwds):
# def dec(obj):
. Output only the next line. | kwargs["rasterized"] = settings._vector_friendly |
Continue the code snippet: <|code_start|> )
zorder -= 1
# if color is in {'ascending', 'descending'}
elif isinstance(color, str):
if color == "ascending":
color = np.linspace(0, 1, len(x))
elif color == "descending":
color = np.linspace(1, 0, len(x))
# set palette if categorical color vals
if is_categorical(adata, color):
set_colors_for_categorical_obs(adata, color, palette)
# set color
if (
basis in adata.var_names
and isinstance(color, str)
and color in adata.layers.keys()
):
# phase portrait: color=basis, layer=color
c = interpret_colorkey(adata, basis, color, perc, use_raw)
else:
# embedding, gene trend etc.
c = interpret_colorkey(adata, color, layer, perc, use_raw)
if c is not None and not isinstance(c, str) and not isinstance(c[0], str):
# smooth color values across neighbors and rescale
if smooth and len(c) == adata.n_obs:
n_neighbors = None if isinstance(smooth, bool) else smooth
<|code_end|>
. Use current file imports:
from inspect import signature
from pandas import unique
from matplotlib.colors import is_color_like
from anndata import AnnData
from scvelo import logging as logg
from scvelo import settings
from scvelo.preprocessing.neighbors import get_connectivities
from .docs import doc_params, doc_scatter
from .utils import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
and context (classes, functions, or code) from other files:
# Path: scvelo/logging.py
# _VERBOSITY_LEVELS_FROM_STRINGS = {"error": 0, "warn": 1, "info": 2, "hint": 3}
# def info(*args, **kwargs):
# def error(*args, **kwargs):
# def warn(*args, **kwargs):
# def hint(*args, **kwargs):
# def _settings_verbosity_greater_or_equal_than(v):
# def msg(
# *msg,
# v=None,
# time=False,
# memory=False,
# reset=False,
# end="\n",
# no_indent=False,
# t=None,
# m=None,
# r=None,
# ):
# def _write_log(*msg, end="\n"):
# def _sec_to_str(t, show_microseconds=False):
# def get_passed_time():
# def print_passed_time():
# def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):
# def __init__(self):
# def run(self):
# def get_latest_pypi_version():
# def check_if_latest_version():
# def print_version():
# def print_versions():
# def get_date_string():
# def switch_verbosity(mode="on", module=None):
# def __init__(self, total, interval=3):
# def update(self):
# def finish(self):
# def profiler(command, filename="profile.stats", n_stats=10):
# class InterruptableThread(threading.Thread):
# class ProgressReporter:
#
# Path: scvelo/settings.py
# def set_rcParams_scvelo(fontsize=12, color_map=None, frameon=None):
# def set_rcParams_scanpy(fontsize=12, color_map=None, frameon=None):
# def set_figure_params(
# style="scvelo",
# dpi=100,
# dpi_save=150,
# frameon=None,
# vector_friendly=True,
# transparent=True,
# fontsize=12,
# figsize=None,
# color_map=None,
# facecolor=None,
# format="pdf",
# ipython_format="png2x",
# ):
# def set_rcParams_defaults():
# def _set_ipython(ipython_format="png2x"):
# def _set_start_time():
#
# Path: scvelo/preprocessing/neighbors.py
# def get_connectivities(
# adata, mode="connectivities", n_neighbors=None, recurse_neighbors=False
# ):
# if "neighbors" in adata.uns.keys():
# C = get_neighs(adata, mode)
# if n_neighbors is not None and n_neighbors < get_n_neighs(adata):
# if mode == "connectivities":
# C = select_connectivities(C, n_neighbors)
# else:
# C = select_distances(C, n_neighbors)
# connectivities = C > 0
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# connectivities.setdiag(1)
# if recurse_neighbors:
# connectivities += connectivities.dot(connectivities * 0.5)
# connectivities.data = np.clip(connectivities.data, 0, 1)
# connectivities = connectivities.multiply(1.0 / connectivities.sum(1))
# return connectivities.tocsr().astype(np.float32)
# else:
# return None
#
# Path: scvelo/plotting/docs.py
# def doc_params(**kwds):
# def dec(obj):
. Output only the next line. | c = get_connectivities(adata, n_neighbors=n_neighbors).dot(c) |
Based on the snippet: <|code_start|>"""Logging and Profiling
"""
_VERBOSITY_LEVELS_FROM_STRINGS = {"error": 0, "warn": 1, "info": 2, "hint": 3}
def info(*args, **kwargs):
return msg(*args, v="info", **kwargs)
def error(*args, **kwargs):
args = ("Error:",) + args
return msg(*args, v="error", **kwargs)
def warn(*args, **kwargs):
args = ("WARNING:",) + args
return msg(*args, v="warn", **kwargs)
def hint(*args, **kwargs):
return msg(*args, v="hint", **kwargs)
def _settings_verbosity_greater_or_equal_than(v):
<|code_end|>
, predict the immediate next line with the help of imports:
from datetime import datetime
from platform import python_version
from sys import stdout
from time import time as get_time
from packaging.version import parse
from anndata.logging import get_memory_usage
from scvelo import settings
from .settings import logfile
from functools import reduce
from subprocess import CalledProcessError, check_output
from . import __version__
from . import __version__
from . import settings
from scanpy import settings
import threading
import cProfile
import pstats
and context (classes, functions, sometimes code) from other files:
# Path: scvelo/settings.py
# def set_rcParams_scvelo(fontsize=12, color_map=None, frameon=None):
# def set_rcParams_scanpy(fontsize=12, color_map=None, frameon=None):
# def set_figure_params(
# style="scvelo",
# dpi=100,
# dpi_save=150,
# frameon=None,
# vector_friendly=True,
# transparent=True,
# fontsize=12,
# figsize=None,
# color_map=None,
# facecolor=None,
# format="pdf",
# ipython_format="png2x",
# ):
# def set_rcParams_defaults():
# def _set_ipython(ipython_format="png2x"):
# def _set_start_time():
. Output only the next line. | if isinstance(settings.verbosity, str): |
Given the code snippet: <|code_start|>"""
Mural views
===========
This module defines views that will be used to browse murals.
"""
mural_blueprint = Blueprint('mural', __name__, url_prefix='/')
@mural_blueprint.route('/', methods=['GET', ])
def entrypoint():
""" The main entrypoint of our murals search engine. """
<|code_end|>
, generate the next line using the imports in this file:
from flask import Blueprint
from flask import render_template
from .models import Mural
and context (functions, classes, or occasionally code) from other files:
# Path: mtlmurals/modules/mural/models.py
# class Mural(models.Model, Timestamp):
# """ Represents a mural. """
#
# __tablename__ = 'mural'
# id = db.Column(db.Integer, primary_key=True)
#
# # The import ID is the identifier associated with each mural that is embedded inside exports of
# # the murals set provided by the Montréal open data website.
# import_id = db.Column(db.String(length=64), unique=True, index=True)
#
# # A mural can be associated with many artists. It can (potentially) be realized by an
# # organization which was financed to create murals. Fundings can come from a specific program.
# artists = db.relationship(
# 'Artist', secondary=mural_artists, backref=db.backref('murals', lazy='dynamic'))
# organization_id = db.Column(db.Integer, db.ForeignKey('organization.id'), nullable=True)
# program_id = db.Column(db.Integer, db.ForeignKey('program.id'), nullable=True)
#
# # The mural itself!
# image = db.Column(db.Unicode(length=255), nullable=True)
#
# # The year when the mural was created is stored in this field.
# year = db.Column(db.SmallInteger)
#
# # These fields define the address of a mural.
# address = db.Column(db.Unicode(length=124))
# address_2 = db.Column(db.Unicode(length=124), nullable=True)
# zip_code = db.Column(db.String(length=20), nullable=True, index=True)
# city = db.Column(db.Unicode(length=124), index=True)
# country = db.Column(db.Unicode(length=124), index=True)
#
# # The fields define the coordinates of a mural.
# latitude = db.Column(db.Numeric, index=True)
# longitude = db.Column(db.Numeric, index=True)
. Output only the next line. | years = next(zip(*Mural.query.order_by(Mural.year).distinct().values(Mural.year)), []) |
Predict the next line after this snippet: <|code_start|>def _set_data_field(fields, args):
if 'location' not in fields and 'copy_from' not in fields:
fields['data'] = utils.get_data_file(args)
@utils.arg('image', metavar='<IMAGE>', help='Name or ID of image to describe.')
@utils.arg('--human-readable', action='store_true', default=False,
help='Print image size in a human-friendly format.')
@utils.arg('--max-column-width', metavar='<integer>', default=80,
help='The max column width of the printed table.')
def do_image_show(gc, args):
"""Describe a specific image."""
image_id = utils.find_resource(gc.images, args.image).id
image = gc.images.get(image_id)
_image_show(image, args.human_readable,
max_column_width=int(args.max_column_width))
@utils.arg('--file', metavar='<FILE>',
help='Local file to save downloaded image data to. '
'If this is not specified and there is no redirection '
'the image data will not be saved.')
@utils.arg('image', metavar='<IMAGE>', help='Name or ID of image to download.')
@utils.arg('--progress', action='store_true', default=False,
help='Show download progress bar.')
def do_image_download(gc, args):
"""Download a specific image."""
image = utils.find_resource(gc.images, args.image)
body = image.data()
if args.progress:
<|code_end|>
using the current file's imports:
import copy
import functools
import os
import sys
import glanceclient.v1.images
from oslo_utils import encodeutils
from oslo_utils import strutils
from glanceclient.common import progressbar
from glanceclient.common import utils
from glanceclient import exc
and any relevant context from other files:
# Path: glanceclient/common/progressbar.py
# class _ProgressBarBase(object):
# class VerboseFileWrapper(_ProgressBarBase):
# class VerboseIteratorWrapper(_ProgressBarBase):
# def __init__(self, wrapped, totalsize):
# def _display_progress_bar(self, size_read):
# def __getattr__(self, attr):
# def read(self, *args, **kwargs):
# def __iter__(self):
# def next(self):
#
# Path: glanceclient/common/utils.py
# SENSITIVE_HEADERS = ('X-Auth-Token', )
# REQUIRED_FIELDS_ON_DATA = ('disk_format', 'container_format')
# def arg(*args, **kwargs):
# def _decorator(func):
# def on_data_require_fields(data_fields, required=REQUIRED_FIELDS_ON_DATA):
# def args_decorator(func):
# def prepare_fields(fields):
# def func_wrapper(gc, args):
# def schema_args(schema_getter, omit=None):
# def _decorator(func):
# def pretty_choice_list(l):
# def has_version(client, version):
# def print_cached_images(cached_images):
# def print_dict_list(objects, fields):
# def print_list(objs, fields, formatters=None, field_settings=None):
# def _encode(src):
# def unicode_key_value_to_string(src):
# def print_dict(d, max_column_width=80):
# def find_resource(manager, name_or_id):
# def env(*vars, **kwargs):
# def exit(msg='', exit_code=1):
# def print_err(msg):
# def save_image(data, path):
# def make_size_human_readable(size):
# def get_file_size(file_obj):
# def get_data_file(args):
# def strip_version(endpoint):
# def print_image(image_obj, human_readable=False, max_col_width=None):
# def integrity_iter(iter, checksum):
# def serious_integrity_iter(iter, hasher, hash_value):
# def memoized_property(fn):
# def _memoized_property(self):
# def safe_header(name, value):
# def endpoint_version_from_url(endpoint, default_version=None):
# def debug_enabled(argv):
# def __init__(self, iterable, length):
# def __iter__(self):
# def next(self):
# def __len__(self):
# def __init__(self, wrapped):
# def request_ids(self):
# def wrapped(self):
# def next(self):
# def __init__(self, wrapped):
# def _set_request_ids(self, resp):
# def _next(self):
# def next(self):
# def __next__(self):
# def __iter__(self):
# def request_ids(self):
# def wrapped(self):
# def add_req_id_to_object():
# def inner(wrapped, instance, args, kwargs):
# def add_req_id_to_generator():
# def inner(wrapped, instance, args, kwargs):
# def _extract_request_id(resp):
# class IterableWithLength(object):
# class RequestIdProxy(wrapt.ObjectProxy):
# class GeneratorProxy(wrapt.ObjectProxy):
#
# Path: glanceclient/exc.py
# class BaseException(Exception):
# class CommandError(BaseException):
# class InvalidEndpoint(BaseException):
# class CommunicationError(BaseException):
# class ClientException(Exception):
# class HTTPException(ClientException):
# class HTTPMultipleChoices(HTTPException):
# class BadRequest(HTTPException):
# class HTTPBadRequest(BadRequest):
# class Unauthorized(HTTPException):
# class HTTPUnauthorized(Unauthorized):
# class Forbidden(HTTPException):
# class HTTPForbidden(Forbidden):
# class NotFound(HTTPException):
# class HTTPNotFound(NotFound):
# class HTTPMethodNotAllowed(HTTPException):
# class Conflict(HTTPException):
# class HTTPConflict(Conflict):
# class OverLimit(HTTPException):
# class HTTPOverLimit(OverLimit):
# class HTTPInternalServerError(HTTPException):
# class HTTPNotImplemented(HTTPException):
# class HTTPBadGateway(HTTPException):
# class ServiceUnavailable(HTTPException):
# class HTTPServiceUnavailable(ServiceUnavailable):
# class NoTokenLookupException(Exception):
# class EndpointNotFound(Exception):
# class SSLConfigurationError(BaseException):
# class SSLCertificateError(BaseException):
# def __init__(self, message=None):
# def __str__(self):
# def __init__(self, details=None):
# def __str__(self):
# def __str__(self):
# def from_response(response, body=None):
. Output only the next line. | body = progressbar.VerboseIteratorWrapper(body, len(body)) |
Given the following code snippet before the placeholder: <|code_start|># Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
CONTAINER_FORMATS = ('Acceptable formats: ami, ari, aki, bare, ovf, ova,'
'docker.')
DISK_FORMATS = ('Acceptable formats: ami, ari, aki, vhd, vhdx, vmdk, raw, '
'qcow2, vdi, iso, and ploop.')
DATA_FIELDS = ('location', 'copy_from', 'file')
_bool_strict = functools.partial(strutils.bool_from_string, strict=True)
<|code_end|>
, predict the next line using imports from the current file:
import copy
import functools
import os
import sys
import glanceclient.v1.images
from oslo_utils import encodeutils
from oslo_utils import strutils
from glanceclient.common import progressbar
from glanceclient.common import utils
from glanceclient import exc
and context including class names, function names, and sometimes code from other files:
# Path: glanceclient/common/progressbar.py
# class _ProgressBarBase(object):
# class VerboseFileWrapper(_ProgressBarBase):
# class VerboseIteratorWrapper(_ProgressBarBase):
# def __init__(self, wrapped, totalsize):
# def _display_progress_bar(self, size_read):
# def __getattr__(self, attr):
# def read(self, *args, **kwargs):
# def __iter__(self):
# def next(self):
#
# Path: glanceclient/common/utils.py
# SENSITIVE_HEADERS = ('X-Auth-Token', )
# REQUIRED_FIELDS_ON_DATA = ('disk_format', 'container_format')
# def arg(*args, **kwargs):
# def _decorator(func):
# def on_data_require_fields(data_fields, required=REQUIRED_FIELDS_ON_DATA):
# def args_decorator(func):
# def prepare_fields(fields):
# def func_wrapper(gc, args):
# def schema_args(schema_getter, omit=None):
# def _decorator(func):
# def pretty_choice_list(l):
# def has_version(client, version):
# def print_cached_images(cached_images):
# def print_dict_list(objects, fields):
# def print_list(objs, fields, formatters=None, field_settings=None):
# def _encode(src):
# def unicode_key_value_to_string(src):
# def print_dict(d, max_column_width=80):
# def find_resource(manager, name_or_id):
# def env(*vars, **kwargs):
# def exit(msg='', exit_code=1):
# def print_err(msg):
# def save_image(data, path):
# def make_size_human_readable(size):
# def get_file_size(file_obj):
# def get_data_file(args):
# def strip_version(endpoint):
# def print_image(image_obj, human_readable=False, max_col_width=None):
# def integrity_iter(iter, checksum):
# def serious_integrity_iter(iter, hasher, hash_value):
# def memoized_property(fn):
# def _memoized_property(self):
# def safe_header(name, value):
# def endpoint_version_from_url(endpoint, default_version=None):
# def debug_enabled(argv):
# def __init__(self, iterable, length):
# def __iter__(self):
# def next(self):
# def __len__(self):
# def __init__(self, wrapped):
# def request_ids(self):
# def wrapped(self):
# def next(self):
# def __init__(self, wrapped):
# def _set_request_ids(self, resp):
# def _next(self):
# def next(self):
# def __next__(self):
# def __iter__(self):
# def request_ids(self):
# def wrapped(self):
# def add_req_id_to_object():
# def inner(wrapped, instance, args, kwargs):
# def add_req_id_to_generator():
# def inner(wrapped, instance, args, kwargs):
# def _extract_request_id(resp):
# class IterableWithLength(object):
# class RequestIdProxy(wrapt.ObjectProxy):
# class GeneratorProxy(wrapt.ObjectProxy):
#
# Path: glanceclient/exc.py
# class BaseException(Exception):
# class CommandError(BaseException):
# class InvalidEndpoint(BaseException):
# class CommunicationError(BaseException):
# class ClientException(Exception):
# class HTTPException(ClientException):
# class HTTPMultipleChoices(HTTPException):
# class BadRequest(HTTPException):
# class HTTPBadRequest(BadRequest):
# class Unauthorized(HTTPException):
# class HTTPUnauthorized(Unauthorized):
# class Forbidden(HTTPException):
# class HTTPForbidden(Forbidden):
# class NotFound(HTTPException):
# class HTTPNotFound(NotFound):
# class HTTPMethodNotAllowed(HTTPException):
# class Conflict(HTTPException):
# class HTTPConflict(Conflict):
# class OverLimit(HTTPException):
# class HTTPOverLimit(OverLimit):
# class HTTPInternalServerError(HTTPException):
# class HTTPNotImplemented(HTTPException):
# class HTTPBadGateway(HTTPException):
# class ServiceUnavailable(HTTPException):
# class HTTPServiceUnavailable(ServiceUnavailable):
# class NoTokenLookupException(Exception):
# class EndpointNotFound(Exception):
# class SSLConfigurationError(BaseException):
# class SSLCertificateError(BaseException):
# def __init__(self, message=None):
# def __str__(self):
# def __init__(self, details=None):
# def __str__(self):
# def __str__(self):
# def from_response(response, body=None):
. Output only the next line. | @utils.arg('--name', metavar='<NAME>', |
Continue the code snippet: <|code_start|> UPDATE_PARAMS = glanceclient.v1.images.UPDATE_PARAMS
fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items()))
if image.status == 'queued':
_set_data_field(fields, args)
if args.progress:
filesize = utils.get_file_size(fields['data'])
fields['data'] = progressbar.VerboseFileWrapper(
fields['data'], filesize
)
elif _is_image_data_provided(args):
# NOTE(kragniz): Exit with an error if the status is not queued
# and image data was provided
utils.exit('Unable to upload image data to an image which '
'is %s.' % image.status)
image = gc.images.update(image, purge_props=args.purge_props, **fields)
_image_show(image, args.human_readable)
@utils.arg('images', metavar='<IMAGE>', nargs='+',
help='Name or ID of image(s) to delete.')
def do_image_delete(gc, args):
"""Delete specified image(s)."""
for args_image in args.images:
image = utils.find_resource(gc.images, args_image)
if image and image.status == "deleted":
msg = "No image with an ID of '%s' exists." % image.id
<|code_end|>
. Use current file imports:
import copy
import functools
import os
import sys
import glanceclient.v1.images
from oslo_utils import encodeutils
from oslo_utils import strutils
from glanceclient.common import progressbar
from glanceclient.common import utils
from glanceclient import exc
and context (classes, functions, or code) from other files:
# Path: glanceclient/common/progressbar.py
# class _ProgressBarBase(object):
# class VerboseFileWrapper(_ProgressBarBase):
# class VerboseIteratorWrapper(_ProgressBarBase):
# def __init__(self, wrapped, totalsize):
# def _display_progress_bar(self, size_read):
# def __getattr__(self, attr):
# def read(self, *args, **kwargs):
# def __iter__(self):
# def next(self):
#
# Path: glanceclient/common/utils.py
# SENSITIVE_HEADERS = ('X-Auth-Token', )
# REQUIRED_FIELDS_ON_DATA = ('disk_format', 'container_format')
# def arg(*args, **kwargs):
# def _decorator(func):
# def on_data_require_fields(data_fields, required=REQUIRED_FIELDS_ON_DATA):
# def args_decorator(func):
# def prepare_fields(fields):
# def func_wrapper(gc, args):
# def schema_args(schema_getter, omit=None):
# def _decorator(func):
# def pretty_choice_list(l):
# def has_version(client, version):
# def print_cached_images(cached_images):
# def print_dict_list(objects, fields):
# def print_list(objs, fields, formatters=None, field_settings=None):
# def _encode(src):
# def unicode_key_value_to_string(src):
# def print_dict(d, max_column_width=80):
# def find_resource(manager, name_or_id):
# def env(*vars, **kwargs):
# def exit(msg='', exit_code=1):
# def print_err(msg):
# def save_image(data, path):
# def make_size_human_readable(size):
# def get_file_size(file_obj):
# def get_data_file(args):
# def strip_version(endpoint):
# def print_image(image_obj, human_readable=False, max_col_width=None):
# def integrity_iter(iter, checksum):
# def serious_integrity_iter(iter, hasher, hash_value):
# def memoized_property(fn):
# def _memoized_property(self):
# def safe_header(name, value):
# def endpoint_version_from_url(endpoint, default_version=None):
# def debug_enabled(argv):
# def __init__(self, iterable, length):
# def __iter__(self):
# def next(self):
# def __len__(self):
# def __init__(self, wrapped):
# def request_ids(self):
# def wrapped(self):
# def next(self):
# def __init__(self, wrapped):
# def _set_request_ids(self, resp):
# def _next(self):
# def next(self):
# def __next__(self):
# def __iter__(self):
# def request_ids(self):
# def wrapped(self):
# def add_req_id_to_object():
# def inner(wrapped, instance, args, kwargs):
# def add_req_id_to_generator():
# def inner(wrapped, instance, args, kwargs):
# def _extract_request_id(resp):
# class IterableWithLength(object):
# class RequestIdProxy(wrapt.ObjectProxy):
# class GeneratorProxy(wrapt.ObjectProxy):
#
# Path: glanceclient/exc.py
# class BaseException(Exception):
# class CommandError(BaseException):
# class InvalidEndpoint(BaseException):
# class CommunicationError(BaseException):
# class ClientException(Exception):
# class HTTPException(ClientException):
# class HTTPMultipleChoices(HTTPException):
# class BadRequest(HTTPException):
# class HTTPBadRequest(BadRequest):
# class Unauthorized(HTTPException):
# class HTTPUnauthorized(Unauthorized):
# class Forbidden(HTTPException):
# class HTTPForbidden(Forbidden):
# class NotFound(HTTPException):
# class HTTPNotFound(NotFound):
# class HTTPMethodNotAllowed(HTTPException):
# class Conflict(HTTPException):
# class HTTPConflict(Conflict):
# class OverLimit(HTTPException):
# class HTTPOverLimit(OverLimit):
# class HTTPInternalServerError(HTTPException):
# class HTTPNotImplemented(HTTPException):
# class HTTPBadGateway(HTTPException):
# class ServiceUnavailable(HTTPException):
# class HTTPServiceUnavailable(ServiceUnavailable):
# class NoTokenLookupException(Exception):
# class EndpointNotFound(Exception):
# class SSLConfigurationError(BaseException):
# class SSLCertificateError(BaseException):
# def __init__(self, message=None):
# def __str__(self):
# def __init__(self, details=None):
# def __str__(self):
# def __str__(self):
# def from_response(response, body=None):
. Output only the next line. | raise exc.CommandError(msg) |
Continue the code snippet: <|code_start|> body = self.client.patch(url, json=json).json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _delete(self, url):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
"""
return self.client.delete(url)
class ManagerWithFind(BaseManager, metaclass=abc.ABCMeta):
"""Manager with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
<|code_end|>
. Use current file imports:
import abc
import copy
import urllib.parse
from oslo_utils import strutils
from glanceclient._i18n import _
from glanceclient.v1.apiclient import exceptions
and context (classes, functions, or code) from other files:
# Path: glanceclient/_i18n.py
#
# Path: glanceclient/v1/apiclient/exceptions.py
# class ClientException(Exception):
# class ValidationError(ClientException):
# class UnsupportedVersion(ClientException):
# class CommandError(ClientException):
# class AuthorizationFailure(ClientException):
# class ConnectionError(ClientException):
# class ConnectionRefused(ConnectionError):
# class AuthPluginOptionsMissing(AuthorizationFailure):
# class AuthSystemNotFound(AuthorizationFailure):
# class NoUniqueMatch(ClientException):
# class EndpointException(ClientException):
# class EndpointNotFound(EndpointException):
# class AmbiguousEndpoints(EndpointException):
# class HttpError(ClientException):
# class HTTPRedirection(HttpError):
# class HTTPClientError(HttpError):
# class HttpServerError(HttpError):
# class MultipleChoices(HTTPRedirection):
# class BadRequest(HTTPClientError):
# class Unauthorized(HTTPClientError):
# class PaymentRequired(HTTPClientError):
# class Forbidden(HTTPClientError):
# class NotFound(HTTPClientError):
# class MethodNotAllowed(HTTPClientError):
# class NotAcceptable(HTTPClientError):
# class ProxyAuthenticationRequired(HTTPClientError):
# class RequestTimeout(HTTPClientError):
# class Conflict(HTTPClientError):
# class Gone(HTTPClientError):
# class LengthRequired(HTTPClientError):
# class PreconditionFailed(HTTPClientError):
# class RequestEntityTooLarge(HTTPClientError):
# class RequestUriTooLong(HTTPClientError):
# class UnsupportedMediaType(HTTPClientError):
# class RequestedRangeNotSatisfiable(HTTPClientError):
# class ExpectationFailed(HTTPClientError):
# class UnprocessableEntity(HTTPClientError):
# class InternalServerError(HttpServerError):
# class HttpNotImplemented(HttpServerError):
# class BadGateway(HttpServerError):
# class ServiceUnavailable(HttpServerError):
# class GatewayTimeout(HttpServerError):
# class HttpVersionNotSupported(HttpServerError):
# def __init__(self, opt_names):
# def __init__(self, auth_system):
# def __init__(self, endpoints=None):
# def __init__(self, message=None, details=None,
# response=None, request_id=None,
# url=None, method=None, http_status=None):
# def __init__(self, *args, **kwargs):
# def from_response(response, method, url):
. Output only the next line. | msg = _("No %(name)s matching %(args)s.") % { |
Predict the next line for this snippet: <|code_start|> return self.resource_class(self, body)
def _delete(self, url):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
"""
return self.client.delete(url)
class ManagerWithFind(BaseManager, metaclass=abc.ABCMeta):
"""Manager with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
<|code_end|>
with the help of current file imports:
import abc
import copy
import urllib.parse
from oslo_utils import strutils
from glanceclient._i18n import _
from glanceclient.v1.apiclient import exceptions
and context from other files:
# Path: glanceclient/_i18n.py
#
# Path: glanceclient/v1/apiclient/exceptions.py
# class ClientException(Exception):
# class ValidationError(ClientException):
# class UnsupportedVersion(ClientException):
# class CommandError(ClientException):
# class AuthorizationFailure(ClientException):
# class ConnectionError(ClientException):
# class ConnectionRefused(ConnectionError):
# class AuthPluginOptionsMissing(AuthorizationFailure):
# class AuthSystemNotFound(AuthorizationFailure):
# class NoUniqueMatch(ClientException):
# class EndpointException(ClientException):
# class EndpointNotFound(EndpointException):
# class AmbiguousEndpoints(EndpointException):
# class HttpError(ClientException):
# class HTTPRedirection(HttpError):
# class HTTPClientError(HttpError):
# class HttpServerError(HttpError):
# class MultipleChoices(HTTPRedirection):
# class BadRequest(HTTPClientError):
# class Unauthorized(HTTPClientError):
# class PaymentRequired(HTTPClientError):
# class Forbidden(HTTPClientError):
# class NotFound(HTTPClientError):
# class MethodNotAllowed(HTTPClientError):
# class NotAcceptable(HTTPClientError):
# class ProxyAuthenticationRequired(HTTPClientError):
# class RequestTimeout(HTTPClientError):
# class Conflict(HTTPClientError):
# class Gone(HTTPClientError):
# class LengthRequired(HTTPClientError):
# class PreconditionFailed(HTTPClientError):
# class RequestEntityTooLarge(HTTPClientError):
# class RequestUriTooLong(HTTPClientError):
# class UnsupportedMediaType(HTTPClientError):
# class RequestedRangeNotSatisfiable(HTTPClientError):
# class ExpectationFailed(HTTPClientError):
# class UnprocessableEntity(HTTPClientError):
# class InternalServerError(HttpServerError):
# class HttpNotImplemented(HttpServerError):
# class BadGateway(HttpServerError):
# class ServiceUnavailable(HttpServerError):
# class GatewayTimeout(HttpServerError):
# class HttpVersionNotSupported(HttpServerError):
# def __init__(self, opt_names):
# def __init__(self, auth_system):
# def __init__(self, endpoints=None):
# def __init__(self, message=None, details=None,
# response=None, request_id=None,
# url=None, method=None, http_status=None):
# def __init__(self, *args, **kwargs):
# def from_response(response, method, url):
, which may contain function names, class names, or code. Output only the next line. | raise exceptions.NotFound(msg) |
Given snippet: <|code_start|> "aligned with Heat resource types "
"whenever possible: http://docs."
"openstack.org/developer/heat/"
"template_guide/openstack.html",
"maxLength": 80
},
"created_at": {
"type": "string",
"readOnly": True,
"description": "Date and time of resource type "
"association",
"format": "date-time"
},
"updated_at": {
"type": "string",
"readOnly": True,
"description": "Date and time of the last resource "
"type association modification ",
"format": "date-time"
},
}
}
)
}
}
class TestResoureTypeController(testtools.TestCase):
def setUp(self):
super(TestResoureTypeController, self).setUp()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import metadefs
and context:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/metadefs.py
# DEFAULT_PAGE_SIZE = 20
# SORT_DIR_VALUES = ('asc', 'desc')
# SORT_KEY_VALUES = ('created_at', 'namespace')
# class NamespaceController(object):
# class ResourceTypeController(object):
# class PropertyController(object):
# class ObjectController(object):
# class TagController(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, **kwargs):
# def update(self, namespace_name, **kwargs):
# def get(self, namespace, **kwargs):
# def _get(self, namespace, header=None, **kwargs):
# def list(self, **kwargs):
# def paginate(url):
# def delete(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def associate(self, namespace, **kwargs):
# def deassociate(self, namespace, resource):
# def list(self):
# def get(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, prop_name, **kwargs):
# def get(self, namespace, prop_name):
# def _get(self, namespace, prop_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, prop_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, object_name, **kwargs):
# def get(self, namespace, object_name):
# def _get(self, namespace, object_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, object_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, tag_name):
# def create_multiple(self, namespace, **kwargs):
# def update(self, namespace, tag_name, **kwargs):
# def get(self, namespace, tag_name):
# def _get(self, namespace, tag_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, tag_name):
# def delete_all(self, namespace):
which might include code, classes, or functions. Output only the next line. | self.api = utils.FakeAPI(data_fixtures) |
Using the snippet: <|code_start|> "type": "string"
},
"required": {
"$ref": "#/definitions/stringArray"
},
"properties": {
"$ref": "#/definitions/property"
},
"schema": {
"type": "string"
},
"updated_at": {
"type": "string",
"readOnly": True,
"description": "Date and time of the last object "
"modification",
"format": "date-time"
},
}
}
)
}
}
class TestObjectController(testtools.TestCase):
def setUp(self):
super(TestObjectController, self).setUp()
self.api = utils.FakeAPI(data_fixtures)
self.schema_api = utils.FakeSchemaAPI(schema_fixtures)
<|code_end|>
, determine the next line of code. You have imports:
import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import metadefs
and context (class names, function names, or code) available:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/metadefs.py
# DEFAULT_PAGE_SIZE = 20
# SORT_DIR_VALUES = ('asc', 'desc')
# SORT_KEY_VALUES = ('created_at', 'namespace')
# class NamespaceController(object):
# class ResourceTypeController(object):
# class PropertyController(object):
# class ObjectController(object):
# class TagController(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, **kwargs):
# def update(self, namespace_name, **kwargs):
# def get(self, namespace, **kwargs):
# def _get(self, namespace, header=None, **kwargs):
# def list(self, **kwargs):
# def paginate(url):
# def delete(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def associate(self, namespace, **kwargs):
# def deassociate(self, namespace, resource):
# def list(self):
# def get(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, prop_name, **kwargs):
# def get(self, namespace, prop_name):
# def _get(self, namespace, prop_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, prop_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, object_name, **kwargs):
# def get(self, namespace, object_name):
# def _get(self, namespace, object_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, object_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, tag_name):
# def create_multiple(self, namespace, **kwargs):
# def update(self, namespace, tag_name, **kwargs):
# def get(self, namespace, tag_name):
# def _get(self, namespace, tag_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, tag_name):
# def delete_all(self, namespace):
. Output only the next line. | self.controller = base.BaseController(self.api, self.schema_api, |
Given the code snippet: <|code_start|> },
"self": {
"type": "string"
},
"required": {
"$ref": "#/definitions/stringArray"
},
"properties": {
"$ref": "#/definitions/property"
},
"schema": {
"type": "string"
},
"updated_at": {
"type": "string",
"readOnly": True,
"description": "Date and time of the last object "
"modification",
"format": "date-time"
},
}
}
)
}
}
class TestObjectController(testtools.TestCase):
def setUp(self):
super(TestObjectController, self).setUp()
<|code_end|>
, generate the next line using the imports in this file:
import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import metadefs
and context (functions, classes, or occasionally code) from other files:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/metadefs.py
# DEFAULT_PAGE_SIZE = 20
# SORT_DIR_VALUES = ('asc', 'desc')
# SORT_KEY_VALUES = ('created_at', 'namespace')
# class NamespaceController(object):
# class ResourceTypeController(object):
# class PropertyController(object):
# class ObjectController(object):
# class TagController(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, **kwargs):
# def update(self, namespace_name, **kwargs):
# def get(self, namespace, **kwargs):
# def _get(self, namespace, header=None, **kwargs):
# def list(self, **kwargs):
# def paginate(url):
# def delete(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def associate(self, namespace, **kwargs):
# def deassociate(self, namespace, resource):
# def list(self):
# def get(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, prop_name, **kwargs):
# def get(self, namespace, prop_name):
# def _get(self, namespace, prop_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, prop_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, object_name, **kwargs):
# def get(self, namespace, object_name):
# def _get(self, namespace, object_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, object_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, tag_name):
# def create_multiple(self, namespace, **kwargs):
# def update(self, namespace, tag_name, **kwargs):
# def get(self, namespace, tag_name):
# def _get(self, namespace, tag_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, tag_name):
# def delete_all(self, namespace):
. Output only the next line. | self.api = utils.FakeAPI(data_fixtures) |
Based on the snippet: <|code_start|> },
"required": {
"$ref": "#/definitions/stringArray"
},
"properties": {
"$ref": "#/definitions/property"
},
"schema": {
"type": "string"
},
"updated_at": {
"type": "string",
"readOnly": True,
"description": "Date and time of the last object "
"modification",
"format": "date-time"
},
}
}
)
}
}
class TestObjectController(testtools.TestCase):
def setUp(self):
super(TestObjectController, self).setUp()
self.api = utils.FakeAPI(data_fixtures)
self.schema_api = utils.FakeSchemaAPI(schema_fixtures)
self.controller = base.BaseController(self.api, self.schema_api,
<|code_end|>
, predict the immediate next line with the help of imports:
import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import metadefs
and context (classes, functions, sometimes code) from other files:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/metadefs.py
# DEFAULT_PAGE_SIZE = 20
# SORT_DIR_VALUES = ('asc', 'desc')
# SORT_KEY_VALUES = ('created_at', 'namespace')
# class NamespaceController(object):
# class ResourceTypeController(object):
# class PropertyController(object):
# class ObjectController(object):
# class TagController(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, **kwargs):
# def update(self, namespace_name, **kwargs):
# def get(self, namespace, **kwargs):
# def _get(self, namespace, header=None, **kwargs):
# def list(self, **kwargs):
# def paginate(url):
# def delete(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def associate(self, namespace, **kwargs):
# def deassociate(self, namespace, resource):
# def list(self):
# def get(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, prop_name, **kwargs):
# def get(self, namespace, prop_name):
# def _get(self, namespace, prop_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, prop_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, object_name, **kwargs):
# def get(self, namespace, object_name):
# def _get(self, namespace, object_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, object_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, tag_name):
# def create_multiple(self, namespace, **kwargs):
# def update(self, namespace, tag_name, **kwargs):
# def get(self, namespace, tag_name):
# def _get(self, namespace, tag_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, tag_name):
# def delete_all(self, namespace):
. Output only the next line. | metadefs.ObjectController) |
Continue the code snippet: <|code_start|> # now try to get entity as uuid
try:
tmp_id = encodeutils.safe_decode(name_or_id)
if uuidutils.is_uuid_like(tmp_id):
return manager.get(tmp_id)
except (TypeError, ValueError, exceptions.NotFound):
pass
# for str id which is not uuid
if getattr(manager, 'is_alphanum_id_allowed', False):
try:
return manager.get(name_or_id)
except exceptions.NotFound:
pass
try:
try:
return manager.find(human_id=name_or_id, **find_args)
except exceptions.NotFound:
pass
# finally try to find entity by name
try:
resource = getattr(manager, 'resource_class', None)
name_attr = resource.NAME_ATTR if resource else 'name'
kwargs = {name_attr: name_or_id}
kwargs.update(find_args)
return manager.find(**kwargs)
except exceptions.NotFound:
<|code_end|>
. Use current file imports:
from oslo_utils import encodeutils
from oslo_utils import uuidutils
from glanceclient._i18n import _
from glanceclient.v1.apiclient import exceptions
and context (classes, functions, or code) from other files:
# Path: glanceclient/_i18n.py
#
# Path: glanceclient/v1/apiclient/exceptions.py
# class ClientException(Exception):
# class ValidationError(ClientException):
# class UnsupportedVersion(ClientException):
# class CommandError(ClientException):
# class AuthorizationFailure(ClientException):
# class ConnectionError(ClientException):
# class ConnectionRefused(ConnectionError):
# class AuthPluginOptionsMissing(AuthorizationFailure):
# class AuthSystemNotFound(AuthorizationFailure):
# class NoUniqueMatch(ClientException):
# class EndpointException(ClientException):
# class EndpointNotFound(EndpointException):
# class AmbiguousEndpoints(EndpointException):
# class HttpError(ClientException):
# class HTTPRedirection(HttpError):
# class HTTPClientError(HttpError):
# class HttpServerError(HttpError):
# class MultipleChoices(HTTPRedirection):
# class BadRequest(HTTPClientError):
# class Unauthorized(HTTPClientError):
# class PaymentRequired(HTTPClientError):
# class Forbidden(HTTPClientError):
# class NotFound(HTTPClientError):
# class MethodNotAllowed(HTTPClientError):
# class NotAcceptable(HTTPClientError):
# class ProxyAuthenticationRequired(HTTPClientError):
# class RequestTimeout(HTTPClientError):
# class Conflict(HTTPClientError):
# class Gone(HTTPClientError):
# class LengthRequired(HTTPClientError):
# class PreconditionFailed(HTTPClientError):
# class RequestEntityTooLarge(HTTPClientError):
# class RequestUriTooLong(HTTPClientError):
# class UnsupportedMediaType(HTTPClientError):
# class RequestedRangeNotSatisfiable(HTTPClientError):
# class ExpectationFailed(HTTPClientError):
# class UnprocessableEntity(HTTPClientError):
# class InternalServerError(HttpServerError):
# class HttpNotImplemented(HttpServerError):
# class BadGateway(HttpServerError):
# class ServiceUnavailable(HttpServerError):
# class GatewayTimeout(HttpServerError):
# class HttpVersionNotSupported(HttpServerError):
# def __init__(self, opt_names):
# def __init__(self, auth_system):
# def __init__(self, endpoints=None):
# def __init__(self, message=None, details=None,
# response=None, request_id=None,
# url=None, method=None, http_status=None):
# def __init__(self, *args, **kwargs):
# def from_response(response, method, url):
. Output only the next line. | msg = _("No %(name)s with a name or " |
Predict the next line for this snippet: <|code_start|>#
# THIS MODULE IS DEPRECATED
#
# Please refer to
# https://etherpad.openstack.org/p/kilo-glanceclient-library-proposals for
# the discussion leading to this deprecation.
#
# We recommend checking out the python-openstacksdk project
# (https://launchpad.net/python-openstacksdk) instead.
#
########################################################################
def find_resource(manager, name_or_id, **find_args):
"""Look for resource in a given manager.
Used as a helper for the _find_* methods.
Example:
.. code-block:: python
def _find_hypervisor(cs, hypervisor):
#Get a hypervisor by name or ID.
return cliutils.find_resource(cs.hypervisors, hypervisor)
"""
# first try to get entity as integer id
try:
return manager.get(int(name_or_id))
<|code_end|>
with the help of current file imports:
from oslo_utils import encodeutils
from oslo_utils import uuidutils
from glanceclient._i18n import _
from glanceclient.v1.apiclient import exceptions
and context from other files:
# Path: glanceclient/_i18n.py
#
# Path: glanceclient/v1/apiclient/exceptions.py
# class ClientException(Exception):
# class ValidationError(ClientException):
# class UnsupportedVersion(ClientException):
# class CommandError(ClientException):
# class AuthorizationFailure(ClientException):
# class ConnectionError(ClientException):
# class ConnectionRefused(ConnectionError):
# class AuthPluginOptionsMissing(AuthorizationFailure):
# class AuthSystemNotFound(AuthorizationFailure):
# class NoUniqueMatch(ClientException):
# class EndpointException(ClientException):
# class EndpointNotFound(EndpointException):
# class AmbiguousEndpoints(EndpointException):
# class HttpError(ClientException):
# class HTTPRedirection(HttpError):
# class HTTPClientError(HttpError):
# class HttpServerError(HttpError):
# class MultipleChoices(HTTPRedirection):
# class BadRequest(HTTPClientError):
# class Unauthorized(HTTPClientError):
# class PaymentRequired(HTTPClientError):
# class Forbidden(HTTPClientError):
# class NotFound(HTTPClientError):
# class MethodNotAllowed(HTTPClientError):
# class NotAcceptable(HTTPClientError):
# class ProxyAuthenticationRequired(HTTPClientError):
# class RequestTimeout(HTTPClientError):
# class Conflict(HTTPClientError):
# class Gone(HTTPClientError):
# class LengthRequired(HTTPClientError):
# class PreconditionFailed(HTTPClientError):
# class RequestEntityTooLarge(HTTPClientError):
# class RequestUriTooLong(HTTPClientError):
# class UnsupportedMediaType(HTTPClientError):
# class RequestedRangeNotSatisfiable(HTTPClientError):
# class ExpectationFailed(HTTPClientError):
# class UnprocessableEntity(HTTPClientError):
# class InternalServerError(HttpServerError):
# class HttpNotImplemented(HttpServerError):
# class BadGateway(HttpServerError):
# class ServiceUnavailable(HttpServerError):
# class GatewayTimeout(HttpServerError):
# class HttpVersionNotSupported(HttpServerError):
# def __init__(self, opt_names):
# def __init__(self, auth_system):
# def __init__(self, endpoints=None):
# def __init__(self, message=None, details=None,
# response=None, request_id=None,
# url=None, method=None, http_status=None):
# def __init__(self, *args, **kwargs):
# def from_response(response, method, url):
, which may contain function names, class names, or code. Output only the next line. | except (TypeError, ValueError, exceptions.NotFound): |
Using the snippet: <|code_start|> HTTPSConnection = http.client.HTTPSConnection
Connection = SSL.Connection
def verify_callback(host=None):
"""Provide wrapper for do_verify_callback.
We use a partial around the 'real' verify_callback function
so that we can stash the host value without holding a
reference on the VerifiedHTTPSConnection.
"""
def wrapper(connection, x509, errnum,
depth, preverify_ok, host=host):
return do_verify_callback(connection, x509, errnum,
depth, preverify_ok, host=host)
return wrapper
def do_verify_callback(connection, x509, errnum,
depth, preverify_ok, host=None):
"""Verify the server's SSL certificate.
This is a standalone function rather than a method to avoid
issues around closing sockets if a reference is held on
a VerifiedHTTPSConnection by the callback function.
"""
if x509.has_expired():
msg = "SSL Certificate expired on '%s'" % x509.get_notAfter()
<|code_end|>
, determine the next line of code. You have imports:
import socket
import ssl
import struct
import OpenSSL
import http.client
from eventlet import patcher
from eventlet.green.httplib import HTTPSConnection
from eventlet.green.OpenSSL.SSL import GreenConnection as Connection
from OpenSSL import SSL
from glanceclient import exc
and context (class names, function names, or code) available:
# Path: glanceclient/exc.py
# class BaseException(Exception):
# class CommandError(BaseException):
# class InvalidEndpoint(BaseException):
# class CommunicationError(BaseException):
# class ClientException(Exception):
# class HTTPException(ClientException):
# class HTTPMultipleChoices(HTTPException):
# class BadRequest(HTTPException):
# class HTTPBadRequest(BadRequest):
# class Unauthorized(HTTPException):
# class HTTPUnauthorized(Unauthorized):
# class Forbidden(HTTPException):
# class HTTPForbidden(Forbidden):
# class NotFound(HTTPException):
# class HTTPNotFound(NotFound):
# class HTTPMethodNotAllowed(HTTPException):
# class Conflict(HTTPException):
# class HTTPConflict(Conflict):
# class OverLimit(HTTPException):
# class HTTPOverLimit(OverLimit):
# class HTTPInternalServerError(HTTPException):
# class HTTPNotImplemented(HTTPException):
# class HTTPBadGateway(HTTPException):
# class ServiceUnavailable(HTTPException):
# class HTTPServiceUnavailable(ServiceUnavailable):
# class NoTokenLookupException(Exception):
# class EndpointNotFound(Exception):
# class SSLConfigurationError(BaseException):
# class SSLCertificateError(BaseException):
# def __init__(self, message=None):
# def __str__(self):
# def __init__(self, details=None):
# def __str__(self):
# def __str__(self):
# def from_response(response, body=None):
. Output only the next line. | raise exc.SSLCertificateError(msg) |
Continue the code snippet: <|code_start|># Copyright 2022 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
self.fake_client = mock.MagicMock()
<|code_end|>
. Use current file imports:
import testtools
from unittest import mock
from glanceclient.v2 import info
and context (classes, functions, or code) from other files:
# Path: glanceclient/v2/info.py
# class Controller:
# def __init__(self, http_client, schema_client):
# def get_usage(self, **kwargs):
. Output only the next line. | self.info_controller = info.Controller(self.fake_client, None) |
Predict the next line after this snippet: <|code_start|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
IMAGE = {"protected": False,
"disk_format": "qcow2",
"name": "glance_functional_test_image.img",
"visibility": "private",
"container_format": "bare"}
<|code_end|>
using the current file's imports:
from glanceclient.tests.functional import base
import time
and any relevant context from other files:
# Path: glanceclient/tests/functional/base.py
# def credentials(cloud='devstack-admin'):
# def _get_clients(self):
# def glance(self, *args, **kwargs):
# def glance_pyclient(self):
# def __init__(self, **kwargs):
# def __init__(self, keystone, version="2"):
# def find(self, image_name):
# class ClientTestBase(base.ClientTestBase):
# class Keystone(object):
# class Glance(object):
. Output only the next line. | class HttpHeadersTest(base.ClientTestBase): |
Next line prediction: <|code_start|> '/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, tag_value=TAG): {
'DELETE': (
{},
None,
),
'PUT': (
{},
{
'image_id': IMAGE,
'tag_value': TAG
}
),
}
}
schema_fixtures = {
'tag': {
'GET': (
{},
{'name': 'image', 'properties': {'image_id': {}, 'tags': {}}}
)
}
}
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
self.api = utils.FakeAPI(data_fixtures)
self.schema_api = utils.FakeSchemaAPI(schema_fixtures)
<|code_end|>
. Use current file imports:
(import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import image_tags)
and context including class names, function names, or small code snippets from other files:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/image_tags.py
# class Controller(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def update(self, image_id, tag_value):
# def delete(self, image_id, tag_value):
. Output only the next line. | self.controller = base.BaseController(self.api, self.schema_api, |
Given the following code snippet before the placeholder: <|code_start|>
data_fixtures = {
'/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, tag_value=TAG): {
'DELETE': (
{},
None,
),
'PUT': (
{},
{
'image_id': IMAGE,
'tag_value': TAG
}
),
}
}
schema_fixtures = {
'tag': {
'GET': (
{},
{'name': 'image', 'properties': {'image_id': {}, 'tags': {}}}
)
}
}
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
<|code_end|>
, predict the next line using imports from the current file:
import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import image_tags
and context including class names, function names, and sometimes code from other files:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/image_tags.py
# class Controller(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def update(self, image_id, tag_value):
# def delete(self, image_id, tag_value):
. Output only the next line. | self.api = utils.FakeAPI(data_fixtures) |
Predict the next line after this snippet: <|code_start|> 'DELETE': (
{},
None,
),
'PUT': (
{},
{
'image_id': IMAGE,
'tag_value': TAG
}
),
}
}
schema_fixtures = {
'tag': {
'GET': (
{},
{'name': 'image', 'properties': {'image_id': {}, 'tags': {}}}
)
}
}
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
self.api = utils.FakeAPI(data_fixtures)
self.schema_api = utils.FakeSchemaAPI(schema_fixtures)
self.controller = base.BaseController(self.api, self.schema_api,
<|code_end|>
using the current file's imports:
import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import image_tags
and any relevant context from other files:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/image_tags.py
# class Controller(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def update(self, image_id, tag_value):
# def delete(self, image_id, tag_value):
. Output only the next line. | image_tags.Controller) |
Based on the snippet: <|code_start|> # is required because the test_shell.do_xxx(gc, args) methods
# expects the args to be attributes of an object. If passed as
# dict directly, it throws an AttributeError.
class Args(object):
def __init__(self, entries):
self.__dict__.update(entries)
return Args(args)
def _mock_glance_client(self):
my_mocked_gc = mock.Mock()
my_mocked_gc.get.return_value = {}
return my_mocked_gc
def tearDown(self):
super(ShellInvalidEndpointandParameterTest, self).tearDown()
os.environ = self.old_environment
self.patched.stop()
def run_command(self, cmd):
self.shell.main(cmd.split())
def assert_called(self, method, url, body=None, **kwargs):
return self.shell.cs.assert_called(method, url, body, **kwargs)
def assert_called_anytime(self, method, url, body=None):
return self.shell.cs.assert_called_anytime(method, url, body)
def test_image_list_invalid_endpoint(self):
self.assertRaises(
<|code_end|>
, predict the immediate next line with the help of imports:
import argparse
import io
import json
import os
import subprocess
import tempfile
import testtools
import glanceclient.v1.client as client
import glanceclient.v1.images
import glanceclient.v1.shell as v1shell
from unittest import mock
from glanceclient import exc
from glanceclient import shell
from glanceclient.tests import utils
and context (classes, functions, sometimes code) from other files:
# Path: glanceclient/exc.py
# class BaseException(Exception):
# class CommandError(BaseException):
# class InvalidEndpoint(BaseException):
# class CommunicationError(BaseException):
# class ClientException(Exception):
# class HTTPException(ClientException):
# class HTTPMultipleChoices(HTTPException):
# class BadRequest(HTTPException):
# class HTTPBadRequest(BadRequest):
# class Unauthorized(HTTPException):
# class HTTPUnauthorized(Unauthorized):
# class Forbidden(HTTPException):
# class HTTPForbidden(Forbidden):
# class NotFound(HTTPException):
# class HTTPNotFound(NotFound):
# class HTTPMethodNotAllowed(HTTPException):
# class Conflict(HTTPException):
# class HTTPConflict(Conflict):
# class OverLimit(HTTPException):
# class HTTPOverLimit(OverLimit):
# class HTTPInternalServerError(HTTPException):
# class HTTPNotImplemented(HTTPException):
# class HTTPBadGateway(HTTPException):
# class ServiceUnavailable(HTTPException):
# class HTTPServiceUnavailable(ServiceUnavailable):
# class NoTokenLookupException(Exception):
# class EndpointNotFound(Exception):
# class SSLConfigurationError(BaseException):
# class SSLCertificateError(BaseException):
# def __init__(self, message=None):
# def __str__(self):
# def __init__(self, details=None):
# def __str__(self):
# def __str__(self):
# def from_response(response, body=None):
#
# Path: glanceclient/shell.py
# SUPPORTED_VERSIONS = [1, 2]
# LOG = logging.getLogger('glanceclient')
# class OpenStackImagesShell(object):
# class HelpFormatter(argparse.HelpFormatter):
# def _append_global_identity_args(self, parser, argv):
# def get_base_parser(self, argv):
# def get_subcommand_parser(self, version, argv=None):
# def _find_actions(self, subparsers, actions_module):
# def _add_bash_completion_subparser(self, subparsers):
# def _get_image_url(self, args):
# def _discover_auth_versions(self, session, auth_url):
# def _get_keystone_auth_plugin(self, ks_session, **kwargs):
# def _get_kwargs_to_create_auth_plugin(self, args):
# def _get_versioned_client(self, api_version, args):
# def _cache_schemas(self, options, client, home_dir='~/.glanceclient'):
# def main(self, argv):
# def _get_subparser(api_version):
# def do_help(self, args, parser):
# def do_bash_completion(self, _args):
# def start_section(self, heading):
# def main():
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
. Output only the next line. | exc.CommunicationError, self.run_command, 'image-list') |
Using the snippet: <|code_start|> '/v1/images/70aa106f-3750-4d7c-a5ce-0a535ac08d0a': {
'HEAD': (
{
'x-image-meta-id': '70aa106f-3750-4d7c-a5ce-0a535ac08d0a',
'x-image-meta-status': 'deleted'
},
None
)
}
}
class ShellInvalidEndpointandParameterTest(utils.TestCase):
# Patch os.environ to avoid required auth info.
def setUp(self):
"""Run before each test."""
super(ShellInvalidEndpointandParameterTest, self).setUp()
self.old_environment = os.environ.copy()
os.environ = {
'OS_USERNAME': 'username',
'OS_PASSWORD': 'password',
'OS_TENANT_ID': 'tenant_id',
'OS_TOKEN_ID': 'test',
'OS_AUTH_URL': 'http://127.0.0.1:5000/v2.0/',
'OS_AUTH_TOKEN': 'pass',
'OS_IMAGE_API_VERSION': '1',
'OS_REGION_NAME': 'test',
'OS_IMAGE_URL': 'http://is.invalid'}
<|code_end|>
, determine the next line of code. You have imports:
import argparse
import io
import json
import os
import subprocess
import tempfile
import testtools
import glanceclient.v1.client as client
import glanceclient.v1.images
import glanceclient.v1.shell as v1shell
from unittest import mock
from glanceclient import exc
from glanceclient import shell
from glanceclient.tests import utils
and context (class names, function names, or code) available:
# Path: glanceclient/exc.py
# class BaseException(Exception):
# class CommandError(BaseException):
# class InvalidEndpoint(BaseException):
# class CommunicationError(BaseException):
# class ClientException(Exception):
# class HTTPException(ClientException):
# class HTTPMultipleChoices(HTTPException):
# class BadRequest(HTTPException):
# class HTTPBadRequest(BadRequest):
# class Unauthorized(HTTPException):
# class HTTPUnauthorized(Unauthorized):
# class Forbidden(HTTPException):
# class HTTPForbidden(Forbidden):
# class NotFound(HTTPException):
# class HTTPNotFound(NotFound):
# class HTTPMethodNotAllowed(HTTPException):
# class Conflict(HTTPException):
# class HTTPConflict(Conflict):
# class OverLimit(HTTPException):
# class HTTPOverLimit(OverLimit):
# class HTTPInternalServerError(HTTPException):
# class HTTPNotImplemented(HTTPException):
# class HTTPBadGateway(HTTPException):
# class ServiceUnavailable(HTTPException):
# class HTTPServiceUnavailable(ServiceUnavailable):
# class NoTokenLookupException(Exception):
# class EndpointNotFound(Exception):
# class SSLConfigurationError(BaseException):
# class SSLCertificateError(BaseException):
# def __init__(self, message=None):
# def __str__(self):
# def __init__(self, details=None):
# def __str__(self):
# def __str__(self):
# def from_response(response, body=None):
#
# Path: glanceclient/shell.py
# SUPPORTED_VERSIONS = [1, 2]
# LOG = logging.getLogger('glanceclient')
# class OpenStackImagesShell(object):
# class HelpFormatter(argparse.HelpFormatter):
# def _append_global_identity_args(self, parser, argv):
# def get_base_parser(self, argv):
# def get_subcommand_parser(self, version, argv=None):
# def _find_actions(self, subparsers, actions_module):
# def _add_bash_completion_subparser(self, subparsers):
# def _get_image_url(self, args):
# def _discover_auth_versions(self, session, auth_url):
# def _get_keystone_auth_plugin(self, ks_session, **kwargs):
# def _get_kwargs_to_create_auth_plugin(self, args):
# def _get_versioned_client(self, api_version, args):
# def _cache_schemas(self, options, client, home_dir='~/.glanceclient'):
# def main(self, argv):
# def _get_subparser(api_version):
# def do_help(self, args, parser):
# def do_bash_completion(self, _args):
# def start_section(self, heading):
# def main():
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
. Output only the next line. | self.shell = shell.OpenStackImagesShell() |
Predict the next line after this snippet: <|code_start|> 'x-image-meta-disk_format': 'ami',
},
None
)
},
'/v1/images/detail?limit=20&name=70aa106f-3750-4d7c-a5ce-0a535ac08d0a': {
'GET': (
{},
{'images': [
{
'id': '70aa106f-3750-4d7c-a5ce-0a535ac08d0a',
'name': 'imagedeleted',
'deleted': True,
'status': 'deleted',
},
]},
),
},
'/v1/images/70aa106f-3750-4d7c-a5ce-0a535ac08d0a': {
'HEAD': (
{
'x-image-meta-id': '70aa106f-3750-4d7c-a5ce-0a535ac08d0a',
'x-image-meta-status': 'deleted'
},
None
)
}
}
<|code_end|>
using the current file's imports:
import argparse
import io
import json
import os
import subprocess
import tempfile
import testtools
import glanceclient.v1.client as client
import glanceclient.v1.images
import glanceclient.v1.shell as v1shell
from unittest import mock
from glanceclient import exc
from glanceclient import shell
from glanceclient.tests import utils
and any relevant context from other files:
# Path: glanceclient/exc.py
# class BaseException(Exception):
# class CommandError(BaseException):
# class InvalidEndpoint(BaseException):
# class CommunicationError(BaseException):
# class ClientException(Exception):
# class HTTPException(ClientException):
# class HTTPMultipleChoices(HTTPException):
# class BadRequest(HTTPException):
# class HTTPBadRequest(BadRequest):
# class Unauthorized(HTTPException):
# class HTTPUnauthorized(Unauthorized):
# class Forbidden(HTTPException):
# class HTTPForbidden(Forbidden):
# class NotFound(HTTPException):
# class HTTPNotFound(NotFound):
# class HTTPMethodNotAllowed(HTTPException):
# class Conflict(HTTPException):
# class HTTPConflict(Conflict):
# class OverLimit(HTTPException):
# class HTTPOverLimit(OverLimit):
# class HTTPInternalServerError(HTTPException):
# class HTTPNotImplemented(HTTPException):
# class HTTPBadGateway(HTTPException):
# class ServiceUnavailable(HTTPException):
# class HTTPServiceUnavailable(ServiceUnavailable):
# class NoTokenLookupException(Exception):
# class EndpointNotFound(Exception):
# class SSLConfigurationError(BaseException):
# class SSLCertificateError(BaseException):
# def __init__(self, message=None):
# def __str__(self):
# def __init__(self, details=None):
# def __str__(self):
# def __str__(self):
# def from_response(response, body=None):
#
# Path: glanceclient/shell.py
# SUPPORTED_VERSIONS = [1, 2]
# LOG = logging.getLogger('glanceclient')
# class OpenStackImagesShell(object):
# class HelpFormatter(argparse.HelpFormatter):
# def _append_global_identity_args(self, parser, argv):
# def get_base_parser(self, argv):
# def get_subcommand_parser(self, version, argv=None):
# def _find_actions(self, subparsers, actions_module):
# def _add_bash_completion_subparser(self, subparsers):
# def _get_image_url(self, args):
# def _discover_auth_versions(self, session, auth_url):
# def _get_keystone_auth_plugin(self, ks_session, **kwargs):
# def _get_kwargs_to_create_auth_plugin(self, args):
# def _get_versioned_client(self, api_version, args):
# def _cache_schemas(self, options, client, home_dir='~/.glanceclient'):
# def main(self, argv):
# def _get_subparser(api_version):
# def do_help(self, args, parser):
# def do_bash_completion(self, _args):
# def start_section(self, heading):
# def main():
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
. Output only the next line. | class ShellInvalidEndpointandParameterTest(utils.TestCase): |
Given snippet: <|code_start|> {
"status": "CURRENT",
"id": "v2.3",
"links": [
{
"href": "http://10.229.45.145:9292/v2/",
"rel": "self"
}
]
},
{
"status": "SUPPORTED",
"id": "v1.0",
"links": [
{
"href": "http://10.229.45.145:9292/v1/",
"rel": "self"
}
]
}
]}
)
}
}
class TestVersions(testtools.TestCase):
def setUp(self):
super(TestVersions, self).setUp()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import testtools
from glanceclient.tests import utils
from glanceclient.v2 import versions
and context:
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
which might include code, classes, or functions. Output only the next line. | self.api = utils.FakeAPI(fixtures) |
Given the code snippet: <|code_start|># a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def original_only(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if not hasattr(self.client, 'log_curl_request'):
self.skipTest('Skip logging tests for session client')
return f(self, *args, **kwargs)
class TestClient(testtools.TestCase):
scenarios = [
('httpclient', {'create_client': '_create_http_client'}),
('session', {'create_client': '_create_session_client'})
]
def _create_http_client(self):
<|code_end|>
, generate the next line using the imports in this file:
import functools
import json
import logging
import uuid
import fixtures
import io
import requests
import testtools
import types
import glanceclient
from unittest import mock
from keystoneauth1 import session
from keystoneauth1 import token_endpoint
from oslo_utils import encodeutils
from requests_mock.contrib import fixture
from urllib import parse
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from testtools import matchers
from glanceclient.common import http
from glanceclient.tests import utils
and context (functions, classes, or occasionally code) from other files:
# Path: glanceclient/common/http.py
# LOG = logging.getLogger(__name__)
# USER_AGENT = 'python-glanceclient'
# CHUNKSIZE = 1024 * 64 # 64kB
# REQ_ID_HEADER = 'X-OpenStack-Request-ID'
# TOKEN_HEADERS = ['X-Auth-Token', 'X-Service-Token']
# def encode_headers(headers):
# def _chunk_body(body):
# def _set_common_request_kwargs(self, headers, kwargs):
# def _handle_response(self, resp):
# def __init__(self, endpoint, **kwargs):
# def __del__(self):
# def parse_endpoint(endpoint):
# def log_curl_request(self, method, url, headers, data, kwargs):
# def log_http_response(resp):
# def _request(self, method, url, **kwargs):
# def head(self, url, **kwargs):
# def get(self, url, **kwargs):
# def post(self, url, **kwargs):
# def put(self, url, **kwargs):
# def patch(self, url, **kwargs):
# def delete(self, url, **kwargs):
# def _close_after_stream(response, chunk_size):
# def __init__(self, session, **kwargs):
# def request(self, url, method, **kwargs):
# def get_http_client(endpoint=None, session=None, **kwargs):
# class _BaseHTTPClient(object):
# class HTTPClient(_BaseHTTPClient):
# class SessionClient(adapter.Adapter, _BaseHTTPClient):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
. Output only the next line. | return http.HTTPClient(self.endpoint, token=self.token) |
Using the snippet: <|code_start|>
def test_http_json(self):
data = {"test": "json_request"}
path = '/v1/images'
text = 'OK'
self.mock.post(self.endpoint + path, text=text)
headers = {"test": u'chunked_request'}
resp, body = self.client.post(path, headers=headers, data=data)
self.assertEqual(text, resp.text)
self.assertIsInstance(self.mock.last_request.body, str)
self.assertEqual(data, json.loads(self.mock.last_request.body))
def test_http_chunked_response(self):
data = "TEST"
path = '/v1/images/'
self.mock.get(self.endpoint + path, body=io.StringIO(data),
headers={"Content-Type": "application/octet-stream"})
resp, body = self.client.get(path)
self.assertIsInstance(body, types.GeneratorType)
self.assertEqual([data], list(body))
@original_only
def test_log_http_response_with_non_ascii_char(self):
try:
response = 'Ok'
headers = {"Content-Type": "text/plain",
"test": "value1\xa5\xa6"}
<|code_end|>
, determine the next line of code. You have imports:
import functools
import json
import logging
import uuid
import fixtures
import io
import requests
import testtools
import types
import glanceclient
from unittest import mock
from keystoneauth1 import session
from keystoneauth1 import token_endpoint
from oslo_utils import encodeutils
from requests_mock.contrib import fixture
from urllib import parse
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from testtools import matchers
from glanceclient.common import http
from glanceclient.tests import utils
and context (class names, function names, or code) available:
# Path: glanceclient/common/http.py
# LOG = logging.getLogger(__name__)
# USER_AGENT = 'python-glanceclient'
# CHUNKSIZE = 1024 * 64 # 64kB
# REQ_ID_HEADER = 'X-OpenStack-Request-ID'
# TOKEN_HEADERS = ['X-Auth-Token', 'X-Service-Token']
# def encode_headers(headers):
# def _chunk_body(body):
# def _set_common_request_kwargs(self, headers, kwargs):
# def _handle_response(self, resp):
# def __init__(self, endpoint, **kwargs):
# def __del__(self):
# def parse_endpoint(endpoint):
# def log_curl_request(self, method, url, headers, data, kwargs):
# def log_http_response(resp):
# def _request(self, method, url, **kwargs):
# def head(self, url, **kwargs):
# def get(self, url, **kwargs):
# def post(self, url, **kwargs):
# def put(self, url, **kwargs):
# def patch(self, url, **kwargs):
# def delete(self, url, **kwargs):
# def _close_after_stream(response, chunk_size):
# def __init__(self, session, **kwargs):
# def request(self, url, method, **kwargs):
# def get_http_client(endpoint=None, session=None, **kwargs):
# class _BaseHTTPClient(object):
# class HTTPClient(_BaseHTTPClient):
# class SessionClient(adapter.Adapter, _BaseHTTPClient):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
. Output only the next line. | fake = utils.FakeResponse(headers, io.StringIO(response)) |
Next line prediction: <|code_start|> ),
},
}
schema_fixtures = {
'task': {
'GET': (
{},
{
'name': 'task',
'properties': {
'id': {},
'type': {},
'status': {},
'input': {},
'result': {},
'message': {},
},
'additionalProperties': False,
}
)
}
}
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
self.api = utils.FakeAPI(fixtures)
self.schema_api = utils.FakeSchemaAPI(schema_fixtures)
<|code_end|>
. Use current file imports:
(import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import tasks)
and context including class names, function names, or small code snippets from other files:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/tasks.py
# DEFAULT_PAGE_SIZE = 20
# SORT_DIR_VALUES = ('asc', 'desc')
# SORT_KEY_VALUES = ('id', 'type', 'status')
# class Controller(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def list(self, **kwargs):
# def paginate(url):
# def get(self, task_id):
# def create(self, **kwargs):
. Output only the next line. | self.controller = base.BaseController(self.api, self.schema_api, |
Given snippet: <|code_start|> },
]},
),
},
}
schema_fixtures = {
'task': {
'GET': (
{},
{
'name': 'task',
'properties': {
'id': {},
'type': {},
'status': {},
'input': {},
'result': {},
'message': {},
},
'additionalProperties': False,
}
)
}
}
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import tasks
and context:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/tasks.py
# DEFAULT_PAGE_SIZE = 20
# SORT_DIR_VALUES = ('asc', 'desc')
# SORT_KEY_VALUES = ('id', 'type', 'status')
# class Controller(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def list(self, **kwargs):
# def paginate(url):
# def get(self, task_id):
# def create(self, **kwargs):
which might include code, classes, or functions. Output only the next line. | self.api = utils.FakeAPI(fixtures) |
Given snippet: <|code_start|># Copyright 2013 OpenStack Foundation.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
_OWNED_TASK_ID = 'a4963502-acc7-42ba-ad60-5aa0962b7faf'
_OWNER_ID = '6bd473f0-79ae-40ad-a927-e07ec37b642f'
_FAKE_OWNER_ID = '63e7f218-29de-4477-abdc-8db7c9533188'
_PENDING_ID = '3a4560a1-e585-443e-9b39-553b46ec92d1'
_PROCESSING_ID = '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810'
fixtures = {
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import tasks
and context:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/tasks.py
# DEFAULT_PAGE_SIZE = 20
# SORT_DIR_VALUES = ('asc', 'desc')
# SORT_KEY_VALUES = ('id', 'type', 'status')
# class Controller(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def list(self, **kwargs):
# def paginate(url):
# def get(self, task_id):
# def create(self, **kwargs):
which might include code, classes, or functions. Output only the next line. | '/v2/tasks?limit=%d' % tasks.DEFAULT_PAGE_SIZE: { |
Given the following code snippet before the placeholder: <|code_start|> "type": "string",
"readOnly": True,
"description": "Date and time of namespace creation ",
"format": "date-time"
},
"namespace": {
"type": "string",
"description": "The unique namespace text.",
"maxLength": 80
},
"protected": {
"type": "boolean",
"description": "If true, namespace will not be "
"deletable."
},
"schema": {
"type": "string"
}
}
}
),
}
}
class TestNamespaceController(testtools.TestCase):
def setUp(self):
super(TestNamespaceController, self).setUp()
self.api = utils.FakeAPI(data_fixtures)
self.schema_api = utils.FakeSchemaAPI(schema_fixtures)
<|code_end|>
, predict the next line using imports from the current file:
import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import metadefs
and context including class names, function names, and sometimes code from other files:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/metadefs.py
# DEFAULT_PAGE_SIZE = 20
# SORT_DIR_VALUES = ('asc', 'desc')
# SORT_KEY_VALUES = ('created_at', 'namespace')
# class NamespaceController(object):
# class ResourceTypeController(object):
# class PropertyController(object):
# class ObjectController(object):
# class TagController(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, **kwargs):
# def update(self, namespace_name, **kwargs):
# def get(self, namespace, **kwargs):
# def _get(self, namespace, header=None, **kwargs):
# def list(self, **kwargs):
# def paginate(url):
# def delete(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def associate(self, namespace, **kwargs):
# def deassociate(self, namespace, resource):
# def list(self):
# def get(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, prop_name, **kwargs):
# def get(self, namespace, prop_name):
# def _get(self, namespace, prop_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, prop_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, object_name, **kwargs):
# def get(self, namespace, object_name):
# def _get(self, namespace, object_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, object_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, tag_name):
# def create_multiple(self, namespace, **kwargs):
# def update(self, namespace, tag_name, **kwargs):
# def get(self, namespace, tag_name):
# def _get(self, namespace, tag_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, tag_name):
# def delete_all(self, namespace):
. Output only the next line. | self.controller = base.BaseController(self.api, self.schema_api, |
Next line prediction: <|code_start|> },
"created_at": {
"type": "string",
"readOnly": True,
"description": "Date and time of namespace creation ",
"format": "date-time"
},
"namespace": {
"type": "string",
"description": "The unique namespace text.",
"maxLength": 80
},
"protected": {
"type": "boolean",
"description": "If true, namespace will not be "
"deletable."
},
"schema": {
"type": "string"
}
}
}
),
}
}
class TestNamespaceController(testtools.TestCase):
def setUp(self):
super(TestNamespaceController, self).setUp()
<|code_end|>
. Use current file imports:
(import testtools
from glanceclient.tests.unit.v2 import base
from glanceclient.tests import utils
from glanceclient.v2 import metadefs)
and context including class names, function names, or small code snippets from other files:
# Path: glanceclient/tests/unit/v2/base.py
# class BaseController(testtools.TestCase):
# class BaseResourceTypeController(BaseController):
# def __init__(self, api, schema_api, controller_class):
# def _assertRequestId(self, obj):
# def list(self, *args, **kwargs):
# def get_associated_image_tasks(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def create(self, *args, **kwargs):
# def create_multiple(self, *args, **kwargs):
# def update(self, *args, **properties):
# def delete(self, *args):
# def delete_all(self, *args):
# def deactivate(self, *args):
# def reactivate(self, *args):
# def upload(self, *args, **kwargs):
# def data(self, *args, **kwargs):
# def delete_locations(self, *args):
# def add_location(self, *args, **kwargs):
# def update_location(self, *args, **kwargs):
# def associate(self, *args, **kwargs):
# def deassociate(self, *args):
# def image_import(self, *args):
# def __init__(self, api, schema_api, controller_class):
# def get(self, *args, **kwargs):
#
# Path: glanceclient/tests/utils.py
# class FakeAPI(object):
# class FakeSchemaAPI(FakeAPI):
# class RawRequest(object):
# class FakeResponse(object):
# class TestCase(testtools.TestCase):
# class FakeTTYStdout(io.StringIO):
# class FakeNoTTYStdout(FakeTTYStdout):
# def __init__(self, fixtures):
# def _request(self, method, url, headers=None, data=None,
# content_length=None):
# def get(self, *args, **kwargs):
# def post(self, *args, **kwargs):
# def put(self, *args, **kwargs):
# def patch(self, *args, **kwargs):
# def delete(self, *args, **kwargs):
# def head(self, *args, **kwargs):
# def get(self, *args, **kwargs):
# def __init__(self, headers, body=None,
# version=1.0, status=200, reason="Ok"):
# def getheaders(self):
# def getheader(self, key, default):
# def read(self, amt):
# def __init__(self, headers=None, body=None,
# version=1.0, status_code=200, reason="Ok"):
# def status(self):
# def ok(self):
# def read(self, amt):
# def close(self):
# def content(self):
# def text(self):
# def json(self, **kwargs):
# def iter_content(self, chunk_size=1, decode_unicode=False):
# def release_conn(self, **kwargs):
# def isatty(self):
# def write(self, data):
# def isatty(self):
# def sort_url_by_query_keys(url):
# def build_call_record(method, url, headers, data):
# TEST_REQUEST_BASE = {
# 'config': {'danger_mode': False},
# 'verify': True}
#
# Path: glanceclient/v2/metadefs.py
# DEFAULT_PAGE_SIZE = 20
# SORT_DIR_VALUES = ('asc', 'desc')
# SORT_KEY_VALUES = ('created_at', 'namespace')
# class NamespaceController(object):
# class ResourceTypeController(object):
# class PropertyController(object):
# class ObjectController(object):
# class TagController(object):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, **kwargs):
# def update(self, namespace_name, **kwargs):
# def get(self, namespace, **kwargs):
# def _get(self, namespace, header=None, **kwargs):
# def list(self, **kwargs):
# def paginate(url):
# def delete(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def associate(self, namespace, **kwargs):
# def deassociate(self, namespace, resource):
# def list(self):
# def get(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, prop_name, **kwargs):
# def get(self, namespace, prop_name):
# def _get(self, namespace, prop_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, prop_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, **kwargs):
# def update(self, namespace, object_name, **kwargs):
# def get(self, namespace, object_name):
# def _get(self, namespace, object_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, object_name):
# def delete_all(self, namespace):
# def __init__(self, http_client, schema_client):
# def model(self):
# def create(self, namespace, tag_name):
# def create_multiple(self, namespace, **kwargs):
# def update(self, namespace, tag_name, **kwargs):
# def get(self, namespace, tag_name):
# def _get(self, namespace, tag_name, header=None):
# def list(self, namespace, **kwargs):
# def delete(self, namespace, tag_name):
# def delete_all(self, namespace):
. Output only the next line. | self.api = utils.FakeAPI(data_fixtures) |
Predict the next line for this snippet: <|code_start|>#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def Client(version=None, endpoint=None, session=None, *args, **kwargs):
"""Client for the OpenStack Images API.
Generic client for the OpenStack Images API. See version classes
for specific details.
:param string version: The version of API to use.
:param session: A keystoneauth1 session that should be used for transport.
:type session: keystoneauth1.session.Session
"""
# FIXME(jamielennox): Add a deprecation warning if no session is passed.
# Leaving it as an option until we can ensure nothing break when we switch.
if session:
if endpoint:
kwargs.setdefault('endpoint_override', endpoint)
if not version:
<|code_end|>
with the help of current file imports:
import warnings
from oslo_utils import importutils
from glanceclient.common import utils
and context from other files:
# Path: glanceclient/common/utils.py
# SENSITIVE_HEADERS = ('X-Auth-Token', )
# REQUIRED_FIELDS_ON_DATA = ('disk_format', 'container_format')
# def arg(*args, **kwargs):
# def _decorator(func):
# def on_data_require_fields(data_fields, required=REQUIRED_FIELDS_ON_DATA):
# def args_decorator(func):
# def prepare_fields(fields):
# def func_wrapper(gc, args):
# def schema_args(schema_getter, omit=None):
# def _decorator(func):
# def pretty_choice_list(l):
# def has_version(client, version):
# def print_cached_images(cached_images):
# def print_dict_list(objects, fields):
# def print_list(objs, fields, formatters=None, field_settings=None):
# def _encode(src):
# def unicode_key_value_to_string(src):
# def print_dict(d, max_column_width=80):
# def find_resource(manager, name_or_id):
# def env(*vars, **kwargs):
# def exit(msg='', exit_code=1):
# def print_err(msg):
# def save_image(data, path):
# def make_size_human_readable(size):
# def get_file_size(file_obj):
# def get_data_file(args):
# def strip_version(endpoint):
# def print_image(image_obj, human_readable=False, max_col_width=None):
# def integrity_iter(iter, checksum):
# def serious_integrity_iter(iter, hasher, hash_value):
# def memoized_property(fn):
# def _memoized_property(self):
# def safe_header(name, value):
# def endpoint_version_from_url(endpoint, default_version=None):
# def debug_enabled(argv):
# def __init__(self, iterable, length):
# def __iter__(self):
# def next(self):
# def __len__(self):
# def __init__(self, wrapped):
# def request_ids(self):
# def wrapped(self):
# def next(self):
# def __init__(self, wrapped):
# def _set_request_ids(self, resp):
# def _next(self):
# def next(self):
# def __next__(self):
# def __iter__(self):
# def request_ids(self):
# def wrapped(self):
# def add_req_id_to_object():
# def inner(wrapped, instance, args, kwargs):
# def add_req_id_to_generator():
# def inner(wrapped, instance, args, kwargs):
# def _extract_request_id(resp):
# class IterableWithLength(object):
# class RequestIdProxy(wrapt.ObjectProxy):
# class GeneratorProxy(wrapt.ObjectProxy):
, which may contain function names, class names, or code. Output only the next line. | __, version = utils.strip_version(endpoint) |
Given snippet: <|code_start|># Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DEFAULT_PAGE_SIZE = 20
SORT_DIR_VALUES = ('asc', 'desc')
SORT_KEY_VALUES = ('id', 'type', 'status')
class Controller(object):
def __init__(self, http_client, schema_client):
self.http_client = http_client
self.schema_client = schema_client
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import urllib.parse
import warlock
from oslo_utils import encodeutils
from glanceclient.common import utils
from glanceclient.v2 import schemas
and context:
# Path: glanceclient/common/utils.py
# SENSITIVE_HEADERS = ('X-Auth-Token', )
# REQUIRED_FIELDS_ON_DATA = ('disk_format', 'container_format')
# def arg(*args, **kwargs):
# def _decorator(func):
# def on_data_require_fields(data_fields, required=REQUIRED_FIELDS_ON_DATA):
# def args_decorator(func):
# def prepare_fields(fields):
# def func_wrapper(gc, args):
# def schema_args(schema_getter, omit=None):
# def _decorator(func):
# def pretty_choice_list(l):
# def has_version(client, version):
# def print_cached_images(cached_images):
# def print_dict_list(objects, fields):
# def print_list(objs, fields, formatters=None, field_settings=None):
# def _encode(src):
# def unicode_key_value_to_string(src):
# def print_dict(d, max_column_width=80):
# def find_resource(manager, name_or_id):
# def env(*vars, **kwargs):
# def exit(msg='', exit_code=1):
# def print_err(msg):
# def save_image(data, path):
# def make_size_human_readable(size):
# def get_file_size(file_obj):
# def get_data_file(args):
# def strip_version(endpoint):
# def print_image(image_obj, human_readable=False, max_col_width=None):
# def integrity_iter(iter, checksum):
# def serious_integrity_iter(iter, hasher, hash_value):
# def memoized_property(fn):
# def _memoized_property(self):
# def safe_header(name, value):
# def endpoint_version_from_url(endpoint, default_version=None):
# def debug_enabled(argv):
# def __init__(self, iterable, length):
# def __iter__(self):
# def next(self):
# def __len__(self):
# def __init__(self, wrapped):
# def request_ids(self):
# def wrapped(self):
# def next(self):
# def __init__(self, wrapped):
# def _set_request_ids(self, resp):
# def _next(self):
# def next(self):
# def __next__(self):
# def __iter__(self):
# def request_ids(self):
# def wrapped(self):
# def add_req_id_to_object():
# def inner(wrapped, instance, args, kwargs):
# def add_req_id_to_generator():
# def inner(wrapped, instance, args, kwargs):
# def _extract_request_id(resp):
# class IterableWithLength(object):
# class RequestIdProxy(wrapt.ObjectProxy):
# class GeneratorProxy(wrapt.ObjectProxy):
#
# Path: glanceclient/v2/schemas.py
# class SchemaBasedModel(warlock.Model):
# class SchemaProperty(object):
# class Schema(object):
# class Controller(object):
# def _make_custom_patch(self, new, original):
# def patch(self):
# def __init__(self, name, **kwargs):
# def translate_schema_properties(schema_properties):
# def __init__(self, raw_schema):
# def is_core_property(self, property_name):
# def is_base_property(self, property_name):
# def _check_property(self, property_name, allow_non_base):
# def raw(self):
# def __init__(self, http_client):
# def get(self, schema_name):
which might include code, classes, or functions. Output only the next line. | @utils.memoized_property |
Predict the next line after this snippet: <|code_start|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DEFAULT_PAGE_SIZE = 20
SORT_DIR_VALUES = ('asc', 'desc')
SORT_KEY_VALUES = ('id', 'type', 'status')
class Controller(object):
def __init__(self, http_client, schema_client):
self.http_client = http_client
self.schema_client = schema_client
@utils.memoized_property
def model(self):
schema = self.schema_client.get('task')
return warlock.model_factory(schema.raw(),
<|code_end|>
using the current file's imports:
import urllib.parse
import warlock
from oslo_utils import encodeutils
from glanceclient.common import utils
from glanceclient.v2 import schemas
and any relevant context from other files:
# Path: glanceclient/common/utils.py
# SENSITIVE_HEADERS = ('X-Auth-Token', )
# REQUIRED_FIELDS_ON_DATA = ('disk_format', 'container_format')
# def arg(*args, **kwargs):
# def _decorator(func):
# def on_data_require_fields(data_fields, required=REQUIRED_FIELDS_ON_DATA):
# def args_decorator(func):
# def prepare_fields(fields):
# def func_wrapper(gc, args):
# def schema_args(schema_getter, omit=None):
# def _decorator(func):
# def pretty_choice_list(l):
# def has_version(client, version):
# def print_cached_images(cached_images):
# def print_dict_list(objects, fields):
# def print_list(objs, fields, formatters=None, field_settings=None):
# def _encode(src):
# def unicode_key_value_to_string(src):
# def print_dict(d, max_column_width=80):
# def find_resource(manager, name_or_id):
# def env(*vars, **kwargs):
# def exit(msg='', exit_code=1):
# def print_err(msg):
# def save_image(data, path):
# def make_size_human_readable(size):
# def get_file_size(file_obj):
# def get_data_file(args):
# def strip_version(endpoint):
# def print_image(image_obj, human_readable=False, max_col_width=None):
# def integrity_iter(iter, checksum):
# def serious_integrity_iter(iter, hasher, hash_value):
# def memoized_property(fn):
# def _memoized_property(self):
# def safe_header(name, value):
# def endpoint_version_from_url(endpoint, default_version=None):
# def debug_enabled(argv):
# def __init__(self, iterable, length):
# def __iter__(self):
# def next(self):
# def __len__(self):
# def __init__(self, wrapped):
# def request_ids(self):
# def wrapped(self):
# def next(self):
# def __init__(self, wrapped):
# def _set_request_ids(self, resp):
# def _next(self):
# def next(self):
# def __next__(self):
# def __iter__(self):
# def request_ids(self):
# def wrapped(self):
# def add_req_id_to_object():
# def inner(wrapped, instance, args, kwargs):
# def add_req_id_to_generator():
# def inner(wrapped, instance, args, kwargs):
# def _extract_request_id(resp):
# class IterableWithLength(object):
# class RequestIdProxy(wrapt.ObjectProxy):
# class GeneratorProxy(wrapt.ObjectProxy):
#
# Path: glanceclient/v2/schemas.py
# class SchemaBasedModel(warlock.Model):
# class SchemaProperty(object):
# class Schema(object):
# class Controller(object):
# def _make_custom_patch(self, new, original):
# def patch(self):
# def __init__(self, name, **kwargs):
# def translate_schema_properties(schema_properties):
# def __init__(self, raw_schema):
# def is_core_property(self, property_name):
# def is_base_property(self, property_name):
# def _check_property(self, property_name, allow_non_base):
# def raw(self):
# def __init__(self, http_client):
# def get(self, schema_name):
. Output only the next line. | base_class=schemas.SchemaBasedModel) |
Given the code snippet: <|code_start|>
The requests library performs SSL certificate validation,
however there is still a need to check that the glance
client is properly integrated with requests so that
cert validation actually happens.
"""
def setUp(self):
# Rather than spinning up a new process, we create
# a thread to perform client/server interaction.
# This should run more quickly.
super(TestHTTPSVerifyCert, self).setUp()
server = ThreadedTCPServer(('127.0.0.1', 0),
ThreadedTCPRequestHandler)
__, self.port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
@mock.patch('sys.stderr')
def test_v1_requests_cert_verification(self, __):
"""v1 regression test for bug 115260."""
port = self.port
url = 'https://0.0.0.0:%d' % port
try:
client = v1.Client(url,
insecure=False,
ssl_compression=True)
client.images.get('image123')
self.fail('No SSL exception has been raised')
<|code_end|>
, generate the next line using the imports in this file:
import os
import ssl
import testtools
import threading
import socketserver
from unittest import mock
from glanceclient import Client
from glanceclient import exc
from glanceclient import v1
from glanceclient import v2
and context (functions, classes, or occasionally code) from other files:
# Path: glanceclient/exc.py
# class BaseException(Exception):
# class CommandError(BaseException):
# class InvalidEndpoint(BaseException):
# class CommunicationError(BaseException):
# class ClientException(Exception):
# class HTTPException(ClientException):
# class HTTPMultipleChoices(HTTPException):
# class BadRequest(HTTPException):
# class HTTPBadRequest(BadRequest):
# class Unauthorized(HTTPException):
# class HTTPUnauthorized(Unauthorized):
# class Forbidden(HTTPException):
# class HTTPForbidden(Forbidden):
# class NotFound(HTTPException):
# class HTTPNotFound(NotFound):
# class HTTPMethodNotAllowed(HTTPException):
# class Conflict(HTTPException):
# class HTTPConflict(Conflict):
# class OverLimit(HTTPException):
# class HTTPOverLimit(OverLimit):
# class HTTPInternalServerError(HTTPException):
# class HTTPNotImplemented(HTTPException):
# class HTTPBadGateway(HTTPException):
# class ServiceUnavailable(HTTPException):
# class HTTPServiceUnavailable(ServiceUnavailable):
# class NoTokenLookupException(Exception):
# class EndpointNotFound(Exception):
# class SSLConfigurationError(BaseException):
# class SSLCertificateError(BaseException):
# def __init__(self, message=None):
# def __str__(self):
# def __init__(self, details=None):
# def __str__(self):
# def __str__(self):
# def from_response(response, body=None):
. Output only the next line. | except exc.CommunicationError as e: |
Continue the code snippet: <|code_start|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
HTML_MSG = """<html>
<head>
<title>404 Entity Not Found</title>
</head>
<body>
<h1>404 Entity Not Found</h1>
Entity could not be found
<br /><br />
</body>
</html>"""
class TestHTTPExceptions(testtools.TestCase):
def test_from_response(self):
"""exc.from_response should return instance of an HTTP exception."""
mock_resp = mock.Mock()
mock_resp.status_code = 400
<|code_end|>
. Use current file imports:
import testtools
from unittest import mock
from glanceclient import exc
and context (classes, functions, or code) from other files:
# Path: glanceclient/exc.py
# class BaseException(Exception):
# class CommandError(BaseException):
# class InvalidEndpoint(BaseException):
# class CommunicationError(BaseException):
# class ClientException(Exception):
# class HTTPException(ClientException):
# class HTTPMultipleChoices(HTTPException):
# class BadRequest(HTTPException):
# class HTTPBadRequest(BadRequest):
# class Unauthorized(HTTPException):
# class HTTPUnauthorized(Unauthorized):
# class Forbidden(HTTPException):
# class HTTPForbidden(Forbidden):
# class NotFound(HTTPException):
# class HTTPNotFound(NotFound):
# class HTTPMethodNotAllowed(HTTPException):
# class Conflict(HTTPException):
# class HTTPConflict(Conflict):
# class OverLimit(HTTPException):
# class HTTPOverLimit(OverLimit):
# class HTTPInternalServerError(HTTPException):
# class HTTPNotImplemented(HTTPException):
# class HTTPBadGateway(HTTPException):
# class ServiceUnavailable(HTTPException):
# class HTTPServiceUnavailable(ServiceUnavailable):
# class NoTokenLookupException(Exception):
# class EndpointNotFound(Exception):
# class SSLConfigurationError(BaseException):
# class SSLCertificateError(BaseException):
# def __init__(self, message=None):
# def __str__(self):
# def __init__(self, details=None):
# def __str__(self):
# def __str__(self):
# def from_response(response, body=None):
. Output only the next line. | out = exc.from_response(mock_resp) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.