Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Predict the next line after this snippet: <|code_start|>
BUCKET_NAME = "thesis-video-data"
STORE_HOST = env("STORE_HOST", "localhost")
ACCESS_KEY = env("STORE_ACCESS_KEY")
SECRET_KEY = env("STORE_SECRET_KEY")
class Uploader(object):
def __init__(self):
if STORE_HOST is None:
raise Exception("Missing minio host info")
if ACCESS_KEY is None or SECRET_KEY is None:
raise Exception("Missing minio credentials")
self.minio_client = Minio(STORE_HOST + ':9000',
access_key=ACCESS_KEY,
<|code_end|>
using the current file's imports:
import glob
from tqdm import tqdm
from analyzer.project import Project, StoragePath
from minio import Minio
from minio.policy import Policy
from minio.error import ResponseError
from analyzer.utils import env
from analyzer.path_utils import filename
from os.path import join
and any relevant context from other files:
# Path: analyzer/project.py
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# class StoragePath(Enum):
# local = 1
# remote = 2
#
# def base_path(self, identifier):
# from analyzer.utils import env
#
# if self == StoragePath.local:
# data_path = env("DATA_DIR", "data")
# return os.path.join(data_path, identifier)
# elif self == StoragePath.remote:
# return identifier
#
# Path: analyzer/utils.py
# def env(name, default=None):
# return getenv(name, default)
#
# Path: analyzer/path_utils.py
# def filename(path):
# return basename(normpath(path))
. Output only the next line. | secret_key=SECRET_KEY, |
Given snippet: <|code_start|>
try:
if not self.minio_client.bucket_exists(BUCKET_NAME):
self.minio_client.make_bucket(BUCKET_NAME, location="us-east-1")
self.minio_client.set_bucket_policy(BUCKET_NAME, "", Policy.READ_ONLY)
except ResponseError as err:
print(err)
def upload_frames(self, project):
source_path = project.folder_path(Project.Folder.frames)
remote_path = project.folder_path(Project.Folder.frames, storage_env=StoragePath.remote)
self.upload_images(source_path, remote_path)
def upload_keyframes(self, project):
source_path = project.folder_path(Project.Folder.keyframes)
remote_path = project.folder_path(Project.Folder.keyframes, storage_env=StoragePath.remote)
self.upload_images(source_path, remote_path)
def upload_slices(self, project):
source_path = project.folder_path(Project.Folder.spatio)
remote_path = project.folder_path(Project.Folder.spatio, storage_env=StoragePath.remote)
self.upload_images(source_path, remote_path)
def upload_keyframe_thumbnails(self, project):
source_path = project.folder_path(Project.Folder.keyframe_thumbnails)
remote_path = project.folder_path(Project.Folder.keyframe_thumbnails, storage_env=StoragePath.remote)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import glob
from tqdm import tqdm
from analyzer.project import Project, StoragePath
from minio import Minio
from minio.policy import Policy
from minio.error import ResponseError
from analyzer.utils import env
from analyzer.path_utils import filename
from os.path import join
and context:
# Path: analyzer/project.py
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# class StoragePath(Enum):
# local = 1
# remote = 2
#
# def base_path(self, identifier):
# from analyzer.utils import env
#
# if self == StoragePath.local:
# data_path = env("DATA_DIR", "data")
# return os.path.join(data_path, identifier)
# elif self == StoragePath.remote:
# return identifier
#
# Path: analyzer/utils.py
# def env(name, default=None):
# return getenv(name, default)
#
# Path: analyzer/path_utils.py
# def filename(path):
# return basename(normpath(path))
which might include code, classes, or functions. Output only the next line. | self.upload_images(source_path, remote_path) |
Based on the snippet: <|code_start|>
BUCKET_NAME = "thesis-video-data"
STORE_HOST = env("STORE_HOST", "localhost")
ACCESS_KEY = env("STORE_ACCESS_KEY")
SECRET_KEY = env("STORE_SECRET_KEY")
class Uploader(object):
def __init__(self):
if STORE_HOST is None:
raise Exception("Missing minio host info")
if ACCESS_KEY is None or SECRET_KEY is None:
raise Exception("Missing minio credentials")
self.minio_client = Minio(STORE_HOST + ':9000',
access_key=ACCESS_KEY,
secret_key=SECRET_KEY,
secure=False)
try:
if not self.minio_client.bucket_exists(BUCKET_NAME):
self.minio_client.make_bucket(BUCKET_NAME, location="us-east-1")
self.minio_client.set_bucket_policy(BUCKET_NAME, "", Policy.READ_ONLY)
except ResponseError as err:
<|code_end|>
, predict the immediate next line with the help of imports:
import glob
from tqdm import tqdm
from analyzer.project import Project, StoragePath
from minio import Minio
from minio.policy import Policy
from minio.error import ResponseError
from analyzer.utils import env
from analyzer.path_utils import filename
from os.path import join
and context (classes, functions, sometimes code) from other files:
# Path: analyzer/project.py
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# class StoragePath(Enum):
# local = 1
# remote = 2
#
# def base_path(self, identifier):
# from analyzer.utils import env
#
# if self == StoragePath.local:
# data_path = env("DATA_DIR", "data")
# return os.path.join(data_path, identifier)
# elif self == StoragePath.remote:
# return identifier
#
# Path: analyzer/utils.py
# def env(name, default=None):
# return getenv(name, default)
#
# Path: analyzer/path_utils.py
# def filename(path):
# return basename(normpath(path))
. Output only the next line. | print(err) |
Given snippet: <|code_start|> raise Exception("Missing minio credentials")
self.minio_client = Minio(STORE_HOST + ':9000',
access_key=ACCESS_KEY,
secret_key=SECRET_KEY,
secure=False)
try:
if not self.minio_client.bucket_exists(BUCKET_NAME):
self.minio_client.make_bucket(BUCKET_NAME, location="us-east-1")
self.minio_client.set_bucket_policy(BUCKET_NAME, "", Policy.READ_ONLY)
except ResponseError as err:
print(err)
def upload_frames(self, project):
source_path = project.folder_path(Project.Folder.frames)
remote_path = project.folder_path(Project.Folder.frames, storage_env=StoragePath.remote)
self.upload_images(source_path, remote_path)
def upload_keyframes(self, project):
source_path = project.folder_path(Project.Folder.keyframes)
remote_path = project.folder_path(Project.Folder.keyframes, storage_env=StoragePath.remote)
self.upload_images(source_path, remote_path)
def upload_slices(self, project):
source_path = project.folder_path(Project.Folder.spatio)
remote_path = project.folder_path(Project.Folder.spatio, storage_env=StoragePath.remote)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import glob
from tqdm import tqdm
from analyzer.project import Project, StoragePath
from minio import Minio
from minio.policy import Policy
from minio.error import ResponseError
from analyzer.utils import env
from analyzer.path_utils import filename
from os.path import join
and context:
# Path: analyzer/project.py
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# class StoragePath(Enum):
# local = 1
# remote = 2
#
# def base_path(self, identifier):
# from analyzer.utils import env
#
# if self == StoragePath.local:
# data_path = env("DATA_DIR", "data")
# return os.path.join(data_path, identifier)
# elif self == StoragePath.remote:
# return identifier
#
# Path: analyzer/utils.py
# def env(name, default=None):
# return getenv(name, default)
#
# Path: analyzer/path_utils.py
# def filename(path):
# return basename(normpath(path))
which might include code, classes, or functions. Output only the next line. | self.upload_images(source_path, remote_path) |
Next line prediction: <|code_start|>#!/usr/bin/python
class Subtitle(Model):
def __init__(self, t1, t2, text, original_text=None, character=None):
self.t1 = Timestamp(t1) if (type(t1) == int) else t1
self.t2 = Timestamp(t2) if (type(t2) == int) else t2
self.text = text
<|code_end|>
. Use current file imports:
(import re
from analyzer import utils
from analyzer.timestamp import Timestamp
from analyzer.utils import flatten, Model)
and context including class names, function names, or small code snippets from other files:
# Path: analyzer/utils.py
# def env(name, default=None):
# def window(seq, n=2):
# def block_shaped(image, n_rows, n_cols):
# def flatten(l):
# def camel_to_underscore(name):
# def underscore_to_camel(name):
# def change_dict_naming_convention(d, convert_function):
# def as_dict(self, camel=True):
# def from_dict(cls, data, from_camel=True):
# def from_dicts(cls, data, from_camel=True):
# def to_mongo_dict(self):
# def to_mongo_dict(obj):
# def to_dict(obj):
# def props(obj):
# def image_filename(index, file_type="jpg"):
# def extract_filename(path):
# def extract_name(file):
# def basepath(path):
# def extract_index(path):
# def objects_as_dict(objects):
# def slice_paths(paths, limit):
# def derivative(distances):
# def crop_image(image, cx=20, cy=20):
# DEBUG = bool(int(getenv('DEBUG', False)))
# T = type(objects[0])
# class Model(object):
#
# Path: analyzer/timestamp.py
# class Timestamp(object):
# def __init__(self, val):
# if type(val) is str:
# timestamp_pattern = "(?:(?:(?:(\d?\d):)?(\d?\d):)?(\d?\d))?(?:[,.](\d?\d?\d))?"
# regex = re.compile(timestamp_pattern)
# regex_result = regex.match(val)
#
# if regex_result is not None:
# groups = regex_result.groups()
# hours = entity_to_int(groups[0])
# minutes = entity_to_int(groups[1])
# seconds = entity_to_int(groups[2])
# milliseconds = entity_to_int(groups[3])
#
# self.millis = (hours_to_millis(hours) +
# minutes_to_millis(minutes) +
# seconds_to_millis(seconds) +
# milliseconds)
# elif type(val) is int:
# self.millis = val
# else:
# raise Exception("Timestamp parse failed")
#
# def __str__(self):
# return "{}".format(self.millis)
#
# def __repr__(self):
# return self.__str__()
#
# def __add__(self, other):
# return Timestamp(self.millis + other.millis)
#
# def __sub__(self, other):
# return Timestamp(self.millis - other.millis)
#
# Path: analyzer/utils.py
# def flatten(l):
# return [item for sublist in l for item in sublist]
#
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
. Output only the next line. | self.original_text = text if text is not None else original_text |
Using the snippet: <|code_start|>#!/usr/bin/python
class Subtitle(Model):
def __init__(self, t1, t2, text, original_text=None, character=None):
self.t1 = Timestamp(t1) if (type(t1) == int) else t1
self.t2 = Timestamp(t2) if (type(t2) == int) else t2
self.text = text
self.original_text = text if text is not None else original_text
<|code_end|>
, determine the next line of code. You have imports:
import re
from analyzer import utils
from analyzer.timestamp import Timestamp
from analyzer.utils import flatten, Model
and context (class names, function names, or code) available:
# Path: analyzer/utils.py
# def env(name, default=None):
# def window(seq, n=2):
# def block_shaped(image, n_rows, n_cols):
# def flatten(l):
# def camel_to_underscore(name):
# def underscore_to_camel(name):
# def change_dict_naming_convention(d, convert_function):
# def as_dict(self, camel=True):
# def from_dict(cls, data, from_camel=True):
# def from_dicts(cls, data, from_camel=True):
# def to_mongo_dict(self):
# def to_mongo_dict(obj):
# def to_dict(obj):
# def props(obj):
# def image_filename(index, file_type="jpg"):
# def extract_filename(path):
# def extract_name(file):
# def basepath(path):
# def extract_index(path):
# def objects_as_dict(objects):
# def slice_paths(paths, limit):
# def derivative(distances):
# def crop_image(image, cx=20, cy=20):
# DEBUG = bool(int(getenv('DEBUG', False)))
# T = type(objects[0])
# class Model(object):
#
# Path: analyzer/timestamp.py
# class Timestamp(object):
# def __init__(self, val):
# if type(val) is str:
# timestamp_pattern = "(?:(?:(?:(\d?\d):)?(\d?\d):)?(\d?\d))?(?:[,.](\d?\d?\d))?"
# regex = re.compile(timestamp_pattern)
# regex_result = regex.match(val)
#
# if regex_result is not None:
# groups = regex_result.groups()
# hours = entity_to_int(groups[0])
# minutes = entity_to_int(groups[1])
# seconds = entity_to_int(groups[2])
# milliseconds = entity_to_int(groups[3])
#
# self.millis = (hours_to_millis(hours) +
# minutes_to_millis(minutes) +
# seconds_to_millis(seconds) +
# milliseconds)
# elif type(val) is int:
# self.millis = val
# else:
# raise Exception("Timestamp parse failed")
#
# def __str__(self):
# return "{}".format(self.millis)
#
# def __repr__(self):
# return self.__str__()
#
# def __add__(self, other):
# return Timestamp(self.millis + other.millis)
#
# def __sub__(self, other):
# return Timestamp(self.millis - other.millis)
#
# Path: analyzer/utils.py
# def flatten(l):
# return [item for sublist in l for item in sublist]
#
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
. Output only the next line. | self.character = character |
Predict the next line after this snippet: <|code_start|>#!/usr/bin/python
class Subtitle(Model):
def __init__(self, t1, t2, text, original_text=None, character=None):
self.t1 = Timestamp(t1) if (type(t1) == int) else t1
self.t2 = Timestamp(t2) if (type(t2) == int) else t2
self.text = text
self.original_text = text if text is not None else original_text
self.character = character
def __str__(self):
<|code_end|>
using the current file's imports:
import re
from analyzer import utils
from analyzer.timestamp import Timestamp
from analyzer.utils import flatten, Model
and any relevant context from other files:
# Path: analyzer/utils.py
# def env(name, default=None):
# def window(seq, n=2):
# def block_shaped(image, n_rows, n_cols):
# def flatten(l):
# def camel_to_underscore(name):
# def underscore_to_camel(name):
# def change_dict_naming_convention(d, convert_function):
# def as_dict(self, camel=True):
# def from_dict(cls, data, from_camel=True):
# def from_dicts(cls, data, from_camel=True):
# def to_mongo_dict(self):
# def to_mongo_dict(obj):
# def to_dict(obj):
# def props(obj):
# def image_filename(index, file_type="jpg"):
# def extract_filename(path):
# def extract_name(file):
# def basepath(path):
# def extract_index(path):
# def objects_as_dict(objects):
# def slice_paths(paths, limit):
# def derivative(distances):
# def crop_image(image, cx=20, cy=20):
# DEBUG = bool(int(getenv('DEBUG', False)))
# T = type(objects[0])
# class Model(object):
#
# Path: analyzer/timestamp.py
# class Timestamp(object):
# def __init__(self, val):
# if type(val) is str:
# timestamp_pattern = "(?:(?:(?:(\d?\d):)?(\d?\d):)?(\d?\d))?(?:[,.](\d?\d?\d))?"
# regex = re.compile(timestamp_pattern)
# regex_result = regex.match(val)
#
# if regex_result is not None:
# groups = regex_result.groups()
# hours = entity_to_int(groups[0])
# minutes = entity_to_int(groups[1])
# seconds = entity_to_int(groups[2])
# milliseconds = entity_to_int(groups[3])
#
# self.millis = (hours_to_millis(hours) +
# minutes_to_millis(minutes) +
# seconds_to_millis(seconds) +
# milliseconds)
# elif type(val) is int:
# self.millis = val
# else:
# raise Exception("Timestamp parse failed")
#
# def __str__(self):
# return "{}".format(self.millis)
#
# def __repr__(self):
# return self.__str__()
#
# def __add__(self, other):
# return Timestamp(self.millis + other.millis)
#
# def __sub__(self, other):
# return Timestamp(self.millis - other.millis)
#
# Path: analyzer/utils.py
# def flatten(l):
# return [item for sublist in l for item in sublist]
#
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
. Output only the next line. | return "t1: {}, t2: {}, text: {}, character: {}".format(self.t1, self.t2, self.text, self.character) |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/python
class Subtitle(Model):
def __init__(self, t1, t2, text, original_text=None, character=None):
self.t1 = Timestamp(t1) if (type(t1) == int) else t1
self.t2 = Timestamp(t2) if (type(t2) == int) else t2
self.text = text
self.original_text = text if text is not None else original_text
self.character = character
<|code_end|>
with the help of current file imports:
import re
from analyzer import utils
from analyzer.timestamp import Timestamp
from analyzer.utils import flatten, Model
and context from other files:
# Path: analyzer/utils.py
# def env(name, default=None):
# def window(seq, n=2):
# def block_shaped(image, n_rows, n_cols):
# def flatten(l):
# def camel_to_underscore(name):
# def underscore_to_camel(name):
# def change_dict_naming_convention(d, convert_function):
# def as_dict(self, camel=True):
# def from_dict(cls, data, from_camel=True):
# def from_dicts(cls, data, from_camel=True):
# def to_mongo_dict(self):
# def to_mongo_dict(obj):
# def to_dict(obj):
# def props(obj):
# def image_filename(index, file_type="jpg"):
# def extract_filename(path):
# def extract_name(file):
# def basepath(path):
# def extract_index(path):
# def objects_as_dict(objects):
# def slice_paths(paths, limit):
# def derivative(distances):
# def crop_image(image, cx=20, cy=20):
# DEBUG = bool(int(getenv('DEBUG', False)))
# T = type(objects[0])
# class Model(object):
#
# Path: analyzer/timestamp.py
# class Timestamp(object):
# def __init__(self, val):
# if type(val) is str:
# timestamp_pattern = "(?:(?:(?:(\d?\d):)?(\d?\d):)?(\d?\d))?(?:[,.](\d?\d?\d))?"
# regex = re.compile(timestamp_pattern)
# regex_result = regex.match(val)
#
# if regex_result is not None:
# groups = regex_result.groups()
# hours = entity_to_int(groups[0])
# minutes = entity_to_int(groups[1])
# seconds = entity_to_int(groups[2])
# milliseconds = entity_to_int(groups[3])
#
# self.millis = (hours_to_millis(hours) +
# minutes_to_millis(minutes) +
# seconds_to_millis(seconds) +
# milliseconds)
# elif type(val) is int:
# self.millis = val
# else:
# raise Exception("Timestamp parse failed")
#
# def __str__(self):
# return "{}".format(self.millis)
#
# def __repr__(self):
# return self.__str__()
#
# def __add__(self, other):
# return Timestamp(self.millis + other.millis)
#
# def __sub__(self, other):
# return Timestamp(self.millis - other.millis)
#
# Path: analyzer/utils.py
# def flatten(l):
# return [item for sublist in l for item in sublist]
#
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
, which may contain function names, class names, or code. Output only the next line. | def __str__(self): |
Given the following code snippet before the placeholder: <|code_start|> hist = centroid_histogram(clt)
cluster_centers = clt.cluster_centers_
bundle = sort_frequency_with_clusters(hist, cluster_centers)
clusters = rearrange_cluster(bundle)
return clusters
def sort_frequency_with_clusters(hist, cluster_centers):
cluster_centers = cluster_centers.astype(int).tolist()
hist = [round(val, 4) for val in hist]
bundle = list(zip(hist, cluster_centers))
bundle.sort(reverse=True)
return bundle
def centroid_histogram(clt):
num_labels = np.arange(0, len(np.unique(clt.labels_)) + 1)
(hist, _) = np.histogram(clt.labels_, bins=num_labels)
hist = hist.astype("float")
hist /= hist.sum()
return hist
<|code_end|>
, predict the next line using imports from the current file:
import cv2
import numpy as np
from sklearn.cluster import KMeans
from os import path
from tqdm import tqdm
from analyzer.project import Project
from analyzer.utils import image_filename
and context including class names, function names, and sometimes code from other files:
# Path: analyzer/project.py
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# Path: analyzer/utils.py
# def image_filename(index, file_type="jpg"):
# return '{:010d}.{}'.format(index + 1, file_type)
. Output only the next line. | def rearrange_cluster(colors): |
Predict the next line after this snippet: <|code_start|>
def colors_from_image(image_path, count):
gbr_image = cv2.imread(image_path)
rgb_image = cv2.cvtColor(gbr_image, cv2.COLOR_BGR2RGB)
img = rgb_image.reshape((rgb_image.shape[0] * rgb_image.shape[1], 3))
clt = KMeans(n_clusters=count)
clt.fit(img)
hist = centroid_histogram(clt)
cluster_centers = clt.cluster_centers_
bundle = sort_frequency_with_clusters(hist, cluster_centers)
clusters = rearrange_cluster(bundle)
return clusters
def sort_frequency_with_clusters(hist, cluster_centers):
cluster_centers = cluster_centers.astype(int).tolist()
<|code_end|>
using the current file's imports:
import cv2
import numpy as np
from sklearn.cluster import KMeans
from os import path
from tqdm import tqdm
from analyzer.project import Project
from analyzer.utils import image_filename
and any relevant context from other files:
# Path: analyzer/project.py
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# Path: analyzer/utils.py
# def image_filename(index, file_type="jpg"):
# return '{:010d}.{}'.format(index + 1, file_type)
. Output only the next line. | hist = [round(val, 4) for val in hist] |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
class ScriptEntity(Model):
def __init__(self, character, text, type="speech"):
self.character = character
self.type = type
self.original_text = text
self.text = text.rstrip().replace('\n', ' ') if type == 'speech' else text
def __str__(self):
return "type: {}, character: {}, text: \"{}\"".format(self.type, self.character, self.text)
def __repr__(self):
return self.__str__()
@classmethod
def from_dict(cls, data, from_camel=True):
d = Model.from_dict(data, from_camel)
return cls(d.get("character"), d.get("text"), type=d.get("type"))
<|code_end|>
. Write the next line using the current file's imports:
from analyzer.utils import Model
and context from other files:
# Path: analyzer/utils.py
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
, which may include functions, classes, or code. Output only the next line. | @classmethod |
Using the snippet: <|code_start|> return min(d / len(s1), 1)
def pretty_print_grid(alignment):
grid = alignment.grid
subtitle_index = alignment.vertical_index
script_index = alignment.horizontal_index
alignments = alignment.alignment_list
def search_indexes(i, j):
for alignment in alignments:
if alignment == (i, j):
return True
return False
max = 7
tail = ".."
print()
for i, (word, speech) in enumerate(script_index):
if i == 0:
print(" " * 7, end="")
w = (word[:5] + tail) if len(word) + len(tail) > max else word
print("{: ^7}".format(w), end="")
if i == len(script_index) - 1:
print()
for row in range(len(grid)):
<|code_end|>
, determine the next line of code. You have imports:
import editdistance
import numpy as np
from scipy.spatial import distance as dist
from tqdm import tqdm
from copy import deepcopy
from itertools import groupby
from analyzer.utils import window
from analyzer.alignment_utils import Alignment
from analyzer.script_entity import ScriptEntity
from analyzer.string_utils import remove_punctuation
and the available context (class names, function names, or code):
# Path: analyzer/utils.py
# def window(seq, n=2):
# """Returns a sliding window (of width n) over data from the iterable"""
# " s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
# it = iter(seq)
# result = tuple(islice(it, n))
# if len(result) == n:
# yield result
# for elem in it:
# result = result[1:] + (elem,)
# yield result
#
# Path: analyzer/alignment_utils.py
# class Alignment(object):
# old_vertical_index = None
# old_horizontal_index = None
#
# def __init__(self, alignment_list, vertical_index, horizontal_index, grid, traceback, subtitles=None):
# self.alignment_list = alignment_list
# self.vertical_index = vertical_index
# self.horizontal_index = horizontal_index
# self.grid = grid
# self.traceback = traceback
# self.subtitles = subtitles
#
# Path: analyzer/script_entity.py
# class ScriptEntity(Model):
# def __init__(self, character, text, type="speech"):
# self.character = character
# self.type = type
# self.original_text = text
#
# self.text = text.rstrip().replace('\n', ' ') if type == 'speech' else text
#
# def __str__(self):
# return "type: {}, character: {}, text: \"{}\"".format(self.type, self.character, self.text)
#
# def __repr__(self):
# return self.__str__()
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# d = Model.from_dict(data, from_camel)
# return cls(d.get("character"), d.get("text"), type=d.get("type"))
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# Path: analyzer/string_utils.py
# def remove_punctuation(text):
# exclude = set(string.punctuation)
# exclude.add("'")
#
# characters = []
# for character in text:
# if character not in exclude:
# characters.append(character)
# else:
# characters.append(" ")
#
# cleaned_text = "".join(characters)
# var = re.sub(' +', ' ', cleaned_text)
#
# return var
. Output only the next line. | for column in range(len(grid[row])): |
Predict the next line after this snippet: <|code_start|>
class ScoreMatrix(object):
def __init__(self, grid, traceback, distance_function):
self.grid = grid
self.traceback = traceback
self.distance_function = distance_function
def score(self, c1, c2):
return self.distance_function(c1, c2)
def binary_distance(s1, s2):
return 0 if s1 == s2 else 1
def levenstein_distance(s1, s2):
d = editdistance.eval(s1, s2)
return min(d / len(s1), 1)
<|code_end|>
using the current file's imports:
import editdistance
import numpy as np
from scipy.spatial import distance as dist
from tqdm import tqdm
from copy import deepcopy
from itertools import groupby
from analyzer.utils import window
from analyzer.alignment_utils import Alignment
from analyzer.script_entity import ScriptEntity
from analyzer.string_utils import remove_punctuation
and any relevant context from other files:
# Path: analyzer/utils.py
# def window(seq, n=2):
# """Returns a sliding window (of width n) over data from the iterable"""
# " s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
# it = iter(seq)
# result = tuple(islice(it, n))
# if len(result) == n:
# yield result
# for elem in it:
# result = result[1:] + (elem,)
# yield result
#
# Path: analyzer/alignment_utils.py
# class Alignment(object):
# old_vertical_index = None
# old_horizontal_index = None
#
# def __init__(self, alignment_list, vertical_index, horizontal_index, grid, traceback, subtitles=None):
# self.alignment_list = alignment_list
# self.vertical_index = vertical_index
# self.horizontal_index = horizontal_index
# self.grid = grid
# self.traceback = traceback
# self.subtitles = subtitles
#
# Path: analyzer/script_entity.py
# class ScriptEntity(Model):
# def __init__(self, character, text, type="speech"):
# self.character = character
# self.type = type
# self.original_text = text
#
# self.text = text.rstrip().replace('\n', ' ') if type == 'speech' else text
#
# def __str__(self):
# return "type: {}, character: {}, text: \"{}\"".format(self.type, self.character, self.text)
#
# def __repr__(self):
# return self.__str__()
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# d = Model.from_dict(data, from_camel)
# return cls(d.get("character"), d.get("text"), type=d.get("type"))
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# Path: analyzer/string_utils.py
# def remove_punctuation(text):
# exclude = set(string.punctuation)
# exclude.add("'")
#
# characters = []
# for character in text:
# if character not in exclude:
# characters.append(character)
# else:
# characters.append(" ")
#
# cleaned_text = "".join(characters)
# var = re.sub(' +', ' ', cleaned_text)
#
# return var
. Output only the next line. | def pretty_print_grid(alignment): |
Predict the next line after this snippet: <|code_start|>
class ScoreMatrix(object):
def __init__(self, grid, traceback, distance_function):
self.grid = grid
<|code_end|>
using the current file's imports:
import editdistance
import numpy as np
from scipy.spatial import distance as dist
from tqdm import tqdm
from copy import deepcopy
from itertools import groupby
from analyzer.utils import window
from analyzer.alignment_utils import Alignment
from analyzer.script_entity import ScriptEntity
from analyzer.string_utils import remove_punctuation
and any relevant context from other files:
# Path: analyzer/utils.py
# def window(seq, n=2):
# """Returns a sliding window (of width n) over data from the iterable"""
# " s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
# it = iter(seq)
# result = tuple(islice(it, n))
# if len(result) == n:
# yield result
# for elem in it:
# result = result[1:] + (elem,)
# yield result
#
# Path: analyzer/alignment_utils.py
# class Alignment(object):
# old_vertical_index = None
# old_horizontal_index = None
#
# def __init__(self, alignment_list, vertical_index, horizontal_index, grid, traceback, subtitles=None):
# self.alignment_list = alignment_list
# self.vertical_index = vertical_index
# self.horizontal_index = horizontal_index
# self.grid = grid
# self.traceback = traceback
# self.subtitles = subtitles
#
# Path: analyzer/script_entity.py
# class ScriptEntity(Model):
# def __init__(self, character, text, type="speech"):
# self.character = character
# self.type = type
# self.original_text = text
#
# self.text = text.rstrip().replace('\n', ' ') if type == 'speech' else text
#
# def __str__(self):
# return "type: {}, character: {}, text: \"{}\"".format(self.type, self.character, self.text)
#
# def __repr__(self):
# return self.__str__()
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# d = Model.from_dict(data, from_camel)
# return cls(d.get("character"), d.get("text"), type=d.get("type"))
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# Path: analyzer/string_utils.py
# def remove_punctuation(text):
# exclude = set(string.punctuation)
# exclude.add("'")
#
# characters = []
# for character in text:
# if character not in exclude:
# characters.append(character)
# else:
# characters.append(" ")
#
# cleaned_text = "".join(characters)
# var = re.sub(' +', ' ', cleaned_text)
#
# return var
. Output only the next line. | self.traceback = traceback |
Using the snippet: <|code_start|> resp = urllib.request.urlopen(req)
content = resp.read()
return ElementTree.fromstring(content), {'chapter': 'http://jvance.com/2008/ChapterGrabber'}
def parse_chapter_info(root, ns):
title = root.find('chapter:title', ns).text
ref = root.find('chapter:ref', ns)
id = int(ref.find('chapter:chapterSetId', ns).text)
confirmations = root.attrib["confirmations"]
source = root.find('chapter:source', ns)
duration = source.find('chapter:duration', ns).text
parsed_chapter_info = ChapterInfo(id, title, duration)
parsed_chapters = []
chapters = root.find('chapter:chapters', ns)
parsed_chapter_info.chapters = parsed_chapters
print("{}\t\t{}\t\t{}".format(id, confirmations, title))
for i, chapter in enumerate(chapters):
attrib = chapter.attrib
name = attrib["name"]
time = attrib["time"]
c = Chapter(i, name, time)
parsed_chapters.append(c)
<|code_end|>
, determine the next line of code. You have imports:
from xml.etree import ElementTree
from analyzer import utils
from analyzer.timestamp import Timestamp
from analyzer.utils import Model, env
import urllib
import urllib.request
and context (class names, function names, or code) available:
# Path: analyzer/utils.py
# def env(name, default=None):
# def window(seq, n=2):
# def block_shaped(image, n_rows, n_cols):
# def flatten(l):
# def camel_to_underscore(name):
# def underscore_to_camel(name):
# def change_dict_naming_convention(d, convert_function):
# def as_dict(self, camel=True):
# def from_dict(cls, data, from_camel=True):
# def from_dicts(cls, data, from_camel=True):
# def to_mongo_dict(self):
# def to_mongo_dict(obj):
# def to_dict(obj):
# def props(obj):
# def image_filename(index, file_type="jpg"):
# def extract_filename(path):
# def extract_name(file):
# def basepath(path):
# def extract_index(path):
# def objects_as_dict(objects):
# def slice_paths(paths, limit):
# def derivative(distances):
# def crop_image(image, cx=20, cy=20):
# DEBUG = bool(int(getenv('DEBUG', False)))
# T = type(objects[0])
# class Model(object):
#
# Path: analyzer/timestamp.py
# class Timestamp(object):
# def __init__(self, val):
# if type(val) is str:
# timestamp_pattern = "(?:(?:(?:(\d?\d):)?(\d?\d):)?(\d?\d))?(?:[,.](\d?\d?\d))?"
# regex = re.compile(timestamp_pattern)
# regex_result = regex.match(val)
#
# if regex_result is not None:
# groups = regex_result.groups()
# hours = entity_to_int(groups[0])
# minutes = entity_to_int(groups[1])
# seconds = entity_to_int(groups[2])
# milliseconds = entity_to_int(groups[3])
#
# self.millis = (hours_to_millis(hours) +
# minutes_to_millis(minutes) +
# seconds_to_millis(seconds) +
# milliseconds)
# elif type(val) is int:
# self.millis = val
# else:
# raise Exception("Timestamp parse failed")
#
# def __str__(self):
# return "{}".format(self.millis)
#
# def __repr__(self):
# return self.__str__()
#
# def __add__(self, other):
# return Timestamp(self.millis + other.millis)
#
# def __sub__(self, other):
# return Timestamp(self.millis - other.millis)
#
# Path: analyzer/utils.py
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
#
# def env(name, default=None):
# return getenv(name, default)
. Output only the next line. | return parsed_chapter_info |
Predict the next line for this snippet: <|code_start|> c1.t2 = c2.t1 - Timestamp(1) # minus 1ms
if i == last_index:
c2.t2 = duration
return chapters
def run(project, title):
results, ns = load_titles(title)
print("<ID>\t<CONFIRMATIONS>\t\t<TITLE>")
titles = parse_titles(results, ns)
valid = False
selected_id = None
while not valid:
answer = input('\n\nSelect id: ')
try:
selected_id = int(answer)
valid = True
except ValueError:
print("Invalid id\n")
chapter_infos = [chapter_info for chapter_info in titles if chapter_info.id == selected_id]
if len(chapter_infos) == 0:
raise Exception("Failed to find chapters")
chapter_info = chapter_infos[0]
<|code_end|>
with the help of the current file's imports:
from xml.etree import ElementTree
from analyzer import utils
from analyzer.timestamp import Timestamp
from analyzer.utils import Model, env
import urllib
import urllib.request
and context from other files:
# Path: analyzer/utils.py
# def env(name, default=None):
# def window(seq, n=2):
# def block_shaped(image, n_rows, n_cols):
# def flatten(l):
# def camel_to_underscore(name):
# def underscore_to_camel(name):
# def change_dict_naming_convention(d, convert_function):
# def as_dict(self, camel=True):
# def from_dict(cls, data, from_camel=True):
# def from_dicts(cls, data, from_camel=True):
# def to_mongo_dict(self):
# def to_mongo_dict(obj):
# def to_dict(obj):
# def props(obj):
# def image_filename(index, file_type="jpg"):
# def extract_filename(path):
# def extract_name(file):
# def basepath(path):
# def extract_index(path):
# def objects_as_dict(objects):
# def slice_paths(paths, limit):
# def derivative(distances):
# def crop_image(image, cx=20, cy=20):
# DEBUG = bool(int(getenv('DEBUG', False)))
# T = type(objects[0])
# class Model(object):
#
# Path: analyzer/timestamp.py
# class Timestamp(object):
# def __init__(self, val):
# if type(val) is str:
# timestamp_pattern = "(?:(?:(?:(\d?\d):)?(\d?\d):)?(\d?\d))?(?:[,.](\d?\d?\d))?"
# regex = re.compile(timestamp_pattern)
# regex_result = regex.match(val)
#
# if regex_result is not None:
# groups = regex_result.groups()
# hours = entity_to_int(groups[0])
# minutes = entity_to_int(groups[1])
# seconds = entity_to_int(groups[2])
# milliseconds = entity_to_int(groups[3])
#
# self.millis = (hours_to_millis(hours) +
# minutes_to_millis(minutes) +
# seconds_to_millis(seconds) +
# milliseconds)
# elif type(val) is int:
# self.millis = val
# else:
# raise Exception("Timestamp parse failed")
#
# def __str__(self):
# return "{}".format(self.millis)
#
# def __repr__(self):
# return self.__str__()
#
# def __add__(self, other):
# return Timestamp(self.millis + other.millis)
#
# def __sub__(self, other):
# return Timestamp(self.millis - other.millis)
#
# Path: analyzer/utils.py
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
#
# def env(name, default=None):
# return getenv(name, default)
, which may contain function names, class names, or code. Output only the next line. | chapter_info.chapters = add_end_to_chapters(chapter_info.chapters, chapter_info.duration) |
Given snippet: <|code_start|> for i, (c1, c2) in enumerate(windows):
c1.t2 = c2.t1 - Timestamp(1) # minus 1ms
if i == last_index:
c2.t2 = duration
return chapters
def run(project, title):
results, ns = load_titles(title)
print("<ID>\t<CONFIRMATIONS>\t\t<TITLE>")
titles = parse_titles(results, ns)
valid = False
selected_id = None
while not valid:
answer = input('\n\nSelect id: ')
try:
selected_id = int(answer)
valid = True
except ValueError:
print("Invalid id\n")
chapter_infos = [chapter_info for chapter_info in titles if chapter_info.id == selected_id]
if len(chapter_infos) == 0:
raise Exception("Failed to find chapters")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from xml.etree import ElementTree
from analyzer import utils
from analyzer.timestamp import Timestamp
from analyzer.utils import Model, env
import urllib
import urllib.request
and context:
# Path: analyzer/utils.py
# def env(name, default=None):
# def window(seq, n=2):
# def block_shaped(image, n_rows, n_cols):
# def flatten(l):
# def camel_to_underscore(name):
# def underscore_to_camel(name):
# def change_dict_naming_convention(d, convert_function):
# def as_dict(self, camel=True):
# def from_dict(cls, data, from_camel=True):
# def from_dicts(cls, data, from_camel=True):
# def to_mongo_dict(self):
# def to_mongo_dict(obj):
# def to_dict(obj):
# def props(obj):
# def image_filename(index, file_type="jpg"):
# def extract_filename(path):
# def extract_name(file):
# def basepath(path):
# def extract_index(path):
# def objects_as_dict(objects):
# def slice_paths(paths, limit):
# def derivative(distances):
# def crop_image(image, cx=20, cy=20):
# DEBUG = bool(int(getenv('DEBUG', False)))
# T = type(objects[0])
# class Model(object):
#
# Path: analyzer/timestamp.py
# class Timestamp(object):
# def __init__(self, val):
# if type(val) is str:
# timestamp_pattern = "(?:(?:(?:(\d?\d):)?(\d?\d):)?(\d?\d))?(?:[,.](\d?\d?\d))?"
# regex = re.compile(timestamp_pattern)
# regex_result = regex.match(val)
#
# if regex_result is not None:
# groups = regex_result.groups()
# hours = entity_to_int(groups[0])
# minutes = entity_to_int(groups[1])
# seconds = entity_to_int(groups[2])
# milliseconds = entity_to_int(groups[3])
#
# self.millis = (hours_to_millis(hours) +
# minutes_to_millis(minutes) +
# seconds_to_millis(seconds) +
# milliseconds)
# elif type(val) is int:
# self.millis = val
# else:
# raise Exception("Timestamp parse failed")
#
# def __str__(self):
# return "{}".format(self.millis)
#
# def __repr__(self):
# return self.__str__()
#
# def __add__(self, other):
# return Timestamp(self.millis + other.millis)
#
# def __sub__(self, other):
# return Timestamp(self.millis - other.millis)
#
# Path: analyzer/utils.py
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
#
# def env(name, default=None):
# return getenv(name, default)
which might include code, classes, or functions. Output only the next line. | chapter_info = chapter_infos[0] |
Predict the next line after this snippet: <|code_start|> def __str__(self):
return "{:>2}: {} - {}: {}".format(self.id, self.t1, self.t2, self.name)
def __repr__(self):
return "{:>2}: {} - {}: {}".format(self.id, self.t1, self.t2, self.name)
def as_dict(self, camel=True):
d = Model.as_dict(self)
d["t1"] = self.t1.millis
d["t2"] = self.t2.millis
return d
@classmethod
def from_dict(cls, data, from_camel=True):
d = Model.from_dict(data, from_camel)
return cls(d.get("id"), d.get("name"), d.get("t1"), d.get("t2"))
def load_titles(title):
encoded_title = urllib.request.quote(title, safe='')
query = "search?title={}".format(encoded_title)
url = BASE_URL + query
req = urllib.request.Request(url)
req.add_header('ApiKey', API_KEY)
resp = urllib.request.urlopen(req)
content = resp.read()
<|code_end|>
using the current file's imports:
from xml.etree import ElementTree
from analyzer import utils
from analyzer.timestamp import Timestamp
from analyzer.utils import Model, env
import urllib
import urllib.request
and any relevant context from other files:
# Path: analyzer/utils.py
# def env(name, default=None):
# def window(seq, n=2):
# def block_shaped(image, n_rows, n_cols):
# def flatten(l):
# def camel_to_underscore(name):
# def underscore_to_camel(name):
# def change_dict_naming_convention(d, convert_function):
# def as_dict(self, camel=True):
# def from_dict(cls, data, from_camel=True):
# def from_dicts(cls, data, from_camel=True):
# def to_mongo_dict(self):
# def to_mongo_dict(obj):
# def to_dict(obj):
# def props(obj):
# def image_filename(index, file_type="jpg"):
# def extract_filename(path):
# def extract_name(file):
# def basepath(path):
# def extract_index(path):
# def objects_as_dict(objects):
# def slice_paths(paths, limit):
# def derivative(distances):
# def crop_image(image, cx=20, cy=20):
# DEBUG = bool(int(getenv('DEBUG', False)))
# T = type(objects[0])
# class Model(object):
#
# Path: analyzer/timestamp.py
# class Timestamp(object):
# def __init__(self, val):
# if type(val) is str:
# timestamp_pattern = "(?:(?:(?:(\d?\d):)?(\d?\d):)?(\d?\d))?(?:[,.](\d?\d?\d))?"
# regex = re.compile(timestamp_pattern)
# regex_result = regex.match(val)
#
# if regex_result is not None:
# groups = regex_result.groups()
# hours = entity_to_int(groups[0])
# minutes = entity_to_int(groups[1])
# seconds = entity_to_int(groups[2])
# milliseconds = entity_to_int(groups[3])
#
# self.millis = (hours_to_millis(hours) +
# minutes_to_millis(minutes) +
# seconds_to_millis(seconds) +
# milliseconds)
# elif type(val) is int:
# self.millis = val
# else:
# raise Exception("Timestamp parse failed")
#
# def __str__(self):
# return "{}".format(self.millis)
#
# def __repr__(self):
# return self.__str__()
#
# def __add__(self, other):
# return Timestamp(self.millis + other.millis)
#
# def __sub__(self, other):
# return Timestamp(self.millis - other.millis)
#
# Path: analyzer/utils.py
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
#
# def env(name, default=None):
# return getenv(name, default)
. Output only the next line. | return ElementTree.fromstring(content), {'chapter': 'http://jvance.com/2008/ChapterGrabber'} |
Given the code snippet: <|code_start|> local_base_path = StoragePath.local.base_path(self.identifier)
create_directory(local_base_path)
class Folder(Enum):
frames = 1
keyframes = 2
keyframe_thumbnails = 3
spatio = 4
plots = 5
def __str__(self):
return {
Project.Folder.frames: "frames",
Project.Folder.keyframes: "keyframes",
Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
Project.Folder.spatio: "spatio_temporal_slices",
Project.Folder.plots: "plots",
}[self]
class File(Enum):
shots = 1
chapters = 2
subtitles = 3
merged_subtitles = 4
original_subtitles = 5
script = 6
keyframe_montage = 7
shot_change_ratio = 8
def __str__(self):
<|code_end|>
, generate the next line using the imports in this file:
import json
import os
import re
from enum import Enum
from analyzer.path_utils import create_directory
from analyzer.utils import Model
from analyzer.utils import env
and context (functions, classes, or occasionally code) from other files:
# Path: analyzer/path_utils.py
# def create_directory(directory):
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# Path: analyzer/utils.py
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
. Output only the next line. | return { |
Based on the snippet: <|code_start|>
def __init__(self, title):
self.name = title
identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
self.identifier = identifier.replace(" ", "_")
local_base_path = StoragePath.local.base_path(self.identifier)
create_directory(local_base_path)
class Folder(Enum):
frames = 1
keyframes = 2
keyframe_thumbnails = 3
spatio = 4
plots = 5
def __str__(self):
return {
Project.Folder.frames: "frames",
Project.Folder.keyframes: "keyframes",
Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
Project.Folder.spatio: "spatio_temporal_slices",
Project.Folder.plots: "plots",
}[self]
class File(Enum):
shots = 1
chapters = 2
subtitles = 3
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import os
import re
from enum import Enum
from analyzer.path_utils import create_directory
from analyzer.utils import Model
from analyzer.utils import env
and context (classes, functions, sometimes code) from other files:
# Path: analyzer/path_utils.py
# def create_directory(directory):
# if not os.path.exists(directory):
# os.makedirs(directory)
#
# Path: analyzer/utils.py
# class Model(object):
# def as_dict(self, camel=True):
# dictionary = to_dict(self)
# if camel:
# return change_dict_naming_convention(dictionary, underscore_to_camel)
# else:
# return dictionary
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# if from_camel:
# return change_dict_naming_convention(data, camel_to_underscore)
# else:
# return data
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
#
# def to_mongo_dict(self):
# dictionary = self.as_dict()
# return to_mongo_dict(dictionary)
. Output only the next line. | merged_subtitles = 4 |
Continue the code snippet: <|code_start|> is_web_page_fetched = False
original_encoding = None
while not is_web_page_fetched:
# get the script's URL from the parameters if it was passed
if script_url == '' and url is not None:
script_url = url
else:
print('Please provide the URL of a movie script you want to see parsed as JSON.')
print('The parser was intended to work with imsdb.com, and you must provide a full URL (with http[s]://)')
script_url = input('--> ')
try:
request = urllib.request.Request(script_url)
webpage_bytes = urllib.request.urlopen(request)
soup = BeautifulSoup(webpage_bytes, 'lxml')
original_encoding = soup.original_encoding
print('Detected encoding is ', soup.original_encoding)
is_web_page_fetched = True
except urllib.error.URLError as err:
print('Catched an URLError while fetching the URL:', err)
print()
pass
except ValueError as err:
print('Catched a ValueError while fetching the URL:', err)
print()
pass
except:
print('Catched an unrecognized error')
<|code_end|>
. Use current file imports:
import re
import urllib.request
from pprint import pprint
from bs4 import BeautifulSoup, Tag, UnicodeDammit
from analyzer.script_entity import ScriptEntity
and context (classes, functions, or code) from other files:
# Path: analyzer/script_entity.py
# class ScriptEntity(Model):
# def __init__(self, character, text, type="speech"):
# self.character = character
# self.type = type
# self.original_text = text
#
# self.text = text.rstrip().replace('\n', ' ') if type == 'speech' else text
#
# def __str__(self):
# return "type: {}, character: {}, text: \"{}\"".format(self.type, self.character, self.text)
#
# def __repr__(self):
# return self.__str__()
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# d = Model.from_dict(data, from_camel)
# return cls(d.get("character"), d.get("text"), type=d.get("type"))
#
# @classmethod
# def from_dicts(cls, data, from_camel=True):
# return [cls.from_dict(d) for d in data]
. Output only the next line. | raise |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
class ShotModelTests(unittest.TestCase):
def setUp(self):
self._sut = Shot(0, 10, 0)
def test_shot_model(self):
assert self._sut.length == 10
assert self._sut.duration == 400
assert self._sut.keyframe.index == 0
assert self._sut.keyframe.colors is None
assert self._sut.keyframe.labels is None
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from analyzer.shot_detection import Shot, Label
from analyzer.subtitles_parser import Subtitle
from analyzer.timestamp import Timestamp
and context including class names, function names, and sometimes code from other files:
# Path: analyzer/shot_detection.py
# class Shot(Model):
# def __init__(self, start_index=None, end_index=None, id=None, relative_diff=None):
# if end_index:
# self.keyframe = Keyframe(start_index)
# self.end_index = end_index
# self.length = end_index - start_index
# self.duration = int(self.length / 25 * 1000)
# else:
# self.keyframe = None
# self.end_index = None
# self.duration = None
#
# self.start_index = start_index
# self.id = id
# self.start_diff = relative_diff
#
# def as_dict(self, camel=True):
# d = Model.as_dict(self)
# if self.keyframe.labels:
# d["keyframe"]["labels"] = [label.as_dict() for label in self.keyframe.labels]
#
# return d
#
# def to_mongo_dict(self):
# d = self.as_dict()
# return utils.to_mongo_dict(d)
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# d = Model.from_dict(data, from_camel)
# shot = Shot(d.get("start_index"), d.get("end_index"), d.get("id"), d.get("relative_diff"))
#
# if "labels" in d["keyframe"] and d["keyframe"]["labels"] is not None:
# shot.keyframe.labels = [Label.from_dict(l) for l in (d["keyframe"]["labels"])]
#
# if "colors" in d["keyframe"] and d["keyframe"]["colors"] is not None:
# shot.keyframe.colors = [l for l in (d["keyframe"]["colors"])]
#
# return shot
#
# def __str__(self):
# return "id: {}, start_index: {} length: {}, end_index: {}" \
# .format(self.id, self.start_index, self.length, self.end_index)
#
# def __repr__(self):
# return self.__str__()
#
# class Label(Model):
# def __init__(self, description, score):
# self.description = description
# self.score = score
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# d = Model.from_dict(data, from_camel)
# return cls(d.get("description"), d.get("score"))
#
# def __str__(self):
# return "{:<12} => {}".format(self.description, self.score)
#
# def __repr__(self):
# return self.__str__()
#
# Path: analyzer/subtitles_parser.py
# class Subtitle(Model):
# def __init__(self, t1, t2, text, original_text=None, character=None):
# self.t1 = Timestamp(t1) if (type(t1) == int) else t1
# self.t2 = Timestamp(t2) if (type(t2) == int) else t2
# self.text = text
# self.original_text = text if text is not None else original_text
# self.character = character
#
# def __str__(self):
# return "t1: {}, t2: {}, text: {}, character: {}".format(self.t1, self.t2, self.text, self.character)
#
# def __repr__(self):
# return self.__str__()
#
# def as_dict(self, camel=True):
# d = Model.as_dict(self)
# d["t1"] = self.t1.millis
# d["t2"] = self.t2.millis
#
# return d
#
# def to_mongo_dict(self):
# d = self.as_dict()
# return utils.to_mongo_dict(d)
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# d = Model.from_dict(data, from_camel)
#
# return cls(d["t1"], d["t2"], d["text"], d["original_text"], d["character"])
#
# Path: analyzer/timestamp.py
# class Timestamp(object):
# def __init__(self, val):
# if type(val) is str:
# timestamp_pattern = "(?:(?:(?:(\d?\d):)?(\d?\d):)?(\d?\d))?(?:[,.](\d?\d?\d))?"
# regex = re.compile(timestamp_pattern)
# regex_result = regex.match(val)
#
# if regex_result is not None:
# groups = regex_result.groups()
# hours = entity_to_int(groups[0])
# minutes = entity_to_int(groups[1])
# seconds = entity_to_int(groups[2])
# milliseconds = entity_to_int(groups[3])
#
# self.millis = (hours_to_millis(hours) +
# minutes_to_millis(minutes) +
# seconds_to_millis(seconds) +
# milliseconds)
# elif type(val) is int:
# self.millis = val
# else:
# raise Exception("Timestamp parse failed")
#
# def __str__(self):
# return "{}".format(self.millis)
#
# def __repr__(self):
# return self.__str__()
#
# def __add__(self, other):
# return Timestamp(self.millis + other.millis)
#
# def __sub__(self, other):
# return Timestamp(self.millis - other.millis)
. Output only the next line. | def test_camel_case(self): |
Using the snippet: <|code_start|>
BUCKET_NAME = "thesis-video-data"
ACCESS_KEY = env("AWS_ACCESS_KEY")
SECRET_KEY = env("AWS_SECRET_KEY")
LOCATION = env("AWS_LOCATION")
def connect_bucket():
print(ACCESS_KEY, SECRET_KEY)
session = Session(aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
region_name=LOCATION)
s3 = session.resource("s3")
try:
s3.meta.client.head_bucket(Bucket=BUCKET_NAME)
bucket = s3.Bucket(BUCKET_NAME)
except ClientError:
bucket = s3.create_bucket(Bucket=BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': 'eu-central-1'})
<|code_end|>
, determine the next line of code. You have imports:
import boto3
import glob
from boto3.session import Session
from botocore.client import ClientError
from os.path import join
from analyzer.path_utils import filename
from analyzer.project import StoragePath, Project
from analyzer.utils import env
and context (class names, function names, or code) available:
# Path: analyzer/path_utils.py
# def filename(path):
# return basename(normpath(path))
#
# Path: analyzer/project.py
# class StoragePath(Enum):
# local = 1
# remote = 2
#
# def base_path(self, identifier):
# from analyzer.utils import env
#
# if self == StoragePath.local:
# data_path = env("DATA_DIR", "data")
# return os.path.join(data_path, identifier)
# elif self == StoragePath.remote:
# return identifier
#
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# Path: analyzer/utils.py
# def env(name, default=None):
# return getenv(name, default)
. Output only the next line. | return bucket |
Next line prediction: <|code_start|>
ACCESS_KEY = env("AWS_ACCESS_KEY")
SECRET_KEY = env("AWS_SECRET_KEY")
LOCATION = env("AWS_LOCATION")
def connect_bucket():
print(ACCESS_KEY, SECRET_KEY)
session = Session(aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
region_name=LOCATION)
s3 = session.resource("s3")
try:
s3.meta.client.head_bucket(Bucket=BUCKET_NAME)
bucket = s3.Bucket(BUCKET_NAME)
except ClientError:
bucket = s3.create_bucket(Bucket=BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': 'eu-central-1'})
return bucket
def upload_frames(project):
source_path = project.folder_path(Project.Folder.frames)
remote_path = project.folder_path(Project.Folder.frames, storage_env=StoragePath.remote)
upload_images(source_path, remote_path)
<|code_end|>
. Use current file imports:
(import boto3
import glob
from boto3.session import Session
from botocore.client import ClientError
from os.path import join
from analyzer.path_utils import filename
from analyzer.project import StoragePath, Project
from analyzer.utils import env)
and context including class names, function names, or small code snippets from other files:
# Path: analyzer/path_utils.py
# def filename(path):
# return basename(normpath(path))
#
# Path: analyzer/project.py
# class StoragePath(Enum):
# local = 1
# remote = 2
#
# def base_path(self, identifier):
# from analyzer.utils import env
#
# if self == StoragePath.local:
# data_path = env("DATA_DIR", "data")
# return os.path.join(data_path, identifier)
# elif self == StoragePath.remote:
# return identifier
#
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# Path: analyzer/utils.py
# def env(name, default=None):
# return getenv(name, default)
. Output only the next line. | def upload_keyframes(project): |
Predict the next line for this snippet: <|code_start|>
BUCKET_NAME = "thesis-video-data"
ACCESS_KEY = env("AWS_ACCESS_KEY")
SECRET_KEY = env("AWS_SECRET_KEY")
LOCATION = env("AWS_LOCATION")
def connect_bucket():
print(ACCESS_KEY, SECRET_KEY)
session = Session(aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
<|code_end|>
with the help of current file imports:
import boto3
import glob
from boto3.session import Session
from botocore.client import ClientError
from os.path import join
from analyzer.path_utils import filename
from analyzer.project import StoragePath, Project
from analyzer.utils import env
and context from other files:
# Path: analyzer/path_utils.py
# def filename(path):
# return basename(normpath(path))
#
# Path: analyzer/project.py
# class StoragePath(Enum):
# local = 1
# remote = 2
#
# def base_path(self, identifier):
# from analyzer.utils import env
#
# if self == StoragePath.local:
# data_path = env("DATA_DIR", "data")
# return os.path.join(data_path, identifier)
# elif self == StoragePath.remote:
# return identifier
#
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# Path: analyzer/utils.py
# def env(name, default=None):
# return getenv(name, default)
, which may contain function names, class names, or code. Output only the next line. | region_name=LOCATION) |
Predict the next line for this snippet: <|code_start|> source_path = project.folder_path(Project.Folder.keyframes)
remote_path = project.folder_path(Project.Folder.keyframes, storage_env=StoragePath.remote)
upload_images(source_path, remote_path)
def upload_slices(project):
source_path = project.folder_path(Project.Folder.spatio)
remote_path = project.folder_path(Project.Folder.spatio, storage_env=StoragePath.remote)
upload_images(source_path, remote_path)
def upload_images(source_path, destination_path):
bucket = connect_bucket()
image_paths = sorted(glob.glob(join(source_path, "*.jpg")))
for path in image_paths:
remote_path = join(destination_path, filename(path))
data = open(path, 'rb')
bucket.put_object(Key=remote_path, Body=data)
def upload_keyframes_montage(project):
bucket = connect_bucket()
path = project.file_path(Project.File.keyframe_montage)
remote_path = join(project.folder_path("", storage_env=StoragePath.remote), filename(path))
<|code_end|>
with the help of current file imports:
import boto3
import glob
from boto3.session import Session
from botocore.client import ClientError
from os.path import join
from analyzer.path_utils import filename
from analyzer.project import StoragePath, Project
from analyzer.utils import env
and context from other files:
# Path: analyzer/path_utils.py
# def filename(path):
# return basename(normpath(path))
#
# Path: analyzer/project.py
# class StoragePath(Enum):
# local = 1
# remote = 2
#
# def base_path(self, identifier):
# from analyzer.utils import env
#
# if self == StoragePath.local:
# data_path = env("DATA_DIR", "data")
# return os.path.join(data_path, identifier)
# elif self == StoragePath.remote:
# return identifier
#
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# Path: analyzer/utils.py
# def env(name, default=None):
# return getenv(name, default)
, which may contain function names, class names, or code. Output only the next line. | data = open(path, 'rb') |
Given the code snippet: <|code_start|>
DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
def run(project, shots):
def shot_to_keyframe_path(shot):
<|code_end|>
, generate the next line using the imports in this file:
import base64
from os import path
from tqdm import tqdm
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from analyzer import utils
from analyzer.project import Project
from analyzer.shot_detection import Label
and context (functions, classes, or occasionally code) from other files:
# Path: analyzer/utils.py
# def env(name, default=None):
# def window(seq, n=2):
# def block_shaped(image, n_rows, n_cols):
# def flatten(l):
# def camel_to_underscore(name):
# def underscore_to_camel(name):
# def change_dict_naming_convention(d, convert_function):
# def as_dict(self, camel=True):
# def from_dict(cls, data, from_camel=True):
# def from_dicts(cls, data, from_camel=True):
# def to_mongo_dict(self):
# def to_mongo_dict(obj):
# def to_dict(obj):
# def props(obj):
# def image_filename(index, file_type="jpg"):
# def extract_filename(path):
# def extract_name(file):
# def basepath(path):
# def extract_index(path):
# def objects_as_dict(objects):
# def slice_paths(paths, limit):
# def derivative(distances):
# def crop_image(image, cx=20, cy=20):
# DEBUG = bool(int(getenv('DEBUG', False)))
# T = type(objects[0])
# class Model(object):
#
# Path: analyzer/project.py
# class Project(Model):
# keyframe_montage_size = None
# keyframe_size = None
#
# def __init__(self, title):
# self.name = title
#
# identifier = re.sub('[^a-zA-Z0-9-_*.]', ' ', title.lower())
# self.identifier = identifier.replace(" ", "_")
#
# local_base_path = StoragePath.local.base_path(self.identifier)
# create_directory(local_base_path)
#
# class Folder(Enum):
# frames = 1
# keyframes = 2
# keyframe_thumbnails = 3
# spatio = 4
# plots = 5
#
# def __str__(self):
# return {
# Project.Folder.frames: "frames",
# Project.Folder.keyframes: "keyframes",
# Project.Folder.keyframe_thumbnails: "keyframe_thumbnails",
# Project.Folder.spatio: "spatio_temporal_slices",
# Project.Folder.plots: "plots",
# }[self]
#
# class File(Enum):
# shots = 1
# chapters = 2
# subtitles = 3
# merged_subtitles = 4
# original_subtitles = 5
# script = 6
# keyframe_montage = 7
# shot_change_ratio = 8
#
# def __str__(self):
# return {
# Project.File.shots: "shots.json",
# Project.File.chapters: "chapters.json",
# Project.File.subtitles: "subtitles.json",
# Project.File.original_subtitles: "subtitles.srt",
# Project.File.merged_subtitles: "merged_subtitles.json",
# Project.File.script: "script.json",
# Project.File.keyframe_montage: "keyframe_montage.jpg",
# Project.File.shot_change_ratio: "shot_change_ratio.json",
# }[self]
#
# def setup(self):
# for folder_type in Project.Folder:
# self.folder_path(folder_type)
#
# @staticmethod
# def file_exists(path):
# return os.path.exists(path)
#
# def folder_path(self, folder_type, destination=None, storage_env=StoragePath.local):
# assert folder_type is not None
#
# folder = str(folder_type) if type(folder_type) == Project.Folder else folder_type
#
# if destination:
# return self.__folder_path(destination, storage_env)
# else:
# base_path = storage_env.base_path(self.identifier)
# default_path = os.path.join(base_path, folder)
# return self.__folder_path(default_path, storage_env)
#
# def file_path(self, file_type, destination=None, storage_env=StoragePath.local):
# return self.__file_path(str(file_type), destination, storage_env)
#
# def write(self, data, file_type, destination=None):
# destination = self.__file_path(str(file_type), destination)
# write_json(destination, data)
#
# def read(self, file_type):
# path = self.__file_path(str(file_type))
# with open(path) as data:
# return json.load(data)
#
# def __folder_path(self, folder, storage_env=StoragePath.local):
# if storage_env == StoragePath.local:
# create_directory(folder)
#
# return folder
#
# def __file_path(self, filename, full_path=None, storage_env=StoragePath.local):
# if full_path is None:
# base_path = storage_env.base_path(self.identifier)
# return os.path.join(base_path, filename)
#
# if os.path.isdir(full_path):
# full_path = os.path.join(full_path, filename)
#
# dir = os.path.dirname(full_path)
# create_directory(dir)
# return full_path
#
# Path: analyzer/shot_detection.py
# class Label(Model):
# def __init__(self, description, score):
# self.description = description
# self.score = score
#
# @classmethod
# def from_dict(cls, data, from_camel=True):
# d = Model.from_dict(data, from_camel)
# return cls(d.get("description"), d.get("score"))
#
# def __str__(self):
# return "{:<12} => {}".format(self.description, self.score)
#
# def __repr__(self):
# return self.__str__()
. Output only the next line. | index = shot.keyframe.index |
Next line prediction: <|code_start|> names = filter(lambda x: re.match(name_regex, x), l.split(' '))
# require at least first and last name
if len(names) >= 2:
# search by last name
try:
user = User.objects.get(last_name=names[-1])
# found with unique last name
listed_members += [ user.get_profile() ]
print 'found %s by unique last name %s' % (user.get_full_name(), names[-1])
# and done
continue
except User.MultipleObjectsReturned:
# check first name
candidates = filter(lambda x: names[0] in x.first_name, User.objects.filter(last_name=names[-1]))
if len(candidates) == 1:
# found with unique first and last name pair
user = candidates[0]
listed_members += [ user.get_profile() ]
print 'found %s by unique first and last name pair %s %s' % (user.get_full_name(), names[0], names[-1])
# and done
continue
else:
# not found, try another method
pass
except User.DoesNotExist:
# not found, try another method
pass
# give up and ask operator
user = None
<|code_end|>
. Use current file imports:
(from django.core.management.base import BaseCommand
from django.core.management import CommandError
from django.conf import settings
from django.db.models import Q
from django.contrib.auth.models import User
from cms.models import Member
from sys import stdin
import re)
and context including class names, function names, or small code snippets from other files:
# Path: cms/models.py
# class Member(models.Model):
# GROUP_CHOICES = (
# (u'graduate', u'Graduate'),
# (u'undergraduate', u'Undergraduate'),
# (u'faculty', u'Faculty')
# )
#
# CLASS_CHOICES = (
# (u'freshman', u'Freshman'),
# (u'sophomore', u'Sophomore'),
# (u'junior', u'Junior'),
# (u'senior', u'Senior')
# )
#
# STATUS_EMPTY = 0 # this member is pending "creation" by its owner
# STATUS_ACTIVE = 1 # this member has been created
# STATUS_ARCHIVED = 2 # this member has been archived and is frozen
#
# STATUS_CHOICES = (
# (STATUS_EMPTY, u'Empty'),
# (STATUS_ACTIVE, u'Active'),
# (STATUS_ARCHIVED, u'Archived')
# )
#
# user = models.ForeignKey(User, related_name='profile', unique=True)
# group = models.CharField(max_length=255, choices=GROUP_CHOICES)
# classification = models.CharField(max_length=255, choices=CLASS_CHOICES, blank=True)
# hometown = models.CharField(max_length=255, blank=True)
# interests = models.TextField(blank=True)
# homepage = models.URLField(blank=True)
# blurb = models.TextField(blank=True)
# image = models.ImageField(upload_to=make_member_image_name, storage=OverwriteStorage(), blank=True)
# status = models.IntegerField(choices=STATUS_CHOICES)
# #activation_key = models.CharField(max_length=255, blank=True)
#
# def __unicode__(self):
# return unicode(self.user.get_full_name())
#
# @models.permalink
# def get_absolute_url(self):
# return ('cms:profile_url', (self.pk,), {})
#
# def generate_hashed_email(self):
# return hashlib.md5(self.user.email).hexdigest()
#
# def get_coordinated_projects(self):
# return Project.objects.filter(pk__in=ProjectMember.objects.filter(member__pk=self.pk, is_coordinator=True).values_list('project__pk', flat=True))
#
# @staticmethod
# def get_possible_project_members():
# # need to allow member one year old as well since activity status is predicated on project membership
# # but you can't create a project for a new year without a project coordinator
# # hence, the one year offset avoids the chicken-and-the-egg problem
# #return Member.objects.filter(Q(status=Member.STATUS_ACTIVE) | Q(pk__in=ProjectMember.objects.filter(project__year__gte= \
# # settings.CURRENT_YEAR-1).distinct().values_list('member'))).order_by('user__first_name', 'user__last_name')
# return Member.objects.exclude(status=Member.STATUS_ARCHIVED).order_by('user__first_name', 'user__last_name')
#
# class Meta:
# verbose_name = 'member'
# verbose_name_plural = 'members'
# ordering = ['user__first_name']
. Output only the next line. | while user is None: |
Based on the snippet: <|code_start|>
register = template.Library()
@register.filter
@stringfilter
def stripjs(value):
stripped = re.compile(r'<script(?:\s[^>]*)?(>(?:.(?!/script>))*</script>|/>)', re.S).sub('', force_unicode(value))
return mark_safe(stripped)
@register.filter
def logged_in(user):
if user is None or user.id is None:
return False
return user.id != -1
@register.filter
<|code_end|>
, predict the immediate next line with the help of imports:
import re
import calendar
from django import template
from django.conf import settings
from django.contrib.auth.models import User
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from cms import permissions
and context (classes, functions, sometimes code) from other files:
# Path: cms/permissions.py
# def is_user_slc_leader(user):
# def can_user_create_project(user):
# def can_user_edit_project(user, project):
# def can_user_delete_project(user, project):
# def can_user_demote_project_coordinators(user, project):
# def can_user_create_member(user):
# def can_user_edit_member(user, member):
# def can_user_delete_member(user, member):
# def can_user_archive_member(user, member):
# def can_user_reactivate_member(user, member):
# def can_user_post_as_member(user, member):
# def can_user_edit_blogpost(user, blogpost):
# def can_user_create_page(user):
# def can_user_edit_page(user, page):
# def can_user_delete_page(user, page):
# def can_user_delete_sponsor(user):
# def can_user_create_sponsor(user):
. Output only the next line. | def is_slc_leader(user): |
Continue the code snippet: <|code_start|>
class UserBlogFeed(Feed):
description_template = "blogs/blog_rss.html"
def get_object(self, request, **kwargs):
return get_object_or_404(Member, pk=kwargs.get('pk', None))
def title(self, obj):
return "STARS Blog: " + obj.user.first_name + " " + obj.user.last_name
<|code_end|>
. Use current file imports:
from django.contrib.syndication.views import Feed, FeedDoesNotExist
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from cms.models import Member, BlogPost
and context (classes, functions, or code) from other files:
# Path: cms/models.py
# class Member(models.Model):
# GROUP_CHOICES = (
# (u'graduate', u'Graduate'),
# (u'undergraduate', u'Undergraduate'),
# (u'faculty', u'Faculty')
# )
#
# CLASS_CHOICES = (
# (u'freshman', u'Freshman'),
# (u'sophomore', u'Sophomore'),
# (u'junior', u'Junior'),
# (u'senior', u'Senior')
# )
#
# STATUS_EMPTY = 0 # this member is pending "creation" by its owner
# STATUS_ACTIVE = 1 # this member has been created
# STATUS_ARCHIVED = 2 # this member has been archived and is frozen
#
# STATUS_CHOICES = (
# (STATUS_EMPTY, u'Empty'),
# (STATUS_ACTIVE, u'Active'),
# (STATUS_ARCHIVED, u'Archived')
# )
#
# user = models.ForeignKey(User, related_name='profile', unique=True)
# group = models.CharField(max_length=255, choices=GROUP_CHOICES)
# classification = models.CharField(max_length=255, choices=CLASS_CHOICES, blank=True)
# hometown = models.CharField(max_length=255, blank=True)
# interests = models.TextField(blank=True)
# homepage = models.URLField(blank=True)
# blurb = models.TextField(blank=True)
# image = models.ImageField(upload_to=make_member_image_name, storage=OverwriteStorage(), blank=True)
# status = models.IntegerField(choices=STATUS_CHOICES)
# #activation_key = models.CharField(max_length=255, blank=True)
#
# def __unicode__(self):
# return unicode(self.user.get_full_name())
#
# @models.permalink
# def get_absolute_url(self):
# return ('cms:profile_url', (self.pk,), {})
#
# def generate_hashed_email(self):
# return hashlib.md5(self.user.email).hexdigest()
#
# def get_coordinated_projects(self):
# return Project.objects.filter(pk__in=ProjectMember.objects.filter(member__pk=self.pk, is_coordinator=True).values_list('project__pk', flat=True))
#
# @staticmethod
# def get_possible_project_members():
# # need to allow member one year old as well since activity status is predicated on project membership
# # but you can't create a project for a new year without a project coordinator
# # hence, the one year offset avoids the chicken-and-the-egg problem
# #return Member.objects.filter(Q(status=Member.STATUS_ACTIVE) | Q(pk__in=ProjectMember.objects.filter(project__year__gte= \
# # settings.CURRENT_YEAR-1).distinct().values_list('member'))).order_by('user__first_name', 'user__last_name')
# return Member.objects.exclude(status=Member.STATUS_ARCHIVED).order_by('user__first_name', 'user__last_name')
#
# class Meta:
# verbose_name = 'member'
# verbose_name_plural = 'members'
# ordering = ['user__first_name']
#
# class BlogPost(models.Model):
# author = models.ForeignKey(Member)
# title = models.CharField(max_length=255)
# date = models.DateTimeField(auto_now_add=True)
# edit_date = models.DateTimeField(auto_now=True)
# post = models.TextField(help_text='HTML is allowed')
# tags = models.ManyToManyField(Tag, blank=True, related_name='blogposts')
#
# objects = BlogPostManager()
#
# @models.permalink
# def get_absolute_url(self):
# return ('cms:blog_post_url', (), {'pk': self.author.pk, 'blog_pk': self.pk})
#
# def __unicode__(self):
# return unicode('%s by %s' % (self.title, self.author))
#
# class Meta:
# verbose_name = 'blog post'
# verbose_name_plural = 'blog posts'
# ordering = ['-date']
. Output only the next line. | def link(self, obj): |
Predict the next line after this snippet: <|code_start|>
class UserBlogFeed(Feed):
description_template = "blogs/blog_rss.html"
def get_object(self, request, **kwargs):
return get_object_or_404(Member, pk=kwargs.get('pk', None))
def title(self, obj):
return "STARS Blog: " + obj.user.first_name + " " + obj.user.last_name
def link(self, obj):
return reverse("cms:blog_url", kwargs={'pk': obj.pk})
def description(self, obj):
return ""
def items(self, obj):
return BlogPost.objects.filter(author=obj).order_by('-date')[:30]
def item_title(self, item):
return item.title
class BlogFeed(Feed):
description_template = "blogs/blog_rss.html"
<|code_end|>
using the current file's imports:
from django.contrib.syndication.views import Feed, FeedDoesNotExist
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from cms.models import Member, BlogPost
and any relevant context from other files:
# Path: cms/models.py
# class Member(models.Model):
# GROUP_CHOICES = (
# (u'graduate', u'Graduate'),
# (u'undergraduate', u'Undergraduate'),
# (u'faculty', u'Faculty')
# )
#
# CLASS_CHOICES = (
# (u'freshman', u'Freshman'),
# (u'sophomore', u'Sophomore'),
# (u'junior', u'Junior'),
# (u'senior', u'Senior')
# )
#
# STATUS_EMPTY = 0 # this member is pending "creation" by its owner
# STATUS_ACTIVE = 1 # this member has been created
# STATUS_ARCHIVED = 2 # this member has been archived and is frozen
#
# STATUS_CHOICES = (
# (STATUS_EMPTY, u'Empty'),
# (STATUS_ACTIVE, u'Active'),
# (STATUS_ARCHIVED, u'Archived')
# )
#
# user = models.ForeignKey(User, related_name='profile', unique=True)
# group = models.CharField(max_length=255, choices=GROUP_CHOICES)
# classification = models.CharField(max_length=255, choices=CLASS_CHOICES, blank=True)
# hometown = models.CharField(max_length=255, blank=True)
# interests = models.TextField(blank=True)
# homepage = models.URLField(blank=True)
# blurb = models.TextField(blank=True)
# image = models.ImageField(upload_to=make_member_image_name, storage=OverwriteStorage(), blank=True)
# status = models.IntegerField(choices=STATUS_CHOICES)
# #activation_key = models.CharField(max_length=255, blank=True)
#
# def __unicode__(self):
# return unicode(self.user.get_full_name())
#
# @models.permalink
# def get_absolute_url(self):
# return ('cms:profile_url', (self.pk,), {})
#
# def generate_hashed_email(self):
# return hashlib.md5(self.user.email).hexdigest()
#
# def get_coordinated_projects(self):
# return Project.objects.filter(pk__in=ProjectMember.objects.filter(member__pk=self.pk, is_coordinator=True).values_list('project__pk', flat=True))
#
# @staticmethod
# def get_possible_project_members():
# # need to allow member one year old as well since activity status is predicated on project membership
# # but you can't create a project for a new year without a project coordinator
# # hence, the one year offset avoids the chicken-and-the-egg problem
# #return Member.objects.filter(Q(status=Member.STATUS_ACTIVE) | Q(pk__in=ProjectMember.objects.filter(project__year__gte= \
# # settings.CURRENT_YEAR-1).distinct().values_list('member'))).order_by('user__first_name', 'user__last_name')
# return Member.objects.exclude(status=Member.STATUS_ARCHIVED).order_by('user__first_name', 'user__last_name')
#
# class Meta:
# verbose_name = 'member'
# verbose_name_plural = 'members'
# ordering = ['user__first_name']
#
# class BlogPost(models.Model):
# author = models.ForeignKey(Member)
# title = models.CharField(max_length=255)
# date = models.DateTimeField(auto_now_add=True)
# edit_date = models.DateTimeField(auto_now=True)
# post = models.TextField(help_text='HTML is allowed')
# tags = models.ManyToManyField(Tag, blank=True, related_name='blogposts')
#
# objects = BlogPostManager()
#
# @models.permalink
# def get_absolute_url(self):
# return ('cms:blog_post_url', (), {'pk': self.author.pk, 'blog_pk': self.pk})
#
# def __unicode__(self):
# return unicode('%s by %s' % (self.title, self.author))
#
# class Meta:
# verbose_name = 'blog post'
# verbose_name_plural = 'blog posts'
# ordering = ['-date']
. Output only the next line. | def title(self, obj): |
Given the following code snippet before the placeholder: <|code_start|> post = models.TextField(help_text='HTML is allowed')
tags = models.ManyToManyField(Tag, blank=True, related_name='blogposts')
objects = BlogPostManager()
@models.permalink
def get_absolute_url(self):
return ('cms:blog_post_url', (), {'pk': self.author.pk, 'blog_pk': self.pk})
def __unicode__(self):
return unicode('%s by %s' % (self.title, self.author))
class Meta:
verbose_name = 'blog post'
verbose_name_plural = 'blog posts'
ordering = ['-date']
class Sponsor(models.Model):
name = models.CharField(max_length=255)
image = models.ImageField(upload_to=make_sponsor_image_name, storage=OverwriteStorage(), blank=True)
def __unicode__(self):
return unicode(self.name)
@models.permalink
def get_absolute_url(self):
return ('cms:sponsors_url')
def save(self):
img_tmp = self.image
<|code_end|>
, predict the next line using imports from the current file:
import hashlib
import os
from django.db import models
from django.conf import settings
from django.db.models import signals
from django.contrib.auth.models import User
from django.db.models import Q
from cms.managers import BlogPostManager
from cms.storage import OverwriteStorage
and context including class names, function names, and sometimes code from other files:
# Path: cms/managers.py
# class BlogPostManager(models.Manager):
# def by_academic_year(self, year):
# if isinstance(year, list):
# return reduce(lambda a,b: a|b, [ self.by_academic_year(x) for x in year ])
# else:
# if isinstance(year, str) or isinstance(year, unicode):
# year = int(year)
#
# lower_limit = datetime.datetime(year, 7, 1)
# upper_limit = datetime.datetime(year+1, 6, 30)
#
# return self.get_query_set().filter(date__gte=lower_limit, date__lte=upper_limit)
#
# Path: cms/storage.py
# class OverwriteStorage(FileSystemStorage):
#
# def get_available_name(self, name):
# """
# Returns a filename that's free on the target storage system, and
# available for new content to be written to.
# """
# # If the filename already exists, remove it as if it was a true file system
# if self.exists(name):
# os.remove(os.path.join(settings.MEDIA_ROOT, name))
# return name
. Output only the next line. | self.image = None |
Here is a snippet: <|code_start|> return os.path.join(PROJECT_IMAGE_FOLDER, str(instance.pk) + os.path.splitext(filename)[1].lower())
SPONSOR_IMAGE_FOLDER = 'sponsor'
def make_sponsor_image_name(instance, filename):
if instance.pk is None:
raise Exception('save Sponsor instance before saving ImageField')
return os.path.join(SPONSOR_IMAGE_FOLDER, str(instance.pk) + os.path.splitext(filename)[1].lower())
class Member(models.Model):
GROUP_CHOICES = (
(u'graduate', u'Graduate'),
(u'undergraduate', u'Undergraduate'),
(u'faculty', u'Faculty')
)
CLASS_CHOICES = (
(u'freshman', u'Freshman'),
(u'sophomore', u'Sophomore'),
(u'junior', u'Junior'),
(u'senior', u'Senior')
)
STATUS_EMPTY = 0 # this member is pending "creation" by its owner
STATUS_ACTIVE = 1 # this member has been created
STATUS_ARCHIVED = 2 # this member has been archived and is frozen
STATUS_CHOICES = (
(STATUS_EMPTY, u'Empty'),
(STATUS_ACTIVE, u'Active'),
(STATUS_ARCHIVED, u'Archived')
<|code_end|>
. Write the next line using the current file imports:
import hashlib
import os
from django.db import models
from django.conf import settings
from django.db.models import signals
from django.contrib.auth.models import User
from django.db.models import Q
from cms.managers import BlogPostManager
from cms.storage import OverwriteStorage
and context from other files:
# Path: cms/managers.py
# class BlogPostManager(models.Manager):
# def by_academic_year(self, year):
# if isinstance(year, list):
# return reduce(lambda a,b: a|b, [ self.by_academic_year(x) for x in year ])
# else:
# if isinstance(year, str) or isinstance(year, unicode):
# year = int(year)
#
# lower_limit = datetime.datetime(year, 7, 1)
# upper_limit = datetime.datetime(year+1, 6, 30)
#
# return self.get_query_set().filter(date__gte=lower_limit, date__lte=upper_limit)
#
# Path: cms/storage.py
# class OverwriteStorage(FileSystemStorage):
#
# def get_available_name(self, name):
# """
# Returns a filename that's free on the target storage system, and
# available for new content to be written to.
# """
# # If the filename already exists, remove it as if it was a true file system
# if self.exists(name):
# os.remove(os.path.join(settings.MEDIA_ROOT, name))
# return name
, which may include functions, classes, or code. Output only the next line. | ) |
Here is a snippet: <|code_start|>from __future__ import absolute_import
from __future__ import print_function
class PeerchatProxyClient(ProxyClient):
cipher = None ##a little HACKy
def connectionMade(self):
self.peer.setPeer(self)
print('writing crypt')
self.transport.write('CRYPT des 1 %s\n' % (self.peer.factory.gameName,))
def dataReceived(self, data):
print(repr(data))
## first receive should have challenges
if not self.cipher:
cryptInfo, data = data.split('\n', 1)
sChal = cryptInfo.split(' ')[-2].strip()
cChal = cryptInfo.split(' ')[-1].strip()
self.cipher = CipherProxy(sChal, cChal, self.peer.factory.gameKey)
## only resume once crypt response was received
self.peer.transport.resumeProducing()
if data:
data = self.cipher.serverIngress.crypt(data)
print(repr(data))
<|code_end|>
. Write the next line using the current file imports:
from twisted.protocols.portforward import *
from twisted.application.service import MultiService
from twisted.application.internet import TCPServer
from ..db import Game
from .peerchat import *
from twisted.internet import reactor
and context from other files:
# Path: eaEmu/db.py
# def makeMod(modname):
# def makeClasses(modname, classes):
# def Join(self, session): # TODO?
# def Leave(self, session):
# def getKey(cls, gameName):
# def _get_login_dirty(self):
# def _set_login_dirty(self, value):
# def GetUser(cls, **kw):
# def CreateUser(name, pwd=None):
# def addPersona(self, name):
# def add():
# def ebAdd(err):
# def getPersonas(self):
# def getPersona(self):
# def _get_umode(self):
# def _set_umode(self, value):
# def getChanMode(self, channel):
# def setChanMode(self, channel, value):
# def modifyMode(mode, mod):
# def getUser(cls, **kw):
# def getStats(cls, name, chanName):
# def dbOps():
# def dumpFields(self, fields, withNames=False):
# def syncAccount(username):
# def ebRunQuery(err):
# def cbRunQuery(result):
# def __init__(self, *args, **kw):
# def Login(self, username, pwd):
# def cbSync(user):
# def ebSync(err):
# def ebGetUser(err):
# def cbGotUser(user):
# def cbUserAuth(isMatch, user):
# def ebPlainAuth(err, user):
# def ebBadPwd(err):
# def cbCheckNumSessions(user):
# def getTheater(cls, name):
# def PlayerLeaveGame(self, session, game_id):
# def PlayerJoinGame(self, session, game_id):
# def CreateSession(self, ip, port):
# def DeleteSession(self, ip, port):
# def ConnectionEstablished(self, key, user):
# def ConnectionClosed(self, sess):
# def GetSession(self, key):
# def CreateGame(self):
# def ListGames(self, filters=None):
# def GetGame(self, game_id=None, host=None):
# def __init__(self, model):
# def __call__(self, func):
# def djangoAsyncWrapper(*args, **kw):
# def sigSaved(**kw):
# class _GameSessionAspect(object):
# class _GameAspect(object):
# class _UserAspect:
# class _PersonaAspect:
# class _StatsAspect:
# class _LoginSessionAspect(object):
# class _TheaterAspect(object):
# class djangoAsync(object):
, which may include functions, classes, or code. Output only the next line. | ProxyClient.dataReceived(self, data) |
Based on the snippet: <|code_start|>
class DjangoRaceConditions(unittest.TestCase):
def setUp(self):
pass
def test_getOrCreate(self):
chan = Channel.objects.get(id=1)
user = User.objects.get(login='Keb')
chan.users.clear()
def cbAdded(result, times):
chan = Channel.objects.get(id=1)
user = User.objects.get(login='Keb')
print 'added', chan.users.all()
return deferToThread(chan.users.remove, user).addCallback(cbRemoved, times)
def cbRemoved(result, times):
chan = Channel.objects.get(id=1)
user = User.objects.get(login='Keb')
print 'removed', chan.users.all()
if times > 0:
return deferToThread(chan.users.add, user).addCallback(cbAdded, times - 1)
else:
print 'done', chan.users.all()
return defer.succeed(None)
print
print user.login
print chan.prettyName
<|code_end|>
, predict the immediate next line with the help of imports:
from twisted.trial import unittest
from twisted.internet.threads import deferToThread
from twisted.internet import defer
from ..db import Channel, User
and context (classes, functions, sometimes code) from other files:
# Path: eaEmu/db.py
# def makeMod(modname):
# def makeClasses(modname, classes):
# def Join(self, session): # TODO?
# def Leave(self, session):
# def getKey(cls, gameName):
# def _get_login_dirty(self):
# def _set_login_dirty(self, value):
# def GetUser(cls, **kw):
# def CreateUser(name, pwd=None):
# def addPersona(self, name):
# def add():
# def ebAdd(err):
# def getPersonas(self):
# def getPersona(self):
# def _get_umode(self):
# def _set_umode(self, value):
# def getChanMode(self, channel):
# def setChanMode(self, channel, value):
# def modifyMode(mode, mod):
# def getUser(cls, **kw):
# def getStats(cls, name, chanName):
# def dbOps():
# def dumpFields(self, fields, withNames=False):
# def syncAccount(username):
# def ebRunQuery(err):
# def cbRunQuery(result):
# def __init__(self, *args, **kw):
# def Login(self, username, pwd):
# def cbSync(user):
# def ebSync(err):
# def ebGetUser(err):
# def cbGotUser(user):
# def cbUserAuth(isMatch, user):
# def ebPlainAuth(err, user):
# def ebBadPwd(err):
# def cbCheckNumSessions(user):
# def getTheater(cls, name):
# def PlayerLeaveGame(self, session, game_id):
# def PlayerJoinGame(self, session, game_id):
# def CreateSession(self, ip, port):
# def DeleteSession(self, ip, port):
# def ConnectionEstablished(self, key, user):
# def ConnectionClosed(self, sess):
# def GetSession(self, key):
# def CreateGame(self):
# def ListGames(self, filters=None):
# def GetGame(self, game_id=None, host=None):
# def __init__(self, model):
# def __call__(self, func):
# def djangoAsyncWrapper(*args, **kw):
# def sigSaved(**kw):
# class _GameSessionAspect(object):
# class _GameAspect(object):
# class _UserAspect:
# class _PersonaAspect:
# class _StatsAspect:
# class _LoginSessionAspect(object):
# class _TheaterAspect(object):
# class djangoAsync(object):
. Output only the next line. | dfr = deferToThread(chan.users.add, user).addCallback(cbAdded, 2) |
Here is a snippet: <|code_start|>from __future__ import absolute_import
class DjangoRaceConditions(unittest.TestCase):
def setUp(self):
<|code_end|>
. Write the next line using the current file imports:
from twisted.trial import unittest
from twisted.internet.threads import deferToThread
from twisted.internet import defer
from ..db import Channel, User
and context from other files:
# Path: eaEmu/db.py
# def makeMod(modname):
# def makeClasses(modname, classes):
# def Join(self, session): # TODO?
# def Leave(self, session):
# def getKey(cls, gameName):
# def _get_login_dirty(self):
# def _set_login_dirty(self, value):
# def GetUser(cls, **kw):
# def CreateUser(name, pwd=None):
# def addPersona(self, name):
# def add():
# def ebAdd(err):
# def getPersonas(self):
# def getPersona(self):
# def _get_umode(self):
# def _set_umode(self, value):
# def getChanMode(self, channel):
# def setChanMode(self, channel, value):
# def modifyMode(mode, mod):
# def getUser(cls, **kw):
# def getStats(cls, name, chanName):
# def dbOps():
# def dumpFields(self, fields, withNames=False):
# def syncAccount(username):
# def ebRunQuery(err):
# def cbRunQuery(result):
# def __init__(self, *args, **kw):
# def Login(self, username, pwd):
# def cbSync(user):
# def ebSync(err):
# def ebGetUser(err):
# def cbGotUser(user):
# def cbUserAuth(isMatch, user):
# def ebPlainAuth(err, user):
# def ebBadPwd(err):
# def cbCheckNumSessions(user):
# def getTheater(cls, name):
# def PlayerLeaveGame(self, session, game_id):
# def PlayerJoinGame(self, session, game_id):
# def CreateSession(self, ip, port):
# def DeleteSession(self, ip, port):
# def ConnectionEstablished(self, key, user):
# def ConnectionClosed(self, sess):
# def GetSession(self, key):
# def CreateGame(self):
# def ListGames(self, filters=None):
# def GetGame(self, game_id=None, host=None):
# def __init__(self, model):
# def __call__(self, func):
# def djangoAsyncWrapper(*args, **kw):
# def sigSaved(**kw):
# class _GameSessionAspect(object):
# class _GameAspect(object):
# class _UserAspect:
# class _PersonaAspect:
# class _StatsAspect:
# class _LoginSessionAspect(object):
# class _TheaterAspect(object):
# class djangoAsync(object):
, which may include functions, classes, or code. Output only the next line. | pass |
Next line prediction: <|code_start|>
class UtilTest(unittest.TestCase):
def test_sim_mat(self):
def sim_func(a, b):
return a * b
items = [1,2,3,4]
expected = np.array([
[1., 2., 3., 4.],
[2., 1., 6., 8.],
[3., 6., 1., 12.],
[4., 8., 12., 1.]
])
sim_mat = util.build_sim_mat(items, sim_func)
<|code_end|>
. Use current file imports:
(import unittest
import numpy as np
from broca.common import util)
and context including class names, function names, or small code snippets from other files:
# Path: broca/common/util.py
# def penn_to_wordnet(tag):
# def gram_size(term):
# def build_sim_mat(items, sim_func):
# def sim_to_dist(sim_mat):
# def dist_to_sim(dist_mat):
# def parallel(func, inputs, n_jobs, expand_args=False):
. Output only the next line. | np.testing.assert_array_equal(sim_mat, expected) |
Here is a snippet: <|code_start|># http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HISTORY_PATH = os.path.expanduser('~/.ndb_shell_history')
def shell():
if (not os.environ.get('DATASTORE_APP_ID', None)
and not os.environ.get('DATASTORE_PROJECT_ID', None)):
raise ValueError('Must set either DATASTORE_APP_ID or DATASTORE_PROJECT_ID'
' environment variable.')
ndb.get_context().set_memcache_policy(False)
ndb.get_context().set_cache_policy(False)
# ndb will set the application ID.
application_id = os.environ['APPLICATION_ID']
id_resolver = datastore_pbs.IdResolver((application_id,))
project_id = id_resolver.resolve_project_id(application_id)
banner = """ndb shell
Python %s
Project: %s
<|code_end|>
. Write the next line using the current file imports:
import atexit
import code
import readline
import os
import sys
import ndb
from ndb import tasklets
from ndb.google_imports import datastore_pbs
and context from other files:
# Path: ndb/tasklets.py
# _CALLBACK_KEY = '__CALLBACK__'
# IDLE = apiproxy_rpc.RPC.IDLE # Not yet running (unused)
# RUNNING = apiproxy_rpc.RPC.RUNNING # Not yet completed.
# FINISHING = apiproxy_rpc.RPC.FINISHING # Completed.
# _CONTEXT_KEY = '__CONTEXT__'
# _DATASTORE_APP_ID_ENV = 'DATASTORE_APP_ID'
# _DATASTORE_PROJECT_ID_ENV = 'DATASTORE_PROJECT_ID'
# _DATASTORE_ADDITIONAL_APP_IDS_ENV = 'DATASTORE_ADDITIONAL_APP_IDS'
# _DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV = 'DATASTORE_USE_PROJECT_ID_AS_APP_ID'
# def _is_generator(obj):
# def __init__(self):
# def set_context(self, ctx):
# def add_generator(self, gen):
# def add_pending(self, fut):
# def remove_pending(self, fut, status='success'):
# def clear_all_generators(self):
# def clear_all_pending(self):
# def dump_all_pending(self, verbose=False):
# def reset(self, unused_req_id):
# def add_flow_exception(exc):
# def _init_flow_exceptions():
# def __init__(self, info=None):
# def _reset(self):
# def __repr__(self):
# def dump(self):
# def dump_stack(self):
# def add_callback(self, callback, *args, **kwds):
# def add_immediate_callback(self, callback, *args, **kwds):
# def set_result(self, result):
# def set_exception(self, exc, tb=None):
# def done(self):
# def state(self):
# def wait(self):
# def get_exception(self):
# def get_traceback(self):
# def check_success(self):
# def get_result(self):
# def wait_any(cls, futures):
# def wait_all(cls, futures):
# def _help_tasklet_along(self, ns, ds_conn, gen, val=None, exc=None, tb=None):
# def _on_rpc_completion(self, rpc, ns, ds_conn, gen):
# def _on_future_completion(self, future, ns, ds_conn, gen):
# def sleep(dt):
# def __init__(self, info=None):
# def __repr__(self):
# def complete(self):
# def set_exception(self, exc, tb=None):
# def _finish(self):
# def putq(self, value):
# def add_dependent(self, fut):
# def _signal_dependent_done(self, fut):
# def __init__(self, info=None):
# def complete(self):
# def set_exception(self, exc, tb=None):
# def putq(self, value):
# def add_dependent(self, fut):
# def _signal_dependent_done(self, fut):
# def _mark_finished(self):
# def getq(self):
# def _pass_eof(self, fut):
# def _pass_result(self, fut, exc, tb, val):
# def __init__(self, info=None):
# def complete(self):
# def set_exception(self, exc, tb=None):
# def putq(self, value):
# def add_dependent(self, fut):
# def getq(self):
# def _transfer_result(fut1, fut2):
# def __init__(self, reducer, info=None, batch_size=20):
# def complete(self):
# def set_exception(self, exc, tb=None):
# def putq(self, value):
# def add_dependent(self, fut):
# def _internal_add_dependent(self, fut):
# def _signal_dependent_done(self, fut):
# def _mark_finished(self):
# def get_return_value(err):
# def tasklet(func):
# def tasklet_wrapper(*args, **kwds):
# def synctasklet(func):
# def synctasklet_wrapper(*args, **kwds):
# def toplevel(func):
# def add_context_wrapper(*args, **kwds):
# def get_context():
# def make_default_context():
# def make_context(conn=None, config=None):
# def _make_cloud_datastore_context(app_id, external_app_ids=()):
# def set_context(new_context):
# def MakeSyncCall(self, service, call, request, response):
# def CreateRPC(self):
# class _State(utils.threading_local):
# class Future(object):
# class MultiFuture(Future):
# class QueueFuture(Future):
# class SerialQueueFuture(Future):
# class ReducingFuture(Future):
# class _ThrowingStub(object):
#
# Path: ndb/google_imports.py
# GOOGLE_PACKAGE_PATH = set(google.__path__)
# GOOGLE_PACKAGE_PATH = None
# def set_appengine_imports():
, which may include functions, classes, or code. Output only the next line. | The ndb module is already imported. |
Predict the next line for this snippet: <|code_start|>
def test_predict(self):
X = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
clustering = KMeans(k=5, embedded=False)
self.assertRaises(RuntimeError, clustering.predict(X))
clustering.fit(X)
y_pred = clustering.predict(X)
self.assertEqual(len(y_pred), X.shape[0])
def test_fit_predict(self):
X = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
clustering = KMeans(k=5, embedded=False)
self.assertTrue(not clustering.fitted)
y_pred = clustering.fit_predict(X)
self.assertTrue(clustering.fitted)
self.assertEqual(len(y_pred), X.shape[0])
class GMMTest(TestCase):
def test_simple(self):
clustering = GMM(embedded=False)
clustering.stop()
@requireEmbedded
def test_embedded(self):
clustering = GMM(embedded=True)
def test_method(self):
clustering = GMM(embedded=False)
<|code_end|>
with the help of current file imports:
from unittest import TestCase
from jubakit.wrapper.clustering import KMeans, GMM, DBSCAN
from . import requireEmbedded
import numpy as np
and context from other files:
# Path: jubakit/wrapper/clustering.py
# class KMeans(BaseKFixedClustering):
#
# def _method(self):
# return 'kmeans'
#
# class GMM(BaseKFixedClustering):
#
# def _method(self):
# return 'gmm'
#
# class DBSCAN(BaseJubatusClustering):
#
# def __init__(self, eps=0.2, min_core_point=3,
# bucket_size=100, compressed_bucket_size=100,
# bicriteria_base_size=10, bucket_length=2,
# forgetting_factor=0.0, forgetting_threshold=0.5,
# seed=0, embedded=True, distance='euclidean'):
# super(DBSCAN, self).__init__('simple', bucket_size,
# compressed_bucket_size, bicriteria_base_size,
# bucket_length, forgetting_factor,
# forgetting_threshold, seed, embedded,
# distance)
# self.eps = eps
# self.min_core_point = min_core_point
#
# def _launch_clustering(self):
# self.method = 'dbscan'
# self.parameter = {
# 'eps': self.eps,
# 'min_core_point': self.min_core_point
# }
# self.config_ = Config(method=self.method, parameter=self.parameter,
# compressor_method=self.compressor_method,
# compressor_parameter=self.compressor_parameter,
# distance=self.distance)
# self.clustering_ = Clustering.run(config=self.config_,
# embedded=self.embedded)
, which may contain function names, class names, or code. Output only the next line. | self.assertEqual('gmm', clustering._method()) |
Predict the next line for this snippet: <|code_start|>
try:
except ImportError:
pass
class KMeansTest(TestCase):
def test_simple(self):
clustering = KMeans(embedded=False)
clustering.stop()
@requireEmbedded
def test_embedded(self):
clustering = KMeans(embedded=True)
def test_init(self):
clustering = KMeans(embedded=False)
self.assertEqual(2, clustering.k)
self.assertEqual('simple', clustering.compressor_method)
self.assertEqual(100, clustering.bucket_size)
self.assertEqual(100, clustering.compressed_bucket_size)
self.assertEqual(10, clustering.bicriteria_base_size)
self.assertEqual(2, clustering.bucket_length)
self.assertEqual(0.0, clustering.forgetting_factor)
self.assertEqual(0.5, clustering.forgetting_threshold)
self.assertEqual(0, clustering.seed)
            self.assertTrue(not clustering.embedded)
<|code_end|>
with the help of current file imports:
from unittest import TestCase
from jubakit.wrapper.clustering import KMeans, GMM, DBSCAN
from . import requireEmbedded
import numpy as np
and context from other files:
# Path: jubakit/wrapper/clustering.py
# class KMeans(BaseKFixedClustering):
#
# def _method(self):
# return 'kmeans'
#
# class GMM(BaseKFixedClustering):
#
# def _method(self):
# return 'gmm'
#
# class DBSCAN(BaseJubatusClustering):
#
# def __init__(self, eps=0.2, min_core_point=3,
# bucket_size=100, compressed_bucket_size=100,
# bicriteria_base_size=10, bucket_length=2,
# forgetting_factor=0.0, forgetting_threshold=0.5,
# seed=0, embedded=True, distance='euclidean'):
# super(DBSCAN, self).__init__('simple', bucket_size,
# compressed_bucket_size, bicriteria_base_size,
# bucket_length, forgetting_factor,
# forgetting_threshold, seed, embedded,
# distance)
# self.eps = eps
# self.min_core_point = min_core_point
#
# def _launch_clustering(self):
# self.method = 'dbscan'
# self.parameter = {
# 'eps': self.eps,
# 'min_core_point': self.min_core_point
# }
# self.config_ = Config(method=self.method, parameter=self.parameter,
# compressor_method=self.compressor_method,
# compressor_parameter=self.compressor_parameter,
# distance=self.distance)
# self.clustering_ = Clustering.run(config=self.config_,
# embedded=self.embedded)
, which may contain function names, class names, or code. Output only the next line. | self.assertEqual('euclidean', clustering.distance) |
Given snippet: <|code_start|>
class GMMTest(TestCase):
def test_simple(self):
clustering = GMM(embedded=False)
clustering.stop()
@requireEmbedded
def test_embedded(self):
clustering = GMM(embedded=True)
def test_method(self):
clustering = GMM(embedded=False)
self.assertEqual('gmm', clustering._method())
clustering.stop()
class DBSCANTest(TestCase):
def test_simple(self):
clustering = DBSCAN(embedded=False)
clustering.stop()
@requireEmbedded
def test_embedded(self):
clustering = DBSCAN(embedded=True)
def test_init(self):
clustering = DBSCAN(embedded=False)
self.assertEqual(0.2, clustering.eps)
self.assertEqual(3, clustering.min_core_point)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from unittest import TestCase
from jubakit.wrapper.clustering import KMeans, GMM, DBSCAN
from . import requireEmbedded
import numpy as np
and context:
# Path: jubakit/wrapper/clustering.py
# class KMeans(BaseKFixedClustering):
#
# def _method(self):
# return 'kmeans'
#
# class GMM(BaseKFixedClustering):
#
# def _method(self):
# return 'gmm'
#
# class DBSCAN(BaseJubatusClustering):
#
# def __init__(self, eps=0.2, min_core_point=3,
# bucket_size=100, compressed_bucket_size=100,
# bicriteria_base_size=10, bucket_length=2,
# forgetting_factor=0.0, forgetting_threshold=0.5,
# seed=0, embedded=True, distance='euclidean'):
# super(DBSCAN, self).__init__('simple', bucket_size,
# compressed_bucket_size, bicriteria_base_size,
# bucket_length, forgetting_factor,
# forgetting_threshold, seed, embedded,
# distance)
# self.eps = eps
# self.min_core_point = min_core_point
#
# def _launch_clustering(self):
# self.method = 'dbscan'
# self.parameter = {
# 'eps': self.eps,
# 'min_core_point': self.min_core_point
# }
# self.config_ = Config(method=self.method, parameter=self.parameter,
# compressor_method=self.compressor_method,
# compressor_parameter=self.compressor_parameter,
# distance=self.distance)
# self.clustering_ = Clustering.run(config=self.config_,
# embedded=self.embedded)
which might include code, classes, or functions. Output only the next line. | self.assertEqual('simple', clustering.compressor_method) |
Given the code snippet: <|code_start|>
def print_usage():
print('JubaModel - Jubatus Low-Level Model Manipulation Tool')
print()
parser.print_help(get_stdio()[1]) # stdout
print()
print('Supported Formats:')
print(' IN_FORMAT: auto | binary | json')
print(' OUT_FORMAT: text | binary | json')
(args, files) = parser.parse_args(args)
# Failed to parse options.
if parser._error:
print_usage()
return 2
# Help option is specified.
if args.help:
print_usage()
return 0
# Validate parameters.
if len(files) == 0:
print('Error: no model file specified')
print_usage()
return 1
if len(files) != 1:
print('Error: cannot specify multiple model files at once')
print_usage()
<|code_end|>
, generate the next line using the imports in this file:
import sys
import copy
import struct
import base64
import subprocess
import tempfile
import optparse
import msgpack
import json
from binascii import crc32
from io import BytesIO
from .compat import *
from ._stdio import print, printe, get_stdio
from ._process import JubaProcess
and context (functions, classes, or occasionally code) from other files:
# Path: jubakit/_stdio.py
# class _StandardIO(object):
# def get(cls):
# def set(cls, stdin=None, stdout=None, stderr=None):
# def print(cls, *args, **kwargs):
# def printe(cls, *args, **kwargs):
# def devnull(cls):
#
# Path: jubakit/_process.py
# class JubaProcess(object):
# @classmethod
# def get_process(cls, cmdline, env=None, *args, **kwargs):
# """
# Returns subprocess.Popen instance.
# """
# envvars = env if env else dict(os.environ)
# if platform.system() == 'Darwin' and 'DYLD_FALLBACK_LIBRARY_PATH' not in envvars:
# """
# Due to homebrew-jubatus issue #15, when using Homebrew with locations other than
# the standard installation path (/usr/local), Jubatus processes built on OS X cannot
# be run without DYLD_FALLBACK_LIBRARY_PATH. However, on El Capitan or later,
# DYLD_FALLBACK_LIBRARY_PATH are not propagated from parent process. We workaround
# the problem by automatically estimating DYLD_FALLBACK_LIBRARY_PATH based on PATH.
# """
# cmdpath = distutils.spawn.find_executable(cmdline[0])
# if cmdpath is None:
# raise RuntimeError('{0} command not found; confirm that PATH is properly set'.format(cmdline[0]))
# libpath = os.sep.join(cmdpath.split(os.sep)[:-2] + ['lib'])
# if os.path.isfile(os.sep.join([libpath, 'libjubatus_core.dylib'])):
# # If the estimated libpath is already in the default DYLD_FALLBACK_LIBRARY_PATH,
# # we don't have to add it. See ``man 1 dyld`` for the list of default search paths.
# if libpath not in [os.path.expanduser('~/lib'), '/usr/local/lib', '/lib', '/usr/lib']:
# envvars['DYLD_FALLBACK_LIBRARY_PATH'] = libpath
# _logger.info('setting DYLD_FALLBACK_LIBRARY_PATH to %s', libpath)
# return subprocess.Popen(cmdline, env=envvars, *args, **kwargs)
. Output only the next line. | return 1 |
Based on the snippet: <|code_start|> m.dump_text(f)
except Exception as e:
raise JubaModelError('{0}: failed to write model'.format(output), e)
# Output config
if output_config:
try:
with open(output_config, 'w') as f:
f.write(m.system.config)
except Exception as e:
raise JubaModelError('{0}: failed to write config'.format(output_config), e)
@classmethod
def start(cls, args):
USAGE = '''
jubamodel [--in-format IN_FORMAT] [--out-format OUT_FORMAT]
[--output OUTPUT] [--output-config OUTPUT_CONFIG]
[--transform TRANSFORM]
[--no-validate] [--fix-header] model_file
jubamodel --help'''
EPILOG = ' model_file input model file in format specified by --in-format'
parser = _JubaModelOptionParser(add_help_option=False, usage=USAGE, epilog=EPILOG)
# arguments
parser.add_option('-i', '--in-format', choices=('auto','binary','json'), default='auto',
help='model input format (default: %default)')
parser.add_option('-o', '--out-format', choices=('text','binary','json'), default='text',
help='model output format (default: %default)')
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import copy
import struct
import base64
import subprocess
import tempfile
import optparse
import msgpack
import json
from binascii import crc32
from io import BytesIO
from .compat import *
from ._stdio import print, printe, get_stdio
from ._process import JubaProcess
and context (classes, functions, sometimes code) from other files:
# Path: jubakit/_stdio.py
# class _StandardIO(object):
# def get(cls):
# def set(cls, stdin=None, stdout=None, stderr=None):
# def print(cls, *args, **kwargs):
# def printe(cls, *args, **kwargs):
# def devnull(cls):
#
# Path: jubakit/_process.py
# class JubaProcess(object):
# @classmethod
# def get_process(cls, cmdline, env=None, *args, **kwargs):
# """
# Returns subprocess.Popen instance.
# """
# envvars = env if env else dict(os.environ)
# if platform.system() == 'Darwin' and 'DYLD_FALLBACK_LIBRARY_PATH' not in envvars:
# """
# Due to homebrew-jubatus issue #15, when using Homebrew with locations other than
# the standard installation path (/usr/local), Jubatus processes built on OS X cannot
# be run without DYLD_FALLBACK_LIBRARY_PATH. However, on El Capitan or later,
# DYLD_FALLBACK_LIBRARY_PATH are not propagated from parent process. We workaround
# the problem by automatically estimating DYLD_FALLBACK_LIBRARY_PATH based on PATH.
# """
# cmdpath = distutils.spawn.find_executable(cmdline[0])
# if cmdpath is None:
# raise RuntimeError('{0} command not found; confirm that PATH is properly set'.format(cmdline[0]))
# libpath = os.sep.join(cmdpath.split(os.sep)[:-2] + ['lib'])
# if os.path.isfile(os.sep.join([libpath, 'libjubatus_core.dylib'])):
# # If the estimated libpath is already in the default DYLD_FALLBACK_LIBRARY_PATH,
# # we don't have to add it. See ``man 1 dyld`` for the list of default search paths.
# if libpath not in [os.path.expanduser('~/lib'), '/usr/local/lib', '/lib', '/usr/lib']:
# envvars['DYLD_FALLBACK_LIBRARY_PATH'] = libpath
# _logger.info('setting DYLD_FALLBACK_LIBRARY_PATH to %s', libpath)
# return subprocess.Popen(cmdline, env=envvars, *args, **kwargs)
. Output only the next line. | parser.add_option('-O', '--output', type='str', default=None, |
Based on the snippet: <|code_start|> m.dump_text(get_stdio()[1]) # stdout
else:
with open(output, 'w') as f:
m.dump_text(f)
except Exception as e:
raise JubaModelError('{0}: failed to write model'.format(output), e)
# Output config
if output_config:
try:
with open(output_config, 'w') as f:
f.write(m.system.config)
except Exception as e:
raise JubaModelError('{0}: failed to write config'.format(output_config), e)
@classmethod
def start(cls, args):
USAGE = '''
jubamodel [--in-format IN_FORMAT] [--out-format OUT_FORMAT]
[--output OUTPUT] [--output-config OUTPUT_CONFIG]
[--transform TRANSFORM]
[--no-validate] [--fix-header] model_file
jubamodel --help'''
EPILOG = ' model_file input model file in format specified by --in-format'
parser = _JubaModelOptionParser(add_help_option=False, usage=USAGE, epilog=EPILOG)
# arguments
parser.add_option('-i', '--in-format', choices=('auto','binary','json'), default='auto',
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import copy
import struct
import base64
import subprocess
import tempfile
import optparse
import msgpack
import json
from binascii import crc32
from io import BytesIO
from .compat import *
from ._stdio import print, printe, get_stdio
from ._process import JubaProcess
and context (classes, functions, sometimes code) from other files:
# Path: jubakit/_stdio.py
# class _StandardIO(object):
# def get(cls):
# def set(cls, stdin=None, stdout=None, stderr=None):
# def print(cls, *args, **kwargs):
# def printe(cls, *args, **kwargs):
# def devnull(cls):
#
# Path: jubakit/_process.py
# class JubaProcess(object):
# @classmethod
# def get_process(cls, cmdline, env=None, *args, **kwargs):
# """
# Returns subprocess.Popen instance.
# """
# envvars = env if env else dict(os.environ)
# if platform.system() == 'Darwin' and 'DYLD_FALLBACK_LIBRARY_PATH' not in envvars:
# """
# Due to homebrew-jubatus issue #15, when using Homebrew with locations other than
# the standard installation path (/usr/local), Jubatus processes built on OS X cannot
# be run without DYLD_FALLBACK_LIBRARY_PATH. However, on El Capitan or later,
# DYLD_FALLBACK_LIBRARY_PATH are not propagated from parent process. We workaround
# the problem by automatically estimating DYLD_FALLBACK_LIBRARY_PATH based on PATH.
# """
# cmdpath = distutils.spawn.find_executable(cmdline[0])
# if cmdpath is None:
# raise RuntimeError('{0} command not found; confirm that PATH is properly set'.format(cmdline[0]))
# libpath = os.sep.join(cmdpath.split(os.sep)[:-2] + ['lib'])
# if os.path.isfile(os.sep.join([libpath, 'libjubatus_core.dylib'])):
# # If the estimated libpath is already in the default DYLD_FALLBACK_LIBRARY_PATH,
# # we don't have to add it. See ``man 1 dyld`` for the list of default search paths.
# if libpath not in [os.path.expanduser('~/lib'), '/usr/local/lib', '/lib', '/usr/lib']:
# envvars['DYLD_FALLBACK_LIBRARY_PATH'] = libpath
# _logger.info('setting DYLD_FALLBACK_LIBRARY_PATH to %s', libpath)
# return subprocess.Popen(cmdline, env=envvars, *args, **kwargs)
. Output only the next line. | help='model input format (default: %default)') |
Given snippet: <|code_start|> # Output config
if output_config:
try:
with open(output_config, 'w') as f:
f.write(m.system.config)
except Exception as e:
raise JubaModelError('{0}: failed to write config'.format(output_config), e)
@classmethod
def start(cls, args):
USAGE = '''
jubamodel [--in-format IN_FORMAT] [--out-format OUT_FORMAT]
[--output OUTPUT] [--output-config OUTPUT_CONFIG]
[--transform TRANSFORM]
[--no-validate] [--fix-header] model_file
jubamodel --help'''
EPILOG = ' model_file input model file in format specified by --in-format'
parser = _JubaModelOptionParser(add_help_option=False, usage=USAGE, epilog=EPILOG)
# arguments
parser.add_option('-i', '--in-format', choices=('auto','binary','json'), default='auto',
help='model input format (default: %default)')
parser.add_option('-o', '--out-format', choices=('text','binary','json'), default='text',
help='model output format (default: %default)')
parser.add_option('-O', '--output', type='str', default=None,
help='specify output file instead of stdout')
parser.add_option('-C', '--output-config', type='str', default=None,
help='specify output file of config extracted from model')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import copy
import struct
import base64
import subprocess
import tempfile
import optparse
import msgpack
import json
from binascii import crc32
from io import BytesIO
from .compat import *
from ._stdio import print, printe, get_stdio
from ._process import JubaProcess
and context:
# Path: jubakit/_stdio.py
# class _StandardIO(object):
# def get(cls):
# def set(cls, stdin=None, stdout=None, stderr=None):
# def print(cls, *args, **kwargs):
# def printe(cls, *args, **kwargs):
# def devnull(cls):
#
# Path: jubakit/_process.py
# class JubaProcess(object):
# @classmethod
# def get_process(cls, cmdline, env=None, *args, **kwargs):
# """
# Returns subprocess.Popen instance.
# """
# envvars = env if env else dict(os.environ)
# if platform.system() == 'Darwin' and 'DYLD_FALLBACK_LIBRARY_PATH' not in envvars:
# """
# Due to homebrew-jubatus issue #15, when using Homebrew with locations other than
# the standard installation path (/usr/local), Jubatus processes built on OS X cannot
# be run without DYLD_FALLBACK_LIBRARY_PATH. However, on El Capitan or later,
# DYLD_FALLBACK_LIBRARY_PATH are not propagated from parent process. We workaround
# the problem by automatically estimating DYLD_FALLBACK_LIBRARY_PATH based on PATH.
# """
# cmdpath = distutils.spawn.find_executable(cmdline[0])
# if cmdpath is None:
# raise RuntimeError('{0} command not found; confirm that PATH is properly set'.format(cmdline[0]))
# libpath = os.sep.join(cmdpath.split(os.sep)[:-2] + ['lib'])
# if os.path.isfile(os.sep.join([libpath, 'libjubatus_core.dylib'])):
# # If the estimated libpath is already in the default DYLD_FALLBACK_LIBRARY_PATH,
# # we don't have to add it. See ``man 1 dyld`` for the list of default search paths.
# if libpath not in [os.path.expanduser('~/lib'), '/usr/local/lib', '/lib', '/usr/lib']:
# envvars['DYLD_FALLBACK_LIBRARY_PATH'] = libpath
# _logger.info('setting DYLD_FALLBACK_LIBRARY_PATH to %s', libpath)
# return subprocess.Popen(cmdline, env=envvars, *args, **kwargs)
which might include code, classes, or functions. Output only the next line. | parser.add_option('-T', '--transform', type='str', default=None, |
Predict the next line after this snippet: <|code_start|> loader = MergeChainLoader(
ArrayLoader([[0,1],[2,3],[4,5]], ['v1','v2']),
ArrayLoader([[0,1],[2,3],[4,5]], ['v3','v4']),
)
for row in loader:
self.assertEqual(set(['v1','v2','v3','v4']), set(row.keys()))
if row['v1'] == 0:
self.assertEqual(1, row['v2'])
self.assertEqual(0, row['v3'])
self.assertEqual(1, row['v4'])
elif row['v1'] ==2:
self.assertEqual(3, row['v2'])
self.assertEqual(2, row['v3'])
self.assertEqual(3, row['v4'])
elif row['v1'] == 4:
self.assertEqual(5, row['v2'])
self.assertEqual(4, row['v3'])
self.assertEqual(5, row['v4'])
else:
self.fail('unexpected row: {0}'.format(row))
class ValueMapChainLoaderTest(TestCase):
def test_simple(self):
loader = ValueMapChainLoader(
ArrayLoader([[0,1],[2,3],[4,5]], ['v1','v2']),
'v2',
{1: '_test1', 3: '_test3', 5: '_test5'}
)
for row in loader:
self.assertEqual(set(['v1','v2']), set(row.keys()))
<|code_end|>
using the current file's imports:
from unittest import TestCase
from jubakit.loader.array import ArrayLoader, ZipArrayLoader
from jubakit.loader.chain import MergeChainLoader, ValueMapChainLoader, ConcatLoader
and any relevant context from other files:
# Path: jubakit/loader/array.py
# class ArrayLoader(BaseLoader):
# """
# ArrayLoader is a loader to read from 2-d array.
# Expected to load row-oriented data.
#
# For example:
#
# >>> ArrayLoader([[1,2,3], [4,5,6]], ['k1','k2','k3'])
#
# ... will load two entries:
#
# - {'k1': 1, 'k2': 2, 'k3': 3}
# - {'k1': 4, 'k2': 5, 'k3': 6}
# """
#
# def __init__(self, array, feature_names=None):
# if feature_names is None:
# feature_names = ['v{0}'.format(i) for i in range(len(array[0]))]
#
# self._array = array
# self._feature_names = feature_names
#
# def rows(self):
# for ent in self._array:
# yield dict([x for x in zip(self._feature_names, ent) if x[1] is not None])
#
# class ZipArrayLoader(BaseLoader):
# """
# ZipArrayLoader zips multiple 1-d arrays that have the same length.
# Expected to load column-oriented data.
#
# For example:
#
# >>> ZipArrayLoader([[1,4], [2,5], [3,6]], ['k1','k2','k3'])
#
# ... or simply:
#
# >>> ZipArrayLoader(k1=[1,4], k2=[2,5], k3=[3,6])
#
# ... will load two entries:
#
# - {'k1': 1, 'k2': 2, 'k3': 3}
# - {'k1': 4, 'k2': 5, 'k3': 6}
# """
#
# def __init__(self, arrays=[], feature_names=None, **named_arrays):
# if feature_names is None:
# feature_names = ['v{0}'.format(i) for i in range(len(arrays))]
#
# if len(arrays) != len(feature_names):
# raise RuntimeError('number of arrays and feature names mismatch')
#
# self._feature_names = feature_names
# self._arrays = list(arrays)
# for name in named_arrays:
# self._feature_names.append(name)
# self._arrays.append(named_arrays[name])
#
# def rows(self):
# for ent in zip(*self._arrays):
# yield dict([x for x in zip(self._feature_names, ent) if x[1] is not None])
#
# Path: jubakit/loader/chain.py
# class MergeChainLoader(BaseLoader):
# """
# MergeChainLoader merges multiple loaders.
# """
#
# def __init__(self, *loaders):
# self._loaders = loaders
#
# def rows(self):
# for ent in zip_longest(*self._loaders, fillvalue={}):
# merged = {}
# for d in ent:
# merged.update(d)
# yield merged
#
# class ValueMapChainLoader(BaseLoader):
# """
# ValueMapChainLoader is a loader to map value of the specified key in each
# record loaded from another loader.
# """
#
# def __init__(self, loader, key, mapping):
# self._loader = loader
# self._key = key
# self._mapping = mapping
#
# def rows(self):
# for ent in self._loader:
# ent[self._key] = self._mapping[ent[self._key]]
# yield ent
#
# class ConcatLoader(BaseLoader):
# """
# ConcatLoader is a loader to concat multiple loaders.
# """
# def __init__(self, *loaders):
# self._loaders = loaders
#
# def rows(self):
# for loader in self._loaders:
# for row in loader.rows():
# yield row
. Output only the next line. | if row['v1'] == 0: |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class MergeChainLoaderTest(TestCase):
def test_simple(self):
loader = MergeChainLoader(
ArrayLoader([[0,1],[2,3],[4,5]], ['v1','v2']),
ArrayLoader([[0,1],[2,3],[4,5]], ['v3','v4']),
)
for row in loader:
self.assertEqual(set(['v1','v2','v3','v4']), set(row.keys()))
if row['v1'] == 0:
self.assertEqual(1, row['v2'])
self.assertEqual(0, row['v3'])
self.assertEqual(1, row['v4'])
elif row['v1'] ==2:
self.assertEqual(3, row['v2'])
self.assertEqual(2, row['v3'])
self.assertEqual(3, row['v4'])
elif row['v1'] == 4:
self.assertEqual(5, row['v2'])
self.assertEqual(4, row['v3'])
self.assertEqual(5, row['v4'])
else:
<|code_end|>
. Use current file imports:
(from unittest import TestCase
from jubakit.loader.array import ArrayLoader, ZipArrayLoader
from jubakit.loader.chain import MergeChainLoader, ValueMapChainLoader, ConcatLoader)
and context including class names, function names, or small code snippets from other files:
# Path: jubakit/loader/array.py
# class ArrayLoader(BaseLoader):
# """
# ArrayLoader is a loader to read from 2-d array.
# Expected to load row-oriented data.
#
# For example:
#
# >>> ArrayLoader([[1,2,3], [4,5,6]], ['k1','k2','k3'])
#
# ... will load two entries:
#
# - {'k1': 1, 'k2': 2, 'k3': 3}
# - {'k1': 4, 'k2': 5, 'k3': 6}
# """
#
# def __init__(self, array, feature_names=None):
# if feature_names is None:
# feature_names = ['v{0}'.format(i) for i in range(len(array[0]))]
#
# self._array = array
# self._feature_names = feature_names
#
# def rows(self):
# for ent in self._array:
# yield dict([x for x in zip(self._feature_names, ent) if x[1] is not None])
#
# class ZipArrayLoader(BaseLoader):
# """
# ZipArrayLoader zips multiple 1-d arrays that have the same length.
# Expected to load column-oriented data.
#
# For example:
#
# >>> ZipArrayLoader([[1,4], [2,5], [3,6]], ['k1','k2','k3'])
#
# ... or simply:
#
# >>> ZipArrayLoader(k1=[1,4], k2=[2,5], k3=[3,6])
#
# ... will load two entries:
#
# - {'k1': 1, 'k2': 2, 'k3': 3}
# - {'k1': 4, 'k2': 5, 'k3': 6}
# """
#
# def __init__(self, arrays=[], feature_names=None, **named_arrays):
# if feature_names is None:
# feature_names = ['v{0}'.format(i) for i in range(len(arrays))]
#
# if len(arrays) != len(feature_names):
# raise RuntimeError('number of arrays and feature names mismatch')
#
# self._feature_names = feature_names
# self._arrays = list(arrays)
# for name in named_arrays:
# self._feature_names.append(name)
# self._arrays.append(named_arrays[name])
#
# def rows(self):
# for ent in zip(*self._arrays):
# yield dict([x for x in zip(self._feature_names, ent) if x[1] is not None])
#
# Path: jubakit/loader/chain.py
# class MergeChainLoader(BaseLoader):
# """
# MergeChainLoader merges multiple loaders.
# """
#
# def __init__(self, *loaders):
# self._loaders = loaders
#
# def rows(self):
# for ent in zip_longest(*self._loaders, fillvalue={}):
# merged = {}
# for d in ent:
# merged.update(d)
# yield merged
#
# class ValueMapChainLoader(BaseLoader):
# """
# ValueMapChainLoader is a loader to map value of the specified key in each
# record loaded from another loader.
# """
#
# def __init__(self, loader, key, mapping):
# self._loader = loader
# self._key = key
# self._mapping = mapping
#
# def rows(self):
# for ent in self._loader:
# ent[self._key] = self._mapping[ent[self._key]]
# yield ent
#
# class ConcatLoader(BaseLoader):
# """
# ConcatLoader is a loader to concat multiple loaders.
# """
# def __init__(self, *loaders):
# self._loaders = loaders
#
# def rows(self):
# for loader in self._loaders:
# for row in loader.rows():
# yield row
. Output only the next line. | self.fail('unexpected row: {0}'.format(row)) |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class MergeChainLoaderTest(TestCase):
def test_simple(self):
loader = MergeChainLoader(
ArrayLoader([[0,1],[2,3],[4,5]], ['v1','v2']),
ArrayLoader([[0,1],[2,3],[4,5]], ['v3','v4']),
)
for row in loader:
self.assertEqual(set(['v1','v2','v3','v4']), set(row.keys()))
if row['v1'] == 0:
self.assertEqual(1, row['v2'])
self.assertEqual(0, row['v3'])
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from unittest import TestCase
from jubakit.loader.array import ArrayLoader, ZipArrayLoader
from jubakit.loader.chain import MergeChainLoader, ValueMapChainLoader, ConcatLoader
and context:
# Path: jubakit/loader/array.py
# class ArrayLoader(BaseLoader):
# """
# ArrayLoader is a loader to read from 2-d array.
# Expected to load row-oriented data.
#
# For example:
#
# >>> ArrayLoader([[1,2,3], [4,5,6]], ['k1','k2','k3'])
#
# ... will load two entries:
#
# - {'k1': 1, 'k2': 2, 'k3': 3}
# - {'k1': 4, 'k2': 5, 'k3': 6}
# """
#
# def __init__(self, array, feature_names=None):
# if feature_names is None:
# feature_names = ['v{0}'.format(i) for i in range(len(array[0]))]
#
# self._array = array
# self._feature_names = feature_names
#
# def rows(self):
# for ent in self._array:
# yield dict([x for x in zip(self._feature_names, ent) if x[1] is not None])
#
# class ZipArrayLoader(BaseLoader):
# """
# ZipArrayLoader zips multiple 1-d arrays that have the same length.
# Expected to load column-oriented data.
#
# For example:
#
# >>> ZipArrayLoader([[1,4], [2,5], [3,6]], ['k1','k2','k3'])
#
# ... or simply:
#
# >>> ZipArrayLoader(k1=[1,4], k2=[2,5], k3=[3,6])
#
# ... will load two entries:
#
# - {'k1': 1, 'k2': 2, 'k3': 3}
# - {'k1': 4, 'k2': 5, 'k3': 6}
# """
#
# def __init__(self, arrays=[], feature_names=None, **named_arrays):
# if feature_names is None:
# feature_names = ['v{0}'.format(i) for i in range(len(arrays))]
#
# if len(arrays) != len(feature_names):
# raise RuntimeError('number of arrays and feature names mismatch')
#
# self._feature_names = feature_names
# self._arrays = list(arrays)
# for name in named_arrays:
# self._feature_names.append(name)
# self._arrays.append(named_arrays[name])
#
# def rows(self):
# for ent in zip(*self._arrays):
# yield dict([x for x in zip(self._feature_names, ent) if x[1] is not None])
#
# Path: jubakit/loader/chain.py
# class MergeChainLoader(BaseLoader):
# """
# MergeChainLoader merges multiple loaders.
# """
#
# def __init__(self, *loaders):
# self._loaders = loaders
#
# def rows(self):
# for ent in zip_longest(*self._loaders, fillvalue={}):
# merged = {}
# for d in ent:
# merged.update(d)
# yield merged
#
# class ValueMapChainLoader(BaseLoader):
# """
# ValueMapChainLoader is a loader to map value of the specified key in each
# record loaded from another loader.
# """
#
# def __init__(self, loader, key, mapping):
# self._loader = loader
# self._key = key
# self._mapping = mapping
#
# def rows(self):
# for ent in self._loader:
# ent[self._key] = self._mapping[ent[self._key]]
# yield ent
#
# class ConcatLoader(BaseLoader):
# """
# ConcatLoader is a loader to concat multiple loaders.
# """
# def __init__(self, *loaders):
# self._loaders = loaders
#
# def rows(self):
# for loader in self._loaders:
# for row in loader.rows():
# yield row
which might include code, classes, or functions. Output only the next line. | self.assertEqual(1, row['v4']) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class MergeChainLoaderTest(TestCase):
def test_simple(self):
loader = MergeChainLoader(
ArrayLoader([[0,1],[2,3],[4,5]], ['v1','v2']),
ArrayLoader([[0,1],[2,3],[4,5]], ['v3','v4']),
<|code_end|>
, predict the next line using imports from the current file:
from unittest import TestCase
from jubakit.loader.array import ArrayLoader, ZipArrayLoader
from jubakit.loader.chain import MergeChainLoader, ValueMapChainLoader, ConcatLoader
and context including class names, function names, and sometimes code from other files:
# Path: jubakit/loader/array.py
# class ArrayLoader(BaseLoader):
# """
# ArrayLoader is a loader to read from 2-d array.
# Expected to load row-oriented data.
#
# For example:
#
# >>> ArrayLoader([[1,2,3], [4,5,6]], ['k1','k2','k3'])
#
# ... will load two entries:
#
# - {'k1': 1, 'k2': 2, 'k3': 3}
# - {'k1': 4, 'k2': 5, 'k3': 6}
# """
#
# def __init__(self, array, feature_names=None):
# if feature_names is None:
# feature_names = ['v{0}'.format(i) for i in range(len(array[0]))]
#
# self._array = array
# self._feature_names = feature_names
#
# def rows(self):
# for ent in self._array:
# yield dict([x for x in zip(self._feature_names, ent) if x[1] is not None])
#
# class ZipArrayLoader(BaseLoader):
# """
# ZipArrayLoader zips multiple 1-d arrays that have the same length.
# Expected to load column-oriented data.
#
# For example:
#
# >>> ZipArrayLoader([[1,4], [2,5], [3,6]], ['k1','k2','k3'])
#
# ... or simply:
#
# >>> ZipArrayLoader(k1=[1,4], k2=[2,5], k3=[3,6])
#
# ... will load two entries:
#
# - {'k1': 1, 'k2': 2, 'k3': 3}
# - {'k1': 4, 'k2': 5, 'k3': 6}
# """
#
# def __init__(self, arrays=[], feature_names=None, **named_arrays):
# if feature_names is None:
# feature_names = ['v{0}'.format(i) for i in range(len(arrays))]
#
# if len(arrays) != len(feature_names):
# raise RuntimeError('number of arrays and feature names mismatch')
#
# self._feature_names = feature_names
# self._arrays = list(arrays)
# for name in named_arrays:
# self._feature_names.append(name)
# self._arrays.append(named_arrays[name])
#
# def rows(self):
# for ent in zip(*self._arrays):
# yield dict([x for x in zip(self._feature_names, ent) if x[1] is not None])
#
# Path: jubakit/loader/chain.py
# class MergeChainLoader(BaseLoader):
# """
# MergeChainLoader merges multiple loaders.
# """
#
# def __init__(self, *loaders):
# self._loaders = loaders
#
# def rows(self):
# for ent in zip_longest(*self._loaders, fillvalue={}):
# merged = {}
# for d in ent:
# merged.update(d)
# yield merged
#
# class ValueMapChainLoader(BaseLoader):
# """
# ValueMapChainLoader is a loader to map value of the specified key in each
# record loaded from another loader.
# """
#
# def __init__(self, loader, key, mapping):
# self._loader = loader
# self._key = key
# self._mapping = mapping
#
# def rows(self):
# for ent in self._loader:
# ent[self._key] = self._mapping[ent[self._key]]
# yield ent
#
# class ConcatLoader(BaseLoader):
# """
# ConcatLoader is a loader to concat multiple loaders.
# """
# def __init__(self, *loaders):
# self._loaders = loaders
#
# def rows(self):
# for loader in self._loaders:
# for row in loader.rows():
# yield row
. Output only the next line. | ) |
Continue the code snippet: <|code_start|>
class MergeChainLoaderTest(TestCase):
def test_simple(self):
loader = MergeChainLoader(
ArrayLoader([[0,1],[2,3],[4,5]], ['v1','v2']),
ArrayLoader([[0,1],[2,3],[4,5]], ['v3','v4']),
)
for row in loader:
self.assertEqual(set(['v1','v2','v3','v4']), set(row.keys()))
if row['v1'] == 0:
self.assertEqual(1, row['v2'])
self.assertEqual(0, row['v3'])
self.assertEqual(1, row['v4'])
elif row['v1'] ==2:
self.assertEqual(3, row['v2'])
self.assertEqual(2, row['v3'])
self.assertEqual(3, row['v4'])
elif row['v1'] == 4:
self.assertEqual(5, row['v2'])
self.assertEqual(4, row['v3'])
self.assertEqual(5, row['v4'])
else:
self.fail('unexpected row: {0}'.format(row))
class ValueMapChainLoaderTest(TestCase):
def test_simple(self):
loader = ValueMapChainLoader(
ArrayLoader([[0,1],[2,3],[4,5]], ['v1','v2']),
'v2',
<|code_end|>
. Use current file imports:
from unittest import TestCase
from jubakit.loader.array import ArrayLoader, ZipArrayLoader
from jubakit.loader.chain import MergeChainLoader, ValueMapChainLoader, ConcatLoader
and context (classes, functions, or code) from other files:
# Path: jubakit/loader/array.py
# class ArrayLoader(BaseLoader):
# """
# ArrayLoader is a loader to read from 2-d array.
# Expected to load row-oriented data.
#
# For example:
#
# >>> ArrayLoader([[1,2,3], [4,5,6]], ['k1','k2','k3'])
#
# ... will load two entries:
#
# - {'k1': 1, 'k2': 2, 'k3': 3}
# - {'k1': 4, 'k2': 5, 'k3': 6}
# """
#
# def __init__(self, array, feature_names=None):
# if feature_names is None:
# feature_names = ['v{0}'.format(i) for i in range(len(array[0]))]
#
# self._array = array
# self._feature_names = feature_names
#
# def rows(self):
# for ent in self._array:
# yield dict([x for x in zip(self._feature_names, ent) if x[1] is not None])
#
# class ZipArrayLoader(BaseLoader):
# """
# ZipArrayLoader zips multiple 1-d arrays that have the same length.
# Expected to load column-oriented data.
#
# For example:
#
# >>> ZipArrayLoader([[1,4], [2,5], [3,6]], ['k1','k2','k3'])
#
# ... or simply:
#
# >>> ZipArrayLoader(k1=[1,4], k2=[2,5], k3=[3,6])
#
# ... will load two entries:
#
# - {'k1': 1, 'k2': 2, 'k3': 3}
# - {'k1': 4, 'k2': 5, 'k3': 6}
# """
#
# def __init__(self, arrays=[], feature_names=None, **named_arrays):
# if feature_names is None:
# feature_names = ['v{0}'.format(i) for i in range(len(arrays))]
#
# if len(arrays) != len(feature_names):
# raise RuntimeError('number of arrays and feature names mismatch')
#
# self._feature_names = feature_names
# self._arrays = list(arrays)
# for name in named_arrays:
# self._feature_names.append(name)
# self._arrays.append(named_arrays[name])
#
# def rows(self):
# for ent in zip(*self._arrays):
# yield dict([x for x in zip(self._feature_names, ent) if x[1] is not None])
#
# Path: jubakit/loader/chain.py
# class MergeChainLoader(BaseLoader):
# """
# MergeChainLoader merges multiple loaders.
# """
#
# def __init__(self, *loaders):
# self._loaders = loaders
#
# def rows(self):
# for ent in zip_longest(*self._loaders, fillvalue={}):
# merged = {}
# for d in ent:
# merged.update(d)
# yield merged
#
# class ValueMapChainLoader(BaseLoader):
# """
# ValueMapChainLoader is a loader to map value of the specified key in each
# record loaded from another loader.
# """
#
# def __init__(self, loader, key, mapping):
# self._loader = loader
# self._key = key
# self._mapping = mapping
#
# def rows(self):
# for ent in self._loader:
# ent[self._key] = self._mapping[ent[self._key]]
# yield ent
#
# class ConcatLoader(BaseLoader):
# """
# ConcatLoader is a loader to concat multiple loaders.
# """
# def __init__(self, *loaders):
# self._loaders = loaders
#
# def rows(self):
# for loader in self._loaders:
# for row in loader.rows():
# yield row
. Output only the next line. | {1: '_test1', 3: '_test3', 5: '_test5'} |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class NearestNeighborCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from jubatus.nearest_neighbor.types import *
from .generic import GenericCLI
from ..args import Arguments, Optional, TDatum
from ..util import *
from ..._stdio import print
and context:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class Optional(Mandatory):
# def convert(self, args):
# if len(args) == 0:
# return (0, None)
# return super(Optional, self).convert(args)
#
# def min_max(self):
# return (0, super(Optional, self).min_max()[1])
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
which might include code, classes, or functions. Output only the next line. | return 'nearest_neighbor' |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class GraphCLI(GenericCLI):
@classmethod
<|code_end|>
. Use current file imports:
from jubatus.graph.types import *
from .generic import GenericCLI
from ..args import Arguments, Optional, TProperty
from ..util import *
from ..._stdio import print
and context (classes, functions, or code) from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class Optional(Mandatory):
# def convert(self, args):
# if len(args) == 0:
# return (0, None)
# return super(Optional, self).convert(args)
#
# def min_max(self):
# return (0, super(Optional, self).min_max()[1])
#
# class TProperty(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last property key ({0}) is missing'.format(args[len(args) - 1]))
# p = {}
# for i in range(int(len(args) / 2)):
# p[args[i*2]] = args[i*2+1]
# return (len(args), p)
#
# def min_max(self):
# return (2, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | def _name(cls): |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class GraphCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
with the help of current file imports:
from jubatus.graph.types import *
from .generic import GenericCLI
from ..args import Arguments, Optional, TProperty
from ..util import *
from ..._stdio import print
and context from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class Optional(Mandatory):
# def convert(self, args):
# if len(args) == 0:
# return (0, None)
# return super(Optional, self).convert(args)
#
# def min_max(self):
# return (0, super(Optional, self).min_max()[1])
#
# class TProperty(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last property key ({0}) is missing'.format(args[len(args) - 1]))
# p = {}
# for i in range(int(len(args) / 2)):
# p[args[i*2]] = args[i*2+1]
# return (len(args), p)
#
# def min_max(self):
# return (2, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
, which may contain function names, class names, or code. Output only the next line. | return 'graph' |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class GraphCLI(GenericCLI):
@classmethod
def _name(cls):
return 'graph'
@Arguments()
<|code_end|>
. Write the next line using the current file imports:
from jubatus.graph.types import *
from .generic import GenericCLI
from ..args import Arguments, Optional, TProperty
from ..util import *
from ..._stdio import print
and context from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class Optional(Mandatory):
# def convert(self, args):
# if len(args) == 0:
# return (0, None)
# return super(Optional, self).convert(args)
#
# def min_max(self):
# return (0, super(Optional, self).min_max()[1])
#
# class TProperty(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last property key ({0}) is missing'.format(args[len(args) - 1]))
# p = {}
# for i in range(int(len(args) / 2)):
# p[args[i*2]] = args[i*2+1]
# return (len(args), p)
#
# def min_max(self):
# return (2, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
, which may include functions, classes, or code. Output only the next line. | def do_create_node(self): |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class StatCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
, generate the next line using the imports in this file:
from jubatus.stat.types import *
from .generic import GenericCLI
from ..args import Arguments
from ..util import *
from ..._stdio import print
and context (functions, classes, or occasionally code) from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | return 'stat' |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class StatCLI(GenericCLI):
@classmethod
def _name(cls):
return 'stat'
@Arguments(str, float)
<|code_end|>
. Use current file imports:
(from jubatus.stat.types import *
from .generic import GenericCLI
from ..args import Arguments
from ..util import *
from ..._stdio import print)
and context including class names, function names, or small code snippets from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | def do_push(self, key, value): |
Predict the next line for this snippet: <|code_start|> USAGE = '''
jubash [--host HOST] [--port PORT] [--cluster CLUSTER]
[--service SERVICE] [--command COMMAND]
[--keepalive] [--fail-fast] [--prompt PROMPT]
[--verbose] [--debug] [--help] [script ...]'''
EPILOG = ' script ... execute shell script instead of interactive shell'
services = sorted(JubaShell.get_cli_classes().keys())
parser = _JubashOptionParser(add_help_option=False, usage=USAGE, epilog=EPILOG)
parser.add_option('-H', '--host', type='string', default='127.0.0.1',
help='host name or IP address of the server / proxy (default: %default)')
parser.add_option('-P', '--port', type='int', default=9199,
help='port number of the server / proxy (default: %default)')
parser.add_option('-C', '--cluster', type='string', default='',
help='cluster name; only required when connecting to proxy')
parser.add_option('-s', '--service', type='string', default=None,
help='type of the server; see below for services available (default: auto-detect)')
parser.add_option('-e', '--engine', type='string', default=None,
help='(deprecated) equivalent to --service')
parser.add_option('-c', '--command', type='string', default=None,
help='run one-shot command instead of interactive shell')
parser.add_option('-t', '--timeout', type='int', default=10,
help='client-side timeout in seconds (default: %default)')
parser.add_option('-k', '--keepalive', default=False, action='store_true',
help='use keep-alive connection; recommended when server-side timeout is disabled')
parser.add_option('-F', '--fail-fast', default=False, action='store_true',
help='exit on error when running script')
parser.add_option('-p', '--prompt', type='string', default=JubaShell._PS,
<|code_end|>
with the help of current file imports:
import sys
import socket
import optparse
import msgpackrpc
import jubatus
from .compat import *
from ._stdio import print, get_stdio
from ._cli.base import BaseCLI, CLIInvalidatedException, CLIUnknownCommandException
and context from other files:
# Path: jubakit/_stdio.py
# class _StandardIO(object):
# def get(cls):
# def set(cls, stdin=None, stdout=None, stderr=None):
# def print(cls, *args, **kwargs):
# def printe(cls, *args, **kwargs):
# def devnull(cls):
#
# Path: jubakit/_cli/base.py
# class BaseCLI(ExtendedCmd):
# """
# A base class for CLI interface of all services.
# """
#
# def __init__(self, shell, *args, **kwargs):
# super(BaseCLI, self).__init__(*args, **kwargs)
#
# self._sh = shell
#
# # Set help sentences.
# self.doc_header = "Commands:"
# self.undoc_header = "Commands (no help available):"
# self.misc_header = "Documents:"
# self.nohelp = "No help available for %s."
#
# # Register aliases.
# self.register_alias('EOF', 'do_exit')
# self.register_alias('ls', 'do_help')
# self.register_alias('shell', 'shell_command')
#
# #################################################################
# # Override methods to tweak CLI behavior
# #################################################################
#
# def emptyline(self):
# """
# By default, empty line causes the previous command to run again.
# This overrides the default handler for emptyline so it behaves like an usual shell.
# """
# pass
#
# def postcmd(self, stop, line):
# """
# After a single command is executed, we discard the connection if not in
# keepalive mode.
# """
# if not self._sh._keepalive:
# self._sh.disconnect()
# self._sh.connect()
# return stop
#
# def default(self, line):
# """
# Raise exception for unhandled commands.
# """
# raise CLIUnknownCommandException(line)
#
# #################################################################
# # Common interfaces for Jubatus CLI
# #################################################################
#
# @classmethod
# def _name(cls):
# """
# Returns the name of the service (e.g., `classifier`).
# You must override this in subclasses.
# """
# raise NotImplementedError
#
# def _verbose(self, msg):
# """
# Outputs logs only when in verbose mode.
# """
# if self._sh._verbose:
# print(msg)
#
# @property
# def client(self):
# """
# Returns the client instance.
# """
# return self._sh.get_client()
#
# #################################################################
# # Built-in shell commands
# #################################################################
#
# @Arguments()
# def do_exit(self):
# """Syntax: exit
# Exits the shell. You can also use EOF (Ctrl-D).
# """
# print()
# return True
#
# def help_help(self):
# print(
# """Syntax: help [command]
# Displays the list of commands available.
# If ``command`` is specified, displays the help for the command."""
# )
#
# def shell_command(self, param):
# """
# Runs the command in the *real* shell.
# """
# subprocess.call(param, shell=True)
#
# class CLIInvalidatedException(Exception):
# """
# Notify Shell to regenerate CLI instance.
# """
# pass
#
# class CLIUnknownCommandException(Exception):
# """
# Notify Shell that unknown command is specified.
# """
# pass
, which may contain function names, class names, or code. Output only the next line. | help='use customized shell prompt (default: %default)') |
Given the code snippet: <|code_start|> print('Jubash - Jubatus Shell')
print()
parser.print_help(get_stdio()[1]) # stdout
print()
print('Available Services:')
print(' {0}'.format(', '.join(services)))
(args, scripts) = parser.parse_args(args)
# Failed to parse options.
if parser._error:
print_usage()
return 2
# Help option is specified.
if args.help:
print_usage()
return 0
# Support for deprecated parameters.
if args.service is None:
args.service = args.engine
# Validate parameters.
if args.port < 1 or 65535 < args.port:
print('Error: port number out of range: {0}'.format(args.port))
print_usage()
return 1
if args.service is not None and args.service not in services:
print('Error: unknown service name: {0}'.format(args.service))
<|code_end|>
, generate the next line using the imports in this file:
import sys
import socket
import optparse
import msgpackrpc
import jubatus
from .compat import *
from ._stdio import print, get_stdio
from ._cli.base import BaseCLI, CLIInvalidatedException, CLIUnknownCommandException
and context (functions, classes, or occasionally code) from other files:
# Path: jubakit/_stdio.py
# class _StandardIO(object):
# def get(cls):
# def set(cls, stdin=None, stdout=None, stderr=None):
# def print(cls, *args, **kwargs):
# def printe(cls, *args, **kwargs):
# def devnull(cls):
#
# Path: jubakit/_cli/base.py
# class BaseCLI(ExtendedCmd):
# """
# A base class for CLI interface of all services.
# """
#
# def __init__(self, shell, *args, **kwargs):
# super(BaseCLI, self).__init__(*args, **kwargs)
#
# self._sh = shell
#
# # Set help sentences.
# self.doc_header = "Commands:"
# self.undoc_header = "Commands (no help available):"
# self.misc_header = "Documents:"
# self.nohelp = "No help available for %s."
#
# # Register aliases.
# self.register_alias('EOF', 'do_exit')
# self.register_alias('ls', 'do_help')
# self.register_alias('shell', 'shell_command')
#
# #################################################################
# # Override methods to tweak CLI behavior
# #################################################################
#
# def emptyline(self):
# """
# By default, empty line causes the previous command to run again.
# This overrides the default handler for emptyline so it behaves like an usual shell.
# """
# pass
#
# def postcmd(self, stop, line):
# """
# After a single command is executed, we discard the connection if not in
# keepalive mode.
# """
# if not self._sh._keepalive:
# self._sh.disconnect()
# self._sh.connect()
# return stop
#
# def default(self, line):
# """
# Raise exception for unhandled commands.
# """
# raise CLIUnknownCommandException(line)
#
# #################################################################
# # Common interfaces for Jubatus CLI
# #################################################################
#
# @classmethod
# def _name(cls):
# """
# Returns the name of the service (e.g., `classifier`).
# You must override this in subclasses.
# """
# raise NotImplementedError
#
# def _verbose(self, msg):
# """
# Outputs logs only when in verbose mode.
# """
# if self._sh._verbose:
# print(msg)
#
# @property
# def client(self):
# """
# Returns the client instance.
# """
# return self._sh.get_client()
#
# #################################################################
# # Built-in shell commands
# #################################################################
#
# @Arguments()
# def do_exit(self):
# """Syntax: exit
# Exits the shell. You can also use EOF (Ctrl-D).
# """
# print()
# return True
#
# def help_help(self):
# print(
# """Syntax: help [command]
# Displays the list of commands available.
# If ``command`` is specified, displays the help for the command."""
# )
#
# def shell_command(self, param):
# """
# Runs the command in the *real* shell.
# """
# subprocess.call(param, shell=True)
#
# class CLIInvalidatedException(Exception):
# """
# Notify Shell to regenerate CLI instance.
# """
# pass
#
# class CLIUnknownCommandException(Exception):
# """
# Notify Shell that unknown command is specified.
# """
# pass
. Output only the next line. | print_usage() |
Predict the next line after this snippet: <|code_start|>
def print_usage():
print('Jubash - Jubatus Shell')
print()
parser.print_help(get_stdio()[1]) # stdout
print()
print('Available Services:')
print(' {0}'.format(', '.join(services)))
(args, scripts) = parser.parse_args(args)
# Failed to parse options.
if parser._error:
print_usage()
return 2
# Help option is specified.
if args.help:
print_usage()
return 0
# Support for deprecated parameters.
if args.service is None:
args.service = args.engine
# Validate parameters.
if args.port < 1 or 65535 < args.port:
print('Error: port number out of range: {0}'.format(args.port))
print_usage()
return 1
<|code_end|>
using the current file's imports:
import sys
import socket
import optparse
import msgpackrpc
import jubatus
from .compat import *
from ._stdio import print, get_stdio
from ._cli.base import BaseCLI, CLIInvalidatedException, CLIUnknownCommandException
and any relevant context from other files:
# Path: jubakit/_stdio.py
# class _StandardIO(object):
# def get(cls):
# def set(cls, stdin=None, stdout=None, stderr=None):
# def print(cls, *args, **kwargs):
# def printe(cls, *args, **kwargs):
# def devnull(cls):
#
# Path: jubakit/_cli/base.py
# class BaseCLI(ExtendedCmd):
# """
# A base class for CLI interface of all services.
# """
#
# def __init__(self, shell, *args, **kwargs):
# super(BaseCLI, self).__init__(*args, **kwargs)
#
# self._sh = shell
#
# # Set help sentences.
# self.doc_header = "Commands:"
# self.undoc_header = "Commands (no help available):"
# self.misc_header = "Documents:"
# self.nohelp = "No help available for %s."
#
# # Register aliases.
# self.register_alias('EOF', 'do_exit')
# self.register_alias('ls', 'do_help')
# self.register_alias('shell', 'shell_command')
#
# #################################################################
# # Override methods to tweak CLI behavior
# #################################################################
#
# def emptyline(self):
# """
# By default, empty line causes the previous command to run again.
# This overrides the default handler for emptyline so it behaves like an usual shell.
# """
# pass
#
# def postcmd(self, stop, line):
# """
# After a single command is executed, we discard the connection if not in
# keepalive mode.
# """
# if not self._sh._keepalive:
# self._sh.disconnect()
# self._sh.connect()
# return stop
#
# def default(self, line):
# """
# Raise exception for unhandled commands.
# """
# raise CLIUnknownCommandException(line)
#
# #################################################################
# # Common interfaces for Jubatus CLI
# #################################################################
#
# @classmethod
# def _name(cls):
# """
# Returns the name of the service (e.g., `classifier`).
# You must override this in subclasses.
# """
# raise NotImplementedError
#
# def _verbose(self, msg):
# """
# Outputs logs only when in verbose mode.
# """
# if self._sh._verbose:
# print(msg)
#
# @property
# def client(self):
# """
# Returns the client instance.
# """
# return self._sh.get_client()
#
# #################################################################
# # Built-in shell commands
# #################################################################
#
# @Arguments()
# def do_exit(self):
# """Syntax: exit
# Exits the shell. You can also use EOF (Ctrl-D).
# """
# print()
# return True
#
# def help_help(self):
# print(
# """Syntax: help [command]
# Displays the list of commands available.
# If ``command`` is specified, displays the help for the command."""
# )
#
# def shell_command(self, param):
# """
# Runs the command in the *real* shell.
# """
# subprocess.call(param, shell=True)
#
# class CLIInvalidatedException(Exception):
# """
# Notify Shell to regenerate CLI instance.
# """
# pass
#
# class CLIUnknownCommandException(Exception):
# """
# Notify Shell that unknown command is specified.
# """
# pass
. Output only the next line. | if args.service is not None and args.service not in services: |
Next line prediction: <|code_start|> # Create shell instance.
shell = JubaShell(
host=args.host,
port=args.port,
cluster=args.cluster,
service=args.service,
timeout=args.timeout,
keepalive=args.keepalive,
verbose=args.verbose,
prompt=args.prompt,
)
# Run the shell.
if args.command:
# One-shot command mode.
success = shell.run(args.command)
elif len(scripts) != 0:
# Batch script mode.
for script in scripts:
success = True
# TODO improve handling of lines and support keepalive mode
for line in open(script, 'r'):
line = line.rstrip()
if line and not line.startswith('#'):
success = shell.run(line)
if not success and args.fail_fast: break
else:
# Interactive shell mode.
success = shell.interact()
except Exception as e:
<|code_end|>
. Use current file imports:
(import sys
import socket
import optparse
import msgpackrpc
import jubatus
from .compat import *
from ._stdio import print, get_stdio
from ._cli.base import BaseCLI, CLIInvalidatedException, CLIUnknownCommandException)
and context including class names, function names, or small code snippets from other files:
# Path: jubakit/_stdio.py
# class _StandardIO(object):
# def get(cls):
# def set(cls, stdin=None, stdout=None, stderr=None):
# def print(cls, *args, **kwargs):
# def printe(cls, *args, **kwargs):
# def devnull(cls):
#
# Path: jubakit/_cli/base.py
# class BaseCLI(ExtendedCmd):
# """
# A base class for CLI interface of all services.
# """
#
# def __init__(self, shell, *args, **kwargs):
# super(BaseCLI, self).__init__(*args, **kwargs)
#
# self._sh = shell
#
# # Set help sentences.
# self.doc_header = "Commands:"
# self.undoc_header = "Commands (no help available):"
# self.misc_header = "Documents:"
# self.nohelp = "No help available for %s."
#
# # Register aliases.
# self.register_alias('EOF', 'do_exit')
# self.register_alias('ls', 'do_help')
# self.register_alias('shell', 'shell_command')
#
# #################################################################
# # Override methods to tweak CLI behavior
# #################################################################
#
# def emptyline(self):
# """
# By default, empty line causes the previous command to run again.
# This overrides the default handler for emptyline so it behaves like an usual shell.
# """
# pass
#
# def postcmd(self, stop, line):
# """
# After a single command is executed, we discard the connection if not in
# keepalive mode.
# """
# if not self._sh._keepalive:
# self._sh.disconnect()
# self._sh.connect()
# return stop
#
# def default(self, line):
# """
# Raise exception for unhandled commands.
# """
# raise CLIUnknownCommandException(line)
#
# #################################################################
# # Common interfaces for Jubatus CLI
# #################################################################
#
# @classmethod
# def _name(cls):
# """
# Returns the name of the service (e.g., `classifier`).
# You must override this in subclasses.
# """
# raise NotImplementedError
#
# def _verbose(self, msg):
# """
# Outputs logs only when in verbose mode.
# """
# if self._sh._verbose:
# print(msg)
#
# @property
# def client(self):
# """
# Returns the client instance.
# """
# return self._sh.get_client()
#
# #################################################################
# # Built-in shell commands
# #################################################################
#
# @Arguments()
# def do_exit(self):
# """Syntax: exit
# Exits the shell. You can also use EOF (Ctrl-D).
# """
# print()
# return True
#
# def help_help(self):
# print(
# """Syntax: help [command]
# Displays the list of commands available.
# If ``command`` is specified, displays the help for the command."""
# )
#
# def shell_command(self, param):
# """
# Runs the command in the *real* shell.
# """
# subprocess.call(param, shell=True)
#
# class CLIInvalidatedException(Exception):
# """
# Notify Shell to regenerate CLI instance.
# """
# pass
#
# class CLIUnknownCommandException(Exception):
# """
# Notify Shell that unknown command is specified.
# """
# pass
. Output only the next line. | if args.debug: raise |
Next line prediction: <|code_start|> def start(cls, args):
USAGE = '''
jubash [--host HOST] [--port PORT] [--cluster CLUSTER]
[--service SERVICE] [--command COMMAND]
[--keepalive] [--fail-fast] [--prompt PROMPT]
[--verbose] [--debug] [--help] [script ...]'''
EPILOG = ' script ... execute shell script instead of interactive shell'
services = sorted(JubaShell.get_cli_classes().keys())
parser = _JubashOptionParser(add_help_option=False, usage=USAGE, epilog=EPILOG)
parser.add_option('-H', '--host', type='string', default='127.0.0.1',
help='host name or IP address of the server / proxy (default: %default)')
parser.add_option('-P', '--port', type='int', default=9199,
help='port number of the server / proxy (default: %default)')
parser.add_option('-C', '--cluster', type='string', default='',
help='cluster name; only required when connecting to proxy')
parser.add_option('-s', '--service', type='string', default=None,
help='type of the server; see below for services available (default: auto-detect)')
parser.add_option('-e', '--engine', type='string', default=None,
help='(deprecated) equivalent to --service')
parser.add_option('-c', '--command', type='string', default=None,
help='run one-shot command instead of interactive shell')
parser.add_option('-t', '--timeout', type='int', default=10,
help='client-side timeout in seconds (default: %default)')
parser.add_option('-k', '--keepalive', default=False, action='store_true',
help='use keep-alive connection; recommended when server-side timeout is disabled')
parser.add_option('-F', '--fail-fast', default=False, action='store_true',
help='exit on error when running script')
<|code_end|>
. Use current file imports:
(import sys
import socket
import optparse
import msgpackrpc
import jubatus
from .compat import *
from ._stdio import print, get_stdio
from ._cli.base import BaseCLI, CLIInvalidatedException, CLIUnknownCommandException)
and context including class names, function names, or small code snippets from other files:
# Path: jubakit/_stdio.py
# class _StandardIO(object):
# def get(cls):
# def set(cls, stdin=None, stdout=None, stderr=None):
# def print(cls, *args, **kwargs):
# def printe(cls, *args, **kwargs):
# def devnull(cls):
#
# Path: jubakit/_cli/base.py
# class BaseCLI(ExtendedCmd):
# """
# A base class for CLI interface of all services.
# """
#
# def __init__(self, shell, *args, **kwargs):
# super(BaseCLI, self).__init__(*args, **kwargs)
#
# self._sh = shell
#
# # Set help sentences.
# self.doc_header = "Commands:"
# self.undoc_header = "Commands (no help available):"
# self.misc_header = "Documents:"
# self.nohelp = "No help available for %s."
#
# # Register aliases.
# self.register_alias('EOF', 'do_exit')
# self.register_alias('ls', 'do_help')
# self.register_alias('shell', 'shell_command')
#
# #################################################################
# # Override methods to tweak CLI behavior
# #################################################################
#
# def emptyline(self):
# """
# By default, empty line causes the previous command to run again.
# This overrides the default handler for emptyline so it behaves like an usual shell.
# """
# pass
#
# def postcmd(self, stop, line):
# """
# After a single command is executed, we discard the connection if not in
# keepalive mode.
# """
# if not self._sh._keepalive:
# self._sh.disconnect()
# self._sh.connect()
# return stop
#
# def default(self, line):
# """
# Raise exception for unhandled commands.
# """
# raise CLIUnknownCommandException(line)
#
# #################################################################
# # Common interfaces for Jubatus CLI
# #################################################################
#
# @classmethod
# def _name(cls):
# """
# Returns the name of the service (e.g., `classifier`).
# You must override this in subclasses.
# """
# raise NotImplementedError
#
# def _verbose(self, msg):
# """
# Outputs logs only when in verbose mode.
# """
# if self._sh._verbose:
# print(msg)
#
# @property
# def client(self):
# """
# Returns the client instance.
# """
# return self._sh.get_client()
#
# #################################################################
# # Built-in shell commands
# #################################################################
#
# @Arguments()
# def do_exit(self):
# """Syntax: exit
# Exits the shell. You can also use EOF (Ctrl-D).
# """
# print()
# return True
#
# def help_help(self):
# print(
# """Syntax: help [command]
# Displays the list of commands available.
# If ``command`` is specified, displays the help for the command."""
# )
#
# def shell_command(self, param):
# """
# Runs the command in the *real* shell.
# """
# subprocess.call(param, shell=True)
#
# class CLIInvalidatedException(Exception):
# """
# Notify Shell to regenerate CLI instance.
# """
# pass
#
# class CLIUnknownCommandException(Exception):
# """
# Notify Shell that unknown command is specified.
# """
# pass
. Output only the next line. | parser.add_option('-p', '--prompt', type='string', default=JubaShell._PS, |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class RegressionCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
, predict the next line using imports from the current file:
from jubatus.regression.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and context including class names, function names, and sometimes code from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | return 'regression' |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class RegressionCLI(GenericCLI):
@classmethod
<|code_end|>
using the current file's imports:
from jubatus.regression.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and any relevant context from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | def _name(cls): |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class RegressionCLI(GenericCLI):
@classmethod
def _name(cls):
return 'regression'
@Arguments(float, TDatum)
<|code_end|>
. Write the next line using the current file imports:
from jubatus.regression.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and context from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
, which may include functions, classes, or code. Output only the next line. | def do_train(self, score, d): |
Continue the code snippet: <|code_start|> connection.commit()
connection.close()
def test_simple(self):
loader = PostgreSQLoader(self.auth, table='test')
for row in loader:
self.assertEqual(set(['id','num', 'data']), set(row.keys()))
if row['id'] == 1:
self.assertEqual(100, row['num'])
self.assertEqual('abcdef', row['data'])
elif row['id'] == 2:
self.assertEqual(200, row['num'])
self.assertEqual('ghijkl', row['data'])
elif row['id'] == 3:
self.assertEqual(300, row['num'])
self.assertEqual('mnopqr', row['data'])
else:
self.fail('unexpected row: {0}'.format(row))
def test_specify_dbname(self):
self.auth = PostgreSQLAuthHandler(dbname='test', user='postgres', password='postgres', host='localhost', port='5432')
loader = PostgreSQLoader(self.auth, table='test2')
for row in loader:
self.assertEqual(set(['id','num', 'data']), set(row.keys()))
if row['id'] == 1:
self.assertEqual(1000, row['num'])
self.assertEqual('st', row['data'])
elif row['id'] == 2:
self.assertEqual(2000, row['num'])
self.assertEqual('uv', row['data'])
<|code_end|>
. Use current file imports:
from unittest import TestCase
from jubakit.loader.postgresql import PostgreSQLoader
from jubakit.loader.postgresql import PostgreSQLAuthHandler
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import psycopg2
and context (classes, functions, or code) from other files:
# Path: jubakit/loader/postgresql.py
# class PostgreSQLoader(BaseLoader):
# """
# Loader to process columns of PostgreSQL.
#
# This loader that load data from PostgreSQL's table as below.
# We access the "test" table of the "test" database in the below example.
#
# Example:
# from jubakit.loader.postgresql import PostgreSQLoader
# from jubakit.loader.postgresql import PostgreSQLAuthHandler
#
# auth = PostgreSQLAuthHandler(dbname='test', user='postgres', password='postgres', host='localhost', port='5432')
#
# loader = PostgreSQLoader(auth, table='test')
# for row in loader:
# print(row)
#
# # {'id': 1, 'num': 100, 'data': 'abcdef'}
# # {'id': 2, 'num': 200, 'data': 'ghijkl'}
# # {'id': 3, 'num': 300, 'data': 'mnopqr'}
# """
#
# def __init__(self, auth, table, **kwargs):
# self.auth = auth
# self.table = table
# self.kwargs = kwargs
#
# def rows(self):
# with connect(self.auth.get()) as connection:
# with connection.cursor(cursor_factory=DictCursor) as cursor:
# cursor.execute(
# sql.SQL("SELECT * FROM {}")
# .format(sql.Identifier(self.table)))
# column_names = [column.name for column in cursor.description]
#
# for row in cursor:
# yield dict(zip(column_names, row))
#
# Path: jubakit/loader/postgresql.py
# class PostgreSQLAuthHandler(object):
# """
# Handles authentication required to access PostgreSQL.
# """
#
# def __init__(self, **kwargs):
# """
# Authentication information must be specified as follows:
#
# >>> PostgreSQLAuth(
# ... user='XXXXXXXXXXXXXXXXXXXX',
# ... password='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
# ... host='XXXXXXXX-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
# ... port='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
# ... )
#
# Any other connection parameter supported by this loader can be passed as a keyword.
# The complete list of the supported parameters are contained the PostgreSQL documentation.
# (https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS)
# """
# auth = ''
# auth_informations = (
# 'host', 'hostaddr',
# 'port',
# 'dbname',
# 'user',
# 'password', 'passfile',
# 'connect_timeout',
# 'client_encoding',
# 'options',
# 'application_name',
# 'fallback_application_name',
# 'keepalives', 'keepalives_idle', 'keepalives_interval', 'keepalives_count',
# 'tty',
# 'sslmode', 'requiressl', 'sslcompression', 'sslcert', 'sslkey', 'sslrootcert', 'sslcrl',
# 'requirepeer',
# 'krbsrvname',
# 'gsslib',
# 'service',
# 'target_session_attrs')
# for auth_key in auth_informations:
# if auth_key in kwargs:
# auth = auth + '%s=%s ' % (auth_key, kwargs[auth_key])
# self.auth = auth
#
# def get(self):
# return self.auth
. Output only the next line. | elif row['id'] == 3: |
Using the snippet: <|code_start|>
from __future__ import absolute_import, division, print_function, unicode_literals
class PostgreSQLoaderTest(TestCase):
auth = PostgreSQLAuthHandler(user='postgres', password='postgres', host='localhost', port='5432')
def setUp(self):
print("setUp")
connection = psycopg2.connect("host=localhost port=5432 user=postgres password=postgres")
connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cursor = connection.cursor()
cursor.execute("DROP DATABASE IF EXISTS test;")
cursor.execute("CREATE DATABASE test;")
cursor.execute("DROP TABLE IF EXISTS test;")
cursor.execute("CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);")
cursor.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (100, "abcdef"))
cursor.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (200, "ghijkl"))
cursor.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (300, "mnopqr"))
connection.commit()
connection.close()
connection = psycopg2.connect("dbname=test host=localhost port=5432 user=postgres password=postgres")
cursor = connection.cursor()
cursor.execute("DROP TABLE IF EXISTS test2;")
<|code_end|>
, determine the next line of code. You have imports:
from unittest import TestCase
from jubakit.loader.postgresql import PostgreSQLoader
from jubakit.loader.postgresql import PostgreSQLAuthHandler
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import psycopg2
and context (class names, function names, or code) available:
# Path: jubakit/loader/postgresql.py
# class PostgreSQLoader(BaseLoader):
# """
# Loader to process columns of PostgreSQL.
#
# This loader that load data from PostgreSQL's table as below.
# We access the "test" table of the "test" database in the below example.
#
# Example:
# from jubakit.loader.postgresql import PostgreSQLoader
# from jubakit.loader.postgresql import PostgreSQLAuthHandler
#
# auth = PostgreSQLAuthHandler(dbname='test', user='postgres', password='postgres', host='localhost', port='5432')
#
# loader = PostgreSQLoader(auth, table='test')
# for row in loader:
# print(row)
#
# # {'id': 1, 'num': 100, 'data': 'abcdef'}
# # {'id': 2, 'num': 200, 'data': 'ghijkl'}
# # {'id': 3, 'num': 300, 'data': 'mnopqr'}
# """
#
# def __init__(self, auth, table, **kwargs):
# self.auth = auth
# self.table = table
# self.kwargs = kwargs
#
# def rows(self):
# with connect(self.auth.get()) as connection:
# with connection.cursor(cursor_factory=DictCursor) as cursor:
# cursor.execute(
# sql.SQL("SELECT * FROM {}")
# .format(sql.Identifier(self.table)))
# column_names = [column.name for column in cursor.description]
#
# for row in cursor:
# yield dict(zip(column_names, row))
#
# Path: jubakit/loader/postgresql.py
# class PostgreSQLAuthHandler(object):
# """
# Handles authentication required to access PostgreSQL.
# """
#
# def __init__(self, **kwargs):
# """
# Authentication information must be specified as follows:
#
# >>> PostgreSQLAuth(
# ... user='XXXXXXXXXXXXXXXXXXXX',
# ... password='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
# ... host='XXXXXXXX-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
# ... port='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
# ... )
#
# Any other connection parameter supported by this loader can be passed as a keyword.
# The complete list of the supported parameters are contained the PostgreSQL documentation.
# (https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS)
# """
# auth = ''
# auth_informations = (
# 'host', 'hostaddr',
# 'port',
# 'dbname',
# 'user',
# 'password', 'passfile',
# 'connect_timeout',
# 'client_encoding',
# 'options',
# 'application_name',
# 'fallback_application_name',
# 'keepalives', 'keepalives_idle', 'keepalives_interval', 'keepalives_count',
# 'tty',
# 'sslmode', 'requiressl', 'sslcompression', 'sslcert', 'sslkey', 'sslrootcert', 'sslcrl',
# 'requirepeer',
# 'krbsrvname',
# 'gsslib',
# 'service',
# 'target_session_attrs')
# for auth_key in auth_informations:
# if auth_key in kwargs:
# auth = auth + '%s=%s ' % (auth_key, kwargs[auth_key])
# self.auth = auth
#
# def get(self):
# return self.auth
. Output only the next line. | cursor.execute("CREATE TABLE test2 (id serial PRIMARY KEY, num integer, data varchar);") |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class WeightCLI(GenericCLI):
@classmethod
<|code_end|>
, determine the next line of code. You have imports:
from jubatus.weight.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and context (class names, function names, or code) available:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | def _name(cls): |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class WeightCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
. Use current file imports:
(from jubatus.weight.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print)
and context including class names, function names, or small code snippets from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | return 'weight' |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class BanditCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
. Write the next line using the current file imports:
from jubatus.bandit.types import *
from .generic import GenericCLI
from ..args import Arguments
from ..util import *
from ..._stdio import print
and context from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
, which may include functions, classes, or code. Output only the next line. | return 'bandit' |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class BanditCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from jubatus.bandit.types import *
from .generic import GenericCLI
from ..args import Arguments
from ..util import *
from ..._stdio import print
and context:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
which might include code, classes, or functions. Output only the next line. | return 'bandit' |
Continue the code snippet: <|code_start|># This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
<|code_end|>
. Use current file imports:
from jubakit._version import VERSION
and context (classes, functions, or code) from other files:
# Path: jubakit/_version.py
# VERSION = (0, 6, 2)
. Output only the next line. | 'sphinx.ext.viewcode', |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class BurstCLI(GenericCLI):
@classmethod
def _name(cls):
return 'burst'
def _clear_cache(self):
<|code_end|>
, generate the next line using the imports in this file:
import time
from jubatus.burst.types import *
from .generic import GenericCLI
from ..args import Arguments, Optional
from ..util import *
from ..._stdio import print
and context (functions, classes, or occasionally code) from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class Optional(Mandatory):
# def convert(self, args):
# if len(args) == 0:
# return (0, None)
# return super(Optional, self).convert(args)
#
# def min_max(self):
# return (0, super(Optional, self).min_max()[1])
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | self._cached_keywords = set() |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class BurstCLI(GenericCLI):
@classmethod
def _name(cls):
return 'burst'
<|code_end|>
. Use current file imports:
(import time
from jubatus.burst.types import *
from .generic import GenericCLI
from ..args import Arguments, Optional
from ..util import *
from ..._stdio import print)
and context including class names, function names, or small code snippets from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class Optional(Mandatory):
# def convert(self, args):
# if len(args) == 0:
# return (0, None)
# return super(Optional, self).convert(args)
#
# def min_max(self):
# return (0, super(Optional, self).min_max()[1])
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | def _clear_cache(self): |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class BurstCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
, generate the next line using the imports in this file:
import time
from jubatus.burst.types import *
from .generic import GenericCLI
from ..args import Arguments, Optional
from ..util import *
from ..._stdio import print
and context (functions, classes, or occasionally code) from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class Optional(Mandatory):
# def convert(self, args):
# if len(args) == 0:
# return (0, None)
# return super(Optional, self).convert(args)
#
# def min_max(self):
# return (0, super(Optional, self).min_max()[1])
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | return 'burst' |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class BurstCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
, predict the next line using imports from the current file:
import time
from jubatus.burst.types import *
from .generic import GenericCLI
from ..args import Arguments, Optional
from ..util import *
from ..._stdio import print
and context including class names, function names, and sometimes code from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class Optional(Mandatory):
# def convert(self, args):
# if len(args) == 0:
# return (0, None)
# return super(Optional, self).convert(args)
#
# def min_max(self):
# return (0, super(Optional, self).min_max()[1])
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | return 'burst' |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class ClusteringCLI(GenericCLI):
@classmethod
<|code_end|>
, generate the next line using the imports in this file:
from jubatus.clustering.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and context (functions, classes, or occasionally code) from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | def _name(cls): |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class ClusteringCLI(GenericCLI):
@classmethod
def _name(cls):
return 'clustering'
@Arguments(str, TDatum)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from jubatus.clustering.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and context:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
which might include code, classes, or functions. Output only the next line. | def do_push(self, point_id, d): |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class AnomalyCLI(GenericCLI):
@classmethod
<|code_end|>
. Use current file imports:
from jubatus.anomaly.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and context (classes, functions, or code) from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | def _name(cls): |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class AnomalyCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
. Write the next line using the current file imports:
from jubatus.anomaly.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and context from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
, which may include functions, classes, or code. Output only the next line. | return 'anomaly' |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class ClassifierCLI(GenericCLI):
@classmethod
def _name(cls):
<|code_end|>
. Write the next line using the current file imports:
from jubatus.classifier.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and context from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
, which may include functions, classes, or code. Output only the next line. | return 'classifier' |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class ClassifierCLI(GenericCLI):
@classmethod
<|code_end|>
. Write the next line using the current file imports:
from jubatus.classifier.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and context from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
, which may include functions, classes, or code. Output only the next line. | def _name(cls): |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class ClassifierCLI(GenericCLI):
@classmethod
def _name(cls):
return 'classifier'
<|code_end|>
. Use current file imports:
from jubatus.classifier.types import *
from .generic import GenericCLI
from ..args import Arguments, TDatum
from ..util import *
from ..._stdio import print
and context (classes, functions, or code) from other files:
# Path: jubakit/_cli/service/generic.py
# class GenericCLI(BaseRpcCLI):
# """
# Base CLI implementation for all other services.
# """
#
# def __init__(self, *args, **kwargs):
# super(GenericCLI, self).__init__(*args, **kwargs)
# self._clear_cache()
#
# @classmethod
# def _name(cls):
# return 'generic'
#
# def _clear_cache(self):
# """
# Subclasses using cache must override this method; initialize or invalidate
# cache when this method is called.
# """
# pass
#
# def _print_status(self, status):
# """
# Pretty-print the status structure.
# """
# for (host_port, status) in status.items():
# print("Server {0}:{1}".format(*(host_port.split('_'))))
# for key in sorted(status.keys()):
# print(" {0}: {1}".format(key, status[key]))
#
# @Arguments()
# def do_get_config(self):
# """Syntax: get_config
# Display algorithm and converter configuration set in server.
# """
# config = self.client.get_config()
# print(json.dumps(json.loads(config), sort_keys=True, indent=4))
#
# @Arguments(str)
# def do_save(self, model_id):
# """Syntax: save model_id
# Save the model.
# """
# result = self.client.save(model_id)
# if result:
# for (server_id, path) in result.items():
# print("{0}:\t{1}".format(server_id, path))
# else:
# print("Failed")
#
# @Arguments()
# def do_clear(self):
# """Syntax: clear
# Clear the model.
# """
# result = self.client.clear()
# if not result:
# print("Failed")
# self._clear_cache()
#
# @Arguments(str)
# def do_load(self, model_id):
# """Syntax: load model_id
# Load the given model.
# """
# result = self.client.load(model_id)
# if not result:
# print("Failed")
#
# @Arguments()
# def do_get_status(self):
# """Syntax: get_status
# Displays status of servers.
# """
# status = self.client.get_status()
# self._print_status(status)
#
# @Arguments()
# def do_get_proxy_status(self):
# """Syntax: get_proxy_status
# Displays status of the proxy.
# Available only when connected to proxies.
# """
# status = self.client.get_proxy_status()
# self._print_status(status)
#
# @Arguments()
# def do_do_mix(self):
# """Syntax: do_mix
# Trigger MIX.
# Available only when connected to servers.
# """
# result = self.client.do_mix()
# if not result:
# print("Failed")
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class TDatum(Type):
# def convert(self, args):
# if len(args) % 2 != 0:
# raise ValueError('value for the last datum key ({0}) is missing'.format(args[len(args) - 1]))
#
# d = Datum()
# for i in range(int(len(args) / 2)):
# feat_key = args[i*2]
# feat_val = args[i*2+1]
# try:
# d.add_number(feat_key, float(feat_val))
# except ValueError:
# d.add_string(feat_key, feat_val)
# return (len(args), d)
#
# def min_max(self):
# return (0, None)
#
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
. Output only the next line. | def _clear_cache(self): |
Given snippet: <|code_start|> self.assertTrue(classifier.clf_ is not None)
self.assertEqual(classifier.fitted_, True)
classifier.stop()
@requireEmbedded
def test_predict(self):
X = np.array([[1,1], [0,0]])
y = np.array([1,2])
classifier = NearestNeighborsClassifier()
self.assertRaises(RuntimeError, classifier.predict, X)
classifier.fit(X, y)
y_pred = classifier.predict(X)
self.assertEqual(y_pred.shape[0], X.shape[0])
@requireEmbedded
def test_decision_function(self):
X = np.array([[1,1], [0,0]])
y = np.array([1,2])
c = np.unique(y)
classifier = NearestNeighborsClassifier()
self.assertRaises(RuntimeError, classifier.predict, X)
classifier.fit(X, y)
y_pred = classifier.decision_function(X)
self.assertEqual(y_pred.shape, (X.shape[0], c.shape[0]))
@requireEmbedded
def test_class_params(self):
classifier = NearestNeighborsClassifier()
params = ['method', 'nearest_neighbor_num', 'local_sensitivity',
'hash_num', 'softmax', 'n_iter', 'shuffle', 'embedded', 'seed']
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from unittest import TestCase
from jubakit.wrapper.classifier import LinearClassifier, NearestNeighborsClassifier
from . import requireEmbedded
import numpy as np
and context:
# Path: jubakit/wrapper/classifier.py
# class LinearClassifier(BaseJubatusClassifier):
#
# def __init__(self, method='AROW', regularization_weight=1.0,
# softmax=False, n_iter=1, shuffle=False, embedded=True, seed=None):
# super(LinearClassifier, self).__init__(n_iter, shuffle, softmax, embedded, seed)
# self.method = method
# self.regularization_weight = regularization_weight
#
# def _launch_classifier(self):
# if self.method in ('perceptron', 'PA'):
# self.config_ = Config(method=self.method)
# elif self.method in ('PA1', 'PA2', 'CW', 'AROW', 'NHERD'):
# self.config_ = Config(method=self.method,
# parameter={'regularization_weight': self.regularization_weight})
# else:
# raise NotImplementedError('method {} is not implemented yet.'.format(self.method))
# self.classifier_ = Classifier.run(config=self.config_, embedded=self.embedded)
#
# def get_params(self, deep=True):
# return {
# 'method': self.method,
# 'regularization_weight': self.regularization_weight,
# 'n_iter': self.n_iter,
# 'shuffle': self.shuffle,
# 'softmax': self.softmax,
# 'embedded': self.embedded,
# 'seed': self.seed
# }
#
# class NearestNeighborsClassifier(BaseJubatusClassifier):
#
# def __init__(self, method='euclid_lsh', nearest_neighbor_num=5, local_sensitivity=1.0,
# hash_num=128, n_iter=1, shuffle=False, softmax=False, embedded=True, seed=None):
# super(NearestNeighborsClassifier, self).__init__(n_iter, shuffle, softmax, embedded, seed)
# self.method = method
# self.nearest_neighbor_num = nearest_neighbor_num
# self.local_sensitivity = local_sensitivity
# self.hash_num = hash_num
#
# def _launch_classifier(self):
# if self.method in ('euclid_lsh', 'lsh', 'minhash'):
# self.config_ = Config(method='NN', parameter={'method': self.method,
# 'nearest_neighbor_num': self.nearest_neighbor_num,
# 'local_sensitivity': self.local_sensitivity,
# 'parameter': {'hash_num': self.hash_num}})
# elif self.method in ('euclidean', 'cosine'):
# self.config_ = Config(method=self.method,
# parameter={'nearest_neighbor_num': self.nearest_neighbor_num,
# 'local_sensitivity': self.local_sensitivity})
# else:
# raise NotImplementedError('method {} is not implemented yet.'.format(self.method))
# self.classifier_ = Classifier.run(config=self.config_, embedded=self.embedded)
#
# def get_params(self, deep=True):
# return {
# 'method': self.method,
# 'nearest_neighbor_num': self.nearest_neighbor_num,
# 'local_sensitivity': self.local_sensitivity,
# 'hash_num': self.hash_num,
# 'n_iter': self.n_iter,
# 'shuffle': self.shuffle,
# 'softmax': self.softmax,
# 'embedded': self.embedded,
# 'seed': self.seed
# }
which might include code, classes, or functions. Output only the next line. | for param in params: |
Predict the next line for this snippet: <|code_start|> }
classifier = LinearClassifier(**params)
self.assertDictEqual(params, classifier.get_params())
classifier.stop()
@requireEmbedded
def test_set_params(self):
params = {
'method': 'CW',
'regularization_weight': 5.0,
'softmax': True,
'n_iter': 5,
'shuffle': True,
'embedded': True,
'seed': 42
}
classifier = LinearClassifier()
classifier.set_params(**params)
self.assertEqual(classifier.method, params['method'])
self.assertEqual(classifier.regularization_weight, params['regularization_weight'])
self.assertEqual(classifier.softmax, params['softmax'])
self.assertEqual(classifier.n_iter, params['n_iter'])
self.assertEqual(classifier.shuffle, params['shuffle'])
self.assertEqual(classifier.embedded, params['embedded'])
self.assertEqual(classifier.seed, params['seed'])
@requireEmbedded
def test_save(self):
name = 'test'
classifier = LinearClassifier()
<|code_end|>
with the help of current file imports:
from unittest import TestCase
from jubakit.wrapper.classifier import LinearClassifier, NearestNeighborsClassifier
from . import requireEmbedded
import numpy as np
and context from other files:
# Path: jubakit/wrapper/classifier.py
# class LinearClassifier(BaseJubatusClassifier):
#
# def __init__(self, method='AROW', regularization_weight=1.0,
# softmax=False, n_iter=1, shuffle=False, embedded=True, seed=None):
# super(LinearClassifier, self).__init__(n_iter, shuffle, softmax, embedded, seed)
# self.method = method
# self.regularization_weight = regularization_weight
#
# def _launch_classifier(self):
# if self.method in ('perceptron', 'PA'):
# self.config_ = Config(method=self.method)
# elif self.method in ('PA1', 'PA2', 'CW', 'AROW', 'NHERD'):
# self.config_ = Config(method=self.method,
# parameter={'regularization_weight': self.regularization_weight})
# else:
# raise NotImplementedError('method {} is not implemented yet.'.format(self.method))
# self.classifier_ = Classifier.run(config=self.config_, embedded=self.embedded)
#
# def get_params(self, deep=True):
# return {
# 'method': self.method,
# 'regularization_weight': self.regularization_weight,
# 'n_iter': self.n_iter,
# 'shuffle': self.shuffle,
# 'softmax': self.softmax,
# 'embedded': self.embedded,
# 'seed': self.seed
# }
#
# class NearestNeighborsClassifier(BaseJubatusClassifier):
#
# def __init__(self, method='euclid_lsh', nearest_neighbor_num=5, local_sensitivity=1.0,
# hash_num=128, n_iter=1, shuffle=False, softmax=False, embedded=True, seed=None):
# super(NearestNeighborsClassifier, self).__init__(n_iter, shuffle, softmax, embedded, seed)
# self.method = method
# self.nearest_neighbor_num = nearest_neighbor_num
# self.local_sensitivity = local_sensitivity
# self.hash_num = hash_num
#
# def _launch_classifier(self):
# if self.method in ('euclid_lsh', 'lsh', 'minhash'):
# self.config_ = Config(method='NN', parameter={'method': self.method,
# 'nearest_neighbor_num': self.nearest_neighbor_num,
# 'local_sensitivity': self.local_sensitivity,
# 'parameter': {'hash_num': self.hash_num}})
# elif self.method in ('euclidean', 'cosine'):
# self.config_ = Config(method=self.method,
# parameter={'nearest_neighbor_num': self.nearest_neighbor_num,
# 'local_sensitivity': self.local_sensitivity})
# else:
# raise NotImplementedError('method {} is not implemented yet.'.format(self.method))
# self.classifier_ = Classifier.run(config=self.config_, embedded=self.embedded)
#
# def get_params(self, deep=True):
# return {
# 'method': self.method,
# 'nearest_neighbor_num': self.nearest_neighbor_num,
# 'local_sensitivity': self.local_sensitivity,
# 'hash_num': self.hash_num,
# 'n_iter': self.n_iter,
# 'shuffle': self.shuffle,
# 'softmax': self.softmax,
# 'embedded': self.embedded,
# 'seed': self.seed
# }
, which may contain function names, class names, or code. Output only the next line. | classifier.save(name) |
Next line prediction: <|code_start|> regression = NearestNeighborsRegression(embedded=True)
@requireEmbedded
def test_launch_regression(self):
methods = ['euclid_lsh', 'lsh', 'minhash', 'euclidean', 'cosine']
def launch_regression(method):
regression = NearestNeighborsRegression(method=method)
regression._launch_regression()
for method in methods:
self.assertEqual(launch_regression(method), None)
self.assertRaises(NotImplementedError, launch_regression, 'inverted_index')
@requireEmbedded
def test_partial_fit(self):
X = np.array([[1,1], [0,0]])
y = np.array([1,2])
regression = NearestNeighborsRegression()
self.assertTrue(regression.clf_ is None)
self.assertEqual(regression.fitted_, False)
regression.partial_fit(X, y)
self.assertTrue(regression.clf_ is not None)
self.assertEqual(regression.fitted_, True)
regression.stop()
@requireEmbedded
def test_predict(self):
X = np.array([[1,1], [0,0]])
y = np.array([1,2])
regression = NearestNeighborsRegression()
self.assertRaises(RuntimeError, regression.predict, X)
<|code_end|>
. Use current file imports:
(from unittest import TestCase
from jubakit.wrapper.regression import LinearRegression, NearestNeighborsRegression
from . import requireEmbedded
import numpy as np)
and context including class names, function names, or small code snippets from other files:
# Path: jubakit/wrapper/regression.py
# class LinearRegression(BaseJubatusRegression):
#
# def __init__(self, method='AROW', regularization_weight=1.0, sensitivity=1.0, learning_rate=1.0,
# n_iter=1, shuffle=False, embedded=True, seed=None):
# super(LinearRegression, self).__init__(n_iter, shuffle, embedded, seed)
# self.method = method
# self.regularization_weight = regularization_weight
# self.sensitivity = sensitivity
# self.learning_rate = learning_rate
#
# def _launch_regression(self):
# if self.method in ('perceptron'):
# self.config_ = Config(method=self.method,
# parameter={'learning_rate': self.learning_rate})
# elif self.method in ('PA'):
# self.config_ = Config(method=self.method,
# parameter={'sensitivity': self.sensitivity})
# elif self.method in ('PA1', 'PA2', 'CW', 'AROW', 'NHERD'):
# self.config_ = Config(method=self.method,
# parameter={'regularization_weight': self.regularization_weight,
# 'sensitivity': self.sensitivity})
# else:
# raise NotImplementedError('method {} is not implemented yet.'.format(self.method))
# self.regression_ = Regression.run(config=self.config_, embedded=self.embedded)
#
# def get_params(self, deep=True):
# return {
# 'method': self.method,
# 'regularization_weight': self.regularization_weight,
# 'sensitivity': self.sensitivity,
# 'learning_rate': self.learning_rate,
# 'n_iter': self.n_iter,
# 'shuffle': self.shuffle,
# 'embedded': self.embedded,
# 'seed': self.seed
# }
#
# class NearestNeighborsRegression(BaseJubatusRegression):
#
# def __init__(self, method='euclid_lsh', nearest_neighbor_num=5,
# hash_num=128, n_iter=1, shuffle=False, embedded=True, seed=None):
# super(NearestNeighborsRegression, self).__init__(n_iter, shuffle, embedded, seed)
# self.method = method
# self.nearest_neighbor_num = nearest_neighbor_num
# self.hash_num = hash_num
#
# def _launch_regression(self):
# if self.method in ('euclid_lsh', 'lsh', 'minhash'):
# self.config_ = Config(method='NN', parameter={'method': self.method,
# 'nearest_neighbor_num': self.nearest_neighbor_num,
# 'parameter': {'hash_num': self.hash_num}})
# elif self.method in ('euclidean', 'cosine'):
# self.config_ = Config(method=self.method,
# parameter={'nearest_neighbor_num': self.nearest_neighbor_num})
# else:
# raise NotImplementedError('method {} is not implemented yet.'.format(self.method))
# self.regression_ = Regression.run(config=self.config_, embedded=self.embedded)
#
# def get_params(self, deep=True):
# return {
# 'method': self.method,
# 'nearest_neighbor_num': self.nearest_neighbor_num,
# 'hash_num': self.hash_num,
# 'n_iter': self.n_iter,
# 'shuffle': self.shuffle,
# 'softmax': self.softmax,
# 'embedded': self.embedded,
# 'seed': self.seed
# }
. Output only the next line. | regression.fit(X, y) |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
try:
except ImportError:
pass
class LinearRegressionTest(TestCase):
def test_simple(self):
regression = LinearRegression()
regression.stop()
@requireEmbedded
def test_embedded(self):
regression = LinearRegression(embedded=True)
@requireEmbedded
def test_launch_regression(self):
methods = ['AROW', 'CW', 'NHERD', 'PA', 'PA1', 'PA2', 'perceptron']
def launch_regression(method):
regression = LinearRegression(method=method)
regression._launch_regression()
<|code_end|>
. Write the next line using the current file imports:
from unittest import TestCase
from jubakit.wrapper.regression import LinearRegression, NearestNeighborsRegression
from . import requireEmbedded
import numpy as np
and context from other files:
# Path: jubakit/wrapper/regression.py
# class LinearRegression(BaseJubatusRegression):
#
# def __init__(self, method='AROW', regularization_weight=1.0, sensitivity=1.0, learning_rate=1.0,
# n_iter=1, shuffle=False, embedded=True, seed=None):
# super(LinearRegression, self).__init__(n_iter, shuffle, embedded, seed)
# self.method = method
# self.regularization_weight = regularization_weight
# self.sensitivity = sensitivity
# self.learning_rate = learning_rate
#
# def _launch_regression(self):
# if self.method in ('perceptron'):
# self.config_ = Config(method=self.method,
# parameter={'learning_rate': self.learning_rate})
# elif self.method in ('PA'):
# self.config_ = Config(method=self.method,
# parameter={'sensitivity': self.sensitivity})
# elif self.method in ('PA1', 'PA2', 'CW', 'AROW', 'NHERD'):
# self.config_ = Config(method=self.method,
# parameter={'regularization_weight': self.regularization_weight,
# 'sensitivity': self.sensitivity})
# else:
# raise NotImplementedError('method {} is not implemented yet.'.format(self.method))
# self.regression_ = Regression.run(config=self.config_, embedded=self.embedded)
#
# def get_params(self, deep=True):
# return {
# 'method': self.method,
# 'regularization_weight': self.regularization_weight,
# 'sensitivity': self.sensitivity,
# 'learning_rate': self.learning_rate,
# 'n_iter': self.n_iter,
# 'shuffle': self.shuffle,
# 'embedded': self.embedded,
# 'seed': self.seed
# }
#
# class NearestNeighborsRegression(BaseJubatusRegression):
#
# def __init__(self, method='euclid_lsh', nearest_neighbor_num=5,
# hash_num=128, n_iter=1, shuffle=False, embedded=True, seed=None):
# super(NearestNeighborsRegression, self).__init__(n_iter, shuffle, embedded, seed)
# self.method = method
# self.nearest_neighbor_num = nearest_neighbor_num
# self.hash_num = hash_num
#
# def _launch_regression(self):
# if self.method in ('euclid_lsh', 'lsh', 'minhash'):
# self.config_ = Config(method='NN', parameter={'method': self.method,
# 'nearest_neighbor_num': self.nearest_neighbor_num,
# 'parameter': {'hash_num': self.hash_num}})
# elif self.method in ('euclidean', 'cosine'):
# self.config_ = Config(method=self.method,
# parameter={'nearest_neighbor_num': self.nearest_neighbor_num})
# else:
# raise NotImplementedError('method {} is not implemented yet.'.format(self.method))
# self.regression_ = Regression.run(config=self.config_, embedded=self.embedded)
#
# def get_params(self, deep=True):
# return {
# 'method': self.method,
# 'nearest_neighbor_num': self.nearest_neighbor_num,
# 'hash_num': self.hash_num,
# 'n_iter': self.n_iter,
# 'shuffle': self.shuffle,
# 'softmax': self.softmax,
# 'embedded': self.embedded,
# 'seed': self.seed
# }
, which may include functions, classes, or code. Output only the next line. | for method in methods: |
Continue the code snippet: <|code_start|> """
if self._sh._verbose:
print(msg)
@property
def client(self):
"""
Returns the client instance.
"""
return self._sh.get_client()
#################################################################
# Built-in shell commands
#################################################################
@Arguments()
def do_exit(self):
"""Syntax: exit
Exits the shell. You can also use EOF (Ctrl-D).
"""
print()
return True
def help_help(self):
print(
"""Syntax: help [command]
Displays the list of commands available.
If ``command`` is specified, displays the help for the command."""
)
<|code_end|>
. Use current file imports:
import subprocess
from .._stdio import print
from .cmd import ExtendedCmd
from .args import Arguments, Optional
and context (classes, functions, or code) from other files:
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
#
# Path: jubakit/_cli/cmd.py
# class ExtendedCmd(cmd.Cmd, object):
# """
# Extended framework of standard cmd.Cmd class with better completion
# support and alias features.
# """
#
# def parseline(self, line):
# line = line.strip()
# if not line:
# return None, None, line
# elif line[0] == '?':
# line = 'help ' + line[1:]
# elif line[0] == '!':
# if hasattr(self, 'do_shell'):
# line = 'shell ' + line[1:]
# else:
# return None, None, line
# i, n = 0, len(line)
# while i < n and line[i] in self.identchars: i = i+1
# cmd, arg = line[:i], line[i:].strip()
# return cmd, arg, line
#
# def complete(self, text, state):
# result = super(ExtendedCmd, self).complete(text, state)
# if len(self.completion_matches) == 1:
# return self.completion_matches[state] + ' ' if state == 0 else None
# return result
#
# def register_alias(self, alias, name):
# """
# Register alias function to the method.
# Aliases are not listed by `help` command.
# """
# self.__dict__['do_' + alias] = getattr(self, name)
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class Optional(Mandatory):
# def convert(self, args):
# if len(args) == 0:
# return (0, None)
# return super(Optional, self).convert(args)
#
# def min_max(self):
# return (0, super(Optional, self).min_max()[1])
. Output only the next line. | def shell_command(self, param): |
Predict the next line after this snippet: <|code_start|> """
Outputs logs only when in verbose mode.
"""
if self._sh._verbose:
print(msg)
@property
def client(self):
"""
Returns the client instance.
"""
return self._sh.get_client()
#################################################################
# Built-in shell commands
#################################################################
@Arguments()
def do_exit(self):
"""Syntax: exit
Exits the shell. You can also use EOF (Ctrl-D).
"""
print()
return True
def help_help(self):
print(
"""Syntax: help [command]
Displays the list of commands available.
If ``command`` is specified, displays the help for the command."""
<|code_end|>
using the current file's imports:
import subprocess
from .._stdio import print
from .cmd import ExtendedCmd
from .args import Arguments, Optional
and any relevant context from other files:
# Path: jubakit/_stdio.py
# @classmethod
# def print(cls, *args, **kwargs):
# builtins.print(file=cls.stdout, *args, **kwargs)
#
# Path: jubakit/_cli/cmd.py
# class ExtendedCmd(cmd.Cmd, object):
# """
# Extended framework of standard cmd.Cmd class with better completion
# support and alias features.
# """
#
# def parseline(self, line):
# line = line.strip()
# if not line:
# return None, None, line
# elif line[0] == '?':
# line = 'help ' + line[1:]
# elif line[0] == '!':
# if hasattr(self, 'do_shell'):
# line = 'shell ' + line[1:]
# else:
# return None, None, line
# i, n = 0, len(line)
# while i < n and line[i] in self.identchars: i = i+1
# cmd, arg = line[:i], line[i:].strip()
# return cmd, arg, line
#
# def complete(self, text, state):
# result = super(ExtendedCmd, self).complete(text, state)
# if len(self.completion_matches) == 1:
# return self.completion_matches[state] + ' ' if state == 0 else None
# return result
#
# def register_alias(self, alias, name):
# """
# Register alias function to the method.
# Aliases are not listed by `help` command.
# """
# self.__dict__['do_' + alias] = getattr(self, name)
#
# Path: jubakit/_cli/args.py
# def Arguments(*expected_types):
# def wrap_by_preprocessor(func):
# assert len(inspect.getargspec(func).args) == (len(expected_types) + 1)
#
# def preprocessor(self, line):
# params = shell_split(line)
# types = []
# (min_count, max_count) = (0, 0)
#
# for t in expected_types:
# if not isinstance(t, Requirement):
# t = Mandatory(t)
# types.append(t)
# (t_min, t_max) = t.min_max()
# min_count += t_min
# if t_max is None:
# max_count = None
# elif max_count is not None:
# max_count += t_max
#
# params_len = len(params)
# if params_len < min_count:
# raise ValueError('Too few arguments ({0} required at least, only got {1})'.format(min_count, params_len))
# if max_count is not None and max_count < params_len:
# raise ValueError('Too many arguments ({0} required at most, got {1})'.format(max_count, params_len))
#
# index = 0
# argv = []
# for t in types:
# try:
# (consumed, value) = t.convert(params[index:])
# except ValueError as e:
# raise ValueError('argument {0}: {1}'.format(index + 1, e))
# argv.append(value)
# index += consumed
# return func(self, *argv)
# preprocessor.__doc__ = func.__doc__
# return preprocessor
# return wrap_by_preprocessor
#
# class Optional(Mandatory):
# def convert(self, args):
# if len(args) == 0:
# return (0, None)
# return super(Optional, self).convert(args)
#
# def min_max(self):
# return (0, super(Optional, self).min_max()[1])
. Output only the next line. | ) |
Here is a snippet: <|code_start|>
class DefaultLoader(object):
def get_substitutes(self, obj, path, theme):
if isinstance(obj, BaseForm):
return {'path': path.strip("/"),
'theme': theme,
'form': normalize(type(obj).__name__)}
elif isinstance(obj, BaseFormSet):
return {'path': path.strip("/"),
'theme': theme,
'formset': normalize(type(obj).__name__)}
elif isinstance(obj, BoundField):
return {'path': path.strip("/"),
'theme': theme,
'form': normalize(type(obj.form).__name__),
'field': normalize(obj.name),
'widget': normalize(type(obj.field.widget).__name__)}
raise ValueError("Object {} of type {} is not supported by {}".format(obj, type(obj), type(self)))
def get_template(self, obj, template_type, path=None, theme=None, patterns=None):
path = path or Silhouette.PATH
theme = theme or Silhouette.THEME
patterns = patterns or Silhouette.PATTERNS
<|code_end|>
. Write the next line using the current file imports:
import re
from django.forms.forms import BaseForm, BoundField
from django.forms.formsets import BaseFormSet
from django.template.loader import select_template
from .apps import Silhouette
from .utils import normalize
and context from other files:
# Path: silhouette/apps.py
# class Silhouette(AppSettings, AppConfig):
# name = "silhouette"
# settings_module = "silhouette.settings"
# settings_imports = ('LOADER',)
#
# Path: silhouette/utils.py
# def normalize(name):
# return re.sub('(((?<=[a-z])[A-Z1-9])|([A-Z1-9](?![A-Z1-9]|$)))', '_\\1', name).strip('_').lower()
, which may include functions, classes, or code. Output only the next line. | substitutes = self.get_substitutes(obj, path, theme) |
Given snippet: <|code_start|>
class DefaultLoader(object):
def get_substitutes(self, obj, path, theme):
if isinstance(obj, BaseForm):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from django.forms.forms import BaseForm, BoundField
from django.forms.formsets import BaseFormSet
from django.template.loader import select_template
from .apps import Silhouette
from .utils import normalize
and context:
# Path: silhouette/apps.py
# class Silhouette(AppSettings, AppConfig):
# name = "silhouette"
# settings_module = "silhouette.settings"
# settings_imports = ('LOADER',)
#
# Path: silhouette/utils.py
# def normalize(name):
# return re.sub('(((?<=[a-z])[A-Z1-9])|([A-Z1-9](?![A-Z1-9]|$)))', '_\\1', name).strip('_').lower()
which might include code, classes, or functions. Output only the next line. | return {'path': path.strip("/"), |
Predict the next line for this snippet: <|code_start|>
@silhouette_tag("mock")
class MockTag(BaseSilhouette):
def get_extra_context(self):
ctx = {"obj": self.obj}
# merge if any "cascaded_*" attributes in context
attrs = self.merge_attrs(self.cascaded_attrs('cascaded'), self.kwargs)
# cascade any "cascaded_*" attributes to sub contexts
attrs = self.build_attrs(attrs, 'cascaded')
<|code_end|>
with the help of current file imports:
from silhouette.templatetags.silhouette_tags import BaseSilhouette, silhouette_tag
and context from other files:
# Path: silhouette/templatetags/silhouette_tags.py
# class BaseSilhouette(object):
# """
# Base class for Silhouette Renderers
#
# """
# PATH_CONTEXT_KEY = 'silhouette_path'
# THEME_CONTEXT_KEY = 'silhouette_theme'
#
# def __init__(self, context, obj, template=None, theme=None, path=None, **kwargs):
# self.context = context
# self.obj = obj
# self.path_override = path or context.get(self.PATH_CONTEXT_KEY, None)
# self.theme_override = theme or context.get(self.THEME_CONTEXT_KEY, None)
# self.template_override = template
# self.kwargs = kwargs
#
# def __enter__(self):
# scope = {self.PATH_CONTEXT_KEY: self.path_override, self.THEME_CONTEXT_KEY: self.theme_override}
# scope.update(self.get_extra_context())
# self.context.update(scope)
# return self.context
#
# def __exit__(self, *args, **kwargs):
# self.context.pop()
#
# @property
# def template_type(self):
# """
# Template type to use when loading the tag template. It corresponds to a key in silhouette.settings.PATTERNS.
#
# """
# return normalize(type(self).__name__)
#
# @property
# def template(self):
# """
# Load a template based on the object's template_type or template_override.
#
# """
# if self.template_override:
# return get_template(self.template_override)
# return get_silhouette(self.obj, self.template_type, path=self.path_override, theme=self.theme_override)
#
# def merge_attrs(self, *holders):
# """
# Merge html attributes from different holders. CSS classes are concatenated and all
# other attributes are overridden with the rightmost holders taking precedence over
# the leftmost holders.
#
# """
# attrs = {}
# classes = []
# for holder in holders:
# if 'class' in holder:
# classes.append(holder['class'])
# attrs.update({k: v for k, v in six.iteritems(holder) if v is not None})
# if classes:
# attrs['class'] = ' '.join(set(' '.join([cls.strip() for cls in classes if cls is not None]).split(' ')))
# return attrs
#
# def build_attrs(self, attrs, *prefixes):
# """
# Nest html attributes by prefix. Non prefixed attributes fall under the default "attrs" key
#
# """
# if not prefixes:
# return {'attrs': attrs}
# split_attrs = {'attrs': {}}
# for key, value in six.iteritems(attrs):
# match = re.match("^({})_".format("|".join(re.escape(p) for p in prefixes)), key)
# if match:
# parent_key, nested_key = "{}_attrs".format(key[:match.end() - 1]), key[match.end():]
# if parent_key not in split_attrs:
# split_attrs[parent_key] = {}
# split_attrs[parent_key][nested_key] = value
# else:
# split_attrs['attrs'][key] = value
# return split_attrs
#
# def cascaded_attrs(self, prefix, context=None):
# """
# Retrieve cascaded attributes for prefix from context
#
# """
# context = context or self.context
# return context.get("{}_attrs".format(prefix), {})
#
# def render(self, context):
# """
# Render template using context
#
# """
# return self.template.render(context)
#
# def get_extra_context(self): # pragma: no cover
# """
# Extra variables for context that are added before rendering and removed after rendering
#
# """
# raise NotImplementedError()
#
# def silhouette_tag(tag_name):
# """
# Register a class as a template tag.
#
# The class must be initialised with a context, and object and keyword arguments,
# and implement __enter__, __exit__ and render
#
# """
# def register_tag(silhouette_class):
# def tag(context, obj, **kwargs):
# silhouette = silhouette_class(context, obj, **kwargs)
# with silhouette as context:
# return silhouette.render(context)
# register.simple_tag(tag, True, tag_name)
# return silhouette_class
# return register_tag
, which may contain function names, class names, or code. Output only the next line. | ctx.update(attrs) |
Continue the code snippet: <|code_start|> self.form = MockForm({})
self.context = Context({"form": self.form})
def tearDown(self):
self.form = None
self.context = None
clear_app_settings_cache()
def test_field(self):
template_source = """{% load silhouette_tags %}{% field form.url_input widget_class="url-widget" label_class="url-label" label_contents="I need a url" help_text_contents="Url should look like http://example.org" widget_id="widget-id" %}"""
template_target = """<label class="url-label" for="widget-id">I need a url:</label><input class="url-widget" id="widget-id" name="url_input" type="url" /><p>Url should look like http://example.org</p><ul><li>This field is required.</li></ul>"""
self.assertEqual(template_target, get_template_from_string(template_source).render(self.context).strip())
def test_field_label(self):
template_source = """{% load silhouette_tags %}{% field_label form.text_input class="label-class" id="label-id" contents="Aren't I awesome" suffix="?" %}"""
template_target = """<label class="label-class" for="id_text_input" id="label-id">Aren't I awesome?</label>"""
self.assertEqual(template_target, get_template_from_string(template_source).render(self.context).strip())
def test_field_widget(self):
template_source = """{% load silhouette_tags %}{% field_widget form.text_input class="field-class" id="field-id" %}"""
template_target = """<input class="field-class" id="field-id" name="text_input" type="text" />"""
self.assertEqual(template_target, get_template_from_string(template_source).render(self.context).strip())
def test_field_help_text(self):
template_source = """{% load silhouette_tags %}{% field_help_text form.text_input class="field-help" contents="Need any help?" %}"""
template_target = """<p class="field-help">Need any help?</p>"""
self.assertEqual(template_target, get_template_from_string(template_source).render(self.context).strip())
def test_field_help_text_missing_template(self):
template_source = """{% load silhouette_tags %}{% field_help_text form.text_input template="does/not/exist.html" %}"""
<|code_end|>
. Use current file imports:
from django.test import SimpleTestCase
from django.template.context import Context
from django.template.base import TemplateDoesNotExist
from django.template.exceptions import TemplateDoesNotExist
from django.template.loader import get_template_from_string
from django.template import engines
from django.test.utils import override_settings
from .pods_utils import clear_app_settings_cache
from .mock.forms import MockForm, MockFormSet
from .mock.tags import MockTag
and context (classes, functions, or code) from other files:
# Path: tests/pods_utils.py
# def clear_app_settings_cache():
# # Clean up. Django Pods caches settings once loaded. Remove cached settings
# from silhouette.apps import Silhouette
# if hasattr(Silhouette.settings, 'PATH'):
# del Silhouette.settings.PATH
# if hasattr(Silhouette.settings, 'THEME'):
# del Silhouette.settings.THEME
# if hasattr(Silhouette.settings, 'PATTERNS'):
# del Silhouette.settings.PATTERNS
#
# Path: tests/mock/forms.py
# class MockForm(forms.Form):
# class MockForm2(forms.Form):
#
# Path: tests/mock/tags.py
# class MockTag(BaseSilhouette):
#
# def get_extra_context(self):
# ctx = {"obj": self.obj}
# # merge if any "cascaded_*" attributes in context
# attrs = self.merge_attrs(self.cascaded_attrs('cascaded'), self.kwargs)
# # cascade any "cascaded_*" attributes to sub contexts
# attrs = self.build_attrs(attrs, 'cascaded')
# ctx.update(attrs)
# return ctx
. Output only the next line. | template_target = "" |
Predict the next line after this snippet: <|code_start|>
def test_form_fields_missing_template(self):
template_source = """{% load silhouette_tags %}{% form_fields form class="form-fields" template="does/not/exist.html" %}"""
with self.assertRaises(TemplateDoesNotExist):
get_template_from_string(template_source).render(self.context)
def test_form_controls(self):
template_source = """{% load silhouette_tags %}{% form_controls form class="form-controls" %}"""
template_target = """<button type="submit" class="form-controls">Submit</button>"""
self.assertEqual(template_target, get_template_from_string(template_source).render(self.context).strip())
def test_form_controls_missing_template(self):
template_source = """{% load silhouette_tags %}{% form_controls form class="form-controls" template="does/not/exist.html" %}"""
template_target = ""
self.assertEqual(template_target, get_template_from_string(template_source).render(self.context).strip())
@override_settings(SILHOUETTE_PATH="test_tags/formsets")
class TestFormsetTags(SimpleTestCase):
def setUp(self):
self.form = MockForm({})
self.formset = MockFormSet(data={'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-field1': u'val',
'form-1-field1': u'value',})
self.context = Context({"form": self.form, 'formset': self.formset})
def tearDown(self):
<|code_end|>
using the current file's imports:
from django.test import SimpleTestCase
from django.template.context import Context
from django.template.base import TemplateDoesNotExist
from django.template.exceptions import TemplateDoesNotExist
from django.template.loader import get_template_from_string
from django.template import engines
from django.test.utils import override_settings
from .pods_utils import clear_app_settings_cache
from .mock.forms import MockForm, MockFormSet
from .mock.tags import MockTag
and any relevant context from other files:
# Path: tests/pods_utils.py
# def clear_app_settings_cache():
# # Clean up. Django Pods caches settings once loaded. Remove cached settings
# from silhouette.apps import Silhouette
# if hasattr(Silhouette.settings, 'PATH'):
# del Silhouette.settings.PATH
# if hasattr(Silhouette.settings, 'THEME'):
# del Silhouette.settings.THEME
# if hasattr(Silhouette.settings, 'PATTERNS'):
# del Silhouette.settings.PATTERNS
#
# Path: tests/mock/forms.py
# class MockForm(forms.Form):
# class MockForm2(forms.Form):
#
# Path: tests/mock/tags.py
# class MockTag(BaseSilhouette):
#
# def get_extra_context(self):
# ctx = {"obj": self.obj}
# # merge if any "cascaded_*" attributes in context
# attrs = self.merge_attrs(self.cascaded_attrs('cascaded'), self.kwargs)
# # cascade any "cascaded_*" attributes to sub contexts
# attrs = self.build_attrs(attrs, 'cascaded')
# ctx.update(attrs)
# return ctx
. Output only the next line. | self.form = None |
Predict the next line for this snippet: <|code_start|>
@override_settings(SILHOUETTE_PATH="test_tags/formsets")
class TestFormsetTags(SimpleTestCase):
def setUp(self):
self.form = MockForm({})
self.formset = MockFormSet(data={'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-field1': u'val',
'form-1-field1': u'value',})
self.context = Context({"form": self.form, 'formset': self.formset})
def tearDown(self):
self.form = None
self.formset = None
self.context = None
clear_app_settings_cache()
def test_formset(self):
template_source = """{% load silhouette_tags %}{% formset formset %}"""
result = get_template_from_string(template_source).render(self.context).strip()
self.assertIn("""<ul><li>Please submit 1 or fewer forms.</li></ul>""", result)
self.assertIn("""<input id="id_form-TOTAL_FORMS" name="form-TOTAL_FORMS" type="hidden" value="2" />""", result)
self.assertIn("""<ul><li>Ensure this value has at least 4 characters (it has 3).</li></ul>""", result)
self.assertIn("""<label for="id_form-1-field1">Field1:</label><input id="id_form-1-field1" name="form-1-field1" type="text" value="value" />""", result)
def test_formset_errors(self):
template_source = """{% load silhouette_tags %}{% formset_errors formset %}"""
<|code_end|>
with the help of current file imports:
from django.test import SimpleTestCase
from django.template.context import Context
from django.template.base import TemplateDoesNotExist
from django.template.exceptions import TemplateDoesNotExist
from django.template.loader import get_template_from_string
from django.template import engines
from django.test.utils import override_settings
from .pods_utils import clear_app_settings_cache
from .mock.forms import MockForm, MockFormSet
from .mock.tags import MockTag
and context from other files:
# Path: tests/pods_utils.py
# def clear_app_settings_cache():
# # Clean up. Django Pods caches settings once loaded. Remove cached settings
# from silhouette.apps import Silhouette
# if hasattr(Silhouette.settings, 'PATH'):
# del Silhouette.settings.PATH
# if hasattr(Silhouette.settings, 'THEME'):
# del Silhouette.settings.THEME
# if hasattr(Silhouette.settings, 'PATTERNS'):
# del Silhouette.settings.PATTERNS
#
# Path: tests/mock/forms.py
# class MockForm(forms.Form):
# class MockForm2(forms.Form):
#
# Path: tests/mock/tags.py
# class MockTag(BaseSilhouette):
#
# def get_extra_context(self):
# ctx = {"obj": self.obj}
# # merge if any "cascaded_*" attributes in context
# attrs = self.merge_attrs(self.cascaded_attrs('cascaded'), self.kwargs)
# # cascade any "cascaded_*" attributes to sub contexts
# attrs = self.build_attrs(attrs, 'cascaded')
# ctx.update(attrs)
# return ctx
, which may contain function names, class names, or code. Output only the next line. | template_target = """<ul><li>Please submit 1 or fewer forms.</li></ul>""" |
Predict the next line for this snippet: <|code_start|> self.assertEqual("silhouette/theme/mock_form", tag.render(self.context))
def test_theme_override(self):
tag = MockTag(self.context, self.form, theme="theme2")
self.assertEqual("silhouette/theme2/mock_form", tag.render(self.context))
def test_path_override(self):
tag = MockTag(self.context, self.form, path="test_tags/base/silhouette2")
self.assertEqual("silhouette2/theme/mock_form", tag.render(self.context))
def test_template_override(self):
tag = MockTag(self.context, self.form, template="test_tags/base/silhouette/mock_form.html")
self.assertEqual("silhouette/mock_form", tag.render(self.context))
def test_attributes(self):
tag = MockTag(self.context, self.form, attr="attr")
self.assertNotIn("obj", self.context)
self.assertNotIn("attrs", self.context)
with tag as local_context:
self.assertEqual(self.context, local_context)
self.assertDictContainsSubset({
"obj": self.form,
"attrs": {"attr": "attr"},
}, self.context)
self.assertNotIn("obj", self.context)
self.assertNotIn("attrs", self.context)
<|code_end|>
with the help of current file imports:
from django.test import SimpleTestCase
from django.template.context import Context
from django.template.base import TemplateDoesNotExist
from django.template.exceptions import TemplateDoesNotExist
from django.template.loader import get_template_from_string
from django.template import engines
from django.test.utils import override_settings
from .pods_utils import clear_app_settings_cache
from .mock.forms import MockForm, MockFormSet
from .mock.tags import MockTag
and context from other files:
# Path: tests/pods_utils.py
# def clear_app_settings_cache():
# # Clean up. Django Pods caches settings once loaded. Remove cached settings
# from silhouette.apps import Silhouette
# if hasattr(Silhouette.settings, 'PATH'):
# del Silhouette.settings.PATH
# if hasattr(Silhouette.settings, 'THEME'):
# del Silhouette.settings.THEME
# if hasattr(Silhouette.settings, 'PATTERNS'):
# del Silhouette.settings.PATTERNS
#
# Path: tests/mock/forms.py
# class MockForm(forms.Form):
# class MockForm2(forms.Form):
#
# Path: tests/mock/tags.py
# class MockTag(BaseSilhouette):
#
# def get_extra_context(self):
# ctx = {"obj": self.obj}
# # merge if any "cascaded_*" attributes in context
# attrs = self.merge_attrs(self.cascaded_attrs('cascaded'), self.kwargs)
# # cascade any "cascaded_*" attributes to sub contexts
# attrs = self.build_attrs(attrs, 'cascaded')
# ctx.update(attrs)
# return ctx
, which may contain function names, class names, or code. Output only the next line. | def test_cascaded_attributes(self): |
Continue the code snippet: <|code_start|>
PATH = 'test_loaders'
THEME = 'loader'
PATTERNS = {
"test_form": (
"{path}/{theme}/{form}.html",
),
"test_formset": (
"{path}/{theme}/{formset}.html",
),
"test_field": (
"{path}/{theme}/{form}-{field}-{widget}.html",
),
"test_fallback": (
"{path}/{theme}/does-not-exist-1-{form}.html",
"{path}/{theme}/does-not-exist-2-{form}.html",
"{path}/{theme}/fallback-{form}.html",
),
"test_notfound": (
"{path}/{theme}/does-not-exist-1.html",
"{path}/{theme}/does-not-exist-2.html",
),
}
<|code_end|>
. Use current file imports:
import unittest
from django.forms.formsets import formset_factory
from django.template.backends.django import Template
from django.template.base import Template
from django.template.base import TemplateDoesNotExist
from django.template.exceptions import TemplateDoesNotExist
from django.test.utils import override_settings
from .pods_utils import clear_app_settings_cache
from .mock import forms
from silhouette.loaders import loader
and context (classes, functions, or code) from other files:
# Path: tests/pods_utils.py
# def clear_app_settings_cache():
# # Clean up. Django Pods caches settings once loaded. Remove cached settings
# from silhouette.apps import Silhouette
# if hasattr(Silhouette.settings, 'PATH'):
# del Silhouette.settings.PATH
# if hasattr(Silhouette.settings, 'THEME'):
# del Silhouette.settings.THEME
# if hasattr(Silhouette.settings, 'PATTERNS'):
# del Silhouette.settings.PATTERNS
#
# Path: tests/mock/forms.py
# class MockForm(forms.Form):
# class MockForm2(forms.Form):
#
# Path: silhouette/loaders.py
# class DefaultLoader(object):
# def get_substitutes(self, obj, path, theme):
# def get_template(self, obj, template_type, path=None, theme=None, patterns=None):
# def __call__(self, *args, **kwargs):
. Output only the next line. | class TestLoaders(unittest.TestCase): |
Given snippet: <|code_start|> ),
"test_fallback": (
"{path}/{theme}/does-not-exist-1-{form}.html",
"{path}/{theme}/does-not-exist-2-{form}.html",
"{path}/{theme}/fallback-{form}.html",
),
"test_notfound": (
"{path}/{theme}/does-not-exist-1.html",
"{path}/{theme}/does-not-exist-2.html",
),
}
class TestLoaders(unittest.TestCase):
def tearDown(self):
clear_app_settings_cache()
def test_get_template_for_form(self):
obj = forms.MockForm()
self.assertIsInstance(loader.get_template(obj, 'test_form', path=PATH, theme=THEME, patterns=PATTERNS), Template)
def test_get_template_for_formset(self):
obj = formset_factory(forms.MockForm)()
self.assertIsInstance(loader.get_template(obj, 'test_formset', path=PATH, theme=THEME, patterns=PATTERNS), Template)
def test_get_template_for_field(self):
obj = forms.MockForm()['text_input']
self.assertIsInstance(loader.get_template(obj, 'test_field', path=PATH, theme=THEME, patterns=PATTERNS), Template)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
from django.forms.formsets import formset_factory
from django.template.backends.django import Template
from django.template.base import Template
from django.template.base import TemplateDoesNotExist
from django.template.exceptions import TemplateDoesNotExist
from django.test.utils import override_settings
from .pods_utils import clear_app_settings_cache
from .mock import forms
from silhouette.loaders import loader
and context:
# Path: tests/pods_utils.py
# def clear_app_settings_cache():
# # Clean up. Django Pods caches settings once loaded. Remove cached settings
# from silhouette.apps import Silhouette
# if hasattr(Silhouette.settings, 'PATH'):
# del Silhouette.settings.PATH
# if hasattr(Silhouette.settings, 'THEME'):
# del Silhouette.settings.THEME
# if hasattr(Silhouette.settings, 'PATTERNS'):
# del Silhouette.settings.PATTERNS
#
# Path: tests/mock/forms.py
# class MockForm(forms.Form):
# class MockForm2(forms.Form):
#
# Path: silhouette/loaders.py
# class DefaultLoader(object):
# def get_substitutes(self, obj, path, theme):
# def get_template(self, obj, template_type, path=None, theme=None, patterns=None):
# def __call__(self, *args, **kwargs):
which might include code, classes, or functions. Output only the next line. | def test_get_template_using_fallback(self): |
Given the following code snippet before the placeholder: <|code_start|>try:
except ImportError: # pragma: nocover
try:
except ImportError:
PATH = 'test_loaders'
THEME = 'loader'
PATTERNS = {
"test_form": (
"{path}/{theme}/{form}.html",
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from django.forms.formsets import formset_factory
from django.template.backends.django import Template
from django.template.base import Template
from django.template.base import TemplateDoesNotExist
from django.template.exceptions import TemplateDoesNotExist
from django.test.utils import override_settings
from .pods_utils import clear_app_settings_cache
from .mock import forms
from silhouette.loaders import loader
and context including class names, function names, and sometimes code from other files:
# Path: tests/pods_utils.py
# def clear_app_settings_cache():
# # Clean up. Django Pods caches settings once loaded. Remove cached settings
# from silhouette.apps import Silhouette
# if hasattr(Silhouette.settings, 'PATH'):
# del Silhouette.settings.PATH
# if hasattr(Silhouette.settings, 'THEME'):
# del Silhouette.settings.THEME
# if hasattr(Silhouette.settings, 'PATTERNS'):
# del Silhouette.settings.PATTERNS
#
# Path: tests/mock/forms.py
# class MockForm(forms.Form):
# class MockForm2(forms.Form):
#
# Path: silhouette/loaders.py
# class DefaultLoader(object):
# def get_substitutes(self, obj, path, theme):
# def get_template(self, obj, template_type, path=None, theme=None, patterns=None):
# def __call__(self, *args, **kwargs):
. Output only the next line. | ), |
Based on the snippet: <|code_start|> def test_is_number_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_number_input, {'number_input'})
def test_is_email_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_email_input, {'email_input'})
def test_is_url_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_url_input, {'url_input'})
def test_is_password_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_password_input, {'password_input'})
def test_is_hidden_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_hidden_input, {'hidden_input', 'multiple_hidden_input'})
def test_is_multiple_hidden_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_multiple_hidden_input, {'multiple_hidden_input'})
def test_is_file_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_file_input, {'file_input', 'clearable_file_input'})
def test_is_clearable_file_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_clearable_file_input, {'clearable_file_input'})
def test_is_date_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_date_input, {'date_input'})
def test_is_datetime_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_datetime_input, {'datetime_input'})
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
from collections import OrderedDict
from silhouette.templatetags import silhouette_filters
from tests.mock.forms import MockForm
and context (classes, functions, sometimes code) from other files:
# Path: silhouette/templatetags/silhouette_filters.py
# def to_html_attrs(attrs):
# def is_text_input(bound_field):
# def is_number_input(bound_field):
# def is_email_input(bound_field):
# def is_date_input(bound_field):
# def is_datetime_input(bound_field):
# def is_split_datetime_widget(bound_field):
# def is_time_input(bound_field):
# def is_url_input(bound_field):
# def is_password_input(bound_field):
# def is_hidden_input(bound_field):
# def is_multiple_hidden_input(bound_field):
# def is_file_input(bound_field):
# def is_clearable_file_input(bound_field):
# def is_textarea(bound_field):
# def is_checkbox_input(bound_field):
# def is_select(bound_field):
# def is_select_multiple(bound_field):
# def is_radio_select(bound_field):
# def is_checkbox_select_multiple(bound_field):
# def is_null_boolean_select(bound_field):
# def is_select_date_widget(bound_field):
#
# Path: tests/mock/forms.py
# class MockForm(forms.Form):
#
# text_input = forms.CharField(widget=forms.TextInput)
# email_input = forms.EmailField()
# url_input = forms.URLField()
# number_input = forms.IntegerField()
# password_input = forms.CharField(widget=forms.PasswordInput)
# checkbox_input = forms.BooleanField()
# hidden_input = forms.CharField(widget=forms.HiddenInput)
# multiple_hidden_input = forms.CharField(widget=forms.MultipleHiddenInput)
# file_input = forms.FileField(widget=forms.FileInput)
# clearable_file_input = forms.FileField()
# textarea = forms.CharField(widget=forms.Textarea)
# date_input = forms.DateField()
# datetime_input = forms.DateTimeField()
# time_input = forms.TimeField()
# split_datetime_widget = forms.DateTimeField(widget=forms.SplitDateTimeWidget)
# select = forms.ChoiceField((('option 1', 'Option 1'), ('option 2', 'Option 2')))
# null_boolean_select = forms.NullBooleanField()
# select_multiple = forms.ChoiceField((('option 1', 'Option 1'), ('option 2', 'Option 2')), widget=forms.SelectMultiple)
# checkbox_select_multiple = forms.ChoiceField((('option 1', 'Option 1'), ('option 2', 'Option 2')), widget=forms.CheckboxSelectMultiple)
# radio_select = forms.ChoiceField((('option 1', 'Option 1'), ('option 2', 'Option 2')), widget=forms.RadioSelect)
# select_date_widget = forms.DateField(widget=SelectDateWidget)
. Output only the next line. | def test_is_time_input(self): |
Here is a snippet: <|code_start|> self.assertAllTrue(fields, test_func)
self.assertAllFalse(set(self.form.fields) - set(fields), test_func)
def test_is_text_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_text_input, {'text_input', 'email_input', 'url_input', 'number_input', 'password_input', 'date_input', 'datetime_input', 'time_input'})
def test_is_number_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_number_input, {'number_input'})
def test_is_email_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_email_input, {'email_input'})
def test_is_url_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_url_input, {'url_input'})
def test_is_password_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_password_input, {'password_input'})
def test_is_hidden_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_hidden_input, {'hidden_input', 'multiple_hidden_input'})
def test_is_multiple_hidden_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_multiple_hidden_input, {'multiple_hidden_input'})
def test_is_file_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_file_input, {'file_input', 'clearable_file_input'})
def test_is_clearable_file_input(self):
self.assertWidgetFilterTruth(silhouette_filters.is_clearable_file_input, {'clearable_file_input'})
<|code_end|>
. Write the next line using the current file imports:
import unittest
from collections import OrderedDict
from silhouette.templatetags import silhouette_filters
from tests.mock.forms import MockForm
and context from other files:
# Path: silhouette/templatetags/silhouette_filters.py
# def to_html_attrs(attrs):
# def is_text_input(bound_field):
# def is_number_input(bound_field):
# def is_email_input(bound_field):
# def is_date_input(bound_field):
# def is_datetime_input(bound_field):
# def is_split_datetime_widget(bound_field):
# def is_time_input(bound_field):
# def is_url_input(bound_field):
# def is_password_input(bound_field):
# def is_hidden_input(bound_field):
# def is_multiple_hidden_input(bound_field):
# def is_file_input(bound_field):
# def is_clearable_file_input(bound_field):
# def is_textarea(bound_field):
# def is_checkbox_input(bound_field):
# def is_select(bound_field):
# def is_select_multiple(bound_field):
# def is_radio_select(bound_field):
# def is_checkbox_select_multiple(bound_field):
# def is_null_boolean_select(bound_field):
# def is_select_date_widget(bound_field):
#
# Path: tests/mock/forms.py
# class MockForm(forms.Form):
#
# text_input = forms.CharField(widget=forms.TextInput)
# email_input = forms.EmailField()
# url_input = forms.URLField()
# number_input = forms.IntegerField()
# password_input = forms.CharField(widget=forms.PasswordInput)
# checkbox_input = forms.BooleanField()
# hidden_input = forms.CharField(widget=forms.HiddenInput)
# multiple_hidden_input = forms.CharField(widget=forms.MultipleHiddenInput)
# file_input = forms.FileField(widget=forms.FileInput)
# clearable_file_input = forms.FileField()
# textarea = forms.CharField(widget=forms.Textarea)
# date_input = forms.DateField()
# datetime_input = forms.DateTimeField()
# time_input = forms.TimeField()
# split_datetime_widget = forms.DateTimeField(widget=forms.SplitDateTimeWidget)
# select = forms.ChoiceField((('option 1', 'Option 1'), ('option 2', 'Option 2')))
# null_boolean_select = forms.NullBooleanField()
# select_multiple = forms.ChoiceField((('option 1', 'Option 1'), ('option 2', 'Option 2')), widget=forms.SelectMultiple)
# checkbox_select_multiple = forms.ChoiceField((('option 1', 'Option 1'), ('option 2', 'Option 2')), widget=forms.CheckboxSelectMultiple)
# radio_select = forms.ChoiceField((('option 1', 'Option 1'), ('option 2', 'Option 2')), widget=forms.RadioSelect)
# select_date_widget = forms.DateField(widget=SelectDateWidget)
, which may include functions, classes, or code. Output only the next line. | def test_is_date_input(self): |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.