| Instruction | output_code |
|---|---|
Continue the code snippet: <|code_start|>an http:// or https:// protocol specified.
"""
class HTTPBackend(BaseStorageBackend):
"""
Abstracts access to HTTP via the common set of file storage backend methods.
.. note:: ``upload_file`` is not implemented yet, not sure how
it should work.
"""
@classmethod
def download_file(cls, uri, fobj):
"""
Given a URI, download the file to the ``fobj`` file-like object.
:param str uri: The URI of a file to download.
:param file fobj: A file-like object to download the file to.
:rtype: file
:returns: A file handle to the downloaded file.
"""
request = urllib2.Request(uri)
try:
download = urllib2.urlopen(request)
except urllib2.URLError, e:
message = "The specified input file cannot be found: %s" % e
raise InfileNotFoundException(message)
fobj.write(download.read())
<|code_end|>
. Use current file imports:
import urllib2
from media_nommer.utils import logger
from media_nommer.core.storage_backends.exceptions import InfileNotFoundException
from media_nommer.core.storage_backends.base_backend import BaseStorageBackend
and context (classes, functions, or code) from other files:
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
#
# Path: media_nommer/core/storage_backends/exceptions.py
# class InfileNotFoundException(StorageException):
# """
# Raised by storage backends when the given source_path (the media to encode)
# cannot be found.
# """
# pass
#
# Path: media_nommer/core/storage_backends/base_backend.py
# class BaseStorageBackend(object):
# """
# This class serves as a protocol for storage backends. Each storage backend
# should override the following methods, if supported by whatever protocols
# they interact with.
# """
# @classmethod
# def download_file(cls, uri, fobj):
# """
# Given a URI, download the file to the ``fobj`` file-like object.
#
# :param str uri: The URI of a file to download.
# :param file fobj: A file-like object to download the file to.
# :rtype: file
# :returns: A file handle to the downloaded file.
# """
# msg = "Backend doesn't implement download_file()"
# raise NotImplementedError(msg)
#
# @classmethod
# def upload_file(cls, uri, fobj):
# """
# Given a file-like object, upload it to the specified URI.
#
# :param str uri: The URI to upload the file to.
# :param file fobj: The file-like object to upload.
# :returns: Return value and type depends on backend.
# """
# msg = "Backend doesn't implement upload_file()"
# raise NotImplementedError(msg)
. Output only the next line. | logger.debug("HTTPBackend.download_file(): " \ |
Given the code snippet: <|code_start|>"""
This module contains an HTTPBackend class for working with URIs that have
an http:// or https:// protocol specified.
"""
class HTTPBackend(BaseStorageBackend):
"""
Abstracts access to HTTP via the common set of file storage backend methods.
.. note:: ``upload_file`` is not implemented yet, not sure how
it should work.
"""
@classmethod
def download_file(cls, uri, fobj):
"""
Given a URI, download the file to the ``fobj`` file-like object.
:param str uri: The URI of a file to download.
:param file fobj: A file-like object to download the file to.
:rtype: file
:returns: A file handle to the downloaded file.
"""
request = urllib2.Request(uri)
try:
download = urllib2.urlopen(request)
except urllib2.URLError, e:
message = "The specified input file cannot be found: %s" % e
<|code_end|>
, generate the next line using the imports in this file:
import urllib2
from media_nommer.utils import logger
from media_nommer.core.storage_backends.exceptions import InfileNotFoundException
from media_nommer.core.storage_backends.base_backend import BaseStorageBackend
and context (functions, classes, or occasionally code) from other files:
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
#
# Path: media_nommer/core/storage_backends/exceptions.py
# class InfileNotFoundException(StorageException):
# """
# Raised by storage backends when the given source_path (the media to encode)
# cannot be found.
# """
# pass
#
# Path: media_nommer/core/storage_backends/base_backend.py
# class BaseStorageBackend(object):
# """
# This class serves as a protocol for storage backends. Each storage backend
# should override the following methods, if supported by whatever protocols
# they interact with.
# """
# @classmethod
# def download_file(cls, uri, fobj):
# """
# Given a URI, download the file to the ``fobj`` file-like object.
#
# :param str uri: The URI of a file to download.
# :param file fobj: A file-like object to download the file to.
# :rtype: file
# :returns: A file handle to the downloaded file.
# """
# msg = "Backend doesn't implement download_file()"
# raise NotImplementedError(msg)
#
# @classmethod
# def upload_file(cls, uri, fobj):
# """
# Given a file-like object, upload it to the specified URI.
#
# :param str uri: The URI to upload the file to.
# :param file fobj: The file-like object to upload.
# :returns: Return value and type depends on backend.
# """
# msg = "Backend doesn't implement upload_file()"
# raise NotImplementedError(msg)
. Output only the next line. | raise InfileNotFoundException(message) |
Next line prediction: <|code_start|> """
Abstracts storing and retrieving job state information to and from
SimpleDB_. Jobs are represented through the :py:class:`EncodingJob` class,
which are instantiated and returned as needed.
"""
JOB_STATES = ['PENDING', 'DOWNLOADING', 'ENCODING', 'UPLOADING',
'FINISHED', 'ERROR', 'ABANDONED']
"""All possible job states as a list of strings."""
FINISHED_STATES = ['FINISHED', 'ERROR', 'ABANDONED']
"""Any jobs in the following states are considered "finished" in that we
won't do anything else with them. This is a list of strings."""
# The following AWS fields are for lazy-loading.
__aws_sdb_connection = None
__aws_sdb_job_state_domain = None
__aws_sqs_connection = None
__aws_sqs_new_job_queue = None
__aws_sqs_state_change_queue = None
@classmethod
def _get_sdb_connection(cls):
"""
        Lazy-loading of the SimpleDB boto connection. Refer to this instead of
referencing cls.__aws_sdb_connection directly.
:returns: A boto connection to Amazon's SimpleDB interface.
"""
if not cls.__aws_sdb_connection:
cls.__aws_sdb_connection = boto.connect_sdb(
<|code_end|>
. Use current file imports:
(import random
import hashlib
import datetime
import json
import boto
from boto.sqs.message import Message
from media_nommer.conf import settings
from media_nommer.utils import logger
from media_nommer.utils.mod_importing import import_class_from_module_string)
and context including class names, function names, or small code snippets from other files:
# Path: media_nommer/conf/settings.py
# AWS_ACCESS_KEY_ID = None
# AWS_SECRET_ACCESS_KEY = None
# CONFIG_S3_BUCKET = 'nommer_config'
# SQS_NEW_JOB_QUEUE_NAME = 'media_nommer'
# SQS_JOB_STATE_CHANGE_QUEUE_NAME = 'media_nommer_jstate'
# SIMPLEDB_JOB_STATE_DOMAIN = 'media_nommer'
# SIMPLEDB_EC2_NOMMER_STATE_DOMAIN = 'media_nommer_ec2nommer_state'
# EC2_KEY_NAME = None
# EC2_SECURITY_GROUPS = ['media_nommer']
# EC2_AMI_ID = 'ami-eb558182'
# EC2_INSTANCE_TYPE = 'm1.large'
# MAX_ENCODING_JOBS_PER_EC2_INSTANCE = 2
# MAX_NUM_EC2_INSTANCES = 3
# JOB_OVERFLOW_THRESH = 2
# FEEDERD_JOB_STATE_CHANGE_CHECK_INTERVAL = 60
# FEEDERD_PRUNE_JOBS_INTERVAL = 60 * 5
# FEEDERD_ALLOW_EC2_LAUNCHES = True
# FEEDERD_ABANDON_INACTIVE_JOBS_THRESH = 3600 * 9
# FEEDERD_AUTO_SCALE_INTERVAL = 60
# NOMMERD_TERMINATE_WHEN_IDLE = True
# NOMMERD_MAX_INACTIVITY = 60 * 10
# NOMMERD_HEARTBEAT_INTERVAL = 60
# NOMMERD_NEW_JOB_CHECK_INTERVAL = 60
# NOMMERD_QTFASTSTART_BIN_PATH = '/home/nom/.virtualenvs/media_nommer/bin/qtfaststart'
# STORAGE_BACKENDS = {
# 's3': 'media_nommer.core.storage_backends.s3.S3Backend',
# 'http': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'https': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'file': 'media_nommer.core.storage_backends.file.FileBackend',
# }
#
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
#
# Path: media_nommer/utils/mod_importing.py
# def import_class_from_module_string(fqpn_str):
# """
# Given a FQPN for a class, import and return the class.
#
# :param str fqpn_str: The FQPN to the class to import.
# :returns: The class given in the FQPN.
# """
# components = fqpn_str.split('.')
# # Generate a FQPN with everything but the class name.
# nom_module_str = '.'.join(components[:-1])
# # Just the target module to import.
# class_name = components[-1]
# # Import the class's module and the class itself.
# nommer = __import__(nom_module_str, globals(), locals(), [class_name])
#
# try:
# return getattr(nommer, class_name)
# except AttributeError:
# raise ImportError('No class named %s' % class_name)
. Output only the next line. | settings.AWS_ACCESS_KEY_ID, |
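This row completes the first credential argument of the lazily created SimpleDB connection. A plausible shape of the finished classmethod, for orientation only: everything after the predicted line is inferred from the surrounding docstring and boto's `connect_sdb()` signature, not taken from the dataset, and the class name comes from a later row.

```python
import boto
from media_nommer.conf import settings


class JobStateBackend(object):
    # Cached boto connection; created on first use and reused afterwards.
    __aws_sdb_connection = None

    @classmethod
    def _get_sdb_connection(cls):
        if not cls.__aws_sdb_connection:
            cls.__aws_sdb_connection = boto.connect_sdb(
                settings.AWS_ACCESS_KEY_ID,       # the predicted line
                settings.AWS_SECRET_ACCESS_KEY)   # inferred follow-up, not in the row
        return cls.__aws_sdb_connection
```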
Based on the snippet: <|code_start|> # Start populating values.
self.creation_dtime = now_dtime
self.job_state = 'PENDING'
else:
# Retrieve the existing item for the job.
job = JobStateBackend._get_sdb_job_state_domain().get_item(self.unique_id)
if job is None:
msg = 'EncodingJob.save(): ' \
'No match found in DB for ID: %s' % self.unique_id
raise Exception(msg)
if self.job_state_details and isinstance(self.job_state_details,
basestring):
# Get within AWS's limitations. We'll assume that the error message
# is probably near the tail end of the output (hopefully). Not
# a great assumption, but it'll have to do.
self.job_state_details = self.job_state_details[-1023:]
job['unique_id'] = self.unique_id
job['source_path'] = self.source_path
job['dest_path'] = self.dest_path
job['nommer'] = '%s.%s' % (self.nommer.__class__.__module__,
self.nommer.__class__.__name__)
job['job_options'] = json.dumps(self.job_options)
job['job_state'] = self.job_state
job['job_state_details'] = self.job_state_details
job['notify_url'] = self.notify_url
job['last_modified_dtime'] = now_dtime
job['creation_dtime'] = self.creation_dtime
<|code_end|>
, predict the immediate next line with the help of imports:
import random
import hashlib
import datetime
import json
import boto
from boto.sqs.message import Message
from media_nommer.conf import settings
from media_nommer.utils import logger
from media_nommer.utils.mod_importing import import_class_from_module_string
and context (classes, functions, sometimes code) from other files:
# Path: media_nommer/conf/settings.py
# AWS_ACCESS_KEY_ID = None
# AWS_SECRET_ACCESS_KEY = None
# CONFIG_S3_BUCKET = 'nommer_config'
# SQS_NEW_JOB_QUEUE_NAME = 'media_nommer'
# SQS_JOB_STATE_CHANGE_QUEUE_NAME = 'media_nommer_jstate'
# SIMPLEDB_JOB_STATE_DOMAIN = 'media_nommer'
# SIMPLEDB_EC2_NOMMER_STATE_DOMAIN = 'media_nommer_ec2nommer_state'
# EC2_KEY_NAME = None
# EC2_SECURITY_GROUPS = ['media_nommer']
# EC2_AMI_ID = 'ami-eb558182'
# EC2_INSTANCE_TYPE = 'm1.large'
# MAX_ENCODING_JOBS_PER_EC2_INSTANCE = 2
# MAX_NUM_EC2_INSTANCES = 3
# JOB_OVERFLOW_THRESH = 2
# FEEDERD_JOB_STATE_CHANGE_CHECK_INTERVAL = 60
# FEEDERD_PRUNE_JOBS_INTERVAL = 60 * 5
# FEEDERD_ALLOW_EC2_LAUNCHES = True
# FEEDERD_ABANDON_INACTIVE_JOBS_THRESH = 3600 * 9
# FEEDERD_AUTO_SCALE_INTERVAL = 60
# NOMMERD_TERMINATE_WHEN_IDLE = True
# NOMMERD_MAX_INACTIVITY = 60 * 10
# NOMMERD_HEARTBEAT_INTERVAL = 60
# NOMMERD_NEW_JOB_CHECK_INTERVAL = 60
# NOMMERD_QTFASTSTART_BIN_PATH = '/home/nom/.virtualenvs/media_nommer/bin/qtfaststart'
# STORAGE_BACKENDS = {
# 's3': 'media_nommer.core.storage_backends.s3.S3Backend',
# 'http': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'https': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'file': 'media_nommer.core.storage_backends.file.FileBackend',
# }
#
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
#
# Path: media_nommer/utils/mod_importing.py
# def import_class_from_module_string(fqpn_str):
# """
# Given a FQPN for a class, import and return the class.
#
# :param str fqpn_str: The FQPN to the class to import.
# :returns: The class given in the FQPN.
# """
# components = fqpn_str.split('.')
# # Generate a FQPN with everything but the class name.
# nom_module_str = '.'.join(components[:-1])
# # Just the target module to import.
# class_name = components[-1]
# # Import the class's module and the class itself.
# nommer = __import__(nom_module_str, globals(), locals(), [class_name])
#
# try:
# return getattr(nommer, class_name)
# except AttributeError:
# raise ImportError('No class named %s' % class_name)
. Output only the next line. | logger.debug("EncodingJob.save(): Item pre-save values: %s" % job) |
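The comment in this row ("Get within AWS's limitations") refers to SimpleDB's roughly 1 KB cap on attribute values, which is why only the last 1023 characters of the details string are kept. A tiny worked example of that truncation step (the limit figure is background knowledge about SimpleDB, not stated in the dataset):

```python
# Keep only the tail of a long job_state_details string, as the row above does.
details = 'x' * 5000        # stand-in for verbose encoder output
details = details[-1023:]   # the error text usually sits near the end
assert len(details) == 1023
```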
Based on the snippet: <|code_start|> .. tip:: You generally won't be instantiating these objects yourself.
To retrieve an existing job, you may
use :py:meth:`JobStateBackend.get_job_object_from_id`.
"""
def __init__(self, source_path, dest_path, nommer, job_options,
unique_id=None, job_state='PENDING', job_state_details=None,
notify_url=None, creation_dtime=None,
last_modified_dtime=None):
"""
:param str source_path: The URI to the source media to encode.
:param str dest_path: The URI to upload the encoded media to.
:param dict job_options: The job options to pass to the Nommer in
key/value format. See each Nommer's documentation for details on
accepted options.
:keyword str unique_id: The unique hash ID for this job. If
:py:meth:`save` is called and this value is ``None``, an ID will
be generated for this job.
:keyword str job_state: The state that the job is in.
:keyword str job_state_details: Any details to go along with whatever
job state this job is in. For example, if job_state is `ERROR`,
this keyword might contain an error message.
:keyword str notify_url: The URL to hit when this job has finished.
:keyword datetime.datetime creation_dtime: The time when this job
was created.
:keyword datetime.datetime last_modified_dtime: The time when this job
was last modified.
"""
self.source_path = source_path
self.dest_path = dest_path
# __import__ doesn't like unicode, cast this to a str.
<|code_end|>
, predict the immediate next line with the help of imports:
import random
import hashlib
import datetime
import json
import boto
from boto.sqs.message import Message
from media_nommer.conf import settings
from media_nommer.utils import logger
from media_nommer.utils.mod_importing import import_class_from_module_string
and context (classes, functions, sometimes code) from other files:
# Path: media_nommer/conf/settings.py
# AWS_ACCESS_KEY_ID = None
# AWS_SECRET_ACCESS_KEY = None
# CONFIG_S3_BUCKET = 'nommer_config'
# SQS_NEW_JOB_QUEUE_NAME = 'media_nommer'
# SQS_JOB_STATE_CHANGE_QUEUE_NAME = 'media_nommer_jstate'
# SIMPLEDB_JOB_STATE_DOMAIN = 'media_nommer'
# SIMPLEDB_EC2_NOMMER_STATE_DOMAIN = 'media_nommer_ec2nommer_state'
# EC2_KEY_NAME = None
# EC2_SECURITY_GROUPS = ['media_nommer']
# EC2_AMI_ID = 'ami-eb558182'
# EC2_INSTANCE_TYPE = 'm1.large'
# MAX_ENCODING_JOBS_PER_EC2_INSTANCE = 2
# MAX_NUM_EC2_INSTANCES = 3
# JOB_OVERFLOW_THRESH = 2
# FEEDERD_JOB_STATE_CHANGE_CHECK_INTERVAL = 60
# FEEDERD_PRUNE_JOBS_INTERVAL = 60 * 5
# FEEDERD_ALLOW_EC2_LAUNCHES = True
# FEEDERD_ABANDON_INACTIVE_JOBS_THRESH = 3600 * 9
# FEEDERD_AUTO_SCALE_INTERVAL = 60
# NOMMERD_TERMINATE_WHEN_IDLE = True
# NOMMERD_MAX_INACTIVITY = 60 * 10
# NOMMERD_HEARTBEAT_INTERVAL = 60
# NOMMERD_NEW_JOB_CHECK_INTERVAL = 60
# NOMMERD_QTFASTSTART_BIN_PATH = '/home/nom/.virtualenvs/media_nommer/bin/qtfaststart'
# STORAGE_BACKENDS = {
# 's3': 'media_nommer.core.storage_backends.s3.S3Backend',
# 'http': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'https': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'file': 'media_nommer.core.storage_backends.file.FileBackend',
# }
#
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
#
# Path: media_nommer/utils/mod_importing.py
# def import_class_from_module_string(fqpn_str):
# """
# Given a FQPN for a class, import and return the class.
#
# :param str fqpn_str: The FQPN to the class to import.
# :returns: The class given in the FQPN.
# """
# components = fqpn_str.split('.')
# # Generate a FQPN with everything but the class name.
# nom_module_str = '.'.join(components[:-1])
# # Just the target module to import.
# class_name = components[-1]
# # Import the class's module and the class itself.
# nommer = __import__(nom_module_str, globals(), locals(), [class_name])
#
# try:
# return getattr(nommer, class_name)
# except AttributeError:
# raise ImportError('No class named %s' % class_name)
. Output only the next line. | self.nommer = import_class_from_module_string(str(nommer))(self) |
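The predicted line dynamically imports the nommer class from its dotted path via `import_class_from_module_string`. A usage sketch of that helper on its own, using an FQPN that appears in the quoted `STORAGE_BACKENDS` setting and assuming the `media_nommer` package is importable:

```python
from media_nommer.utils.mod_importing import import_class_from_module_string

backend_cls = import_class_from_module_string(
    'media_nommer.core.storage_backends.http.HTTPBackend')
print(backend_cls.__name__)  # -> 'HTTPBackend'
```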
Using the snippet: <|code_start|>Contains the :py:class:`NodeStateManager` class, which is an abstraction layer
for storing and communicating the status of EC2_ nodes.
"""
class NodeStateManager(object):
"""
Tracks this node's state, reports it to :doc:`../feederd`, and terminates
itself if certain conditions of inactivity are met.
"""
last_dtime_i_did_something = datetime.datetime.now()
# Used for lazy-loading the SDB connection. Do not refer to directly.
__aws_sdb_connection = None
# Used for lazy-loading the SDB domain. Do not refer to directly.
__aws_sdb_nommer_state_domain = None
# Used for lazy-loading the EC2 connection. Do not refer to directly.
__aws_ec2_connection = None
# Store the instance ID for this EC2 node (if not local).
__instance_id = None
@classmethod
def _aws_ec2_connection(cls):
"""
Lazy-loading of the EC2 boto connection. Refer to this instead of
referencing cls.__aws_ec2_connection directly.
:returns: A boto connection to Amazon's EC2 interface.
"""
if not cls.__aws_ec2_connection:
cls.__aws_ec2_connection = boto.connect_ec2(
<|code_end|>
, determine the next line of code. You have imports:
import urllib2
import datetime
import boto
from twisted.internet import reactor
from media_nommer.conf import settings
from media_nommer.utils import logger
from media_nommer.utils.compat import total_seconds
and context (class names, function names, or code) available:
# Path: media_nommer/conf/settings.py
# AWS_ACCESS_KEY_ID = None
# AWS_SECRET_ACCESS_KEY = None
# CONFIG_S3_BUCKET = 'nommer_config'
# SQS_NEW_JOB_QUEUE_NAME = 'media_nommer'
# SQS_JOB_STATE_CHANGE_QUEUE_NAME = 'media_nommer_jstate'
# SIMPLEDB_JOB_STATE_DOMAIN = 'media_nommer'
# SIMPLEDB_EC2_NOMMER_STATE_DOMAIN = 'media_nommer_ec2nommer_state'
# EC2_KEY_NAME = None
# EC2_SECURITY_GROUPS = ['media_nommer']
# EC2_AMI_ID = 'ami-eb558182'
# EC2_INSTANCE_TYPE = 'm1.large'
# MAX_ENCODING_JOBS_PER_EC2_INSTANCE = 2
# MAX_NUM_EC2_INSTANCES = 3
# JOB_OVERFLOW_THRESH = 2
# FEEDERD_JOB_STATE_CHANGE_CHECK_INTERVAL = 60
# FEEDERD_PRUNE_JOBS_INTERVAL = 60 * 5
# FEEDERD_ALLOW_EC2_LAUNCHES = True
# FEEDERD_ABANDON_INACTIVE_JOBS_THRESH = 3600 * 9
# FEEDERD_AUTO_SCALE_INTERVAL = 60
# NOMMERD_TERMINATE_WHEN_IDLE = True
# NOMMERD_MAX_INACTIVITY = 60 * 10
# NOMMERD_HEARTBEAT_INTERVAL = 60
# NOMMERD_NEW_JOB_CHECK_INTERVAL = 60
# NOMMERD_QTFASTSTART_BIN_PATH = '/home/nom/.virtualenvs/media_nommer/bin/qtfaststart'
# STORAGE_BACKENDS = {
# 's3': 'media_nommer.core.storage_backends.s3.S3Backend',
# 'http': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'https': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'file': 'media_nommer.core.storage_backends.file.FileBackend',
# }
#
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
#
# Path: media_nommer/utils/compat.py
# def total_seconds(td):
# """
# Given a timedelta object, compute the total number of seconds elapsed
# for the entire delta. This is only available in the standard library for
# Python 2.7 and up.
#
# Source: http://docs.python.org/library/datetime.html#datetime.timedelta.total_seconds
#
# :param datetime.timedelta td: A timedelta instance.
# :rtype: float
# :returns: The seconds elapsed during the timedelta.
# """
# return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6.0
. Output only the next line. | settings.AWS_ACCESS_KEY_ID, |
Using the snippet: <|code_start|> if not.
"""
if not cls.is_ec2_instance():
# Developing locally, don't go here.
return False
# This is -1 since this is also a thread doing the contemplation.
# This would always be 1, even if we had no jobs encoding, if we
# didn't take into account this thread.
num_active_threads = cls.get_num_active_threads() + thread_count_mod
if num_active_threads > 0:
# Encoding right now, don't terminate.
return False
tdelt = datetime.datetime.now() - cls.last_dtime_i_did_something
# Total seconds of inactivity.
inactive_secs = total_seconds(tdelt)
# If we're over the inactivity threshold...
if inactive_secs > settings.NOMMERD_MAX_INACTIVITY:
instance_id = cls.get_instance_id()
conn = cls._aws_ec2_connection()
# Find this particular EC2 instance via boto.
reservations = conn.get_all_instances(instance_ids=[instance_id])
# This should only be one match, but in the interest of
# playing along...
for reservation in reservations:
for instance in reservation.instances:
# Here's the instance, terminate it.
<|code_end|>
, determine the next line of code. You have imports:
import urllib2
import datetime
import boto
from twisted.internet import reactor
from media_nommer.conf import settings
from media_nommer.utils import logger
from media_nommer.utils.compat import total_seconds
and context (class names, function names, or code) available:
# Path: media_nommer/conf/settings.py
# AWS_ACCESS_KEY_ID = None
# AWS_SECRET_ACCESS_KEY = None
# CONFIG_S3_BUCKET = 'nommer_config'
# SQS_NEW_JOB_QUEUE_NAME = 'media_nommer'
# SQS_JOB_STATE_CHANGE_QUEUE_NAME = 'media_nommer_jstate'
# SIMPLEDB_JOB_STATE_DOMAIN = 'media_nommer'
# SIMPLEDB_EC2_NOMMER_STATE_DOMAIN = 'media_nommer_ec2nommer_state'
# EC2_KEY_NAME = None
# EC2_SECURITY_GROUPS = ['media_nommer']
# EC2_AMI_ID = 'ami-eb558182'
# EC2_INSTANCE_TYPE = 'm1.large'
# MAX_ENCODING_JOBS_PER_EC2_INSTANCE = 2
# MAX_NUM_EC2_INSTANCES = 3
# JOB_OVERFLOW_THRESH = 2
# FEEDERD_JOB_STATE_CHANGE_CHECK_INTERVAL = 60
# FEEDERD_PRUNE_JOBS_INTERVAL = 60 * 5
# FEEDERD_ALLOW_EC2_LAUNCHES = True
# FEEDERD_ABANDON_INACTIVE_JOBS_THRESH = 3600 * 9
# FEEDERD_AUTO_SCALE_INTERVAL = 60
# NOMMERD_TERMINATE_WHEN_IDLE = True
# NOMMERD_MAX_INACTIVITY = 60 * 10
# NOMMERD_HEARTBEAT_INTERVAL = 60
# NOMMERD_NEW_JOB_CHECK_INTERVAL = 60
# NOMMERD_QTFASTSTART_BIN_PATH = '/home/nom/.virtualenvs/media_nommer/bin/qtfaststart'
# STORAGE_BACKENDS = {
# 's3': 'media_nommer.core.storage_backends.s3.S3Backend',
# 'http': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'https': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'file': 'media_nommer.core.storage_backends.file.FileBackend',
# }
#
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
#
# Path: media_nommer/utils/compat.py
# def total_seconds(td):
# """
# Given a timedelta object, compute the total number of seconds elapsed
# for the entire delta. This is only available in the standard library for
# Python 2.7 and up.
#
# Source: http://docs.python.org/library/datetime.html#datetime.timedelta.total_seconds
#
# :param datetime.timedelta td: A timedelta instance.
# :rtype: float
# :returns: The seconds elapsed during the timedelta.
# """
# return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6.0
. Output only the next line. | logger.info("Goodbye, cruel world.") |
Here is a snippet: <|code_start|> item.save()
@classmethod
def contemplate_termination(cls, thread_count_mod=0):
"""
Looks at how long it's been since this worker has done something, and
decides whether to self-terminate.
:param int thread_count_mod: Add this to the amount returned by the call
to :py:meth:`get_num_active_threads`. This is useful when calling
this method from a non-encoder thread.
:rtype: bool
:returns: ``True`` if this instance terminated itself, ``False``
if not.
"""
if not cls.is_ec2_instance():
# Developing locally, don't go here.
return False
# This is -1 since this is also a thread doing the contemplation.
# This would always be 1, even if we had no jobs encoding, if we
# didn't take into account this thread.
num_active_threads = cls.get_num_active_threads() + thread_count_mod
if num_active_threads > 0:
# Encoding right now, don't terminate.
return False
tdelt = datetime.datetime.now() - cls.last_dtime_i_did_something
# Total seconds of inactivity.
<|code_end|>
. Write the next line using the current file imports:
import urllib2
import datetime
import boto
from twisted.internet import reactor
from media_nommer.conf import settings
from media_nommer.utils import logger
from media_nommer.utils.compat import total_seconds
and context from other files:
# Path: media_nommer/conf/settings.py
# AWS_ACCESS_KEY_ID = None
# AWS_SECRET_ACCESS_KEY = None
# CONFIG_S3_BUCKET = 'nommer_config'
# SQS_NEW_JOB_QUEUE_NAME = 'media_nommer'
# SQS_JOB_STATE_CHANGE_QUEUE_NAME = 'media_nommer_jstate'
# SIMPLEDB_JOB_STATE_DOMAIN = 'media_nommer'
# SIMPLEDB_EC2_NOMMER_STATE_DOMAIN = 'media_nommer_ec2nommer_state'
# EC2_KEY_NAME = None
# EC2_SECURITY_GROUPS = ['media_nommer']
# EC2_AMI_ID = 'ami-eb558182'
# EC2_INSTANCE_TYPE = 'm1.large'
# MAX_ENCODING_JOBS_PER_EC2_INSTANCE = 2
# MAX_NUM_EC2_INSTANCES = 3
# JOB_OVERFLOW_THRESH = 2
# FEEDERD_JOB_STATE_CHANGE_CHECK_INTERVAL = 60
# FEEDERD_PRUNE_JOBS_INTERVAL = 60 * 5
# FEEDERD_ALLOW_EC2_LAUNCHES = True
# FEEDERD_ABANDON_INACTIVE_JOBS_THRESH = 3600 * 9
# FEEDERD_AUTO_SCALE_INTERVAL = 60
# NOMMERD_TERMINATE_WHEN_IDLE = True
# NOMMERD_MAX_INACTIVITY = 60 * 10
# NOMMERD_HEARTBEAT_INTERVAL = 60
# NOMMERD_NEW_JOB_CHECK_INTERVAL = 60
# NOMMERD_QTFASTSTART_BIN_PATH = '/home/nom/.virtualenvs/media_nommer/bin/qtfaststart'
# STORAGE_BACKENDS = {
# 's3': 'media_nommer.core.storage_backends.s3.S3Backend',
# 'http': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'https': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'file': 'media_nommer.core.storage_backends.file.FileBackend',
# }
#
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
#
# Path: media_nommer/utils/compat.py
# def total_seconds(td):
# """
# Given a timedelta object, compute the total number of seconds elapsed
# for the entire delta. This is only available in the standard library for
# Python 2.7 and up.
#
# Source: http://docs.python.org/library/datetime.html#datetime.timedelta.total_seconds
#
# :param datetime.timedelta td: A timedelta instance.
# :rtype: float
# :returns: The seconds elapsed during the timedelta.
# """
# return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6.0
, which may include functions, classes, or code. Output only the next line. | inactive_secs = total_seconds(tdelt) |
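Several of the EC2 worker rows lean on the quoted `total_seconds()` compatibility helper and the documented `NOMMERD_MAX_INACTIVITY = 60 * 10` default. A short, self-contained worked example of the idle check they describe:

```python
# total_seconds() as defined in media_nommer/utils/compat.py, applied to the
# inactivity threshold: fifteen idle minutes crosses the ten-minute default.
import datetime


def total_seconds(td):
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6.0


tdelt = datetime.timedelta(minutes=15)
print(total_seconds(tdelt))            # -> 900.0
print(total_seconds(tdelt) > 60 * 10)  # -> True, so the idle node would terminate
```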
Here is a snippet: <|code_start|>"""
Various configuration-related utility methods.
"""
def upload_settings(nomconf_module):
"""
Given a user-defined nomconf module (already imported), push said file
to the S3 conf bucket, as defined by settings.CONFIG_S3_BUCKET.
This is used by the nommers that require access to the config, like
FFmpegNommer.
:param module nomconf_module: The user's ``nomconf`` module. This may
be called something other than ``nomconf``, but the uploaded filename
will always be ``nomconf.py``, so the EC2 nodes can find it in your
settings.CONFIG_S3_BUCKET.
"""
<|code_end|>
. Write the next line using the current file imports:
import os
import boto
from media_nommer.conf import settings
from media_nommer.utils import logger
from media_nommer.core.storage_backends.s3 import S3Backend
and context from other files:
# Path: media_nommer/conf/settings.py
# AWS_ACCESS_KEY_ID = None
# AWS_SECRET_ACCESS_KEY = None
# CONFIG_S3_BUCKET = 'nommer_config'
# SQS_NEW_JOB_QUEUE_NAME = 'media_nommer'
# SQS_JOB_STATE_CHANGE_QUEUE_NAME = 'media_nommer_jstate'
# SIMPLEDB_JOB_STATE_DOMAIN = 'media_nommer'
# SIMPLEDB_EC2_NOMMER_STATE_DOMAIN = 'media_nommer_ec2nommer_state'
# EC2_KEY_NAME = None
# EC2_SECURITY_GROUPS = ['media_nommer']
# EC2_AMI_ID = 'ami-eb558182'
# EC2_INSTANCE_TYPE = 'm1.large'
# MAX_ENCODING_JOBS_PER_EC2_INSTANCE = 2
# MAX_NUM_EC2_INSTANCES = 3
# JOB_OVERFLOW_THRESH = 2
# FEEDERD_JOB_STATE_CHANGE_CHECK_INTERVAL = 60
# FEEDERD_PRUNE_JOBS_INTERVAL = 60 * 5
# FEEDERD_ALLOW_EC2_LAUNCHES = True
# FEEDERD_ABANDON_INACTIVE_JOBS_THRESH = 3600 * 9
# FEEDERD_AUTO_SCALE_INTERVAL = 60
# NOMMERD_TERMINATE_WHEN_IDLE = True
# NOMMERD_MAX_INACTIVITY = 60 * 10
# NOMMERD_HEARTBEAT_INTERVAL = 60
# NOMMERD_NEW_JOB_CHECK_INTERVAL = 60
# NOMMERD_QTFASTSTART_BIN_PATH = '/home/nom/.virtualenvs/media_nommer/bin/qtfaststart'
# STORAGE_BACKENDS = {
# 's3': 'media_nommer.core.storage_backends.s3.S3Backend',
# 'http': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'https': 'media_nommer.core.storage_backends.http.HTTPBackend',
# 'file': 'media_nommer.core.storage_backends.file.FileBackend',
# }
#
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
#
# Path: media_nommer/core/storage_backends/s3.py
# class S3Backend(BaseStorageBackend):
# """
# Abstracts access to S3 via the common set of file storage backend methods.
# """
# @classmethod
# def _get_aws_s3_connection(cls, access_key, secret_access_key):
# """
# Lazy-loading of the S3 boto connection. Refer to this instead of
# referencing self._aws_s3_connection directly.
#
# :param str access_key: The AWS_ Access Key needed to get to the
# file in question.
# :param str secret_access_key: The AWS_ Secret Access Key needed to get
# to the file in question.
# :rtype: :py:class:`boto.s3.connection.Connection`
# :returns: A boto connection to Amazon's S3 interface.
# """
# return boto.connect_s3(access_key, secret_access_key)
#
# @classmethod
# def download_file(cls, uri, fobj):
# """
# Given a URI, download the file to the ``fobj`` file-like object.
#
# :param str uri: The URI of a file to download.
# :param file fobj: A file-like object to download the file to.
# :rtype: file
# :returns: A file handle to the downloaded file.
# """
# # Breaks the URI into usable componenents.
# values = get_values_from_media_uri(uri)
#
# conn = cls._get_aws_s3_connection(values['username'],
# values['password'])
# bucket = conn.get_bucket(values['host'])
# key = bucket.get_key(values['path'])
#
# logger.debug("S3Backend.download_file(): " \
# "Downloading: %s" % uri)
#
# try:
# key.get_contents_to_file(fobj)
# except AttributeError:
# # Raised by ResumableDownloadHandler in boto when the given S3
# # key can't be found.
# message = "The specified input file cannot be found."
# raise InfileNotFoundException(message)
#
# logger.debug("S3Backend.download_file(): " \
# "Download of %s completed." % uri)
# return fobj
#
# @classmethod
# def upload_file(cls, uri, fobj):
# """
# Given a file-like object, upload it to the specified URI.
#
# :param str uri: The URI to upload the file to.
# :param file fobj: The file-like object to populate the S3 key from.
# :rtype: :py:class:`boto.s3.key.Key`
# :returns: The newly set boto key.
# """
# # Breaks the URI into usable componenents.
# values = get_values_from_media_uri(uri)
# logger.debug("S3Backend.upload_file(): Received: %s" % values)
#
# conn = cls._get_aws_s3_connection(values['username'],
# values['password'])
# bucket = conn.create_bucket(values['host'])
# key = bucket.new_key(values['path'])
#
# logger.debug("S3Backend.upload_file(): "\
# "Settings contents of '%s' key from %s" % (
# values['path'], fobj.name))
# key.set_contents_from_filename(fobj.name)
#
# logger.debug("S3Backend.upload_file(): Upload complete.")
# return key
, which may include functions, classes, or code. Output only the next line. | logger.info("Uploading nomconf.py to S3.") |
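This row predicts the first log line of `upload_settings()`, which pushes the user's `nomconf` module to the `CONFIG_S3_BUCKET` via `S3Backend.upload_file()`. A hedged sketch of how the rest of the function might look; the `s3://user:pass@bucket/path` URI shape is inferred from the `get_values_from_media_uri()` usage in the quoted S3 backend and is an assumption, as is using `nomconf_module.__file__` for the source path:

```python
from media_nommer.conf import settings
from media_nommer.core.storage_backends.s3 import S3Backend


def upload_settings_sketch(nomconf_module):
    # Assumed URI format: credentials and bucket embedded in an s3:// URI.
    uri = 's3://%s:%s@%s/nomconf.py' % (
        settings.AWS_ACCESS_KEY_ID,
        settings.AWS_SECRET_ACCESS_KEY,
        settings.CONFIG_S3_BUCKET,
    )
    # S3Backend.upload_file() reads from fobj.name, so an open handle on the
    # module's source file (assumed to be a .py, not a .pyc) is enough.
    with open(nomconf_module.__file__, 'rb') as fobj:
        return S3Backend.upload_file(uri, fobj)
```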
Next line prediction: <|code_start|> )
def send_notification(job):
"""
Given an EncodingJob, see if it has a ``notify_url`` set, and dispatch
a notification to said URL (if set). Don't do any processing of the response.
:param EncodingJob job: The job whose state has changed.
"""
if not job.notify_url:
# No URL to notify, hang it up here.
return
job_state_details = job.job_state_details
if not job_state_details:
# Make sure we're not passing a 'None' string in like a silly boy.
job_state_details = ''
# This will be JSON-serialized and POSTed to the notify_url.
data = {
'unique_id': job.unique_id,
'job_state': job.job_state,
'job_state_details': job_state_details,
}
agent = Agent(reactor)
headers = Headers({
'User-Agent': ['media-nommer feederd'],
'Content-Type': ['application/x-www-form-urlencoded'],
})
<|code_end|>
. Use current file imports:
(import urllib
from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from media_nommer.utils.http import StringProducer
from media_nommer.utils import logger)
and context including class names, function names, or small code snippets from other files:
# Path: media_nommer/utils/http.py
# class StringProducer(object):
# """
# This is useful for twisted.web.client.Agent, which must use a producer
# to spoon feed the request over a period of time, instead of one big chunk.
#
# Check the Twisted documentation for an example of how this fits together::
#
# http://twistedmatrix.com/documents/current/web/howto/client.html
# """
# implements(IBodyProducer)
#
# def __init__(self, body):
# """
# :param str body: The body to be sent to the remote server.
# """
# self.body = body
# self.length = len(body)
#
# def startProducing(self, consumer):
# consumer.write(self.body)
# return succeed(None)
#
# def pauseProducing(self):
# pass
#
# def stopProducing(self):
# pass
#
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
. Output only the next line. | body = StringProducer(urllib.urlencode(data)) if data else None |
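The predicted line wraps the urlencoded payload in a `StringProducer` so Twisted's `Agent` can stream it. A self-contained sketch of that producer/agent pattern (Python 2, matching `urllib.urlencode`; the notify URL and job fields are placeholders, and the request/callback wiring is based on the standard `twisted.web.client.Agent` API rather than the dataset row):

```python
import urllib

from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.web.http_headers import Headers

from media_nommer.utils.http import StringProducer

data = {'unique_id': 'abc123', 'job_state': 'FINISHED', 'job_state_details': ''}
agent = Agent(reactor)
headers = Headers({
    'User-Agent': ['media-nommer feederd'],
    'Content-Type': ['application/x-www-form-urlencoded'],
})
body = StringProducer(urllib.urlencode(data))

# Agent.request() returns a Deferred that fires with the HTTP response.
d = agent.request('POST', 'http://example.com/notify', headers, body)
d.addCallback(lambda response: reactor.stop())
reactor.run()
```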
Continue the code snippet: <|code_start|>"""
This module contains code that handles notifying external services of state
changes in EncodingJobs. For example, when the state changes from
PENDING to DOWNLOADING, or ENCODING to FINISHED.
"""
def cb_response_received(response, unique_id, req_url):
"""
This is a callback function that is hit when a response comes back from
the remote server given in EncodingJob.notify_url. We'll just log it here
for troubleshooting purposes.
:param str unique_id: The job's unique ID.
:param str req_url: The URL that we notified.
"""
# Shouldn't ever happen in this case, but...
http_code = getattr(response, 'code', 'N/A')
<|code_end|>
. Use current file imports:
import urllib
from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from media_nommer.utils.http import StringProducer
from media_nommer.utils import logger
and context (classes, functions, or code) from other files:
# Path: media_nommer/utils/http.py
# class StringProducer(object):
# """
# This is useful for twisted.web.client.Agent, which must use a producer
# to spoon feed the request over a period of time, instead of one big chunk.
#
# Check the Twisted documentation for an example of how this fits together::
#
# http://twistedmatrix.com/documents/current/web/howto/client.html
# """
# implements(IBodyProducer)
#
# def __init__(self, body):
# """
# :param str body: The body to be sent to the remote server.
# """
# self.body = body
# self.length = len(body)
#
# def startProducing(self, consumer):
# consumer.write(self.body)
# return succeed(None)
#
# def pauseProducing(self):
# pass
#
# def stopProducing(self):
# pass
#
# Path: media_nommer/utils/logger.py
# def debug(message):
# def info(message):
# def warning(message):
# def error(message_or_obj=None):
. Output only the next line. | logger.info( |
Predict the next line after this snippet: <|code_start|>
def __init__(self, extension: Optional[str] = None) -> None:
Namer.__init__(self, extension)
self.set_for_stack(inspect.stack(1))
self.config: Dict[str, str] = {}
self.config_loaded = False
def set_for_stack(self, caller: List[FrameInfo]) -> None:
frame = self.get_test_frame(caller)
stacktrace = caller[frame]
self.MethodName = stacktrace[3]
self.ClassName = self.get_class_name_for_frame(stacktrace)
self.Directory = os.path.dirname(stacktrace[1])
def get_class_name_for_frame(self, stacktrace: FrameInfo) -> str:
if "self" not in stacktrace[0].f_locals:
return os.path.splitext(os.path.basename(stacktrace[1]))[0]
else:
return f"{stacktrace[0].f_locals['self'].__class__.__name__}"
def get_test_frame(self, caller: List[FrameInfo]) -> int:
tmp_array = []
for index, frame in enumerate(caller):
if self.is_test_method(frame):
tmp_array.append(index)
if tmp_array:
return tmp_array[-1]
message = """Could not find test method/function. Possible reasons could be:
1) approvaltests is not being used inside a test function
2) your test framework is not supported by ApprovalTests (unittest and pytest are currently supported)."""
<|code_end|>
using the current file's imports:
import inspect
import json
import os
from inspect import FrameInfo
from typing import Dict, List, Optional
from approvaltests.approval_exception import FrameNotFound
and any relevant context from other files:
# Path: approvaltests/approval_exception.py
# class FrameNotFound(ApprovalException):
# pass
. Output only the next line. | raise FrameNotFound(message) |
Using the snippet: <|code_start|> self.assertEqual("test_method", n.get_method_name())
def test_name_works_from_inside_an_other_method(self):
self.an_other_method()
def an_other_method(self):
n = StackFrameNamer()
self.assertEqual(
"test_name_works_from_inside_an_other_method", n.get_method_name()
)
def test_file(self):
directory = StackFrameNamer().get_directory()
assert os.path.exists(directory + "/test_namer.py")
def test_basename(self):
n = StackFrameNamer()
self.assertTrue(
n.get_basename().endswith("NamerTests.test_basename"), n.get_basename()
)
def test_received_name(self):
filename = StackFrameNamer().get_received_filename("./stuff")
self.assertEqual(filename, "./stuff.received.txt")
def test_approved_name(self):
filename = StackFrameNamer().get_approved_filename("./stuff")
self.assertEqual(filename, "./stuff.approved.txt")
def test_alternative_extension(self):
<|code_end|>
, determine the next line of code. You have imports:
import os
import unittest
from approvaltests.core.namer import Namer, StackFrameNamer
and context (class names, function names, or code) available:
# Path: approvaltests/core/namer.py
# class Namer(object):
# APPROVED = ".approved"
# RECEIVED = ".received"
#
# def __init__(self, extension: Optional[str] = None) -> None:
# self.extension_with_dot = extension or ".txt"
#
# def get_file_name(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_directory(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_config(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_basename(self) -> str:
# file_name = self.get_file_name()
# subdirectory = self.get_config().get("subdirectory", "")
# return str(os.path.join(self.get_directory(), subdirectory, file_name))
#
# def get_received_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.RECEIVED + self.extension_with_dot
#
# def get_approved_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.APPROVED + self.extension_with_dot
#
# def set_extension(self, extension):
# self.extension_with_dot = extension
#
# class StackFrameNamer(Namer):
# Directory = ""
# MethodName = ""
# ClassName = ""
#
# def __init__(self, extension: Optional[str] = None) -> None:
# Namer.__init__(self, extension)
# self.set_for_stack(inspect.stack(1))
# self.config: Dict[str, str] = {}
# self.config_loaded = False
#
# def set_for_stack(self, caller: List[FrameInfo]) -> None:
# frame = self.get_test_frame(caller)
# stacktrace = caller[frame]
# self.MethodName = stacktrace[3]
# self.ClassName = self.get_class_name_for_frame(stacktrace)
# self.Directory = os.path.dirname(stacktrace[1])
#
# def get_class_name_for_frame(self, stacktrace: FrameInfo) -> str:
# if "self" not in stacktrace[0].f_locals:
# return os.path.splitext(os.path.basename(stacktrace[1]))[0]
# else:
# return f"{stacktrace[0].f_locals['self'].__class__.__name__}"
#
# def get_test_frame(self, caller: List[FrameInfo]) -> int:
# tmp_array = []
# for index, frame in enumerate(caller):
# if self.is_test_method(frame):
# tmp_array.append(index)
# if tmp_array:
# return tmp_array[-1]
# message = """Could not find test method/function. Possible reasons could be:
# 1) approvaltests is not being used inside a test function
# 2) your test framework is not supported by ApprovalTests (unittest and pytest are currently supported)."""
# raise FrameNotFound(message)
#
# def is_test_method(self, frame: FrameInfo) -> bool:
# method_name = frame[3]
# is_unittest_test = (
# "self" in frame[0].f_locals
# and "_testMethodName" in frame[0].f_locals["self"].__dict__
# and method_name != "__call__"
# and method_name != "_callTestMethod"
# and method_name != "run"
# )
#
# is_pytest_test = method_name.startswith("test_")
#
# return is_unittest_test or is_pytest_test
#
# def get_class_name(self) -> str:
# return self.ClassName
#
# def get_method_name(self) -> str:
# return self.MethodName
#
# def get_directory(self) -> str:
# return self.Directory
#
# def config_directory(self) -> str:
# return self.Directory
#
# def get_config(self) -> Dict[str, str]:
# """lazy load config when we need it, then store it in the instance variable self.config"""
# if not self.config_loaded:
# config_file = os.path.join(
# self.config_directory(), "approvaltests_config.json"
# )
# if os.path.exists(config_file):
# with open(config_file, "r") as f:
# self.config = json.load(f)
# else:
# self.config = {}
# self.config_loaded = True
# return self.config
#
# def get_file_name(self) -> str:
# class_name = "" if (self.ClassName is None) else (self.ClassName + ".")
# return class_name + self.MethodName
. Output only the next line. | n = Namer(extension=".html") |
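The two rows above exercise `StackFrameNamer` and the `.received` / `.approved` filename convention from `Namer`. A small sketch of that convention; it assumes the `approvaltests` package is importable and that the code runs inside a test function, since `StackFrameNamer` walks the call stack looking for one:

```python
from approvaltests.core.namer import StackFrameNamer


def test_namer_filenames():
    n = StackFrameNamer(extension=".html")
    base = n.get_basename()               # directory + "ClassName.method"-style name
    print(n.get_received_filename(base))  # -> base + ".received.html"
    print(n.get_approved_filename(base))  # -> base + ".approved.html"
```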
Next line prediction: <|code_start|>
class ScenarioNamer(Namer):
"""
For use with parameterized tests.
Use this namer when the same test case needs to verify more than one value, and produce more than one file.
"""
<|code_end|>
. Use current file imports:
(from typing import Optional
from approvaltests.core.namer import StackFrameNamer, Namer)
and context including class names, function names, or small code snippets from other files:
# Path: approvaltests/core/namer.py
# class StackFrameNamer(Namer):
# Directory = ""
# MethodName = ""
# ClassName = ""
#
# def __init__(self, extension: Optional[str] = None) -> None:
# Namer.__init__(self, extension)
# self.set_for_stack(inspect.stack(1))
# self.config: Dict[str, str] = {}
# self.config_loaded = False
#
# def set_for_stack(self, caller: List[FrameInfo]) -> None:
# frame = self.get_test_frame(caller)
# stacktrace = caller[frame]
# self.MethodName = stacktrace[3]
# self.ClassName = self.get_class_name_for_frame(stacktrace)
# self.Directory = os.path.dirname(stacktrace[1])
#
# def get_class_name_for_frame(self, stacktrace: FrameInfo) -> str:
# if "self" not in stacktrace[0].f_locals:
# return os.path.splitext(os.path.basename(stacktrace[1]))[0]
# else:
# return f"{stacktrace[0].f_locals['self'].__class__.__name__}"
#
# def get_test_frame(self, caller: List[FrameInfo]) -> int:
# tmp_array = []
# for index, frame in enumerate(caller):
# if self.is_test_method(frame):
# tmp_array.append(index)
# if tmp_array:
# return tmp_array[-1]
# message = """Could not find test method/function. Possible reasons could be:
# 1) approvaltests is not being used inside a test function
# 2) your test framework is not supported by ApprovalTests (unittest and pytest are currently supported)."""
# raise FrameNotFound(message)
#
# def is_test_method(self, frame: FrameInfo) -> bool:
# method_name = frame[3]
# is_unittest_test = (
# "self" in frame[0].f_locals
# and "_testMethodName" in frame[0].f_locals["self"].__dict__
# and method_name != "__call__"
# and method_name != "_callTestMethod"
# and method_name != "run"
# )
#
# is_pytest_test = method_name.startswith("test_")
#
# return is_unittest_test or is_pytest_test
#
# def get_class_name(self) -> str:
# return self.ClassName
#
# def get_method_name(self) -> str:
# return self.MethodName
#
# def get_directory(self) -> str:
# return self.Directory
#
# def config_directory(self) -> str:
# return self.Directory
#
# def get_config(self) -> Dict[str, str]:
# """lazy load config when we need it, then store it in the instance variable self.config"""
# if not self.config_loaded:
# config_file = os.path.join(
# self.config_directory(), "approvaltests_config.json"
# )
# if os.path.exists(config_file):
# with open(config_file, "r") as f:
# self.config = json.load(f)
# else:
# self.config = {}
# self.config_loaded = True
# return self.config
#
# def get_file_name(self) -> str:
# class_name = "" if (self.ClassName is None) else (self.ClassName + ".")
# return class_name + self.MethodName
#
# class Namer(object):
# APPROVED = ".approved"
# RECEIVED = ".received"
#
# def __init__(self, extension: Optional[str] = None) -> None:
# self.extension_with_dot = extension or ".txt"
#
# def get_file_name(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_directory(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_config(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_basename(self) -> str:
# file_name = self.get_file_name()
# subdirectory = self.get_config().get("subdirectory", "")
# return str(os.path.join(self.get_directory(), subdirectory, file_name))
#
# def get_received_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.RECEIVED + self.extension_with_dot
#
# def get_approved_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.APPROVED + self.extension_with_dot
#
# def set_extension(self, extension):
# self.extension_with_dot = extension
. Output only the next line. | def __init__(self, base_namer: StackFrameNamer, scenario_name: int) -> None: |
Given snippet: <|code_start|>
class ReportWithBeyondCompareWindows(GenericDiffReporter):
def __init__(self):
super().__init__(
config=GenericDiffReporterConfig(
name=self.__class__.__name__,
path="{ProgramFiles}/Beyond Compare 4/BCompare.exe",
)
)
class ReportWithWinMerge(GenericDiffReporter):
def __init__(self):
super().__init__(
config=GenericDiffReporterConfig(
name=self.__class__.__name__,
path="{ProgramFiles}/WinMerge/WinMergeU.exe",
)
)
class ReportWithPycharm(GenericDiffReporter):
def __init__(self):
super().__init__(
config=GenericDiffReporterConfig(
name=self.__class__.__name__,
path="{ProgramFiles}/JetBrains/PyCharm 2021.2.2/bin/pycharm64.exe",
extra_args=["diff"],
)
)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from approvaltests.reporters.first_working_reporter import FirstWorkingReporter
from approvaltests.reporters.generic_diff_reporter import (
GenericDiffReporter,
GenericDiffReporterConfig,
)
and context:
# Path: approvaltests/reporters/first_working_reporter.py
# class FirstWorkingReporter(Reporter):
# """
# A composite reporter that goes through a list
# of reporters, running the first one that is
# working on the current machine.
#
# This is mostly an implementation detail of other
# classes in the library, but it may be useful in scenarios
# where a team wants to supply a list of custom reporter,
# and have the first working one of these be used.
#
# See also MultiReporter.
# """
#
# def __init__(self, *reporters) -> None:
# self.reporters = reporters
#
# def report(self, received_path: str, approved_path: str) -> bool:
# for r in self.reporters:
# try:
# success = r.report(received_path, approved_path)
# if success:
# return True
# except:
# pass
#
# return False
#
# def __str__(self):
# reporters = ", ".join(str(s) for s in self.reporters)
# return f"FirstWorkingReporter({reporters})"
#
# __repr__ = __str__
#
# def __eq__(self, other) -> bool:
# return repr(self) == repr(other)
#
# Path: approvaltests/reporters/generic_diff_reporter.py
# class GenericDiffReporter(Reporter):
# """
# A reporter that launches
# an external diff tool given by config.
# """
#
# @staticmethod
# def create(diff_tool_path: str) -> "GenericDiffReporter":
# return GenericDiffReporter(create_config(["custom", diff_tool_path]))
#
# def __init__(self, config: GenericDiffReporterConfig) -> None:
# self.name = config.name
# self.path = self.expand_program_files(config.path)
# self.extra_args = config.extra_args
#
# def __str__(self) -> str:
# if self.extra_args:
# config = {
# "name": self.name,
# "path": self.path,
# "arguments": self.extra_args,
# }
# else:
# config = {"name": self.name, "path": self.path}
#
# return to_json(config)
#
# @staticmethod
# def run_command(command_array):
# subprocess.Popen(command_array)
#
# def get_command(self, received: str, approved: str) -> List[str]:
# return [self.path] + self.extra_args + [received, approved]
#
# def report(self, received_path: str, approved_path: str) -> bool:
# if not self.is_working():
# return False
# ensure_file_exists(approved_path)
# command_array = self.get_command(received_path, approved_path)
# self.run_command(command_array)
# return True
#
# def is_working(self) -> bool:
# found = Command(self.path).locate()
# if not found:
# return False
# else:
# self.path = found
# return True
#
# @staticmethod
# def expand_program_files(path: str) -> str:
# if PROGRAM_FILES not in path:
# return path
#
# for candidate in [
# r"C:/Program Files",
# r"C:/Program Files (x86)",
# r"C:/ProgramW6432",
# ]:
# possible = path.replace(PROGRAM_FILES, candidate)
# if Command.executable(possible):
# return possible
# return path.replace(PROGRAM_FILES, "C:/Program Files")
#
# class GenericDiffReporterConfig:
# def __init__(self, name: str, path: str, extra_args: Optional[List[str]] = None):
# self.name = name
# self.path = path
# self.extra_args = extra_args or []
#
# def serialize(self):
# result = [self.name, self.path]
# if self.extra_args:
# result.append(self.extra_args)
# return result
which might include code, classes, or functions. Output only the next line. | class ReportWithBeyondCompare(FirstWorkingReporter): |
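The rows above all follow the same pattern: subclass `GenericDiffReporter` and hand it a `GenericDiffReporterConfig` naming an external diff executable. A sketch of that pattern with a hypothetical tool path (the `meld` location is illustrative, not from the dataset):

```python
from approvaltests.reporters.generic_diff_reporter import (
    GenericDiffReporter,
    GenericDiffReporterConfig,
)


class ReportWithMeld(GenericDiffReporter):
    def __init__(self):
        super().__init__(
            config=GenericDiffReporterConfig(
                name=self.__class__.__name__,
                path="/usr/bin/meld",  # hypothetical path to the diff tool
            )
        )
```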
Given the following code snippet before the placeholder: <|code_start|>
class ReportWithBeyondCompareLinux(GenericDiffReporter):
def __init__(self):
super().__init__(
<|code_end|>
, predict the next line using imports from the current file:
from approvaltests.reporters.first_working_reporter import FirstWorkingReporter
from approvaltests.reporters.generic_diff_reporter import (
GenericDiffReporter,
GenericDiffReporterConfig,
)
and context including class names, function names, and sometimes code from other files:
# Path: approvaltests/reporters/first_working_reporter.py
# class FirstWorkingReporter(Reporter):
# """
# A composite reporter that goes through a list
# of reporters, running the first one that is
# working on the current machine.
#
# This is mostly an implementation detail of other
# classes in the library, but it may be useful in scenarios
# where a team wants to supply a list of custom reporter,
# and have the first working one of these be used.
#
# See also MultiReporter.
# """
#
# def __init__(self, *reporters) -> None:
# self.reporters = reporters
#
# def report(self, received_path: str, approved_path: str) -> bool:
# for r in self.reporters:
# try:
# success = r.report(received_path, approved_path)
# if success:
# return True
# except:
# pass
#
# return False
#
# def __str__(self):
# reporters = ", ".join(str(s) for s in self.reporters)
# return f"FirstWorkingReporter({reporters})"
#
# __repr__ = __str__
#
# def __eq__(self, other) -> bool:
# return repr(self) == repr(other)
#
# Path: approvaltests/reporters/generic_diff_reporter.py
# class GenericDiffReporter(Reporter):
# """
# A reporter that launches
# an external diff tool given by config.
# """
#
# @staticmethod
# def create(diff_tool_path: str) -> "GenericDiffReporter":
# return GenericDiffReporter(create_config(["custom", diff_tool_path]))
#
# def __init__(self, config: GenericDiffReporterConfig) -> None:
# self.name = config.name
# self.path = self.expand_program_files(config.path)
# self.extra_args = config.extra_args
#
# def __str__(self) -> str:
# if self.extra_args:
# config = {
# "name": self.name,
# "path": self.path,
# "arguments": self.extra_args,
# }
# else:
# config = {"name": self.name, "path": self.path}
#
# return to_json(config)
#
# @staticmethod
# def run_command(command_array):
# subprocess.Popen(command_array)
#
# def get_command(self, received: str, approved: str) -> List[str]:
# return [self.path] + self.extra_args + [received, approved]
#
# def report(self, received_path: str, approved_path: str) -> bool:
# if not self.is_working():
# return False
# ensure_file_exists(approved_path)
# command_array = self.get_command(received_path, approved_path)
# self.run_command(command_array)
# return True
#
# def is_working(self) -> bool:
# found = Command(self.path).locate()
# if not found:
# return False
# else:
# self.path = found
# return True
#
# @staticmethod
# def expand_program_files(path: str) -> str:
# if PROGRAM_FILES not in path:
# return path
#
# for candidate in [
# r"C:/Program Files",
# r"C:/Program Files (x86)",
# r"C:/ProgramW6432",
# ]:
# possible = path.replace(PROGRAM_FILES, candidate)
# if Command.executable(possible):
# return possible
# return path.replace(PROGRAM_FILES, "C:/Program Files")
#
# class GenericDiffReporterConfig:
# def __init__(self, name: str, path: str, extra_args: Optional[List[str]] = None):
# self.name = name
# self.path = path
# self.extra_args = extra_args or []
#
# def serialize(self):
# result = [self.name, self.path]
# if self.extra_args:
# result.append(self.extra_args)
# return result
. Output only the next line. | config=GenericDiffReporterConfig( |
Predict the next line for this snippet: <|code_start|>
class IntroductionReporter(Reporter):
def report(self, received_path: str, approved_path: str) -> bool:
print(self.get_text())
<|code_end|>
with the help of current file imports:
from approvaltests.core.reporter import Reporter
from approvaltests.reporters.python_native_reporter import PythonNativeReporter
and context from other files:
# Path: approvaltests/core/reporter.py
# class Reporter(ABC):
# """
# Super class of all reporters in ApprovalTests.Python
#
# The only necessary function to implement for a
# reporter is 'report', which takes the absolute
# paths of the received- and approved files, and
# returns a truthy value on success.
# """
#
# def __eq__(self, other):
# return repr(self) == repr(other)
#
# @abstractmethod
# def report(self, received_path: str, approved_path: str) -> bool:
# """
# Apply the reporter to pair of files given
# as absolute paths parameters.
#
# A truthy return value from report means that it succeeded,
# such as because any command existed.
#
# A falsy return value from report means that its operation
# failed in some way.
#
# Note: At the time of writing, not all implementations of
# Reporter return this value correctly.
# """
# raise Exception("Interface member not implemented")
#
# Path: approvaltests/reporters/python_native_reporter.py
# class PythonNativeReporter(Reporter):
# """
# A reporter that outputs diff straight
# to standard output.
#
# This is useful when running in a non-GUI environment,
# such as in Continuous Integration systems.
# """
#
# def report(self, received_path: str, approved_path: str) -> bool:
# ensure_file_exists(approved_path)
# print(calculate_diff_with_approve_instruction(received_path, approved_path))
# return True
#
# def __str__(self):
# return self.__class__.__name__
#
# __repr__ = __str__
, which may contain function names, class names, or code. Output only the next line. | return PythonNativeReporter().report(received_path, approved_path) |
Using the snippet: <|code_start|>
class ReporterForTesting(Reporter):
def __init__(self, success: bool, additional: Optional[Callable] = None) -> None:
if additional is None:
additional = lambda: None
self.additional = additional
self.called = False
self.success = success
def __str__(self):
return f"{self.__class__.__name__}({self.success})"
__repr__ = __str__
def report(self, received_path: str, approved_path: str) -> bool:
self.called = True
self.additional()
return self.success
class TestFirstWorkingReporter(unittest.TestCase):
def test_first_one(self) -> None:
r1 = ReporterForTesting(True)
r2 = ReporterForTesting(False)
<|code_end|>
, determine the next line of code. You have imports:
import unittest
from approvaltests.core.reporter import Reporter
from approvaltests.reporters.first_working_reporter import FirstWorkingReporter
from typing import Callable, Optional
and context (class names, function names, or code) available:
# Path: approvaltests/core/reporter.py
# class Reporter(ABC):
# """
# Super class of all reporters in ApprovalTests.Python
#
# The only necessary function to implement for a
# reporter is 'report', which takes the absolute
# paths of the received- and approved files, and
# returns a truthy value on success.
# """
#
# def __eq__(self, other):
# return repr(self) == repr(other)
#
# @abstractmethod
# def report(self, received_path: str, approved_path: str) -> bool:
# """
# Apply the reporter to pair of files given
# as absolute paths parameters.
#
# A truthy return value from report means that it succeeded,
# such as because any command existed.
#
# A falsy return value from report means that its operation
# failed in some way.
#
# Note: At the time of writing, not all implementations of
# Reporter return this value correctly.
# """
# raise Exception("Interface member not implemented")
#
# Path: approvaltests/reporters/first_working_reporter.py
# class FirstWorkingReporter(Reporter):
# """
# A composite reporter that goes through a list
# of reporters, running the first one that is
# working on the current machine.
#
# This is mostly an implementation detail of other
# classes in the library, but it may be useful in scenarios
# where a team wants to supply a list of custom reporter,
# and have the first working one of these be used.
#
# See also MultiReporter.
# """
#
# def __init__(self, *reporters) -> None:
# self.reporters = reporters
#
# def report(self, received_path: str, approved_path: str) -> bool:
# for r in self.reporters:
# try:
# success = r.report(received_path, approved_path)
# if success:
# return True
# except:
# pass
#
# return False
#
# def __str__(self):
# reporters = ", ".join(str(s) for s in self.reporters)
# return f"FirstWorkingReporter({reporters})"
#
# __repr__ = __str__
#
# def __eq__(self, other) -> bool:
# return repr(self) == repr(other)
. Output only the next line. | first = FirstWorkingReporter(r1, r2) |
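first = FirstWorkingReporter(r1, r2)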
Given the code snippet: <|code_start|>
class DiffReporter(FirstWorkingReporter):
"""
The DiffReporter class goes through a chain of possible diffing tools,
to find the first option installed on your system.
If none are found, it falls back to writing the diffs on
the console.
At present, the default Reporter is the DiffReporter.
"""
def __init__(self, reporter_factory=None):
<|code_end|>
, generate the next line using the imports in this file:
from approvaltests.reporters.generic_diff_reporter_factory import (
GenericDiffReporterFactory,
)
from .first_working_reporter import FirstWorkingReporter
from .introduction_reporter import IntroductionReporter
from .python_native_reporter import PythonNativeReporter
and context (functions, classes, or occasionally code) from other files:
# Path: approvaltests/reporters/generic_diff_reporter_factory.py
# class GenericDiffReporterFactory(object):
# reporters: List[GenericDiffReporterConfig] = []
#
# def __init__(self) -> None:
# self.load(get_adjacent_file("reporters.json"))
#
# def add_default_reporter_config(self, config):
# self.reporters.insert(0, create_config(config))
#
# def list(self) -> List[str]:
# return [r.name for r in self.reporters]
#
# def get(self, reporter_name: str) -> Reporter:
# reporter = GenericDiffReporterFactory.get_reporter_programmmatically(
# reporter_name
# )
# return reporter or self.get_from_json_config(reporter_name)
#
# @staticmethod
# def get_reporter_programmmatically(reporter_name: str) -> Optional[Reporter]:
# reporters = {"BeyondCompare": ReportWithBeyondCompare,
# "WinMerge": ReportWithWinMerge}
# clazz = reporters.get(reporter_name)
# return clazz and clazz()
#
# def get_from_json_config(self, reporter_name: str) -> Reporter:
# config = next((r for r in self.reporters if r.name == reporter_name), None)
# if not config:
# return NoConfigReporter()
# return self._create_reporter(config)
#
# @staticmethod
# def _create_reporter(config: GenericDiffReporterConfig) -> GenericDiffReporter:
# return GenericDiffReporter(config)
#
# def save(self, file_name: str) -> str:
# with open(file_name, "w") as f:
# json.dump(
# [reporter.serialize() for reporter in self.reporters],
# f,
# sort_keys=True,
# indent=2,
# separators=(",", ": "),
# )
# return file_name
#
# def load(self, file_name: str) -> List[GenericDiffReporterConfig]:
# with open(file_name, "r") as f:
# configs = json.load(f)
# self.reporters = [create_config(config) for config in configs]
# return self.reporters
#
# def get_first_working(self) -> Optional[GenericDiffReporter]:
# working = (i for i in self.get_all_reporters() if i.is_working())
# return next(working, None)
#
# def get_all_reporters(self) -> Iterator[GenericDiffReporter]:
# instances = (self._create_reporter(r) for r in self.reporters)
# return instances
#
# def remove(self, reporter_name: str) -> None:
# self.reporters = [r for r in self.reporters if r.name != reporter_name]
#
# Path: approvaltests/reporters/first_working_reporter.py
# class FirstWorkingReporter(Reporter):
# """
# A composite reporter that goes through a list
# of reporters, running the first one that is
# working on the current machine.
#
# This is mostly an implementation detail of other
# classes in the library, but it may be useful in scenarios
# where a team wants to supply a list of custom reporter,
# and have the first working one of these be used.
#
# See also MultiReporter.
# """
#
# def __init__(self, *reporters) -> None:
# self.reporters = reporters
#
# def report(self, received_path: str, approved_path: str) -> bool:
# for r in self.reporters:
# try:
# success = r.report(received_path, approved_path)
# if success:
# return True
# except:
# pass
#
# return False
#
# def __str__(self):
# reporters = ", ".join(str(s) for s in self.reporters)
# return f"FirstWorkingReporter({reporters})"
#
# __repr__ = __str__
#
# def __eq__(self, other) -> bool:
# return repr(self) == repr(other)
#
# Path: approvaltests/reporters/introduction_reporter.py
# class IntroductionReporter(Reporter):
#
# def report(self, received_path: str, approved_path: str) -> bool:
# print(self.get_text())
# return PythonNativeReporter().report(received_path, approved_path)
#
# def get_text(self):
# return '''
# Welcome to ApprovalTests!
# No DiffReporters have been detected on this system.
# To learn more, visit [Introduction to Reporters](https://github.com/approvals/ApprovalTests.Python/blob/main/docs/tutorial/intro-to-reporters.md)
# '''
# def __str__(self):
# return self.__class__.__name__
#
# __repr__ = __str__
#
# Path: approvaltests/reporters/python_native_reporter.py
# class PythonNativeReporter(Reporter):
# """
# A reporter that outputs diff straight
# to standard output.
#
# This is useful when running in a non-GUI environment,
# such as in Continuous Integration systems.
# """
#
# def report(self, received_path: str, approved_path: str) -> bool:
# ensure_file_exists(approved_path)
# print(calculate_diff_with_approve_instruction(received_path, approved_path))
# return True
#
# def __str__(self):
# return self.__class__.__name__
#
# __repr__ = __str__
. Output only the next line. | factory = reporter_factory or GenericDiffReporterFactory() |
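factory = reporter_factory or GenericDiffReporterFactory()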
Based on the snippet: <|code_start|>
class DiffReporter(FirstWorkingReporter):
"""
The DiffReporter class goes through a chain of possible diffing tools,
to find the first option installed on your system.
If none are found, it falls back to writing the diffs on
the console.
At present, the default Reporter is the DiffReporter.
"""
def __init__(self, reporter_factory=None):
factory = reporter_factory or GenericDiffReporterFactory()
reporters = list(factory.get_all_reporters())
<|code_end|>
, predict the immediate next line with the help of imports:
from approvaltests.reporters.generic_diff_reporter_factory import (
GenericDiffReporterFactory,
)
from .first_working_reporter import FirstWorkingReporter
from .introduction_reporter import IntroductionReporter
from .python_native_reporter import PythonNativeReporter
and context (classes, functions, sometimes code) from other files:
# Path: approvaltests/reporters/generic_diff_reporter_factory.py
# class GenericDiffReporterFactory(object):
# reporters: List[GenericDiffReporterConfig] = []
#
# def __init__(self) -> None:
# self.load(get_adjacent_file("reporters.json"))
#
# def add_default_reporter_config(self, config):
# self.reporters.insert(0, create_config(config))
#
# def list(self) -> List[str]:
# return [r.name for r in self.reporters]
#
# def get(self, reporter_name: str) -> Reporter:
# reporter = GenericDiffReporterFactory.get_reporter_programmmatically(
# reporter_name
# )
# return reporter or self.get_from_json_config(reporter_name)
#
# @staticmethod
# def get_reporter_programmmatically(reporter_name: str) -> Optional[Reporter]:
# reporters = {"BeyondCompare": ReportWithBeyondCompare,
# "WinMerge": ReportWithWinMerge}
# clazz = reporters.get(reporter_name)
# return clazz and clazz()
#
# def get_from_json_config(self, reporter_name: str) -> Reporter:
# config = next((r for r in self.reporters if r.name == reporter_name), None)
# if not config:
# return NoConfigReporter()
# return self._create_reporter(config)
#
# @staticmethod
# def _create_reporter(config: GenericDiffReporterConfig) -> GenericDiffReporter:
# return GenericDiffReporter(config)
#
# def save(self, file_name: str) -> str:
# with open(file_name, "w") as f:
# json.dump(
# [reporter.serialize() for reporter in self.reporters],
# f,
# sort_keys=True,
# indent=2,
# separators=(",", ": "),
# )
# return file_name
#
# def load(self, file_name: str) -> List[GenericDiffReporterConfig]:
# with open(file_name, "r") as f:
# configs = json.load(f)
# self.reporters = [create_config(config) for config in configs]
# return self.reporters
#
# def get_first_working(self) -> Optional[GenericDiffReporter]:
# working = (i for i in self.get_all_reporters() if i.is_working())
# return next(working, None)
#
# def get_all_reporters(self) -> Iterator[GenericDiffReporter]:
# instances = (self._create_reporter(r) for r in self.reporters)
# return instances
#
# def remove(self, reporter_name: str) -> None:
# self.reporters = [r for r in self.reporters if r.name != reporter_name]
#
# Path: approvaltests/reporters/first_working_reporter.py
# class FirstWorkingReporter(Reporter):
# """
# A composite reporter that goes through a list
# of reporters, running the first one that is
# working on the current machine.
#
# This is mostly an implementation detail of other
# classes in the library, but it may be useful in scenarios
# where a team wants to supply a list of custom reporter,
# and have the first working one of these be used.
#
# See also MultiReporter.
# """
#
# def __init__(self, *reporters) -> None:
# self.reporters = reporters
#
# def report(self, received_path: str, approved_path: str) -> bool:
# for r in self.reporters:
# try:
# success = r.report(received_path, approved_path)
# if success:
# return True
# except:
# pass
#
# return False
#
# def __str__(self):
# reporters = ", ".join(str(s) for s in self.reporters)
# return f"FirstWorkingReporter({reporters})"
#
# __repr__ = __str__
#
# def __eq__(self, other) -> bool:
# return repr(self) == repr(other)
#
# Path: approvaltests/reporters/introduction_reporter.py
# class IntroductionReporter(Reporter):
#
# def report(self, received_path: str, approved_path: str) -> bool:
# print(self.get_text())
# return PythonNativeReporter().report(received_path, approved_path)
#
# def get_text(self):
# return '''
# Welcome to ApprovalTests!
# No DiffReporters have been detected on this system.
# To learn more, visit [Introduction to Reporters](https://github.com/approvals/ApprovalTests.Python/blob/main/docs/tutorial/intro-to-reporters.md)
# '''
# def __str__(self):
# return self.__class__.__name__
#
# __repr__ = __str__
#
# Path: approvaltests/reporters/python_native_reporter.py
# class PythonNativeReporter(Reporter):
# """
# A reporter that outputs diff straight
# to standard output.
#
# This is useful when running in a non-GUI environment,
# such as in Continuous Integration systems.
# """
#
# def report(self, received_path: str, approved_path: str) -> bool:
# ensure_file_exists(approved_path)
# print(calculate_diff_with_approve_instruction(received_path, approved_path))
# return True
#
# def __str__(self):
# return self.__class__.__name__
#
# __repr__ = __str__
. Output only the next line. | reporters.append(IntroductionReporter()) |
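reporters.append(IntroductionReporter())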
Given the code snippet: <|code_start|>
def exists(path: str) -> bool:
return os.path.isfile(path)
class ReporterNotWorkingException(Exception):
def __init__(self, reporter: Reporter):
super().__init__(f"Reporter {reporter} failed to work!")
<|code_end|>
, generate the next line using the imports in this file:
import filecmp
import os
import pathlib
from abc import ABC, abstractproperty
from typing import Optional
from approvaltests.core.comparator import Comparator
from approvaltests.core.namer import Namer
from approvaltests.core.reporter import Reporter
from approvaltests.core.writer import Writer
and context (functions, classes, or occasionally code) from other files:
# Path: approvaltests/core/comparator.py
# class Comparator(ABC):
# """
# Super class of all Comparators in ApprovalTests.Python
#
# The only necessary function to implement is
# 'compare', which takes the absolute
# paths of the received- and approved files, and
# returns a truthy value on success.
# """
#
# def __eq__(self, other):
# return repr(self) == repr(other)
#
# @abstractmethod
# def compare(self, received_path: str, approved_path: str) -> bool:
# """
# Checks if two files contain the same information
# """
# raise Exception("Interface member not implemented")
#
# Path: approvaltests/core/namer.py
# class Namer(object):
# APPROVED = ".approved"
# RECEIVED = ".received"
#
# def __init__(self, extension: Optional[str] = None) -> None:
# self.extension_with_dot = extension or ".txt"
#
# def get_file_name(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_directory(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_config(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_basename(self) -> str:
# file_name = self.get_file_name()
# subdirectory = self.get_config().get("subdirectory", "")
# return str(os.path.join(self.get_directory(), subdirectory, file_name))
#
# def get_received_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.RECEIVED + self.extension_with_dot
#
# def get_approved_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.APPROVED + self.extension_with_dot
#
# def set_extension(self, extension):
# self.extension_with_dot = extension
#
# Path: approvaltests/core/reporter.py
# class Reporter(ABC):
# """
# Super class of all reporters in ApprovalTests.Python
#
# The only necessary function to implement for a
# reporter is 'report', which takes the absolute
# paths of the received- and approved files, and
# returns a truthy value on success.
# """
#
# def __eq__(self, other):
# return repr(self) == repr(other)
#
# @abstractmethod
# def report(self, received_path: str, approved_path: str) -> bool:
# """
# Apply the reporter to pair of files given
# as absolute paths parameters.
#
# A truthy return value from report means that it succeeded,
# such as because any command existed.
#
# A falsy return value from report means that its operation
# failed in some way.
#
# Note: At the time of writing, not all implementations of
# Reporter return this value correctly.
# """
# raise Exception("Interface member not implemented")
#
# Path: approvaltests/core/writer.py
# class Writer(object):
# # interface
#
# def write_received_file(self, received_file: str) -> str:
# raise Exception("Interface member not implemented")
. Output only the next line. | class FileComparator(Comparator): |
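class FileComparator(Comparator):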
Continue the code snippet: <|code_start|>
def exists(path: str) -> bool:
return os.path.isfile(path)
class ReporterNotWorkingException(Exception):
def __init__(self, reporter: Reporter):
super().__init__(f"Reporter {reporter} failed to work!")
class FileComparator(Comparator):
def compare(self, received_path: str, approved_path: str) -> bool:
if not exists(approved_path) or not exists(received_path):
return False
if filecmp.cmp(approved_path, received_path):
return True
try:
approved_raw = pathlib.Path(approved_path).read_text()
approved_text = approved_raw.replace("\r\n", "\n")
received_raw = pathlib.Path(received_path).read_text()
received_text = received_raw.replace("\r\n", "\n")
return approved_text == received_text
except:
return False
class FileApprover(object):
def verify(
self,
<|code_end|>
. Use current file imports:
import filecmp
import os
import pathlib
from abc import ABC, abstractproperty
from typing import Optional
from approvaltests.core.comparator import Comparator
from approvaltests.core.namer import Namer
from approvaltests.core.reporter import Reporter
from approvaltests.core.writer import Writer
and context (classes, functions, or code) from other files:
# Path: approvaltests/core/comparator.py
# class Comparator(ABC):
# """
# Super class of all Comparators in ApprovalTests.Python
#
# The only necessary function to implement is
# 'compare', which takes the absolute
# paths of the received- and approved files, and
# returns a truthy value on success.
# """
#
# def __eq__(self, other):
# return repr(self) == repr(other)
#
# @abstractmethod
# def compare(self, received_path: str, approved_path: str) -> bool:
# """
# Checks if two files contain the same information
# """
# raise Exception("Interface member not implemented")
#
# Path: approvaltests/core/namer.py
# class Namer(object):
# APPROVED = ".approved"
# RECEIVED = ".received"
#
# def __init__(self, extension: Optional[str] = None) -> None:
# self.extension_with_dot = extension or ".txt"
#
# def get_file_name(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_directory(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_config(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_basename(self) -> str:
# file_name = self.get_file_name()
# subdirectory = self.get_config().get("subdirectory", "")
# return str(os.path.join(self.get_directory(), subdirectory, file_name))
#
# def get_received_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.RECEIVED + self.extension_with_dot
#
# def get_approved_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.APPROVED + self.extension_with_dot
#
# def set_extension(self, extension):
# self.extension_with_dot = extension
#
# Path: approvaltests/core/reporter.py
# class Reporter(ABC):
# """
# Super class of all reporters in ApprovalTests.Python
#
# The only necessary function to implement for a
# reporter is 'report', which takes the absolute
# paths of the received- and approved files, and
# returns a truthy value on success.
# """
#
# def __eq__(self, other):
# return repr(self) == repr(other)
#
# @abstractmethod
# def report(self, received_path: str, approved_path: str) -> bool:
# """
# Apply the reporter to pair of files given
# as absolute paths parameters.
#
# A truthy return value from report means that it succeeded,
# such as because any command existed.
#
# A falsy return value from report means that its operation
# failed in some way.
#
# Note: At the time of writing, not all implementations of
# Reporter return this value correctly.
# """
# raise Exception("Interface member not implemented")
#
# Path: approvaltests/core/writer.py
# class Writer(object):
# # interface
#
# def write_received_file(self, received_file: str) -> str:
# raise Exception("Interface member not implemented")
. Output only the next line. | namer: Namer, |
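namer: Namer,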
Next line prediction: <|code_start|>
def exists(path: str) -> bool:
return os.path.isfile(path)
class ReporterNotWorkingException(Exception):
<|code_end|>
. Use current file imports:
(import filecmp
import os
import pathlib
from abc import ABC, abstractproperty
from typing import Optional
from approvaltests.core.comparator import Comparator
from approvaltests.core.namer import Namer
from approvaltests.core.reporter import Reporter
from approvaltests.core.writer import Writer)
and context including class names, function names, or small code snippets from other files:
# Path: approvaltests/core/comparator.py
# class Comparator(ABC):
# """
# Super class of all Comparators in ApprovalTests.Python
#
# The only necessary function to implement is
# 'compare', which takes the absolute
# paths of the received- and approved files, and
# returns a truthy value on success.
# """
#
# def __eq__(self, other):
# return repr(self) == repr(other)
#
# @abstractmethod
# def compare(self, received_path: str, approved_path: str) -> bool:
# """
# Checks if two files contain the same information
# """
# raise Exception("Interface member not implemented")
#
# Path: approvaltests/core/namer.py
# class Namer(object):
# APPROVED = ".approved"
# RECEIVED = ".received"
#
# def __init__(self, extension: Optional[str] = None) -> None:
# self.extension_with_dot = extension or ".txt"
#
# def get_file_name(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_directory(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_config(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_basename(self) -> str:
# file_name = self.get_file_name()
# subdirectory = self.get_config().get("subdirectory", "")
# return str(os.path.join(self.get_directory(), subdirectory, file_name))
#
# def get_received_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.RECEIVED + self.extension_with_dot
#
# def get_approved_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.APPROVED + self.extension_with_dot
#
# def set_extension(self, extension):
# self.extension_with_dot = extension
#
# Path: approvaltests/core/reporter.py
# class Reporter(ABC):
# """
# Super class of all reporters in ApprovalTests.Python
#
# The only necessary function to implement for a
# reporter is 'report', which takes the absolute
# paths of the received- and approved files, and
# returns a truthy value on success.
# """
#
# def __eq__(self, other):
# return repr(self) == repr(other)
#
# @abstractmethod
# def report(self, received_path: str, approved_path: str) -> bool:
# """
# Apply the reporter to pair of files given
# as absolute paths parameters.
#
# A truthy return value from report means that it succeeded,
# such as because any command existed.
#
# A falsy return value from report means that its operation
# failed in some way.
#
# Note: At the time of writing, not all implementations of
# Reporter return this value correctly.
# """
# raise Exception("Interface member not implemented")
#
# Path: approvaltests/core/writer.py
# class Writer(object):
# # interface
#
# def write_received_file(self, received_file: str) -> str:
# raise Exception("Interface member not implemented")
. Output only the next line. | def __init__(self, reporter: Reporter): |
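def __init__(self, reporter: Reporter):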
Based on the snippet: <|code_start|>
def exists(path: str) -> bool:
return os.path.isfile(path)
class ReporterNotWorkingException(Exception):
def __init__(self, reporter: Reporter):
super().__init__(f"Reporter {reporter} failed to work!")
class FileComparator(Comparator):
def compare(self, received_path: str, approved_path: str) -> bool:
if not exists(approved_path) or not exists(received_path):
return False
if filecmp.cmp(approved_path, received_path):
return True
try:
approved_raw = pathlib.Path(approved_path).read_text()
approved_text = approved_raw.replace("\r\n", "\n")
received_raw = pathlib.Path(received_path).read_text()
received_text = received_raw.replace("\r\n", "\n")
return approved_text == received_text
except:
return False
class FileApprover(object):
def verify(
self,
namer: Namer,
<|code_end|>
, predict the immediate next line with the help of imports:
import filecmp
import os
import pathlib
from abc import ABC, abstractproperty
from typing import Optional
from approvaltests.core.comparator import Comparator
from approvaltests.core.namer import Namer
from approvaltests.core.reporter import Reporter
from approvaltests.core.writer import Writer
and context (classes, functions, sometimes code) from other files:
# Path: approvaltests/core/comparator.py
# class Comparator(ABC):
# """
# Super class of all Comparators in ApprovalTests.Python
#
# The only necessary function to implement is
# 'compare', which takes the absolute
# paths of the received- and approved files, and
# returns a truthy value on success.
# """
#
# def __eq__(self, other):
# return repr(self) == repr(other)
#
# @abstractmethod
# def compare(self, received_path: str, approved_path: str) -> bool:
# """
# Checks if two files contain the same information
# """
# raise Exception("Interface member not implemented")
#
# Path: approvaltests/core/namer.py
# class Namer(object):
# APPROVED = ".approved"
# RECEIVED = ".received"
#
# def __init__(self, extension: Optional[str] = None) -> None:
# self.extension_with_dot = extension or ".txt"
#
# def get_file_name(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_directory(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_config(self):
# raise Exception("This class is abstract, override this method in a subclass")
#
# def get_basename(self) -> str:
# file_name = self.get_file_name()
# subdirectory = self.get_config().get("subdirectory", "")
# return str(os.path.join(self.get_directory(), subdirectory, file_name))
#
# def get_received_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.RECEIVED + self.extension_with_dot
#
# def get_approved_filename(self, basename: Optional[str] = None) -> str:
# basename = basename or self.get_basename()
# return basename + Namer.APPROVED + self.extension_with_dot
#
# def set_extension(self, extension):
# self.extension_with_dot = extension
#
# Path: approvaltests/core/reporter.py
# class Reporter(ABC):
# """
# Super class of all reporters in ApprovalTests.Python
#
# The only necessary function to implement for a
# reporter is 'report', which takes the absolute
# paths of the received- and approved files, and
# returns a truthy value on success.
# """
#
# def __eq__(self, other):
# return repr(self) == repr(other)
#
# @abstractmethod
# def report(self, received_path: str, approved_path: str) -> bool:
# """
# Apply the reporter to pair of files given
# as absolute paths parameters.
#
# A truthy return value from report means that it succeeded,
# such as because any command existed.
#
# A falsy return value from report means that its operation
# failed in some way.
#
# Note: At the time of writing, not all implementations of
# Reporter return this value correctly.
# """
# raise Exception("Interface member not implemented")
#
# Path: approvaltests/core/writer.py
# class Writer(object):
# # interface
#
# def write_received_file(self, received_file: str) -> str:
# raise Exception("Interface member not implemented")
. Output only the next line. | writer: Writer, |
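writer: Writer,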
Predict the next line after this snippet: <|code_start|>
class EncryptionMixin(object):
@property
def encryption_backend(self):
if not hasattr(self, "_encryption"):
if hasattr(self, "bot") and hasattr(self.bot, "_encryption"):
self._encryption = self.bot._encryption
else:
# The ENCRYPTION_BACKEND setting points to a specific module namespace
# aes => will.encryption.aes
module_name = ''.join([
'will.backends.encryption.',
<|code_end|>
using the current file's imports:
import importlib
import logging
import dill as pickle
import functools
from will import settings
and any relevant context from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
. Output only the next line. | getattr(settings, 'ENCRYPTION_BACKEND', 'aes'), |
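getattr(settings, 'ENCRYPTION_BACKEND', 'aes'),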
Given snippet: <|code_start|>
class RegexBackend(GenerationBackend):
def do_generate(self, event):
exclude_list = ["fn", ]
matches = []
message = event.data
for name, l in self.bot.message_listeners.items():
search_matches = l["regex"].search(message.content)
if (
# The search regex matches and
search_matches
# It's not from me, or this search includes me, and
and (
message.will_said_it is False
or ("include_me" in l and l["include_me"])
)
# I'm mentioned, or this is an overheard, or we're in a 1-1
and (
message.is_private_chat
or ("direct_mentions_only" not in l or not l["direct_mentions_only"])
or message.is_direct
)
):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from will import settings
from will.decorators import require_settings
from will.utils import Bunch
from .base import GenerationBackend, GeneratedOption
and context:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/decorators.py
# def require_settings(*setting_names):
# def wrap(f):
# def wrapped_f(*args, **kwargs):
# f(*args, **kwargs)
# wrapped_f.will_fn_metadata = getattr(f, "will_fn_metadata", {})
# wrapped_f.will_fn_metadata["required_settings"] = setting_names
# return wrapped_f
# return wrap
#
# Path: will/utils.py
# class Bunch(dict):
# def __init__(self, **kw):
# dict.__init__(self, kw)
# self.__dict__ = self
#
# def __getstate__(self):
# return self
#
# def __setstate__(self, state):
# self.update(state)
# self.__dict__ = self
#
# Path: will/backends/generation/base.py
# class GenerationBackend(PubSubMixin, SleepMixin, object):
# is_will_generationbackend = True
#
# def __watch_pubsub(self):
# while True:
# try:
# m = self.pubsub.get_message()
# if m:
# self.__generate(m.data)
# except (KeyboardInterrupt, SystemExit):
# pass
# self.sleep_for_event_loop()
#
# def __generate(self, message):
# ret = self.do_generate(message)
# try:
# self.pubsub.publish("generation.complete", ret, reference_message=message)
# except (KeyboardInterrupt, SystemExit):
# pass
# except:
# logging.critical("Error publishing generation.complete: \n%s" % traceback.format_exc())
#
# def do_generate(self, message):
# # Take message, return a list of possible responses/matches
# raise NotImplementedError
#
# def start(self, name, **kwargs):
# for k, v in kwargs.items():
# self.__dict__[k] = v
#
# self.name = name
# self.bootstrap_pubsub()
# self.subscribe("generation.start")
# self.__watch_pubsub()
#
# class GeneratedOption(object):
#
# def __init__(self, *args, **kwargs):
# for o in OPTION_REQUIRED_FIELDS:
# if o not in kwargs:
# raise Exception("Missing '%s' argument to the generator backend." % o)
#
# for k, v in kwargs.items():
# self.__dict__[k] = v
#
# return super(GeneratedOption, self).__init__()
#
# def __unicode__(self):
# return "%s - %s" % (self.score, self.context)
#
# def __str__(self):
# return "%s - %s" % (self.score, self.context)
which might include code, classes, or functions. Output only the next line. | context = Bunch() |
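context = Bunch()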
Here is a snippet: <|code_start|> def do_generate(self, event):
exclude_list = ["fn", ]
matches = []
message = event.data
for name, l in self.bot.message_listeners.items():
search_matches = l["regex"].search(message.content)
if (
# The search regex matches and
search_matches
# It's not from me, or this search includes me, and
and (
message.will_said_it is False
or ("include_me" in l and l["include_me"])
)
# I'm mentioned, or this is an overheard, or we're in a 1-1
and (
message.is_private_chat
or ("direct_mentions_only" not in l or not l["direct_mentions_only"])
or message.is_direct
)
):
context = Bunch()
for k, v in l.items():
if k not in exclude_list:
context[k] = v
context.search_matches = search_matches.groupdict()
<|code_end|>
. Write the next line using the current file imports:
import re
from will import settings
from will.decorators import require_settings
from will.utils import Bunch
from .base import GenerationBackend, GeneratedOption
and context from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/decorators.py
# def require_settings(*setting_names):
# def wrap(f):
# def wrapped_f(*args, **kwargs):
# f(*args, **kwargs)
# wrapped_f.will_fn_metadata = getattr(f, "will_fn_metadata", {})
# wrapped_f.will_fn_metadata["required_settings"] = setting_names
# return wrapped_f
# return wrap
#
# Path: will/utils.py
# class Bunch(dict):
# def __init__(self, **kw):
# dict.__init__(self, kw)
# self.__dict__ = self
#
# def __getstate__(self):
# return self
#
# def __setstate__(self, state):
# self.update(state)
# self.__dict__ = self
#
# Path: will/backends/generation/base.py
# class GenerationBackend(PubSubMixin, SleepMixin, object):
# is_will_generationbackend = True
#
# def __watch_pubsub(self):
# while True:
# try:
# m = self.pubsub.get_message()
# if m:
# self.__generate(m.data)
# except (KeyboardInterrupt, SystemExit):
# pass
# self.sleep_for_event_loop()
#
# def __generate(self, message):
# ret = self.do_generate(message)
# try:
# self.pubsub.publish("generation.complete", ret, reference_message=message)
# except (KeyboardInterrupt, SystemExit):
# pass
# except:
# logging.critical("Error publishing generation.complete: \n%s" % traceback.format_exc())
#
# def do_generate(self, message):
# # Take message, return a list of possible responses/matches
# raise NotImplementedError
#
# def start(self, name, **kwargs):
# for k, v in kwargs.items():
# self.__dict__[k] = v
#
# self.name = name
# self.bootstrap_pubsub()
# self.subscribe("generation.start")
# self.__watch_pubsub()
#
# class GeneratedOption(object):
#
# def __init__(self, *args, **kwargs):
# for o in OPTION_REQUIRED_FIELDS:
# if o not in kwargs:
# raise Exception("Missing '%s' argument to the generator backend." % o)
#
# for k, v in kwargs.items():
# self.__dict__[k] = v
#
# return super(GeneratedOption, self).__init__()
#
# def __unicode__(self):
# return "%s - %s" % (self.score, self.context)
#
# def __str__(self):
# return "%s - %s" % (self.score, self.context)
, which may include functions, classes, or code. Output only the next line. | o = GeneratedOption(context=context, backend="regex", score=100) |
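o = GeneratedOption(context=context, backend="regex", score=100)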
Given the following code snippet before the placeholder: <|code_start|>@pytest.fixture()
def message():
"""Mimic message abstraction"""
def _message(fields):
required_fields = {
"is_direct": False,
"is_private_chat": False,
"is_group_chat": True,
"will_is_mentioned": False,
"will_said_it": False,
"sender": "TBD",
"backend_supports_acl": True,
"content": "TBD",
"backend": "TBD",
"original_incoming_event": "TBD"
}
required_fields.update(fields)
return Message(**required_fields)
return _message
@pytest.fixture()
def event():
"""Mimic event abstraction"""
def _event(fields):
required_fields = {"type": "TBD", "version": 1}
required_fields.update(fields)
<|code_end|>
, predict the next line using imports from the current file:
import datetime
import pytest
from will.abstractions import Event, Message, Person
and context including class names, function names, and sometimes code from other files:
# Path: will/abstractions.py
# class Event(Bunch):
# will_internal_type = "Event"
#
# REQUIRED_FIELDS = [
# "type",
# "version",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Event, self).__init__(*args, **kwargs)
# self.version = 1
#
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs and not hasattr(self, f):
# raise Exception("Missing %s in Event construction." % f)
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.type.encode("utf-8"))
# self.hash = h.hexdigest()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# class Message(object):
# will_internal_type = "Message"
# REQUIRED_FIELDS = [
# "is_direct",
# "is_private_chat",
# "is_group_chat",
# "will_is_mentioned",
# "will_said_it",
# "sender",
# "backend_supports_acl",
# "content",
# "backend",
# "original_incoming_event",
# ]
#
# def __init__(self, *args, **kwargs):
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs:
# raise Exception("Missing %s in Message construction." % f)
#
# for f in kwargs:
# self.__dict__[f] = kwargs[f]
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# # Clean content.
# self.content = self._clean_message_content(self.content)
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.content.encode("utf-8"))
# self.hash = h.hexdigest()
#
# self.metadata = Bunch()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# def __unicode__(self, *args, **kwargs):
# if len(self.content) > 20:
# content_str = "%s..." % self.content[:20]
# else:
# content_str = self.content
# return u"Message: \"%s\"\n %s (%s) " % (
# content_str,
# self.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
# self.backend,
# )
#
# def __str__(self, *args, **kwargs):
# return self.__unicode__(*args, **kwargs)
#
# def _clean_message_content(self, s):
# # Clear out 'smart' quotes and the like.
# s = s.replace("’", "'").replace("‘", "'").replace('“', '"').replace('”', '"')
# s = s.replace(u"\u2018", "'").replace(u"\u2019", "'")
# s = s.replace(u"\u201c", '"').replace(u"\u201d", '"')
# return s
#
# class Person(Bunch):
# will_is_person = True
# will_internal_type = "Person"
# REQUIRED_FIELDS = [
# "id",
# "handle",
# "mention_handle",
# "source",
# "name",
# "first_name"
# # "timezone",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Person, self).__init__(*args, **kwargs)
#
# for f in kwargs:
# self.__dict__[f] = kwargs[f]
#
# # Provide first_name
# if "first_name" not in kwargs:
# self.first_name = self.name.split(" ")[0]
#
# for f in self.REQUIRED_FIELDS:
# if not hasattr(self, f):
# raise Exception("Missing %s in Person construction." % f)
#
# # Set TZ offset.
# if hasattr(self, "timezone") and self.timezone:
# self.timezone = pytz_timezone(self.timezone)
# self.utc_offset = self.timezone._utcoffset
# else:
# self.timezone = False
# self.utc_offset = False
#
# @property
# def nick(self):
# logging.warn("sender.nick is deprecated and will be removed eventually. Please use sender.handle instead!")
# return self.handle
. Output only the next line. | return Event(**required_fields) |
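return Event(**required_fields)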
Predict the next line for this snippet: <|code_start|> "source": "TBD",
"handle": "TBD",
"name": "TBD",
"first_name": "TDB"
}
required_fields.update(fields)
return Person(**required_fields)
return _person
@pytest.fixture()
def message():
"""Mimic message abstraction"""
def _message(fields):
required_fields = {
"is_direct": False,
"is_private_chat": False,
"is_group_chat": True,
"will_is_mentioned": False,
"will_said_it": False,
"sender": "TBD",
"backend_supports_acl": True,
"content": "TBD",
"backend": "TBD",
"original_incoming_event": "TBD"
}
required_fields.update(fields)
<|code_end|>
with the help of current file imports:
import datetime
import pytest
from will.abstractions import Event, Message, Person
and context from other files:
# Path: will/abstractions.py
# class Event(Bunch):
# will_internal_type = "Event"
#
# REQUIRED_FIELDS = [
# "type",
# "version",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Event, self).__init__(*args, **kwargs)
# self.version = 1
#
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs and not hasattr(self, f):
# raise Exception("Missing %s in Event construction." % f)
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.type.encode("utf-8"))
# self.hash = h.hexdigest()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# class Message(object):
# will_internal_type = "Message"
# REQUIRED_FIELDS = [
# "is_direct",
# "is_private_chat",
# "is_group_chat",
# "will_is_mentioned",
# "will_said_it",
# "sender",
# "backend_supports_acl",
# "content",
# "backend",
# "original_incoming_event",
# ]
#
# def __init__(self, *args, **kwargs):
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs:
# raise Exception("Missing %s in Message construction." % f)
#
# for f in kwargs:
# self.__dict__[f] = kwargs[f]
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# # Clean content.
# self.content = self._clean_message_content(self.content)
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.content.encode("utf-8"))
# self.hash = h.hexdigest()
#
# self.metadata = Bunch()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# def __unicode__(self, *args, **kwargs):
# if len(self.content) > 20:
# content_str = "%s..." % self.content[:20]
# else:
# content_str = self.content
# return u"Message: \"%s\"\n %s (%s) " % (
# content_str,
# self.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
# self.backend,
# )
#
# def __str__(self, *args, **kwargs):
# return self.__unicode__(*args, **kwargs)
#
# def _clean_message_content(self, s):
# # Clear out 'smart' quotes and the like.
# s = s.replace("’", "'").replace("‘", "'").replace('“', '"').replace('”', '"')
# s = s.replace(u"\u2018", "'").replace(u"\u2019", "'")
# s = s.replace(u"\u201c", '"').replace(u"\u201d", '"')
# return s
#
# class Person(Bunch):
# will_is_person = True
# will_internal_type = "Person"
# REQUIRED_FIELDS = [
# "id",
# "handle",
# "mention_handle",
# "source",
# "name",
# "first_name"
# # "timezone",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Person, self).__init__(*args, **kwargs)
#
# for f in kwargs:
# self.__dict__[f] = kwargs[f]
#
# # Provide first_name
# if "first_name" not in kwargs:
# self.first_name = self.name.split(" ")[0]
#
# for f in self.REQUIRED_FIELDS:
# if not hasattr(self, f):
# raise Exception("Missing %s in Person construction." % f)
#
# # Set TZ offset.
# if hasattr(self, "timezone") and self.timezone:
# self.timezone = pytz_timezone(self.timezone)
# self.utc_offset = self.timezone._utcoffset
# else:
# self.timezone = False
# self.utc_offset = False
#
# @property
# def nick(self):
# logging.warn("sender.nick is deprecated and will be removed eventually. Please use sender.handle instead!")
# return self.handle
, which may contain function names, class names, or code. Output only the next line. | return Message(**required_fields) |
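return Message(**required_fields)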
Based on the snippet: <|code_start|>
@pytest.fixture(params=IO_BACKENDS)
def io_backend(request):
"""Parametrized fixture of available io backends"""
return request.param
@pytest.fixture(params=ANALYZE_BACKENDS)
def analysis(request):
"""Parametrized fixture of available analysis backends"""
return request.param
@pytest.fixture()
def person():
"""Mimic person abstraction"""
def _person(fields):
required_fields = {
"id": "TBD",
"mention_handle": "TBD",
"source": "TBD",
"handle": "TBD",
"name": "TBD",
"first_name": "TDB"
}
required_fields.update(fields)
<|code_end|>
, predict the immediate next line with the help of imports:
import datetime
import pytest
from will.abstractions import Event, Message, Person
and context (classes, functions, sometimes code) from other files:
# Path: will/abstractions.py
# class Event(Bunch):
# will_internal_type = "Event"
#
# REQUIRED_FIELDS = [
# "type",
# "version",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Event, self).__init__(*args, **kwargs)
# self.version = 1
#
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs and not hasattr(self, f):
# raise Exception("Missing %s in Event construction." % f)
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.type.encode("utf-8"))
# self.hash = h.hexdigest()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# class Message(object):
# will_internal_type = "Message"
# REQUIRED_FIELDS = [
# "is_direct",
# "is_private_chat",
# "is_group_chat",
# "will_is_mentioned",
# "will_said_it",
# "sender",
# "backend_supports_acl",
# "content",
# "backend",
# "original_incoming_event",
# ]
#
# def __init__(self, *args, **kwargs):
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs:
# raise Exception("Missing %s in Message construction." % f)
#
# for f in kwargs:
# self.__dict__[f] = kwargs[f]
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# # Clean content.
# self.content = self._clean_message_content(self.content)
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.content.encode("utf-8"))
# self.hash = h.hexdigest()
#
# self.metadata = Bunch()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# def __unicode__(self, *args, **kwargs):
# if len(self.content) > 20:
# content_str = "%s..." % self.content[:20]
# else:
# content_str = self.content
# return u"Message: \"%s\"\n %s (%s) " % (
# content_str,
# self.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
# self.backend,
# )
#
# def __str__(self, *args, **kwargs):
# return self.__unicode__(*args, **kwargs)
#
# def _clean_message_content(self, s):
# # Clear out 'smart' quotes and the like.
# s = s.replace("’", "'").replace("‘", "'").replace('“', '"').replace('”', '"')
# s = s.replace(u"\u2018", "'").replace(u"\u2019", "'")
# s = s.replace(u"\u201c", '"').replace(u"\u201d", '"')
# return s
#
# class Person(Bunch):
# will_is_person = True
# will_internal_type = "Person"
# REQUIRED_FIELDS = [
# "id",
# "handle",
# "mention_handle",
# "source",
# "name",
# "first_name"
# # "timezone",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Person, self).__init__(*args, **kwargs)
#
# for f in kwargs:
# self.__dict__[f] = kwargs[f]
#
# # Provide first_name
# if "first_name" not in kwargs:
# self.first_name = self.name.split(" ")[0]
#
# for f in self.REQUIRED_FIELDS:
# if not hasattr(self, f):
# raise Exception("Missing %s in Person construction." % f)
#
# # Set TZ offset.
# if hasattr(self, "timezone") and self.timezone:
# self.timezone = pytz_timezone(self.timezone)
# self.utc_offset = self.timezone._utcoffset
# else:
# self.timezone = False
# self.utc_offset = False
#
# @property
# def nick(self):
# logging.warn("sender.nick is deprecated and will be removed eventually. Please use sender.handle instead!")
# return self.handle
. Output only the next line. | return Person(**required_fields) |
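return Person(**required_fields)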
Given the following code snippet before the placeholder: <|code_start|> elif os.path.exists(expire_path):
os.unlink(expire_path)
def clear(self, key):
key_path, expire_path = self._key_paths(key)
if os.path.exists(key_path):
os.unlink(key_path)
if os.path.exists(expire_path):
os.unlink(expire_path)
def clear_all_keys(self):
for filename in self._all_setting_files():
os.unlink(filename)
def do_load(self, key):
key_path, expire_path = self._key_paths(key)
if os.path.exists(expire_path):
with open(expire_path, 'r') as f:
expire_at = f.read()
if time.time() > int(expire_at):
# the current value has expired
self.clear(key)
return
if os.path.exists(key_path):
with open(key_path, 'r') as f:
return f.read()
def size(self):
<|code_end|>
, predict the next line using imports from the current file:
import logging
import os
import time
from will.utils import sizeof_fmt
from .base import BaseStorageBackend
and context including class names, function names, and sometimes code from other files:
# Path: will/utils.py
# def sizeof_fmt(num, suffix='B'):
# # http://stackoverflow.com/a/1094933
# for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
# if abs(num) < 1024.0:
# return "%3.1f%s%s" % (num, unit, suffix)
# num /= 1024.0
# return "%.1f%s%s" % (num, 'Yi', suffix)
#
# Path: will/backends/storage/base.py
# class BaseStorageBackend(PrivateBaseStorageBackend):
# """
# The base storage backend. All storage backends must supply the following methods:
# __init__() - sets up the connection
# do_save() - saves a single value to a key
# do_load() - gets a value from the backend
# clear() - deletes a key
# clear_all_keys() - clears the db
# """
#
# def do_save(self, key, value, expire=None):
# raise NotImplementedError
#
# def do_load(self, key):
# raise NotImplementedError
#
# def clear(self, key):
# raise NotImplementedError
#
# def clear_all_keys(self):
# raise NotImplementedError
. Output only the next line. | return sizeof_fmt(sum([ |
Next line prediction: <|code_start|>
class FileStorageException():
"""
A condition that should not occur happened in the FileStorage module
"""
pass
<|code_end|>
. Use current file imports:
(import logging
import os
import time
from will.utils import sizeof_fmt
from .base import BaseStorageBackend)
and context including class names, function names, or small code snippets from other files:
# Path: will/utils.py
# def sizeof_fmt(num, suffix='B'):
# # http://stackoverflow.com/a/1094933
# for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
# if abs(num) < 1024.0:
# return "%3.1f%s%s" % (num, unit, suffix)
# num /= 1024.0
# return "%.1f%s%s" % (num, 'Yi', suffix)
#
# Path: will/backends/storage/base.py
# class BaseStorageBackend(PrivateBaseStorageBackend):
# """
# The base storage backend. All storage backends must supply the following methods:
# __init__() - sets up the connection
# do_save() - saves a single value to a key
# do_load() - gets a value from the backend
# clear() - deletes a key
# clear_all_keys() - clears the db
# """
#
# def do_save(self, key, value, expire=None):
# raise NotImplementedError
#
# def do_load(self, key):
# raise NotImplementedError
#
# def clear(self, key):
# raise NotImplementedError
#
# def clear_all_keys(self):
# raise NotImplementedError
. Output only the next line. | class FileStorage(BaseStorageBackend): |
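class FileStorage(BaseStorageBackend):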
Given snippet: <|code_start|>
class SettingsMixin(object):
required_settings = []
def verify_setting_exists(self, setting_name, message=None):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from clint.textui import colored, puts, indent
from will import settings
from will.utils import show_valid, warn, error
from will import settings
and context:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/utils.py
# def show_valid(valid_str):
# puts(colored.green(u"✓ %s" % valid_str))
#
# def warn(warn_string):
# puts(colored.yellow("! Warning: %s" % warn_string))
#
# def error(err_string):
# puts(colored.red("ERROR: %s" % err_string))
which might include code, classes, or functions. Output only the next line. | if not hasattr(settings, setting_name): |
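if not hasattr(settings, setting_name):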
Here is a snippet: <|code_start|>
def verify_setting_exists(self, setting_name, message=None):
if not hasattr(settings, setting_name):
self.say("%s not set." % setting_name, message=message)
return False
return True
def verify_settings(self, quiet=False):
passed = True
for s in self.required_settings:
if not hasattr(settings, s["name"]):
meta = s
if hasattr(self, "friendly_name"):
meta["friendly_name"] = self.friendly_name
else:
meta["friendly_name"] = self.__class__.__name__
if not quiet:
with indent(2):
error("%(name)s is missing. It's required by the %(friendly_name)s backend." % meta)
with indent(2):
error_message = (
"To obtain a %(name)s: \n%(obtain_at)s"
) % meta
puts(error_message)
passed = False
# raise Exception(error_message)
else:
if not quiet:
with indent(2):
<|code_end|>
. Write the next line using the current file imports:
from clint.textui import colored, puts, indent
from will import settings
from will.utils import show_valid, warn, error
from will import settings
and context from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/utils.py
# def show_valid(valid_str):
# puts(colored.green(u"✓ %s" % valid_str))
#
# def warn(warn_string):
# puts(colored.yellow("! Warning: %s" % warn_string))
#
# def error(err_string):
# puts(colored.red("ERROR: %s" % err_string))
, which may include functions, classes, or code. Output only the next line. | show_valid(s["name"]) |
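show_valid(s["name"])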
Predict the next line after this snippet: <|code_start|>
class SettingsMixin(object):
required_settings = []
def verify_setting_exists(self, setting_name, message=None):
if not hasattr(settings, setting_name):
self.say("%s not set." % setting_name, message=message)
return False
return True
def verify_settings(self, quiet=False):
passed = True
for s in self.required_settings:
if not hasattr(settings, s["name"]):
meta = s
if hasattr(self, "friendly_name"):
meta["friendly_name"] = self.friendly_name
else:
meta["friendly_name"] = self.__class__.__name__
if not quiet:
with indent(2):
<|code_end|>
using the current file's imports:
from clint.textui import colored, puts, indent
from will import settings
from will.utils import show_valid, warn, error
from will import settings
and any relevant context from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/utils.py
# def show_valid(valid_str):
# puts(colored.green(u"✓ %s" % valid_str))
#
# def warn(warn_string):
# puts(colored.yellow("! Warning: %s" % warn_string))
#
# def error(err_string):
# puts(colored.red("ERROR: %s" % err_string))
. Output only the next line. | error("%(name)s is missing. It's required by the %(friendly_name)s backend." % meta) |
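The rows above walk through SettingsMixin.verify_settings, which loops over required_settings and reports anything missing. A stripped-down version of the same check, using plain dicts and print instead of will's settings module and clint helpers (the setting name and URL below are placeholders, not real requirements):

```python
# Placeholder data: the setting name and URL are illustrative only.
REQUIRED_SETTINGS = [
    {"name": "EXAMPLE_API_TOKEN", "obtain_at": "https://example.com/tokens"},
]
current_settings = {"OTHER_SETTING": "value"}

def verify_settings(required, current):
    passed = True
    for item in required:
        if item["name"] not in current:
            print("%(name)s is missing. To obtain a %(name)s:\n  %(obtain_at)s" % item)
            passed = False
        else:
            print("ok: %s" % item["name"])
    return passed

verify_settings(REQUIRED_SETTINGS, current_settings)
```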
Predict the next line after this snippet: <|code_start|>
def __init__(self, *args, **kwargs):
if "bot" in kwargs:
self.bot = kwargs["bot"]
del kwargs["bot"]
if "message" in kwargs:
self.message = kwargs["message"]
del kwargs["message"]
super().__init__(*args, **kwargs)
@staticmethod
def _prepared_content(content, _, __):
content = re.sub(r'>\s+<', '><', content)
return content
@staticmethod
def _trim_for_execution(message):
# Trim it down
if hasattr(message, "analysis"):
message.analysis = None
if hasattr(message, "source_message") and hasattr(message.source_message, "analysis"):
message.source_message.analysis = None
return message
@staticmethod
def get_backend(message: Message, service: str = None):
'Select the correct backend (module path)'
backend = False
if service:
<|code_end|>
using the current file's imports:
import re
import logging
from bottle import request
from will import settings
from will.abstractions import Event, Message
from will.mixins import NaturalTimeMixin, ScheduleMixin, StorageMixin, SettingsMixin, \
EmailMixin, PubSubMixin
and any relevant context from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/abstractions.py
# class Event(Bunch):
# will_internal_type = "Event"
#
# REQUIRED_FIELDS = [
# "type",
# "version",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Event, self).__init__(*args, **kwargs)
# self.version = 1
#
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs and not hasattr(self, f):
# raise Exception("Missing %s in Event construction." % f)
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.type.encode("utf-8"))
# self.hash = h.hexdigest()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# class Message(object):
# will_internal_type = "Message"
# REQUIRED_FIELDS = [
# "is_direct",
# "is_private_chat",
# "is_group_chat",
# "will_is_mentioned",
# "will_said_it",
# "sender",
# "backend_supports_acl",
# "content",
# "backend",
# "original_incoming_event",
# ]
#
# def __init__(self, *args, **kwargs):
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs:
# raise Exception("Missing %s in Message construction." % f)
#
# for f in kwargs:
# self.__dict__[f] = kwargs[f]
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# # Clean content.
# self.content = self._clean_message_content(self.content)
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.content.encode("utf-8"))
# self.hash = h.hexdigest()
#
# self.metadata = Bunch()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# def __unicode__(self, *args, **kwargs):
# if len(self.content) > 20:
# content_str = "%s..." % self.content[:20]
# else:
# content_str = self.content
# return u"Message: \"%s\"\n %s (%s) " % (
# content_str,
# self.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
# self.backend,
# )
#
# def __str__(self, *args, **kwargs):
# return self.__unicode__(*args, **kwargs)
#
# def _clean_message_content(self, s):
# # Clear out 'smart' quotes and the like.
# s = s.replace("’", "'").replace("‘", "'").replace('“', '"').replace('”', '"')
# s = s.replace(u"\u2018", "'").replace(u"\u2019", "'")
# s = s.replace(u"\u201c", '"').replace(u"\u201d", '"')
# return s
. Output only the next line. | for b in settings.IO_BACKENDS: # pylint: disable=no-member |
Given the code snippet: <|code_start|>
if hasattr(message, "backend"):
backend = message.backend
elif message and hasattr(message, "data") and hasattr(message.data, "backend"):
backend = message.data.backend
else:
backend = settings.DEFAULT_BACKEND # pylint: disable=no-member
return backend
def get_message(self, message_passed):
'Try to find a message'
if not message_passed and hasattr(self, "message"):
return self.message
return message_passed
def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):
'Publish an event to be shipped to one of the IO backends.'
if channel:
room = channel
elif room:
channel = room
if "channel" not in kwargs and channel:
kwargs["channel"] = channel
message = self.get_message(message)
message = self._trim_for_execution(message)
backend = self.get_backend(message, service=service)
if backend:
<|code_end|>
, generate the next line using the imports in this file:
import re
import logging
from bottle import request
from will import settings
from will.abstractions import Event, Message
from will.mixins import NaturalTimeMixin, ScheduleMixin, StorageMixin, SettingsMixin, \
EmailMixin, PubSubMixin
and context (functions, classes, or occasionally code) from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/abstractions.py
# class Event(Bunch):
# will_internal_type = "Event"
#
# REQUIRED_FIELDS = [
# "type",
# "version",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Event, self).__init__(*args, **kwargs)
# self.version = 1
#
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs and not hasattr(self, f):
# raise Exception("Missing %s in Event construction." % f)
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.type.encode("utf-8"))
# self.hash = h.hexdigest()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# class Message(object):
# will_internal_type = "Message"
# REQUIRED_FIELDS = [
# "is_direct",
# "is_private_chat",
# "is_group_chat",
# "will_is_mentioned",
# "will_said_it",
# "sender",
# "backend_supports_acl",
# "content",
# "backend",
# "original_incoming_event",
# ]
#
# def __init__(self, *args, **kwargs):
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs:
# raise Exception("Missing %s in Message construction." % f)
#
# for f in kwargs:
# self.__dict__[f] = kwargs[f]
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# # Clean content.
# self.content = self._clean_message_content(self.content)
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.content.encode("utf-8"))
# self.hash = h.hexdigest()
#
# self.metadata = Bunch()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# def __unicode__(self, *args, **kwargs):
# if len(self.content) > 20:
# content_str = "%s..." % self.content[:20]
# else:
# content_str = self.content
# return u"Message: \"%s\"\n %s (%s) " % (
# content_str,
# self.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
# self.backend,
# )
#
# def __str__(self, *args, **kwargs):
# return self.__unicode__(*args, **kwargs)
#
# def _clean_message_content(self, s):
# # Clear out 'smart' quotes and the like.
# s = s.replace("’", "'").replace("‘", "'").replace('“', '"').replace('”', '"')
# s = s.replace(u"\u2018", "'").replace(u"\u2019", "'")
# s = s.replace(u"\u201c", '"').replace(u"\u201d", '"')
# return s
. Output only the next line. | e = Event( |
Predict the next line after this snippet: <|code_start|> ScheduleMixin, SettingsMixin, PubSubMixin):
'Basic things needed by all plugins'
is_will_plugin = True
request = request
def __init__(self, *args, **kwargs):
if "bot" in kwargs:
self.bot = kwargs["bot"]
del kwargs["bot"]
if "message" in kwargs:
self.message = kwargs["message"]
del kwargs["message"]
super().__init__(*args, **kwargs)
@staticmethod
def _prepared_content(content, _, __):
content = re.sub(r'>\s+<', '><', content)
return content
@staticmethod
def _trim_for_execution(message):
# Trim it down
if hasattr(message, "analysis"):
message.analysis = None
if hasattr(message, "source_message") and hasattr(message.source_message, "analysis"):
message.source_message.analysis = None
return message
@staticmethod
<|code_end|>
using the current file's imports:
import re
import logging
from bottle import request
from will import settings
from will.abstractions import Event, Message
from will.mixins import NaturalTimeMixin, ScheduleMixin, StorageMixin, SettingsMixin, \
EmailMixin, PubSubMixin
and any relevant context from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/abstractions.py
# class Event(Bunch):
# will_internal_type = "Event"
#
# REQUIRED_FIELDS = [
# "type",
# "version",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Event, self).__init__(*args, **kwargs)
# self.version = 1
#
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs and not hasattr(self, f):
# raise Exception("Missing %s in Event construction." % f)
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.type.encode("utf-8"))
# self.hash = h.hexdigest()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# class Message(object):
# will_internal_type = "Message"
# REQUIRED_FIELDS = [
# "is_direct",
# "is_private_chat",
# "is_group_chat",
# "will_is_mentioned",
# "will_said_it",
# "sender",
# "backend_supports_acl",
# "content",
# "backend",
# "original_incoming_event",
# ]
#
# def __init__(self, *args, **kwargs):
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs:
# raise Exception("Missing %s in Message construction." % f)
#
# for f in kwargs:
# self.__dict__[f] = kwargs[f]
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# # Clean content.
# self.content = self._clean_message_content(self.content)
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.content.encode("utf-8"))
# self.hash = h.hexdigest()
#
# self.metadata = Bunch()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# def __unicode__(self, *args, **kwargs):
# if len(self.content) > 20:
# content_str = "%s..." % self.content[:20]
# else:
# content_str = self.content
# return u"Message: \"%s\"\n %s (%s) " % (
# content_str,
# self.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
# self.backend,
# )
#
# def __str__(self, *args, **kwargs):
# return self.__unicode__(*args, **kwargs)
#
# def _clean_message_content(self, s):
# # Clear out 'smart' quotes and the like.
# s = s.replace("’", "'").replace("‘", "'").replace('“', '"').replace('”', '"')
# s = s.replace(u"\u2018", "'").replace(u"\u2019", "'")
# s = s.replace(u"\u201c", '"').replace(u"\u201d", '"')
# return s
. Output only the next line. | def get_backend(message: Message, service: str = None): |
Continue the code snippet: <|code_start|>'Encrypt stored data'
# pylint: disable=no-member
BS = 16
key = hashlib.sha256(settings.SECRET_KEY.encode("utf-8")).digest()
def pad(s: bytes) -> str:
'''Ensure the data to be encrypted has sufficient padding.
Arbitrarily adding ~ to the end, so your message better not end with ~.'''
return "%s%s" % (s.decode("utf-8"), ((BS - len(s) % BS) * "~"))
def unpad(s: bytes) -> bytes:
'Removes all ~ on the end of the message.'
return s.rstrip(b'~')
<|code_end|>
. Use current file imports:
import binascii
import hashlib
import logging
import os
import dill as pickle
from Crypto.Cipher import AES
from will import settings
from will.backends.encryption.base import WillBaseEncryptionBackend
and context (classes, functions, or code) from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/backends/encryption/base.py
# class WillBaseEncryptionBackend(object):
#
# def __init__(self, *args, **kwargs):
# pass
#
# @staticmethod
# def encrypt_to_b64(raw):
# raise NotImplementedError
#
# @staticmethod
# def decrypt_from_b64(enc):
# raise NotImplementedError
. Output only the next line. | class AESEncryption(WillBaseEncryptionBackend): |
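This row's snippet derives a 32-byte key with SHA-256 and pads plaintext to the AES block size with "~" characters. A self-contained sketch of that pad/encrypt/encode round trip with PyCryptodome follows; the random, prepended IV is my own choice for the sketch and not necessarily how the project's AESEncryption backend handles it:

```python
import binascii
import hashlib
import os

from Crypto.Cipher import AES  # PyCryptodome

BS = 16  # AES block size

def _pad(raw: bytes) -> bytes:
    # Pad with "~" up to a multiple of the block size, mirroring the snippet
    # (so, as its docstring warns, the plaintext had better not end with "~").
    return raw + b"~" * (BS - len(raw) % BS)

def encrypt_to_b64(raw: bytes, secret: str) -> bytes:
    key = hashlib.sha256(secret.encode("utf-8")).digest()
    iv = os.urandom(BS)                      # random IV: an assumption in this sketch
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return binascii.b2a_base64(iv + cipher.encrypt(_pad(raw)))

def decrypt_from_b64(enc: bytes, secret: str) -> bytes:
    key = hashlib.sha256(secret.encode("utf-8")).digest()
    blob = binascii.a2b_base64(enc)
    iv, body = blob[:BS], blob[BS:]
    return AES.new(key, AES.MODE_CBC, iv).decrypt(body).rstrip(b"~")

assert decrypt_from_b64(encrypt_to_b64(b"hello", "s3cret"), "s3cret") == b"hello"
```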
Here is a snippet: <|code_start|>
class HistoryAnalysis(AnalysisBackend, StorageMixin):
def do_analyze(self, message):
# Load the last few messages, add it to the context under "history"
history = self.load("message_history", [])
if not history:
history = []
<|code_end|>
. Write the next line using the current file imports:
import requests
from will import settings
from will.mixins import StorageMixin
from will.decorators import require_settings
from .base import AnalysisBackend
and context from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/decorators.py
# def require_settings(*setting_names):
# def wrap(f):
# def wrapped_f(*args, **kwargs):
# f(*args, **kwargs)
# wrapped_f.will_fn_metadata = getattr(f, "will_fn_metadata", {})
# wrapped_f.will_fn_metadata["required_settings"] = setting_names
# return wrapped_f
# return wrap
#
# Path: will/backends/analysis/base.py
# class AnalysisBackend(PubSubMixin, SleepMixin, object):
# is_will_analysisbackend = True
#
# def __watch_pubsub(self):
# while True:
# try:
# m = self.pubsub.get_message()
# if m:
# self.__analyze(m)
#
# except AttributeError:
# pass
# except (KeyboardInterrupt, SystemExit):
# pass
# self.sleep_for_event_loop()
#
# def __analyze(self, data):
# try:
# self.pubsub.publish(
# "analysis.complete",
# self.do_analyze(data),
# reference_message=data
# )
# except (KeyboardInterrupt, SystemExit):
# pass
# except:
# logging.critical("Error completing analysis: \n%s" % traceback.format_exc())
#
# def do_analyze(self, message):
# # Take message, return a dict to add to its context.
# raise NotImplementedError
#
# def start(self, name, **kwargs):
# signal.signal(signal.SIGINT, signal.SIG_IGN)
# for k, v in kwargs.items():
# self.__dict__[k] = v
#
# self.name = name
# self.bootstrap_pubsub()
# self.subscribe("analysis.start")
# self.__watch_pubsub()
, which may include functions, classes, or code. Output only the next line. | max_history_context = getattr(settings, "HISTORY_CONTEXT_LENGTH", 20) |
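The HistoryAnalysis row above loads recent messages from storage and caps them with a HISTORY_CONTEXT_LENGTH setting (defaulting to 20). The trimming itself is just list slicing; a tiny standalone version of the idea, outside will's StorageMixin:

```python
HISTORY_CONTEXT_LENGTH = 20  # same default the row above reads from settings

def remember(history, item, limit=HISTORY_CONTEXT_LENGTH):
    """Append the newest item and keep only the most recent `limit` entries."""
    history.append(item)
    return history[-limit:]

history = []
for text in ["hi", "deploy please", "status?"]:
    history = remember(history, text)
print(history)  # -> ['hi', 'deploy please', 'status?']
```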
Predict the next line after this snippet: <|code_start|> return source
def __disable_service(service_name, source):
return source.replace('"will.backends.io_adapters.%s"' % cleaned(service_name),
'"# will.backends.io_adapters.%s"' % cleaned(service_name))
def enable_disable_service(service_name, source):
if ask_user(" Do you want to enable %s support?" % (service_name)):
return _enable_service(service_name, source)
else:
return __disable_service(service_name, source)
def main():
"""
Creates the following structure:
/plugins
__init__.py
hello.py
/templates
blank.html
.gitignore
run_will.py
requirements.txt
Procfile
README.md
"""
<|code_end|>
using the current file's imports:
import argparse
import os
import stat
import sys
from six.moves import input
from clint.textui import puts
from will.utils import print_head
and any relevant context from other files:
# Path: will/utils.py
# def print_head():
# puts(r"""
# ___/-\___
# ___|_________|___
# | |
# |--O---O--|
# | |
# | |
# | \___/ |
# |_________|
#
# Will: Hi!
# """)
. Output only the next line. | print_head() |
Predict the next line after this snippet: <|code_start|>
SKIP_TYPES = ["psubscribe", "punsubscribe", ]
class PubSubPrivateBase(SettingsMixin, EncryptionMixin):
"""
The private bits of the base pubsub backend.
"""
def __init__(self, *args, **kwargs):
self.recent_hashes = []
def publish(self, topic, obj, reference_message=None):
"""
Sends an object out over the pubsub connection, properly formatted,
and conforming to the protocol. Handles pickling for the wire, etc.
This method should *not* be subclassed.
"""
logging.debug("Publishing topic (%s): \n%s" % (topic, obj))
<|code_end|>
using the current file's imports:
import logging
import os
import redis
import traceback
from six.moves.urllib.parse import urlparse
from will.abstractions import Event
from will import settings
from will.mixins import SettingsMixin, EncryptionMixin
and any relevant context from other files:
# Path: will/abstractions.py
# class Event(Bunch):
# will_internal_type = "Event"
#
# REQUIRED_FIELDS = [
# "type",
# "version",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Event, self).__init__(*args, **kwargs)
# self.version = 1
#
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs and not hasattr(self, f):
# raise Exception("Missing %s in Event construction." % f)
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.type.encode("utf-8"))
# self.hash = h.hexdigest()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
. Output only the next line. | e = Event( |
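The PubSubPrivateBase row wraps payloads in an Event, pickles them, and hands them to a concrete backend such as Redis. Stripped of will's encryption and SECRET_KEY topic prefixing, the underlying publish/receive loop looks roughly like this; the localhost connection details are placeholders and assume a Redis server is reachable:

```python
import pickle
import time

import redis

r = redis.Redis(host="localhost", port=6379)   # placeholder connection
channel = "message.incoming"

def publish(topic, obj):
    # Pickle so arbitrary Python objects can cross the wire, as in the snippet.
    return r.publish(topic, pickle.dumps(obj))

pubsub = r.pubsub()
pubsub.subscribe(channel)
publish(channel, {"type": "message", "content": "hello"})

while True:
    m = pubsub.get_message()
    if m and m["type"] == "message":
        print(pickle.loads(m["data"]))
        break
    time.sleep(0.01)
```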
Given the following code snippet before the placeholder: <|code_start|> if hasattr(obj, "sender"):
e.sender = obj.sender
if reference_message:
original_incoming_event_hash = None
if hasattr(reference_message, "original_incoming_event_hash"):
original_incoming_event_hash = reference_message.original_incoming_event_hash
elif hasattr(reference_message, "source") and hasattr(reference_message.source, "hash"):
original_incoming_event_hash = reference_message.source.hash
elif hasattr(reference_message, "source") and hasattr(reference_message.source, "original_incoming_event_hash"):
original_incoming_event_hash = reference_message.source.original_incoming_event_hash
elif hasattr(reference_message, "hash"):
original_incoming_event_hash = reference_message.hash
if original_incoming_event_hash:
e.original_incoming_event_hash = original_incoming_event_hash
return self.publish_to_backend(
self._localize_topic(topic),
self.encrypt(e)
)
def unsubscribe(self, topic):
# This is mostly here for semantic consistency.
self.do_unsubscribe(topic)
def _localize_topic(self, topic):
cleaned_topic = topic
if type(topic) == type([]):
cleaned_topic = []
for t in topic:
<|code_end|>
, predict the next line using imports from the current file:
import logging
import os
import redis
import traceback
from six.moves.urllib.parse import urlparse
from will.abstractions import Event
from will import settings
from will.mixins import SettingsMixin, EncryptionMixin
and context including class names, function names, and sometimes code from other files:
# Path: will/abstractions.py
# class Event(Bunch):
# will_internal_type = "Event"
#
# REQUIRED_FIELDS = [
# "type",
# "version",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Event, self).__init__(*args, **kwargs)
# self.version = 1
#
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs and not hasattr(self, f):
# raise Exception("Missing %s in Event construction." % f)
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.type.encode("utf-8"))
# self.hash = h.hexdigest()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
#
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
. Output only the next line. | if not t.startswith(settings.SECRET_KEY): |
Given the following code snippet before the placeholder: <|code_start|> def handle_execution(self, message, context):
raise NotImplementedError
def no_response(self, message):
self.bot.pubsub.publish(
"message.no_response",
message.data,
reference_message=message.data.original_incoming_event
)
def not_allowed(self, message, explanation):
self.bot.pubsub.publish(
"message.outgoing.%s" % message.data.backend,
Event(
type="reply",
content=explanation,
source_message=message,
),
reference_message=message.data.original_incoming_event
)
def execute(self, message, option):
if "acl" in option.context:
acl = option.context["acl"]
if type(acl) == type("test"):
acl = [acl]
allowed = True
if len(acl) > 0:
<|code_end|>
, predict the next line using imports from the current file:
import imp
import logging
import signal
import traceback
from will import settings
from will.decorators import require_settings
from will.acl import verify_acl
from will.abstractions import Event
from multiprocessing import Process
and context including class names, function names, and sometimes code from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/decorators.py
# def require_settings(*setting_names):
# def wrap(f):
# def wrapped_f(*args, **kwargs):
# f(*args, **kwargs)
# wrapped_f.will_fn_metadata = getattr(f, "will_fn_metadata", {})
# wrapped_f.will_fn_metadata["required_settings"] = setting_names
# return wrapped_f
# return wrap
#
# Path: will/acl.py
# def verify_acl(message, acl):
# try:
# if settings.DISABLE_ACL:
# return True
#
# allowed = is_acl_allowed(message.sender.id, acl)
# if allowed:
# return True
# if hasattr(message, "data") and hasattr(message.data, "backend_supports_acl"):
# if not message.data.backend_supports_acl:
# logging.warning(
# "%s was just allowed to perform actions in %s because the backend does not support ACL. This can be a security risk." % (
# message.sender.handle,
# acl,
# ) + "To fix this, set ACL groups in your config.py, or set DISABLE_ACL = True"
# )
# return True
# except:
# pass
#
# return False
#
# Path: will/abstractions.py
# class Event(Bunch):
# will_internal_type = "Event"
#
# REQUIRED_FIELDS = [
# "type",
# "version",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Event, self).__init__(*args, **kwargs)
# self.version = 1
#
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs and not hasattr(self, f):
# raise Exception("Missing %s in Event construction." % f)
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.type.encode("utf-8"))
# self.hash = h.hexdigest()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
. Output only the next line. | allowed = verify_acl(message, acl) |
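This row gates plugin execution on verify_acl, which ultimately checks whether the sender belongs to one of the named ACL groups. A plain-dict sketch of that membership test; the group names and handles below are made up:

```python
# Made-up groups and handles, standing in for settings.ACL.
ACL = {
    "admins": {"alice", "bob"},
    "ops": {"carol"},
}

def is_acl_allowed(handle, acl_groups):
    """True if `handle` is in at least one of the named groups."""
    return any(handle in ACL.get(group.lower(), set()) for group in acl_groups)

assert is_acl_allowed("alice", ["admins"])
assert is_acl_allowed("carol", ["Ops"])
assert not is_acl_allowed("mallory", ["admins", "ops"])
```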
Based on the snippet: <|code_start|>
class ExecutionBackend(object):
is_will_execution_backend = True
def handle_execution(self, message, context):
raise NotImplementedError
def no_response(self, message):
self.bot.pubsub.publish(
"message.no_response",
message.data,
reference_message=message.data.original_incoming_event
)
def not_allowed(self, message, explanation):
self.bot.pubsub.publish(
"message.outgoing.%s" % message.data.backend,
<|code_end|>
, predict the immediate next line with the help of imports:
import imp
import logging
import signal
import traceback
from will import settings
from will.decorators import require_settings
from will.acl import verify_acl
from will.abstractions import Event
from multiprocessing import Process
and context (classes, functions, sometimes code) from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
#
# Path: will/decorators.py
# def require_settings(*setting_names):
# def wrap(f):
# def wrapped_f(*args, **kwargs):
# f(*args, **kwargs)
# wrapped_f.will_fn_metadata = getattr(f, "will_fn_metadata", {})
# wrapped_f.will_fn_metadata["required_settings"] = setting_names
# return wrapped_f
# return wrap
#
# Path: will/acl.py
# def verify_acl(message, acl):
# try:
# if settings.DISABLE_ACL:
# return True
#
# allowed = is_acl_allowed(message.sender.id, acl)
# if allowed:
# return True
# if hasattr(message, "data") and hasattr(message.data, "backend_supports_acl"):
# if not message.data.backend_supports_acl:
# logging.warning(
# "%s was just allowed to perform actions in %s because the backend does not support ACL. This can be a security risk." % (
# message.sender.handle,
# acl,
# ) + "To fix this, set ACL groups in your config.py, or set DISABLE_ACL = True"
# )
# return True
# except:
# pass
#
# return False
#
# Path: will/abstractions.py
# class Event(Bunch):
# will_internal_type = "Event"
#
# REQUIRED_FIELDS = [
# "type",
# "version",
# ]
#
# def __init__(self, *args, **kwargs):
# super(Event, self).__init__(*args, **kwargs)
# self.version = 1
#
# for f in self.REQUIRED_FIELDS:
# if not f in kwargs and not hasattr(self, f):
# raise Exception("Missing %s in Event construction." % f)
#
# if "timestamp" in kwargs:
# self.timestamp = kwargs["timestamp"]
# else:
# self.timestamp = datetime.datetime.now()
#
# h = hashlib.md5()
# h.update(self.timestamp.strftime("%s").encode("utf-8"))
# h.update(self.type.encode("utf-8"))
# self.hash = h.hexdigest()
# if not "original_incoming_event_hash" in kwargs:
# if hasattr(self, "original_incoming_event") and hasattr(self.original_incoming_event, "hash"):
# self.original_incoming_event_hash = self.original_incoming_event.hash
# else:
# self.original_incoming_event_hash = self.hash
. Output only the next line. | Event( |
Next line prediction: <|code_start|>
class PubSubMixin(object):
def bootstrap_pubsub(self):
if not hasattr(self, "pubsub"):
if hasattr(self, "bot") and hasattr(self.bot, "pubsub"):
self.pubsub = self.bot.pubsub
else:
# The PUBSUB_BACKEND setting points to a specific module namespace
# redis => will.pubsub.redis_backend
# zeromq => will.pubsub.zeromq_backend
# etc...
module_name = ''.join([
'will.backends.pubsub.',
<|code_end|>
. Use current file imports:
(import importlib
import logging
import dill as pickle
import functools
from will import settings)
and context including class names, function names, or small code snippets from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
. Output only the next line. | getattr(settings, 'PUBSUB_BACKEND', 'redis'), |
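The PubSubMixin row composes a dotted module path from the PUBSUB_BACKEND setting ("redis" by default) and imports it at runtime. The mechanism is just importlib.import_module; demonstrated here with a stdlib module so the sketch runs anywhere, rather than with will's own backend packages:

```python
import importlib

backend_setting = "json"  # stand-in for a value like settings.PUBSUB_BACKEND
module = importlib.import_module(backend_setting)
print(module.dumps({"topic": "message.incoming"}))
```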
Predict the next line for this snippet: <|code_start|> with indent(2):
try:
had_warning = False
try:
except ImportError:
# Missing config.py. Check for config.py.dist
if os.path.isfile("config.py.dist"):
confirm = input(
"Hi, looks like you're just starting up!\nI didn't find a config.py, but I do see config.py.dist here. Want me to use that? (y/n) "
).lower()
if confirm in ["y", "yes"]:
print("Great! One moment.\n\n")
os.rename("config.py.dist", "config.py")
else:
print("Ok. I can't start without one though. Quitting now!")
sys.exit(1)
else:
error("I'm missing my config.py file. Usually one comes with the installation - maybe it got lost?")
sys.exit(1)
for k, v in config.__dict__.items():
# Ignore private variables
if "__" not in k:
if k in os.environ and v != os.environ[k] and not quiet:
warn("%s is set in the environment as '%s', but overridden in"
" config.py as '%s'." % (k, os.environ[k], v))
had_warning = True
settings[k] = v
if not had_warning and not quiet:
<|code_end|>
with the help of current file imports:
import os
import sys
import uuid
import time
import random
import hashlib
import config
import config
from will.utils import show_valid, warn, note, error
from clint.textui import puts, indent
from six.moves.urllib import parse
from six.moves import input
and context from other files:
# Path: will/utils.py
# def show_valid(valid_str):
# puts(colored.green(u"✓ %s" % valid_str))
#
# def warn(warn_string):
# puts(colored.yellow("! Warning: %s" % warn_string))
#
# def note(warn_string):
# puts(colored.cyan("- Note: %s" % warn_string))
#
# def error(err_string):
# puts(colored.red("ERROR: %s" % err_string))
, which may contain function names, class names, or code. Output only the next line. | show_valid("Valid.") |
Next line prediction: <|code_start|> else:
settings["HIPCHAT_SERVER"] = "api.hipchat.com"
# Import from config
if not quiet:
puts("Importing config.py... ")
with indent(2):
try:
had_warning = False
try:
except ImportError:
# Missing config.py. Check for config.py.dist
if os.path.isfile("config.py.dist"):
confirm = input(
"Hi, looks like you're just starting up!\nI didn't find a config.py, but I do see config.py.dist here. Want me to use that? (y/n) "
).lower()
if confirm in ["y", "yes"]:
print("Great! One moment.\n\n")
os.rename("config.py.dist", "config.py")
else:
print("Ok. I can't start without one though. Quitting now!")
sys.exit(1)
else:
error("I'm missing my config.py file. Usually one comes with the installation - maybe it got lost?")
sys.exit(1)
for k, v in config.__dict__.items():
# Ignore private variables
if "__" not in k:
if k in os.environ and v != os.environ[k] and not quiet:
<|code_end|>
. Use current file imports:
(import os
import sys
import uuid
import time
import random
import hashlib
import config
import config
from will.utils import show_valid, warn, note, error
from clint.textui import puts, indent
from six.moves.urllib import parse
from six.moves import input)
and context including class names, function names, or small code snippets from other files:
# Path: will/utils.py
# def show_valid(valid_str):
# puts(colored.green(u"✓ %s" % valid_str))
#
# def warn(warn_string):
# puts(colored.yellow("! Warning: %s" % warn_string))
#
# def note(warn_string):
# puts(colored.cyan("- Note: %s" % warn_string))
#
# def error(err_string):
# puts(colored.red("ERROR: %s" % err_string))
. Output only the next line. | warn("%s is set in the environment as '%s', but overridden in" |
Given the code snippet: <|code_start|> # Migrate from 1.x
if "CHAT_BACKENDS" in settings and "IO_BACKENDS" not in settings:
IO_BACKENDS = []
for c in settings["CHAT_BACKENDS"]:
IO_BACKENDS.append("will.backends.io_adapters.%s" % c)
settings["IO_BACKENDS"] = IO_BACKENDS
if not quiet:
warn(
"Deprecated settings. Please update your config.py from:"
"\n CHAT_BACKENDS = %s\n to\n IO_BACKENDS = %s" %
(settings["CHAT_BACKENDS"], IO_BACKENDS)
)
if "CHAT_BACKENDS" not in settings and "IO_BACKENDS" not in settings:
if not quiet:
warn("""Deprecated settings. No backend found, so we're defaulting to hipchat and shell only.
Please add this to your config.py:
IO_BACKENDS = "
IO_BACKENDS = [
"will.backends.io_adapters.hipchat",
"will.backends.io_adapters.shell",
# "will.backends.io_adapters.slack",
# "will.backends.io_adapters.rocketchat",
]
""")
settings["IO_BACKENDS"] = [
"will.backends.io_adapters.hipchat",
"will.backends.io_adapters.shell",
]
if "ANALYZE_BACKENDS" not in settings:
if not quiet:
<|code_end|>
, generate the next line using the imports in this file:
import os
import sys
import uuid
import time
import random
import hashlib
import config
import config
from will.utils import show_valid, warn, note, error
from clint.textui import puts, indent
from six.moves.urllib import parse
from six.moves import input
and context (functions, classes, or occasionally code) from other files:
# Path: will/utils.py
# def show_valid(valid_str):
# puts(colored.green(u"✓ %s" % valid_str))
#
# def warn(warn_string):
# puts(colored.yellow("! Warning: %s" % warn_string))
#
# def note(warn_string):
# puts(colored.cyan("- Note: %s" % warn_string))
#
# def error(err_string):
# puts(colored.red("ERROR: %s" % err_string))
. Output only the next line. | note("No ANALYZE_BACKENDS specified. Defaulting to history only.") |
Predict the next line for this snippet: <|code_start|>
# If HIPCHAT_SERVER is set, we need to change the USERNAME slightly
# for XMPP to work.
if "HIPCHAT_SERVER" in settings:
settings["USERNAME"] = "{user}@{host}".\
format(user=settings["USERNAME"].split("@")[0],
host=settings["HIPCHAT_SERVER"])
else:
settings["HIPCHAT_SERVER"] = "api.hipchat.com"
# Import from config
if not quiet:
puts("Importing config.py... ")
with indent(2):
try:
had_warning = False
try:
except ImportError:
# Missing config.py. Check for config.py.dist
if os.path.isfile("config.py.dist"):
confirm = input(
"Hi, looks like you're just starting up!\nI didn't find a config.py, but I do see config.py.dist here. Want me to use that? (y/n) "
).lower()
if confirm in ["y", "yes"]:
print("Great! One moment.\n\n")
os.rename("config.py.dist", "config.py")
else:
print("Ok. I can't start without one though. Quitting now!")
sys.exit(1)
else:
<|code_end|>
with the help of current file imports:
import os
import sys
import uuid
import time
import random
import hashlib
import config
import config
from will.utils import show_valid, warn, note, error
from clint.textui import puts, indent
from six.moves.urllib import parse
from six.moves import input
and context from other files:
# Path: will/utils.py
# def show_valid(valid_str):
# puts(colored.green(u"✓ %s" % valid_str))
#
# def warn(warn_string):
# puts(colored.yellow("! Warning: %s" % warn_string))
#
# def note(warn_string):
# puts(colored.cyan("- Note: %s" % warn_string))
#
# def error(err_string):
# puts(colored.red("ERROR: %s" % err_string))
, which may contain function names, class names, or code. Output only the next line. | error("I'm missing my config.py file. Usually one comes with the installation - maybe it got lost?") |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
def get_acl_members(acl):
acl_members = []
acl = acl.lower()
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
from will import settings
and context (classes, functions, sometimes code) from other files:
# Path: will/settings.py
# def auto_key():
# def import_settings(quiet=True):
# DEPRECATED_BUT_MAPPED_SETTINGS = {
# "USERNAME": "HIPCHAT_USERNAME",
# "PASSWORD": "HIPCHAT_PASSWORD",
# "V1_TOKEN": "HIPCHAT_V1_TOKEN",
# "V2_TOKEN": "HIPCHAT_V2_TOKEN",
# "TOKEN": "HIPCHAT_V1_TOKEN",
# "ROOMS": "HIPCHAT_ROOMS",
# "NAME": "HIPCHAT_NAME",
# "HANDLE": "HIPCHAT_HANDLE",
# "DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
# "SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
# }
# IO_BACKENDS = []
. Output only the next line. | if getattr(settings, "ACL", None): |
Predict the next line for this snippet: <|code_start|> "will_is_mentioned",
"will_said_it",
"sender",
"backend_supports_acl",
"content",
"backend",
"original_incoming_event",
]
def __init__(self, *args, **kwargs):
for f in self.REQUIRED_FIELDS:
if not f in kwargs:
raise Exception("Missing %s in Message construction." % f)
for f in kwargs:
self.__dict__[f] = kwargs[f]
if "timestamp" in kwargs:
self.timestamp = kwargs["timestamp"]
else:
self.timestamp = datetime.datetime.now()
# Clean content.
self.content = self._clean_message_content(self.content)
h = hashlib.md5()
h.update(self.timestamp.strftime("%s").encode("utf-8"))
h.update(self.content.encode("utf-8"))
self.hash = h.hexdigest()
<|code_end|>
with the help of current file imports:
import datetime
import hashlib
import logging
from pytz import timezone as pytz_timezone
from will.utils import Bunch
and context from other files:
# Path: will/utils.py
# class Bunch(dict):
# def __init__(self, **kw):
# dict.__init__(self, kw)
# self.__dict__ = self
#
# def __getstate__(self):
# return self
#
# def __setstate__(self, state):
# self.update(state)
# self.__dict__ = self
, which may contain function names, class names, or code. Output only the next line. | self.metadata = Bunch() |
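The Message row builds a de-duplication hash from the timestamp and cleaned content. The same two-step md5 in isolation; note that strftime("%s") is a platform-specific extension (glibc/macOS), exactly as in the quoted code, and the message text here is illustrative:

```python
import datetime
import hashlib

timestamp = datetime.datetime.now()
content = "deploy the app please"   # illustrative message text

h = hashlib.md5()
h.update(timestamp.strftime("%s").encode("utf-8"))  # "%s" = epoch seconds on glibc/macOS
h.update(content.encode("utf-8"))
print(h.hexdigest())
```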
Continue the code snippet: <|code_start|> self.list_accelerator_types: gapic_v1.method.wrap_method(
self.list_accelerator_types,
default_timeout=None,
client_info=client_info,
),
self.get_accelerator_type: gapic_v1.method.wrap_method(
self.get_accelerator_type,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def list_nodes(
self,
) -> Callable[
<|code_end|>
. Use current file imports:
import abc
import pkg_resources
import google.auth # type: ignore
import google.api_core
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.tpu_v1.types import cloud_tpu
from google.longrunning import operations_pb2 # type: ignore
and context (classes, functions, or code) from other files:
# Path: google/cloud/tpu_v1/types/cloud_tpu.py
# class SchedulingConfig(proto.Message):
# class NetworkEndpoint(proto.Message):
# class Node(proto.Message):
# class State(proto.Enum):
# class Health(proto.Enum):
# class ApiVersion(proto.Enum):
# class ListNodesRequest(proto.Message):
# class ListNodesResponse(proto.Message):
# class GetNodeRequest(proto.Message):
# class CreateNodeRequest(proto.Message):
# class DeleteNodeRequest(proto.Message):
# class ReimageNodeRequest(proto.Message):
# class StopNodeRequest(proto.Message):
# class StartNodeRequest(proto.Message):
# class TensorFlowVersion(proto.Message):
# class GetTensorFlowVersionRequest(proto.Message):
# class ListTensorFlowVersionsRequest(proto.Message):
# class ListTensorFlowVersionsResponse(proto.Message):
# class AcceleratorType(proto.Message):
# class GetAcceleratorTypeRequest(proto.Message):
# class ListAcceleratorTypesRequest(proto.Message):
# class ListAcceleratorTypesResponse(proto.Message):
# class OperationMetadata(proto.Message):
# class Symptom(proto.Message):
# class SymptomType(proto.Enum):
# STATE_UNSPECIFIED = 0
# CREATING = 1
# READY = 2
# RESTARTING = 3
# REIMAGING = 4
# DELETING = 5
# REPAIRING = 6
# STOPPED = 8
# STOPPING = 9
# STARTING = 10
# PREEMPTED = 11
# TERMINATED = 12
# HIDING = 13
# HIDDEN = 14
# UNHIDING = 15
# HEALTH_UNSPECIFIED = 0
# HEALTHY = 1
# DEPRECATED_UNHEALTHY = 2
# TIMEOUT = 3
# UNHEALTHY_TENSORFLOW = 4
# UNHEALTHY_MAINTENANCE = 5
# API_VERSION_UNSPECIFIED = 0
# V1_ALPHA1 = 1
# V1 = 2
# V2_ALPHA1 = 3
# SYMPTOM_TYPE_UNSPECIFIED = 0
# LOW_MEMORY = 1
# OUT_OF_MEMORY = 2
# EXECUTE_TIMED_OUT = 3
# MESH_BUILD_FAIL = 4
# HBM_OUT_OF_MEMORY = 5
# PROJECT_ABUSE = 6
# def raw_page(self):
# def raw_page(self):
# def raw_page(self):
. Output only the next line. | [cloud_tpu.ListNodesRequest], |
Given the code snippet: <|code_start|> client_info=client_info,
),
self.get_runtime_version: gapic_v1.method.wrap_method(
self.get_runtime_version, default_timeout=None, client_info=client_info,
),
self.get_guest_attributes: gapic_v1.method.wrap_method(
self.get_guest_attributes,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def list_nodes(
self,
) -> Callable[
<|code_end|>
, generate the next line using the imports in this file:
import abc
import pkg_resources
import google.auth # type: ignore
import google.api_core
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.tpu_v2alpha1.types import cloud_tpu
from google.longrunning import operations_pb2 # type: ignore
and context (functions, classes, or occasionally code) from other files:
# Path: google/cloud/tpu_v2alpha1/types/cloud_tpu.py
# class GuestAttributes(proto.Message):
# class GuestAttributesValue(proto.Message):
# class GuestAttributesEntry(proto.Message):
# class AttachedDisk(proto.Message):
# class DiskMode(proto.Enum):
# class SchedulingConfig(proto.Message):
# class NetworkEndpoint(proto.Message):
# class AccessConfig(proto.Message):
# class NetworkConfig(proto.Message):
# class ServiceAccount(proto.Message):
# class Node(proto.Message):
# class State(proto.Enum):
# class Health(proto.Enum):
# class ApiVersion(proto.Enum):
# class ListNodesRequest(proto.Message):
# class ListNodesResponse(proto.Message):
# class GetNodeRequest(proto.Message):
# class CreateNodeRequest(proto.Message):
# class DeleteNodeRequest(proto.Message):
# class StopNodeRequest(proto.Message):
# class StartNodeRequest(proto.Message):
# class UpdateNodeRequest(proto.Message):
# class ServiceIdentity(proto.Message):
# class GenerateServiceIdentityRequest(proto.Message):
# class GenerateServiceIdentityResponse(proto.Message):
# class AcceleratorType(proto.Message):
# class GetAcceleratorTypeRequest(proto.Message):
# class ListAcceleratorTypesRequest(proto.Message):
# class ListAcceleratorTypesResponse(proto.Message):
# class OperationMetadata(proto.Message):
# class RuntimeVersion(proto.Message):
# class GetRuntimeVersionRequest(proto.Message):
# class ListRuntimeVersionsRequest(proto.Message):
# class ListRuntimeVersionsResponse(proto.Message):
# class Symptom(proto.Message):
# class SymptomType(proto.Enum):
# class GetGuestAttributesRequest(proto.Message):
# class GetGuestAttributesResponse(proto.Message):
# DISK_MODE_UNSPECIFIED = 0
# READ_WRITE = 1
# READ_ONLY = 2
# STATE_UNSPECIFIED = 0
# CREATING = 1
# READY = 2
# RESTARTING = 3
# REIMAGING = 4
# DELETING = 5
# REPAIRING = 6
# STOPPED = 8
# STOPPING = 9
# STARTING = 10
# PREEMPTED = 11
# TERMINATED = 12
# HIDING = 13
# HIDDEN = 14
# UNHIDING = 15
# HEALTH_UNSPECIFIED = 0
# HEALTHY = 1
# TIMEOUT = 3
# UNHEALTHY_TENSORFLOW = 4
# UNHEALTHY_MAINTENANCE = 5
# API_VERSION_UNSPECIFIED = 0
# V1_ALPHA1 = 1
# V1 = 2
# V2_ALPHA1 = 3
# SYMPTOM_TYPE_UNSPECIFIED = 0
# LOW_MEMORY = 1
# OUT_OF_MEMORY = 2
# EXECUTE_TIMED_OUT = 3
# MESH_BUILD_FAIL = 4
# HBM_OUT_OF_MEMORY = 5
# PROJECT_ABUSE = 6
# def raw_page(self):
# def raw_page(self):
# def raw_page(self):
. Output only the next line. | [cloud_tpu.ListNodesRequest], |
Continue the code snippet: <|code_start|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class ListNodesPager:
"""A pager for iterating through ``list_nodes`` requests.
This class thinly wraps an initial
:class:`google.cloud.tpu_v2alpha1.types.ListNodesResponse` object, and
provides an ``__iter__`` method to iterate through its
``nodes`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListNodes`` requests and continue to iterate
through the ``nodes`` field on the
corresponding responses.
All the usual :class:`google.cloud.tpu_v2alpha1.types.ListNodesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
<|code_end|>
. Use current file imports:
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.tpu_v2alpha1.types import cloud_tpu
and context (classes, functions, or code) from other files:
# Path: google/cloud/tpu_v2alpha1/types/cloud_tpu.py
# class GuestAttributes(proto.Message):
# class GuestAttributesValue(proto.Message):
# class GuestAttributesEntry(proto.Message):
# class AttachedDisk(proto.Message):
# class DiskMode(proto.Enum):
# class SchedulingConfig(proto.Message):
# class NetworkEndpoint(proto.Message):
# class AccessConfig(proto.Message):
# class NetworkConfig(proto.Message):
# class ServiceAccount(proto.Message):
# class Node(proto.Message):
# class State(proto.Enum):
# class Health(proto.Enum):
# class ApiVersion(proto.Enum):
# class ListNodesRequest(proto.Message):
# class ListNodesResponse(proto.Message):
# class GetNodeRequest(proto.Message):
# class CreateNodeRequest(proto.Message):
# class DeleteNodeRequest(proto.Message):
# class StopNodeRequest(proto.Message):
# class StartNodeRequest(proto.Message):
# class UpdateNodeRequest(proto.Message):
# class ServiceIdentity(proto.Message):
# class GenerateServiceIdentityRequest(proto.Message):
# class GenerateServiceIdentityResponse(proto.Message):
# class AcceleratorType(proto.Message):
# class GetAcceleratorTypeRequest(proto.Message):
# class ListAcceleratorTypesRequest(proto.Message):
# class ListAcceleratorTypesResponse(proto.Message):
# class OperationMetadata(proto.Message):
# class RuntimeVersion(proto.Message):
# class GetRuntimeVersionRequest(proto.Message):
# class ListRuntimeVersionsRequest(proto.Message):
# class ListRuntimeVersionsResponse(proto.Message):
# class Symptom(proto.Message):
# class SymptomType(proto.Enum):
# class GetGuestAttributesRequest(proto.Message):
# class GetGuestAttributesResponse(proto.Message):
# DISK_MODE_UNSPECIFIED = 0
# READ_WRITE = 1
# READ_ONLY = 2
# STATE_UNSPECIFIED = 0
# CREATING = 1
# READY = 2
# RESTARTING = 3
# REIMAGING = 4
# DELETING = 5
# REPAIRING = 6
# STOPPED = 8
# STOPPING = 9
# STARTING = 10
# PREEMPTED = 11
# TERMINATED = 12
# HIDING = 13
# HIDDEN = 14
# UNHIDING = 15
# HEALTH_UNSPECIFIED = 0
# HEALTHY = 1
# TIMEOUT = 3
# UNHEALTHY_TENSORFLOW = 4
# UNHEALTHY_MAINTENANCE = 5
# API_VERSION_UNSPECIFIED = 0
# V1_ALPHA1 = 1
# V1 = 2
# V2_ALPHA1 = 3
# SYMPTOM_TYPE_UNSPECIFIED = 0
# LOW_MEMORY = 1
# OUT_OF_MEMORY = 2
# EXECUTE_TIMED_OUT = 3
# MESH_BUILD_FAIL = 4
# HBM_OUT_OF_MEMORY = 5
# PROJECT_ABUSE = 6
# def raw_page(self):
# def raw_page(self):
# def raw_page(self):
. Output only the next line. | method: Callable[..., cloud_tpu.ListNodesResponse], |
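The ListNodesPager described in this row repeatedly issues ListNodes requests and follows next_page_token until it is empty. The generic pattern, with a fake RPC standing in for the generated client method (the project/location path is a placeholder):

```python
def fake_list_nodes(request):
    # Stand-in for the real RPC: two pages of results.
    pages = {
        "": {"nodes": ["node-a", "node-b"], "next_page_token": "p2"},
        "p2": {"nodes": ["node-c"], "next_page_token": ""},
    }
    return pages[request.get("page_token", "")]

def iterate_nodes(list_nodes, request):
    """Yield every node, following next_page_token until it is empty."""
    while True:
        response = list_nodes(request)
        for node in response["nodes"]:
            yield node
        token = response["next_page_token"]
        if not token:
            return
        request = dict(request, page_token=token)

print(list(iterate_nodes(fake_list_nodes, {"parent": "projects/p/locations/l"})))
# -> ['node-a', 'node-b', 'node-c']
```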
Continue the code snippet: <|code_start|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class ListNodesPager:
"""A pager for iterating through ``list_nodes`` requests.
This class thinly wraps an initial
:class:`google.cloud.tpu_v1.types.ListNodesResponse` object, and
provides an ``__iter__`` method to iterate through its
``nodes`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListNodes`` requests and continue to iterate
through the ``nodes`` field on the
corresponding responses.
All the usual :class:`google.cloud.tpu_v1.types.ListNodesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
<|code_end|>
. Use current file imports:
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.tpu_v1.types import cloud_tpu
and context (classes, functions, or code) from other files:
# Path: google/cloud/tpu_v1/types/cloud_tpu.py
# class SchedulingConfig(proto.Message):
# class NetworkEndpoint(proto.Message):
# class Node(proto.Message):
# class State(proto.Enum):
# class Health(proto.Enum):
# class ApiVersion(proto.Enum):
# class ListNodesRequest(proto.Message):
# class ListNodesResponse(proto.Message):
# class GetNodeRequest(proto.Message):
# class CreateNodeRequest(proto.Message):
# class DeleteNodeRequest(proto.Message):
# class ReimageNodeRequest(proto.Message):
# class StopNodeRequest(proto.Message):
# class StartNodeRequest(proto.Message):
# class TensorFlowVersion(proto.Message):
# class GetTensorFlowVersionRequest(proto.Message):
# class ListTensorFlowVersionsRequest(proto.Message):
# class ListTensorFlowVersionsResponse(proto.Message):
# class AcceleratorType(proto.Message):
# class GetAcceleratorTypeRequest(proto.Message):
# class ListAcceleratorTypesRequest(proto.Message):
# class ListAcceleratorTypesResponse(proto.Message):
# class OperationMetadata(proto.Message):
# class Symptom(proto.Message):
# class SymptomType(proto.Enum):
# STATE_UNSPECIFIED = 0
# CREATING = 1
# READY = 2
# RESTARTING = 3
# REIMAGING = 4
# DELETING = 5
# REPAIRING = 6
# STOPPED = 8
# STOPPING = 9
# STARTING = 10
# PREEMPTED = 11
# TERMINATED = 12
# HIDING = 13
# HIDDEN = 14
# UNHIDING = 15
# HEALTH_UNSPECIFIED = 0
# HEALTHY = 1
# DEPRECATED_UNHEALTHY = 2
# TIMEOUT = 3
# UNHEALTHY_TENSORFLOW = 4
# UNHEALTHY_MAINTENANCE = 5
# API_VERSION_UNSPECIFIED = 0
# V1_ALPHA1 = 1
# V1 = 2
# V2_ALPHA1 = 3
# SYMPTOM_TYPE_UNSPECIFIED = 0
# LOW_MEMORY = 1
# OUT_OF_MEMORY = 2
# EXECUTE_TIMED_OUT = 3
# MESH_BUILD_FAIL = 4
# HBM_OUT_OF_MEMORY = 5
# PROJECT_ABUSE = 6
# def raw_page(self):
# def raw_page(self):
# def raw_page(self):
. Output only the next line. | method: Callable[..., cloud_tpu.ListNodesResponse], |
Given snippet: <|code_start|>
def get_version(fname):
"grab __version__ variable from fname (assuming fname is a python file). parses without importing."
assign_stmts = [s for s in ast.parse(open(fname).read()).body if isinstance(s,ast.Assign)]
valid_targets = [s for s in assign_stmts if len(s.targets) == 1 and s.targets[0].id == '__version__']
return valid_targets[-1].value.s # fail if valid_targets empty
setuptools.setup(
name='pg13',
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import ast,os,setuptools
from pg13 import version
and context:
# Path: pg13/version.py
which might include code, classes, or functions. Output only the next line. | version=version.__version__,
|
Given the code snippet: <|code_start|>
@pytest.mark.xfail
def test_getterby(): raise NotImplementedError
def test_methonce():
class C:
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from pg13 import misc
and context (functions, classes, or occasionally code) from other files:
# Path: pg13/misc.py
# def utcnow():
# def tbframes(traceback):
# def tbfuncs(frames):
# def trace():
# def key_from_pair(pair):
# def multimap(kv_pairs):
# def meth_once(func):
# def wrapper(self, *args, **kwargs):
# class CallOnceError(Exception):
. Output only the next line. | @misc.meth_once |
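`misc.meth_once` is only named in the context above, so its behaviour is an assumption here; a minimal "run once per instance" method decorator consistent with a `CallOnceError` exception might look like:

import functools

class CallOnceError(Exception):
    pass

def meth_once(func):
    """Let a method run once per instance; raise CallOnceError on any later call."""
    attr = '_ran_' + func.__name__

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if getattr(self, attr, False):
            raise CallOnceError(func.__name__)
        setattr(self, attr, True)
        return func(self, *args, **kwargs)
    return wrapper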
Here is a snippet: <|code_start|> linkingAgentIdentifierRole.text = 'implementer'
linkingAgentIdentifierType.text = i[0]
linkingAgentIdentifierValue.text = i[1]
def main():
print 'This is not a standalone script. It is a library of functions that other scripts can use'
sys.exit()
def setup_xml(source_file):
premisxml = os.path.dirname(os.path.dirname(source_file)) + '/metadata' '/' + os.path.basename(os.path.dirname(os.path.dirname(source_file))) + '_premis.xml'
namespace = '<premis:premis xmlns:premis="http://www.loc.gov/premis/v3" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.loc.gov/premis/v3 https://www.loc.gov/standards/premis/premis.xsd" version="3.0"></premis:premis>'
premis_namespace = "http://www.loc.gov/premis/v3"
xsi_namespace = "http://www.w3.org/2001/XMLSchema-instance"
print premisxml
if os.path.isfile(premisxml):
print 'looks like premis already exists?'
parser = ET.XMLParser(remove_blank_text=True)
doc = ET.parse(premisxml,parser=parser)
premis = doc.getroot()
else:
premis = ET.fromstring(namespace)
doc = ET.ElementTree(premis)
return premisxml, premis_namespace, doc, premis
def representation_uuid_csv(filmographic, source_accession, uuid):
uuid_csv = os.path.expanduser('~/Desktop/uuid.csv')
if not os.path.isfile(uuid_csv):
create_csv(uuid_csv, ('reference number','source accession number' 'uuid'))
<|code_end|>
. Write the next line using the current file imports:
import lxml.etree as ET
import lxml.builder as builder
import uuid
import time
import sys
import subprocess
import os
import hashlib
import csv
from glob import glob
from collections import OrderedDict
from ififuncs import append_csv
from ififuncs import create_csv
and context from other files:
# Path: ififuncs.py
# def append_csv(csv_file, *args):
# f = open(csv_file, 'a', newline='')
# try:
# writer = csv.writer(f)
# writer.writerow(*args)
# finally:
# f.close()
#
# Path: ififuncs.py
# def create_csv(csv_file, *args):
# f = open(csv_file, 'w', newline='')
# try:
# writer = csv.writer(f)
# writer.writerow(*args)
# finally:
# f.close()
, which may include functions, classes, or code. Output only the next line. | append_csv(uuid_csv, (filmographic, source_accession, uuid) ) |
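A self-contained sketch of the create-then-append CSV pattern behind `create_csv` and `append_csv`, with placeholder values for the row being recorded:

import csv
import os

def write_row(csv_path, header, row):
    """Create the CSV with a header row on first use, then append one data row."""
    is_new = not os.path.isfile(csv_path)
    with open(csv_path, 'a', newline='') as csv_file:
        writer = csv.writer(csv_file)
        if is_new:
            writer.writerow(header)
        writer.writerow(row)

write_row(
    os.path.expanduser('~/Desktop/uuid.csv'),
    ('reference number', 'source accession number', 'uuid'),
    ('AF1234', 'ACC5678', 'f47ac10b-58cc-4372-a567-0e02b2c3d479'),  # placeholder values
)

Note that the header tuple in the snippets above, ('reference number','source accession number' 'uuid'), is missing a comma, so the two adjacent string literals concatenate into a single column name; the sketch writes three separate columns.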
Here is a snippet: <|code_start|> linkingAgentIdentifierRole = create_unit(2,linkingAgentIdentifier,'linkingAgentRole')
linkingAgentIdentifierRole.text = 'implementer'
linkingAgentIdentifierType.text = i[0]
linkingAgentIdentifierValue.text = i[1]
def main():
print 'This is not a standalone script. It is a library of functions that other scripts can use'
sys.exit()
def setup_xml(source_file):
premisxml = os.path.dirname(os.path.dirname(source_file)) + '/metadata' '/' + os.path.basename(os.path.dirname(os.path.dirname(source_file))) + '_premis.xml'
namespace = '<premis:premis xmlns:premis="http://www.loc.gov/premis/v3" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.loc.gov/premis/v3 https://www.loc.gov/standards/premis/premis.xsd" version="3.0"></premis:premis>'
premis_namespace = "http://www.loc.gov/premis/v3"
xsi_namespace = "http://www.w3.org/2001/XMLSchema-instance"
print premisxml
if os.path.isfile(premisxml):
print 'looks like premis already exists?'
parser = ET.XMLParser(remove_blank_text=True)
doc = ET.parse(premisxml,parser=parser)
premis = doc.getroot()
else:
premis = ET.fromstring(namespace)
doc = ET.ElementTree(premis)
return premisxml, premis_namespace, doc, premis
def representation_uuid_csv(filmographic, source_accession, uuid):
uuid_csv = os.path.expanduser('~/Desktop/uuid.csv')
if not os.path.isfile(uuid_csv):
<|code_end|>
. Write the next line using the current file imports:
import lxml.etree as ET
import lxml.builder as builder
import uuid
import time
import sys
import subprocess
import os
import hashlib
import csv
from glob import glob
from collections import OrderedDict
from ififuncs import append_csv
from ififuncs import create_csv
and context from other files:
# Path: ififuncs.py
# def append_csv(csv_file, *args):
# f = open(csv_file, 'a', newline='')
# try:
# writer = csv.writer(f)
# writer.writerow(*args)
# finally:
# f.close()
#
# Path: ififuncs.py
# def create_csv(csv_file, *args):
# f = open(csv_file, 'w', newline='')
# try:
# writer = csv.writer(f)
# writer.writerow(*args)
# finally:
# f.close()
, which may include functions, classes, or code. Output only the next line. | create_csv(uuid_csv, ('reference number','source accession number' 'uuid')) |
Using the snippet: <|code_start|>#!/usr/bin/env python
'''
Generates sidecar MD5 or SHA512 checksum manifest.
'''
def remove_bad_files(root_dir, log_name_source):
'''
Removes unwanted files.
Verify if this is different than the same function in ififuncs.
'''
rm_these = ['.DS_Store', 'Thumbs.db', 'desktop.ini']
for root, _, files in os.walk(root_dir):
for name in files:
path = os.path.join(root, name)
for i in rm_these:
if name == i:
print(('***********************' + 'removing: ' + path))
<|code_end|>
, determine the next line of code. You have imports:
import sys
import os
import argparse
import time
import shutil
import ififuncs
from ififuncs import generate_log
from ififuncs import manifest_file_count
from ififuncs import hashlib_manifest
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir
and context (class names, function names, or code) available:
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
#
# Path: ififuncs.py
# def manifest_file_count(manifest2check):
# '''
# Checks how many entries are in a manifest
# '''
# if os.path.isfile(manifest2check):
# print(' - A manifest already exists')
# with open(manifest2check, "r") as fo:
# manifest_lines = [line.split(',') for line in fo.readlines()]
# count_in_manifest = len(manifest_lines)
# return count_in_manifest
#
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
. Output only the next line. | generate_log( |
Using the snippet: <|code_start|> generate_log(
log_name_source,
'EVENT = Generating manifest: status=started, eventType=message digest calculation, module=%s, agent=OSX' % module
)
elif sys.platform == "linux2":
generate_log(
log_name_source,
'EVENT = Generating manifest: status=started, eventType=message digest calculation, module=%s, agent=Linux' % module
)
ififuncs.generate_log(
log_name_source,
'eventDetail=manifest.py %s' % ififuncs.get_script_version('manifest.py'))
generate_log(log_name_source, 'Source: %s' % source)
if os.path.isfile(source):
print('\nFile checksum is not currently supported, only directories.\n')
generate_log(log_name_source, 'Error: Attempted to generate manifest for file. Only Directories/Folders are currently supported')
generate_log(log_name_source, 'manifest.py exit')
sys.exit()
elif not os.path.isdir(source):
print((' %s is either not a directory or it does not exist' % source))
generate_log(log_name_source, ' %s is either not a directory or it does not exist' % source)
generate_log(log_name_source, 'manifest.py exit')
sys.exit()
remove_bad_files(source, log_name_source)
source_count = 0
for _, _, filenames in os.walk(source):
# There has to be a better way to count the files..
for _ in filenames:
source_count += 1 #works in windows at least
if os.path.isfile(manifest):
<|code_end|>
, determine the next line of code. You have imports:
import sys
import os
import argparse
import time
import shutil
import ififuncs
from ififuncs import generate_log
from ififuncs import manifest_file_count
from ififuncs import hashlib_manifest
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir
and context (class names, function names, or code) available:
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
#
# Path: ififuncs.py
# def manifest_file_count(manifest2check):
# '''
# Checks how many entries are in a manifest
# '''
# if os.path.isfile(manifest2check):
# print(' - A manifest already exists')
# with open(manifest2check, "r") as fo:
# manifest_lines = [line.split(',') for line in fo.readlines()]
# count_in_manifest = len(manifest_lines)
# return count_in_manifest
#
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
. Output only the next line. | count_in_manifest = manifest_file_count(manifest) |
Given the following code snippet before the placeholder: <|code_start|> if os.path.isfile(source):
print('\nFile checksum is not currently supported, only directories.\n')
generate_log(log_name_source, 'Error: Attempted to generate manifest for file. Only Directories/Folders are currently supported')
generate_log(log_name_source, 'manifest.py exit')
sys.exit()
elif not os.path.isdir(source):
print((' %s is either not a directory or it does not exist' % source))
generate_log(log_name_source, ' %s is either not a directory or it does not exist' % source)
generate_log(log_name_source, 'manifest.py exit')
sys.exit()
remove_bad_files(source, log_name_source)
source_count = 0
for _, _, filenames in os.walk(source):
# There has to be a better way to count the files..
for _ in filenames:
source_count += 1 #works in windows at least
if os.path.isfile(manifest):
count_in_manifest = manifest_file_count(manifest)
if source_count != count_in_manifest:
print('This manifest may be outdated as the number of files in your directory does not match the number of files in the manifest')
generate_log(log_name_source, 'EVENT = Existing source manifest check - Failure - The number of files in the source directory is not equal to the number of files in the source manifest ')
sys.exit()
if not os.path.isfile(manifest):
try:
print('Generating source manifest')
generate_log(log_name_source, 'EVENT = Generating source manifest')
if args.f:
if args.sha512:
ififuncs.sha512_manifest(source, manifest, source)
else:
<|code_end|>
, predict the next line using imports from the current file:
import sys
import os
import argparse
import time
import shutil
import ififuncs
from ififuncs import generate_log
from ififuncs import manifest_file_count
from ififuncs import hashlib_manifest
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir
and context including class names, function names, and sometimes code from other files:
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
#
# Path: ififuncs.py
# def manifest_file_count(manifest2check):
# '''
# Checks how many entries are in a manifest
# '''
# if os.path.isfile(manifest2check):
# print(' - A manifest already exists')
# with open(manifest2check, "r") as fo:
# manifest_lines = [line.split(',') for line in fo.readlines()]
# count_in_manifest = len(manifest_lines)
# return count_in_manifest
#
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
. Output only the next line. | hashlib_manifest(source, manifest, source) |
Predict the next line for this snippet: <|code_start|> action='store_true',
help='Generates sha512 checksums instead of md5'
)
args = parser.parse_args(args_)
source = args.source
source_parent_dir = os.path.dirname(source)
normpath = os.path.normpath(source)
relative_path = normpath.split(os.sep)[-1]
log_name_source_ = os.path.basename(
args.source
) + time.strftime("_%Y_%m_%dT%H_%M_%S")
if args.s:
if args.sha512:
manifest = source_parent_dir + '/%s_manifest-sha512.txt' % relative_path
else:
manifest = source_parent_dir + '/%s_manifest.md5' % relative_path
log_name_source = source_parent_dir + '/%s.log' % log_name_source_
elif args.f:
if args.sha512:
manifest = source_parent_dir + '/%s_manifest-sha512.txt' % relative_path
else:
manifest = source + '/%s_manifest.md5' % relative_path
log_name_source = source_parent_dir + '/%s.log' % log_name_source_
else:
if args.sha512:
manifest_ = manifest_ = '/%s_manifest-sha512.txt' % relative_path
else:
manifest_ = '/%s_manifest.md5' % relative_path
desktop_manifest_dir = make_desktop_manifest_dir()
manifest = "%s/%s" % (desktop_manifest_dir, manifest_)
<|code_end|>
with the help of current file imports:
import sys
import os
import argparse
import time
import shutil
import ififuncs
from ififuncs import generate_log
from ififuncs import manifest_file_count
from ififuncs import hashlib_manifest
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir
and context from other files:
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
#
# Path: ififuncs.py
# def manifest_file_count(manifest2check):
# '''
# Checks how many entries are in a manifest
# '''
# if os.path.isfile(manifest2check):
# print(' - A manifest already exists')
# with open(manifest2check, "r") as fo:
# manifest_lines = [line.split(',') for line in fo.readlines()]
# count_in_manifest = len(manifest_lines)
# return count_in_manifest
#
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
, which may contain function names, class names, or code. Output only the next line. | desktop_logs_dir = make_desktop_logs_dir() |
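The desktop-log helpers above are used together throughout these scripts; a small usage sketch, assuming ififuncs.py from the same repository is importable and using a placeholder source path:

import os
import time

from ififuncs import generate_log, make_desktop_logs_dir

source = '/path/to/source_folder'                # placeholder source directory
logs_dir = make_desktop_logs_dir()               # ~/Desktop/ifiscripts_logs
log_name = os.path.basename(source) + time.strftime("_%Y_%m_%dT%H_%M_%S") + '.log'
log_name_source = os.path.join(logs_dir, log_name)
generate_log(log_name_source, 'EVENT = manifest.py started')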
Given the following code snippet before the placeholder: <|code_start|> parser.add_argument(
'-sha512',
action='store_true',
help='Generates sha512 checksums instead of md5'
)
args = parser.parse_args(args_)
source = args.source
source_parent_dir = os.path.dirname(source)
normpath = os.path.normpath(source)
relative_path = normpath.split(os.sep)[-1]
log_name_source_ = os.path.basename(
args.source
) + time.strftime("_%Y_%m_%dT%H_%M_%S")
if args.s:
if args.sha512:
manifest = source_parent_dir + '/%s_manifest-sha512.txt' % relative_path
else:
manifest = source_parent_dir + '/%s_manifest.md5' % relative_path
log_name_source = source_parent_dir + '/%s.log' % log_name_source_
elif args.f:
if args.sha512:
manifest = source_parent_dir + '/%s_manifest-sha512.txt' % relative_path
else:
manifest = source + '/%s_manifest.md5' % relative_path
log_name_source = source_parent_dir + '/%s.log' % log_name_source_
else:
if args.sha512:
manifest_ = manifest_ = '/%s_manifest-sha512.txt' % relative_path
else:
manifest_ = '/%s_manifest.md5' % relative_path
<|code_end|>
, predict the next line using imports from the current file:
import sys
import os
import argparse
import time
import shutil
import ififuncs
from ififuncs import generate_log
from ififuncs import manifest_file_count
from ififuncs import hashlib_manifest
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir
and context including class names, function names, and sometimes code from other files:
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
#
# Path: ififuncs.py
# def manifest_file_count(manifest2check):
# '''
# Checks how many entries are in a manifest
# '''
# if os.path.isfile(manifest2check):
# print(' - A manifest already exists')
# with open(manifest2check, "r") as fo:
# manifest_lines = [line.split(',') for line in fo.readlines()]
# count_in_manifest = len(manifest_lines)
# return count_in_manifest
#
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
. Output only the next line. | desktop_manifest_dir = make_desktop_manifest_dir() |
Continue the code snippet: <|code_start|> logs_dir = os.path.join(sip_dir, 'logs')
logfile = os.path.join(logs_dir, logname)
if os.path.isfile(logfile):
with open(log, 'r') as fo:
validate_log = fo.readlines()
with open(logfile, 'a') as ba:
for lines in validate_log:
ba.write(lines)
for possible_manifest in possible_manifests:
if os.path.isfile(possible_manifest):
with open(possible_manifest, 'r') as manifesto:
manifest_lines = manifesto.readlines()
for lines in manifest_lines:
if logname in lines:
if 'manifest-sha512.txt' in possible_manifest:
lines = lines[:127].replace(lines[:127], ififuncs.hashlib_sha512(logfile)) + lines[128:]
elif '_manifest.md5' in possible_manifest:
lines = lines[:31].replace(lines[:31], ififuncs.hashlib_md5(logfile)) + lines[32:]
updated_manifest.append(lines)
with open(possible_manifest, 'w') as fo:
for lines in updated_manifest:
fo.write(lines)
updated_manifest = []
def main(args_):
'''
Launches all other functions when called from the command line.
'''
args = make_parser(args_)
<|code_end|>
. Use current file imports:
import sys
import os
import argparse
import time
import unicodedata
import ififuncs
from ififuncs import make_desktop_logs_dir
and context (classes, functions, or code) from other files:
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
. Output only the next line. | desktop_logs_dir = make_desktop_logs_dir() |
Given snippet: <|code_start|>#!/usr/bin/env python
'''
This script will ask mediainfo to get all durations with a folder
'''
def main():
'''
Recursively search for AV files and print duration in seconds
'''
all_files = sys.argv[1:]
duration = 0
for parent_directory in all_files:
for root, dirnames, filenames in os.walk(parent_directory):
for filename in filenames:
if filename.endswith(('.MP4', '.mov', '.mkv')):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import sys
from ififuncs import get_milliseconds
and context:
# Path: ififuncs.py
# def get_milliseconds(filename):
# '''
# Returns a float with the duration of a file in milliseconds.
# '''
# milliseconds = get_mediainfo(
# 'miliseconds',
# '--inform=General;%Duration%',
# filename
# )
# return float(milliseconds)
which might include code, classes, or functions. Output only the next line. | milliseconds = get_milliseconds( |
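A sketch of the walk-and-sum pattern this row builds toward, assuming the mediainfo CLI is installed and reports a numeric General duration for each file:

import os
import subprocess
import sys

def get_milliseconds(filename):
    """Duration in milliseconds via the mediainfo CLI."""
    output = subprocess.check_output(
        ['mediainfo', '--inform=General;%Duration%', filename], text=True
    )
    return float(output.strip())

def total_seconds(parent_directories, extensions=('.MP4', '.mov', '.mkv')):
    """Walk each parent directory and sum the durations of matching AV files."""
    total_ms = 0.0
    for parent in parent_directories:
        for root, _, filenames in os.walk(parent):
            for filename in filenames:
                if filename.endswith(extensions):
                    total_ms += get_milliseconds(os.path.join(root, filename))
    return total_ms / 1000

if __name__ == '__main__':
    print(total_seconds(sys.argv[1:]))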
Given the following code snippet before the placeholder: <|code_start|> # make sure that the alternate log filename is more recent
if int(
os.path.basename(logs)[-12:-4].replace('_', '')) > int(os.path.basename(i)[-12:-4].replace('_', '')):
print(' - trying to analyze %s' % logs)
print(" - %-*s : %s" % (50, os.path.basename(logs)[:-24], analyze_log(os.path.join(desktop_logs_dir, logs))))
def main():
'''
Launches the other functions wihch attempt to run multiple copyit.py
instances if manifests and matching sidecar directories are found
inside of the input directory.
'''
args = parse_args()
all_files = find_manifest(args)
processed_dirs = []
log_names = []
print('\n\n - **** All of these folders will be copied to %s\n' % args.o)
for i in all_files:
absolute_path = os.path.join(args.o, os.path.basename(i))
if os.path.isdir(absolute_path):
print(' - %s already exists, skipping' % absolute_path)
else:
print(' - %s will be copied' % i)
time.sleep(2)
for i in all_files:
absolute_path = os.path.join(args.o, os.path.basename(i))
if os.path.isdir(absolute_path):
print(' - %s already exists, skipping' % absolute_path)
else:
<|code_end|>
, predict the next line using imports from the current file:
import os
import argparse
import time
import copyit
from ififuncs import make_desktop_logs_dir
and context including class names, function names, and sometimes code from other files:
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
. Output only the next line. | desktop_logs_dir = make_desktop_logs_dir() |
Continue the code snippet: <|code_start|> output = filename + "_h264.mov"
ffmpeg_args = [
'ffmpeg',
'-i', filename,
]
if args.logo:
ffmpeg_args.extend(['-i', args.logo])
ffmpeg_args += [
'-c:a', 'aac',
'-c:v', 'libx264',
'-pix_fmt', 'yuv420p',
'-crf', crf_value
]
if args.wide:
ffmpeg_args.append('-aspect')
ffmpeg_args.append('16:9')
if not args.map:
ffmpeg_args.append('-map')
ffmpeg_args.append('0:a?')
ffmpeg_args.append('-map')
ffmpeg_args.append('0:v')
if len(filter_list) > 0:
for _filter in filter_list:
ffmpeg_args.append(_filter)
ffmpeg_args.append(output)
print(ffmpeg_args)
subprocess.call(ffmpeg_args)
if args.md5:
manifest = '%s_manifest.md5' % filename
print('Generating md5 sidecar...')
<|code_end|>
. Use current file imports:
import argparse
import subprocess
import sys
import os
import ififuncs
from ififuncs import hashlib_md5
and context (classes, functions, or code) from other files:
# Path: ififuncs.py
# def hashlib_md5(filename):
# '''
# uses hashlib to return an MD5 checksum of an input filename
# '''
# read_size = 0
# last_percent_done = 0
# m = hashlib.md5()
# total_size = os.path.getsize(filename)
# with open(str(filename), 'rb') as f:
# while True:
# buf = f.read(2**20)
# if not buf:
# break
# read_size += len(buf)
# m.update(buf)
# percent_done = 100 * read_size / total_size
# if percent_done > last_percent_done:
# sys.stdout.write('[%d%%]\r' % percent_done)
# sys.stdout.flush()
# last_percent_done = percent_done
# md5_output = m.hexdigest()
# return md5_output
. Output only the next line. | h264_md5 = hashlib_md5(filename) |
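A condensed sketch of the same command construction: assemble the ffmpeg argument list, then run it (the CRF value and logo path here are placeholders):

import subprocess

def make_h264(filename, crf_value='23', logo=None):
    """Build and run a basic H.264/AAC transcode; returns the output path."""
    output = filename + '_h264.mov'
    ffmpeg_args = ['ffmpeg', '-i', filename]
    if logo is not None:
        ffmpeg_args += ['-i', logo]              # optional second input
    ffmpeg_args += [
        '-c:a', 'aac',
        '-c:v', 'libx264',
        '-pix_fmt', 'yuv420p',
        '-crf', crf_value,
        output,
    ]
    subprocess.call(ffmpeg_args)
    return output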
Continue the code snippet: <|code_start|>#!/usr/bin/env python
'''
This script will create a new UUID
via ififuncs.create_uuid and print to terminal
'''
def main():
'''
Prints a new UUID to the terminal
'''
<|code_end|>
. Use current file imports:
from ififuncs import create_uuid
and context (classes, functions, or code) from other files:
# Path: ififuncs.py
# def create_uuid():
# '''
# Returns a randomly generated UUID as a string
# '''
# new_uuid = str(uuid.uuid4())
# return new_uuid
. Output only the next line. | new_uuid = create_uuid() |
Given snippet: <|code_start|> if dirname == '':
rootpos = 'y'
'''
dirname = raw_input(
'What do you want your destination folder to be called?\n'
)
'''
relative_path = normpath.split(os.sep)[-1]
# or hardcode
destination_final_path = os.path.join(destination, dirname)
if rootpos == 'y':
manifest_destination = os.path.dirname(destination) + '/%s_manifest.md5' % os.path.basename(destination)
else:
manifest_destination = destination + '/%s_manifest.md5' % dirname
if os.path.isfile(manifest_destination):
print('Destination manifest already exists')
if rootpos == 'y':
manifest_filename = '%s_manifest.md5' % os.path.basename(destination)
else:
manifest_filename = '%s_manifest.md5' % dirname
desktop_manifest_dir = make_desktop_manifest_dir()
# manifest = desktop manifest, looks like this can get rewritten later.
manifest = os.path.join(
desktop_manifest_dir, manifest_filename
)
manifest_sidecar = os.path.join(
os.path.dirname(source), relative_path + '_manifest.md5'
)
manifest_root = source + '/%s_manifest.md5' % os.path.basename(source)
log_name_filename = dirname + time.strftime("_%Y_%m_%dT%H_%M_%S")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import subprocess
import os
import filecmp
import tempfile
import time
import argparse
import hashlib
import shutil
import unicodedata
import ififuncs
from builtins import input
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir, generate_log
and context:
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
#
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
which might include code, classes, or functions. Output only the next line. | desktop_logs_dir = make_desktop_logs_dir() |
Here is a snippet: <|code_start|> if os.path.isdir(dircheck):
source = check_for_sip(args.source)
destination = os.path.join(args.destination, os.path.basename(args.source))
os.makedirs(destination)
else:
source = os.path.abspath(args.source)
destination = args.destination
normpath = os.path.normpath(source)
#is there any benefit to this over os.path.basename
dirname = os.path.split(os.path.basename(source))[1]
if dirname == '':
rootpos = 'y'
'''
dirname = raw_input(
'What do you want your destination folder to be called?\n'
)
'''
relative_path = normpath.split(os.sep)[-1]
# or hardcode
destination_final_path = os.path.join(destination, dirname)
if rootpos == 'y':
manifest_destination = os.path.dirname(destination) + '/%s_manifest.md5' % os.path.basename(destination)
else:
manifest_destination = destination + '/%s_manifest.md5' % dirname
if os.path.isfile(manifest_destination):
print('Destination manifest already exists')
if rootpos == 'y':
manifest_filename = '%s_manifest.md5' % os.path.basename(destination)
else:
manifest_filename = '%s_manifest.md5' % dirname
<|code_end|>
. Write the next line using the current file imports:
import sys
import subprocess
import os
import filecmp
import tempfile
import time
import argparse
import hashlib
import shutil
import unicodedata
import ififuncs
from builtins import input
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir, generate_log
and context from other files:
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
#
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
, which may include functions, classes, or code. Output only the next line. | desktop_manifest_dir = make_desktop_manifest_dir() |
Predict the next line after this snippet: <|code_start|> last_percent_done = 0
md5_object = hashlib.md5()
total_size = os.path.getsize(filename)
with open(str(filename), 'rb') as file_object:
while True:
buf = file_object.read(2**20)
if not buf:
break
read_size += len(buf)
md5_object.update(buf)
percent_done = 100 * read_size / total_size
if percent_done > last_percent_done:
sys.stdout.write('[%d%%]\r' % percent_done)
sys.stdout.flush()
last_percent_done = percent_done
md5_output = md5_object.hexdigest()
return md5_output + ' ' + os.path.abspath(filename) + '\n'
def test_write_capabilities(directory, log_name_source):
'''
Checks if drives have write access.
Also checks if source is a file or directory (no file support right now)
'''
if os.path.isdir(directory):
temp = tempfile.mkstemp(dir=directory, suffix='.tmp')
os.close(temp[0]) # Needed for windows.
os.remove(temp[1])
elif os.path.isfile(directory):
print('\nFile transfer is not currently supported, only directories.\n')
<|code_end|>
using the current file's imports:
import sys
import subprocess
import os
import filecmp
import tempfile
import time
import argparse
import hashlib
import shutil
import unicodedata
import ififuncs
from builtins import input
from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir, generate_log
and any relevant context from other files:
# Path: ififuncs.py
# def make_desktop_logs_dir():
# desktop_logs_dir = os.path.expanduser("~/Desktop/ifiscripts_logs")
# if not os.path.isdir(desktop_logs_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_logs_dir)
# return desktop_logs_dir
#
# def make_desktop_manifest_dir():
# desktop_manifest_dir = os.path.expanduser("~/Desktop/moveit_manifests")
# if not os.path.isdir(desktop_manifest_dir):
# #I should probably ask permission here, or ask for alternative location
# os.makedirs(desktop_manifest_dir)
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# else:
# if not os.path.isdir(os.path.join(desktop_manifest_dir, 'old_manifests')):
# os.makedirs(os.path.join(desktop_manifest_dir, 'old_manifests'))
# return desktop_manifest_dir
#
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
. Output only the next line. | generate_log( |
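The md5 and sha512 helpers in these scripts share the same chunked-read shape; a generic sketch with the algorithm name passed in and the progress display left out:

import hashlib

def hash_file(filename, algorithm='md5', chunk_size=2**20):
    """Return the hex digest of a file, reading it in 1 MiB chunks."""
    digest = hashlib.new(algorithm)
    with open(filename, 'rb') as file_object:
        while True:
            chunk = file_object.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()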
Continue the code snippet: <|code_start|> objects_dir = os.path.join(sip_path, 'objects')
uuid = os.path.basename(sip_path)
old_basename, ext = os.path.splitext(item)
new_path = os.path.join(objects_dir, uuid + ext)
os.rename(os.path.join(objects_dir, os.path.basename(item)), new_path)
manifest = os.path.join(os.path.dirname(new_path), os.path.basename(item)) + '_manifest.md5'
updated_lines = []
ififuncs.generate_log(
log_name,
'EVENT = Filename change - eventDetail=original filename replaced with uuid, eventOutcomeDetailNote=%s replaced with %s, agentName=%s, agentName=sipcreator.py))' % (os.path.basename(item), uuid + ext, user))
with open(manifest, 'r') as file_object:
checksums = file_object.readlines()
for line in checksums:
if os.path.basename(item) in line:
line = line.replace(os.path.basename(item), os.path.basename(new_path))
updated_lines.append(line)
with open(manifest, 'w') as fo:
for lines in updated_lines:
fo.write(lines)
consolidate_logs(log_names, sip_path)
return log_names
def log_report(log_names):
'''
Analyzes all the moveit.py logs on the desktop and prints a report.
'''
desktop_logs_dir = ififuncs.make_desktop_logs_dir()
for i in log_names:
if os.path.isfile(i):
<|code_end|>
. Use current file imports:
import os
import argparse
import sys
import shutil
import datetime
import time
import copyit
import ififuncs
import package_update
import accession
import manifest
import makezip
import accession
import dicttoxml
import clairmeta
from masscopy import analyze_log
from clairmeta.utils.xml import prettyprint_xml
from clairmeta import DCP
and context (classes, functions, or code) from other files:
# Path: masscopy.py
# def analyze_log(logfile):
# '''
# Analyzes logfiles on the desktop and summarises the outcome.
# '''
# outcome = ''
# with open(logfile, 'r') as fo:
# log_lines = fo.readlines()
# for line in log_lines:
# if 'EVENT = File Transfer Judgement - Success' in line:
# outcome = 'success'
# if 'EVENT = File Transfer Outcome - Failure' in line:
# outcome = 'failure'
# if 'EVENT = Existing source manifest check - Failure' in line:
# outcome = 'failure - might be outdated manifests in use'
# return outcome
. Output only the next line. | print(("%-*s : copyit job was a %s" % (50, os.path.basename(i)[:-24], analyze_log(i)))) |
Here is a snippet: <|code_start|> print('Exiting as you selected -dryrun')
sys.exit()
logs = []
if args.y:
proceed = 'Y'
else:
proceed = ififuncs.ask_yes_no(
'Do you want to proceed?'
)
if proceed == 'Y':
for sips in sorted(oe_dict):
print(oe_dict[sips])
sipcreator_cmd = ['-i',]
for sipcreator_inputs in oe_dict[sips][0]:
sipcreator_cmd.append(sipcreator_inputs)
sipcreator_cmd += ['-supplement']
for sipcreator_supplements in oe_dict[sips][1]:
sipcreator_cmd.append(sipcreator_supplements)
sipcreator_cmd += ['-user', user, '-oe', sips, '-o', args.o]
if args.rename_uuid:
sipcreator_cmd.append('-rename_uuid')
if args.zip:
sipcreator_cmd.append('-zip')
if args.l:
sipcreator_cmd.append('-l')
print(sipcreator_cmd)
sipcreator_log, _ = sipcreator.main(sipcreator_cmd)
logs.append(sipcreator_log)
for i in logs:
if os.path.isfile(i):
<|code_end|>
. Write the next line using the current file imports:
import argparse
import os
import sys
import ififuncs
import sipcreator
from masscopy import analyze_log
and context from other files:
# Path: masscopy.py
# def analyze_log(logfile):
# '''
# Analyzes logfiles on the desktop and summarises the outcome.
# '''
# outcome = ''
# with open(logfile, 'r') as fo:
# log_lines = fo.readlines()
# for line in log_lines:
# if 'EVENT = File Transfer Judgement - Success' in line:
# outcome = 'success'
# if 'EVENT = File Transfer Outcome - Failure' in line:
# outcome = 'failure'
# if 'EVENT = Existing source manifest check - Failure' in line:
# outcome = 'failure - might be outdated manifests in use'
# return outcome
, which may include functions, classes, or code. Output only the next line. | print(("%-*s : copyit job was a %s" % (50, os.path.basename(i), analyze_log(i)))) |
Here is a snippet: <|code_start|> 'Accepts a parent folder as input and will generate manifest for each subfolder.'
' Designed for a specific IFI Irish Film Archive workflow. '
'Written by Kieran O\'Leary.'
)
parser.add_argument(
'input', help='file path of parent directory'
)
parser.add_argument(
'-v', action='store_true',
help='verbose mode - some extra information such as overall file count.'
)
return parser
def create_manifest(source):
'''
Generates a master log and creates checksum manifests for all subdirectories.
'''
master_log = os.path.expanduser('~/Desktop/batchfixity_errors.log')
os.chdir(source)
for dirname in os.walk('.').next()[1]:
full_path = os.path.join(source, dirname)
manifest_textfile = '%s/%s_manifest.md5' % (full_path, dirname)
if not os.path.isfile(manifest_textfile):
log_name = '%s/%s_fixity.log' % (
os.path.dirname(full_path), dirname
)
generate_log(log_name, 'batchfixity started')
generate_log(log_name, '%s created' % manifest_textfile)
try:
<|code_end|>
. Write the next line using the current file imports:
import argparse
import os
import shutil
from ififuncs import hashlib_manifest
from ififuncs import generate_log
and context from other files:
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
, which may include functions, classes, or code. Output only the next line. | hashlib_manifest(full_path, manifest_textfile, full_path) |
Predict the next line after this snippet: <|code_start|> '''
parser = argparse.ArgumentParser(
description='Batch MD5 checksum generator.'
'Accepts a parent folder as input and will generate manifest for each subfolder.'
' Designed for a specific IFI Irish Film Archive workflow. '
'Written by Kieran O\'Leary.'
)
parser.add_argument(
'input', help='file path of parent directory'
)
parser.add_argument(
'-v', action='store_true',
help='verbose mode - some extra information such as overall file count.'
)
return parser
def create_manifest(source):
'''
Generates a master log and creates checksum manifests for all subdirectories.
'''
master_log = os.path.expanduser('~/Desktop/batchfixity_errors.log')
os.chdir(source)
for dirname in os.walk('.').next()[1]:
full_path = os.path.join(source, dirname)
manifest_textfile = '%s/%s_manifest.md5' % (full_path, dirname)
if not os.path.isfile(manifest_textfile):
log_name = '%s/%s_fixity.log' % (
os.path.dirname(full_path), dirname
)
<|code_end|>
using the current file's imports:
import argparse
import os
import shutil
from ififuncs import hashlib_manifest
from ififuncs import generate_log
and any relevant context from other files:
# Path: ififuncs.py
# def hashlib_manifest(manifest_dir, manifest_textfile, path_to_remove):
# '''
# Creates an MD5 manifest with relative filepaths.
# '''
# file_count = 0
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if not f[0] == '.']
# directories[:] = [d for d in directories if not d[0] == '.']
# for files in filenames:
# #print(" - Calculating number of files to process in current directory - %s files \r "% file_count)
# print("- Calculating number of files to process in current directory - {0} files ".format(file_count), end="\r")
# file_count += 1
# manifest_generator = ''
# md5_counter = 1
# for root, directories, filenames in os.walk(manifest_dir):
# filenames = [f for f in filenames if f[0] != '.']
# directories[:] = [d for d in directories if d[0] != '.']
# for files in filenames:
# print(' - Generating MD5 for %s - file %d of %d' % (os.path.join(root, files), md5_counter, file_count))
# md5 = hashlib_md5(os.path.join(root, files))
# md5_counter += 1
# root2 = os.path.abspath(root).replace(path_to_remove, '')
# try:
# if root2[0] == '/':
# root2 = root2[1:]
# if root2[0] == '\\':
# root2 = root2[1:]
# except: IndexError
# manifest_generator += md5[:32] + ' ' + os.path.join(root2, files).replace("\\", "/") + '\n'
# manifest_list = manifest_generator.splitlines()
# files_in_manifest = len(manifest_list)
# # http://stackoverflow.com/a/31306961/2188572
# manifest_list = sorted(manifest_list, key=lambda x: (x[34:]))
# with open(manifest_textfile, "w", encoding='utf-8') as fo:
# for i in manifest_list:
# fo.write((unicodedata.normalize('NFC', i) + '\n'))
#
# Path: ififuncs.py
# def generate_log(log, what2log):
# if not os.path.isfile(log):
# with open(log, "w", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
# else:
# with open(log, "a", encoding='utf-8') as fo:
# fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ")
# + getpass.getuser()
# + ' ' + what2log + ' \n')
. Output only the next line. | generate_log(log_name, 'batchfixity started') |
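`os.walk('.').next()[1]` in the snippet above is Python-2-only; under Python 3 the immediate subdirectories are taken with `next()`, and the per-subfolder manifest path is built the same way (the parent path here is a placeholder):

import os

def immediate_subdirs(source):
    """First-level directory names under source (Python 3 spelling)."""
    return next(os.walk(source))[1]

parent = '/path/to/parent'                        # placeholder parent folder
for dirname in immediate_subdirs(parent):
    full_path = os.path.join(parent, dirname)
    manifest_textfile = os.path.join(full_path, dirname + '_manifest.md5')
    print(manifest_textfile)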
Next line prediction: <|code_start|> Remove a file or directory (including contents).
Ignore if it doesn't exist.
'''
try:
os.remove(path)
except OSError as e:
if e.errno == errno.ENOENT:
pass
elif e.errno in (errno.EISDIR,
# Windows gives EACCES when you try to unlink a directory,
# because ERROR_DIRECTORY_NOT_SUPPORTED ("An operation is
# not supported on a directory") might accidentally be useful.
errno.EACCES,
# OSX at some point stopped reporting EISDIR,
# and decided EPERM was more fun
errno.EPERM
):
rmtree(path)
else:
raise
try:
samefile = os.path.samefile
except AttributeError:
# Windows
def samefile(path1, path2):
return os.path.normcase(os.path.normpath(path1)) == \
os.path.normcase(os.path.normpath(path2))
<|code_end|>
. Use current file imports:
(import os
import errno
import logging
import shutil
import platform
from .log import getLogger
from .var import IS_WINDOWS)
and context including class names, function names, or small code snippets from other files:
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/var.py
# IS_WINDOWS = sys.platform.startswith('win')
. Output only the next line. | if IS_WINDOWS: |
Given snippet: <|code_start|> self.checksum = None
self.clobbers = False
self.runid = None
if file is None:
self.rules.append(NeverBuilt())
else:
version_line = file.readline().strip()
_log.trace("version_line: %s" % (version_line,))
if not version_line.startswith('version:'): raise ValueError("Invalid file")
_, file_version = version_line.split(' ')
if int(file_version) != self.FORMAT_VERSION:
raise VersionMismatch("can't read format version %s" % (file_version,))
while True:
line = file.readline()
if not line: break
dep = Dependency.parse(line.rstrip())
if isinstance(dep, Checksum):
assert self.checksum is None
self.checksum = dep.value
elif isinstance(dep, RunId):
assert self.runid is None
self.runid = dep
elif isinstance(dep, ClobbersTarget):
self.clobbers = True
else:
self.rules.append(dep)
def is_dirty(self, builder, build_dependency):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import logging
import errno
import hashlib
from .util import *
from .log import getLogger
from .gupfile import Builder
from .parallel import Lock
from .path import resolve_base
from .var import RUN_ID
from .error import SafeError
and context:
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/gupfile.py
# class Builder(object):
# '''
# The canonical builder for a target.
# `path` is the path to the build script, even if this
# builder was obtained indirectly (via a Gupfile match)
# '''
# def __init__(self, script_path, target, basedir, parent):
# self.path = script_path
# self.realpath = os.path.realpath(self.path)
# self.target = target
# self.basedir = basedir
# self.target_path = os.path.join(self.basedir, self.target)
# self.parent = parent
#
# def __repr__(self):
# return "Builder(path=%r, target=%r, basedir=%r, parent=%r)" % (self.path, self.target, self.basedir, self.parent)
#
# @staticmethod
# def for_target(path):
# for candidate in possible_gup_files(path):
# builder = candidate.get_builder()
# if builder is not None:
# return builder
# return None
#
# Path: python/gup/parallel.py
# class Lock:
# def __init__(self, name):
# self.owned = False
# self.name = name
# self.lockfile = os.open(self.name, os.O_RDWR | os.O_CREAT, 0o666)
# _close_on_exec(self.lockfile, True)
# self.shared = fcntl.LOCK_SH
# self.exclusive = fcntl.LOCK_EX
#
# def __del__(self):
# if self.owned:
# self.unlock()
# os.close(self.lockfile)
#
# def read(self):
# return LockHelper(self, fcntl.LOCK_SH)
#
# def write(self):
# return LockHelper(self, fcntl.LOCK_EX)
#
# def trylock(self, kind=fcntl.LOCK_EX):
# assert(self.owned != kind)
# try:
# fcntl.lockf(self.lockfile, kind|fcntl.LOCK_NB, 0, 0)
# except IOError as e:
# if e.errno in (errno.EAGAIN, errno.EACCES):
# _log.trace("%s lock failed", self.name)
# pass # someone else has it locked
# else:
# raise
# else:
# _log.trace("%s lock (try)", self.name)
# self.owned = kind
#
# def waitlock(self, kind=fcntl.LOCK_EX):
# assert(self.owned != kind)
# _log.trace("%s lock (wait)", self.name)
# fcntl.lockf(self.lockfile, kind, 0, 0)
# self.owned = kind
#
# def unlock(self):
# if not self.owned:
# raise Exception("can't unlock %r - we don't own it" % self.name)
# fcntl.lockf(self.lockfile, fcntl.LOCK_UN, 0, 0)
# _log.trace("%s unlock", self.name)
# self.owned = False
#
# Path: python/gup/path.py
# def resolve_base(p):
# return os.path.join(
# os.path.realpath(os.path.dirname(p)),
# os.path.basename(p)
# )
#
# Path: python/gup/var.py
# RUN_ID = os.environ['GUP_RUNID'] = str(int(time.time() * 1000))
#
# Path: python/gup/error.py
# class SafeError(Exception):
# exitcode = 10
# pass
which might include code, classes, or functions. Output only the next line. | assert isinstance(builder, Builder) |
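Aside: several rows in this stretch quote the Lock class from python/gup/parallel.py, which holds an advisory fcntl lock on a small lock file so that concurrent builds serialize access to dependency metadata. A minimal POSIX-only sketch of the same idea, reduced to a context manager with names of my own choosing:

import fcntl
import os
from contextlib import contextmanager

@contextmanager
def file_lock(path, exclusive=True):
    """Hold an advisory lock on `path` for the duration of the with-block."""
    fd = os.open(path, os.O_RDWR | os.O_CREAT, 0o666)
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)   # blocks until granted
        yield
    finally:
        fcntl.lockf(fd, fcntl.LOCK_UN)
        os.close(fd)

Usage would look like `with file_lock(state.meta_path('lock')): ...`; the real class also offers a non-blocking trylock, which this sketch omits.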
Continue the code snippet: <|code_start|> def __init__(self, p):
self.path = p
def __repr__(self):
return 'TargetState(%r)' % (self.path,)
@staticmethod
def built_targets(dir):
'''
Returns the target names which have metadata stored in `dir`
'''
for f in os.listdir(dir):
_log.trace("checking file %s", f);
if f.startswith('deps.'):
target_name = f[5:]
_log.trace(".deps file: %s", target_name);
yield target_name
def meta_path(self, ext):
base, target = os.path.split(self.path)
meta_dir = os.path.join(base, META_DIR)
return os.path.join(meta_dir, "%s.%s" % (ext, target))
def _ensure_meta_path(self, ext):
p = self.meta_path(ext)
mkdirp(os.path.dirname(p))
return p
def _ensure_dep_lock(self):
if not self._dep_lock:
<|code_end|>
. Use current file imports:
import os
import logging
import errno
import hashlib
from .util import *
from .log import getLogger
from .gupfile import Builder
from .parallel import Lock
from .path import resolve_base
from .var import RUN_ID
from .error import SafeError
and context (classes, functions, or code) from other files:
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/gupfile.py
# class Builder(object):
# '''
# The canonical builder for a target.
# `path` is the path to the build script, even if this
# builder was obtained indirectly (via a Gupfile match)
# '''
# def __init__(self, script_path, target, basedir, parent):
# self.path = script_path
# self.realpath = os.path.realpath(self.path)
# self.target = target
# self.basedir = basedir
# self.target_path = os.path.join(self.basedir, self.target)
# self.parent = parent
#
# def __repr__(self):
# return "Builder(path=%r, target=%r, basedir=%r, parent=%r)" % (self.path, self.target, self.basedir, self.parent)
#
# @staticmethod
# def for_target(path):
# for candidate in possible_gup_files(path):
# builder = candidate.get_builder()
# if builder is not None:
# return builder
# return None
#
# Path: python/gup/parallel.py
# class Lock:
# def __init__(self, name):
# self.owned = False
# self.name = name
# self.lockfile = os.open(self.name, os.O_RDWR | os.O_CREAT, 0o666)
# _close_on_exec(self.lockfile, True)
# self.shared = fcntl.LOCK_SH
# self.exclusive = fcntl.LOCK_EX
#
# def __del__(self):
# if self.owned:
# self.unlock()
# os.close(self.lockfile)
#
# def read(self):
# return LockHelper(self, fcntl.LOCK_SH)
#
# def write(self):
# return LockHelper(self, fcntl.LOCK_EX)
#
# def trylock(self, kind=fcntl.LOCK_EX):
# assert(self.owned != kind)
# try:
# fcntl.lockf(self.lockfile, kind|fcntl.LOCK_NB, 0, 0)
# except IOError as e:
# if e.errno in (errno.EAGAIN, errno.EACCES):
# _log.trace("%s lock failed", self.name)
# pass # someone else has it locked
# else:
# raise
# else:
# _log.trace("%s lock (try)", self.name)
# self.owned = kind
#
# def waitlock(self, kind=fcntl.LOCK_EX):
# assert(self.owned != kind)
# _log.trace("%s lock (wait)", self.name)
# fcntl.lockf(self.lockfile, kind, 0, 0)
# self.owned = kind
#
# def unlock(self):
# if not self.owned:
# raise Exception("can't unlock %r - we don't own it" % self.name)
# fcntl.lockf(self.lockfile, fcntl.LOCK_UN, 0, 0)
# _log.trace("%s unlock", self.name)
# self.owned = False
#
# Path: python/gup/path.py
# def resolve_base(p):
# return os.path.join(
# os.path.realpath(os.path.dirname(p)),
# os.path.basename(p)
# )
#
# Path: python/gup/var.py
# RUN_ID = os.environ['GUP_RUNID'] = str(int(time.time() * 1000))
#
# Path: python/gup/error.py
# class SafeError(Exception):
# exitcode = 10
# pass
. Output only the next line. | self._dep_lock = Lock(self._ensure_meta_path('deps-lock')) |
Predict the next line after this snippet: <|code_start|> return getattr(cls, 'deserialize', cls)(*fields)
def append_to(self, file):
line = self.tag + ' ' + ' '.join(self.fields)
assert "\n" not in line
file.write(line + "\n")
def __repr__(self):
return '%s(%s)' % (type(self).__name__, ', '.join(map(repr, self.fields)))
class NeverBuilt(object):
fields = []
def is_dirty(self, args):
_log.debug('DIRTY: never built')
return True
def append_to(self, file): pass
class AlwaysRebuild(Dependency):
tag = 'always:'
num_fields = 0
fields = []
def is_dirty(self, _):
_log.debug('DIRTY: always rebuild')
return True
class BaseFileDependency(Dependency):
num_fields = 3
@classmethod
def relative_to(cls, rel_root, mtime, path):
<|code_end|>
using the current file's imports:
import os
import logging
import errno
import hashlib
from .util import *
from .log import getLogger
from .gupfile import Builder
from .parallel import Lock
from .path import resolve_base
from .var import RUN_ID
from .error import SafeError
and any relevant context from other files:
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/gupfile.py
# class Builder(object):
# '''
# The canonical builder for a target.
# `path` is the path to the build script, even if this
# builder was obtained indirectly (via a Gupfile match)
# '''
# def __init__(self, script_path, target, basedir, parent):
# self.path = script_path
# self.realpath = os.path.realpath(self.path)
# self.target = target
# self.basedir = basedir
# self.target_path = os.path.join(self.basedir, self.target)
# self.parent = parent
#
# def __repr__(self):
# return "Builder(path=%r, target=%r, basedir=%r, parent=%r)" % (self.path, self.target, self.basedir, self.parent)
#
# @staticmethod
# def for_target(path):
# for candidate in possible_gup_files(path):
# builder = candidate.get_builder()
# if builder is not None:
# return builder
# return None
#
# Path: python/gup/parallel.py
# class Lock:
# def __init__(self, name):
# self.owned = False
# self.name = name
# self.lockfile = os.open(self.name, os.O_RDWR | os.O_CREAT, 0o666)
# _close_on_exec(self.lockfile, True)
# self.shared = fcntl.LOCK_SH
# self.exclusive = fcntl.LOCK_EX
#
# def __del__(self):
# if self.owned:
# self.unlock()
# os.close(self.lockfile)
#
# def read(self):
# return LockHelper(self, fcntl.LOCK_SH)
#
# def write(self):
# return LockHelper(self, fcntl.LOCK_EX)
#
# def trylock(self, kind=fcntl.LOCK_EX):
# assert(self.owned != kind)
# try:
# fcntl.lockf(self.lockfile, kind|fcntl.LOCK_NB, 0, 0)
# except IOError as e:
# if e.errno in (errno.EAGAIN, errno.EACCES):
# _log.trace("%s lock failed", self.name)
# pass # someone else has it locked
# else:
# raise
# else:
# _log.trace("%s lock (try)", self.name)
# self.owned = kind
#
# def waitlock(self, kind=fcntl.LOCK_EX):
# assert(self.owned != kind)
# _log.trace("%s lock (wait)", self.name)
# fcntl.lockf(self.lockfile, kind, 0, 0)
# self.owned = kind
#
# def unlock(self):
# if not self.owned:
# raise Exception("can't unlock %r - we don't own it" % self.name)
# fcntl.lockf(self.lockfile, fcntl.LOCK_UN, 0, 0)
# _log.trace("%s unlock", self.name)
# self.owned = False
#
# Path: python/gup/path.py
# def resolve_base(p):
# return os.path.join(
# os.path.realpath(os.path.dirname(p)),
# os.path.basename(p)
# )
#
# Path: python/gup/var.py
# RUN_ID = os.environ['GUP_RUNID'] = str(int(time.time() * 1000))
#
# Path: python/gup/error.py
# class SafeError(Exception):
# exitcode = 10
# pass
. Output only the next line. | rel_path = os.path.relpath(resolve_base(path), rel_root) |
Next line prediction: <|code_start|>
@classmethod
def deserialize(cls, mtime):
return cls(int(mtime))
def is_dirty(self, args):
path = args.deps.path
mtime = get_mtime(path)
assert mtime is not None
_log.debug("comparing stored mtime %s to %s", self.value, mtime)
if mtime != self.value:
log_method = _log.warning
if os.path.isdir(path):
# dirs are modified externally for various reasons, not worth warning
log_method = _log.debug
log_method("%s was externally modified - rebuilding" % (path,))
return True
return False
class RunId(Dependency):
tag = 'run:'
num_fields = 1
def __init__(self, runid):
self.value = runid
self.fields = [runid]
@classmethod
def current(cls):
<|code_end|>
. Use current file imports:
(import os
import logging
import errno
import hashlib
from .util import *
from .log import getLogger
from .gupfile import Builder
from .parallel import Lock
from .path import resolve_base
from .var import RUN_ID
from .error import SafeError)
and context including class names, function names, or small code snippets from other files:
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/gupfile.py
# class Builder(object):
# '''
# The canonical builder for a target.
# `path` is the path to the build script, even if this
# builder was obtained indirectly (via a Gupfile match)
# '''
# def __init__(self, script_path, target, basedir, parent):
# self.path = script_path
# self.realpath = os.path.realpath(self.path)
# self.target = target
# self.basedir = basedir
# self.target_path = os.path.join(self.basedir, self.target)
# self.parent = parent
#
# def __repr__(self):
# return "Builder(path=%r, target=%r, basedir=%r, parent=%r)" % (self.path, self.target, self.basedir, self.parent)
#
# @staticmethod
# def for_target(path):
# for candidate in possible_gup_files(path):
# builder = candidate.get_builder()
# if builder is not None:
# return builder
# return None
#
# Path: python/gup/parallel.py
# class Lock:
# def __init__(self, name):
# self.owned = False
# self.name = name
# self.lockfile = os.open(self.name, os.O_RDWR | os.O_CREAT, 0o666)
# _close_on_exec(self.lockfile, True)
# self.shared = fcntl.LOCK_SH
# self.exclusive = fcntl.LOCK_EX
#
# def __del__(self):
# if self.owned:
# self.unlock()
# os.close(self.lockfile)
#
# def read(self):
# return LockHelper(self, fcntl.LOCK_SH)
#
# def write(self):
# return LockHelper(self, fcntl.LOCK_EX)
#
# def trylock(self, kind=fcntl.LOCK_EX):
# assert(self.owned != kind)
# try:
# fcntl.lockf(self.lockfile, kind|fcntl.LOCK_NB, 0, 0)
# except IOError as e:
# if e.errno in (errno.EAGAIN, errno.EACCES):
# _log.trace("%s lock failed", self.name)
# pass # someone else has it locked
# else:
# raise
# else:
# _log.trace("%s lock (try)", self.name)
# self.owned = kind
#
# def waitlock(self, kind=fcntl.LOCK_EX):
# assert(self.owned != kind)
# _log.trace("%s lock (wait)", self.name)
# fcntl.lockf(self.lockfile, kind, 0, 0)
# self.owned = kind
#
# def unlock(self):
# if not self.owned:
# raise Exception("can't unlock %r - we don't own it" % self.name)
# fcntl.lockf(self.lockfile, fcntl.LOCK_UN, 0, 0)
# _log.trace("%s unlock", self.name)
# self.owned = False
#
# Path: python/gup/path.py
# def resolve_base(p):
# return os.path.join(
# os.path.realpath(os.path.dirname(p)),
# os.path.basename(p)
# )
#
# Path: python/gup/var.py
# RUN_ID = os.environ['GUP_RUNID'] = str(int(time.time() * 1000))
#
# Path: python/gup/error.py
# class SafeError(Exception):
# exitcode = 10
# pass
. Output only the next line. | return cls(RUN_ID) |
Based on the snippet: <|code_start|> except IOError as e:
if e.errno != errno.ENOENT: raise
else:
try:
with f:
rv = Dependencies(self.path, f)
except VersionMismatch as e:
_log.debug("Ignoring stored dependencies from incompatible version: %s", deps_path)
except Exception as e:
_log.debug("Error loading %s: %s (assuming dirty)", deps_path, e)
_log.trace("Loaded serialized state from %s: %r" % (deps_path, rv,))
return rv
def create_lock(self):
if self.lockfile is None:
self.lockfile = Lock(self.meta_path('lock'))
def add_dependency(self, dep):
lock = Lock(self.meta_path('deps2-lock'))
_log.debug('add dep: %s -> %s' % (self.path, dep))
with lock.write():
with open(self.meta_path('deps2'), 'a') as f:
dep.append_to(f)
def mark_clobbers(self):
self.add_dependency(ClobbersTarget())
def perform_build(self, builder, do_build):
exe = builder.path
if not os.path.exists(exe):
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import logging
import errno
import hashlib
from .util import *
from .log import getLogger
from .gupfile import Builder
from .parallel import Lock
from .path import resolve_base
from .var import RUN_ID
from .error import SafeError
and context (classes, functions, sometimes code) from other files:
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/gupfile.py
# class Builder(object):
# '''
# The canonical builder for a target.
# `path` is the path to the build script, even if this
# builder was obtained indirectly (via a Gupfile match)
# '''
# def __init__(self, script_path, target, basedir, parent):
# self.path = script_path
# self.realpath = os.path.realpath(self.path)
# self.target = target
# self.basedir = basedir
# self.target_path = os.path.join(self.basedir, self.target)
# self.parent = parent
#
# def __repr__(self):
# return "Builder(path=%r, target=%r, basedir=%r, parent=%r)" % (self.path, self.target, self.basedir, self.parent)
#
# @staticmethod
# def for_target(path):
# for candidate in possible_gup_files(path):
# builder = candidate.get_builder()
# if builder is not None:
# return builder
# return None
#
# Path: python/gup/parallel.py
# class Lock:
# def __init__(self, name):
# self.owned = False
# self.name = name
# self.lockfile = os.open(self.name, os.O_RDWR | os.O_CREAT, 0o666)
# _close_on_exec(self.lockfile, True)
# self.shared = fcntl.LOCK_SH
# self.exclusive = fcntl.LOCK_EX
#
# def __del__(self):
# if self.owned:
# self.unlock()
# os.close(self.lockfile)
#
# def read(self):
# return LockHelper(self, fcntl.LOCK_SH)
#
# def write(self):
# return LockHelper(self, fcntl.LOCK_EX)
#
# def trylock(self, kind=fcntl.LOCK_EX):
# assert(self.owned != kind)
# try:
# fcntl.lockf(self.lockfile, kind|fcntl.LOCK_NB, 0, 0)
# except IOError as e:
# if e.errno in (errno.EAGAIN, errno.EACCES):
# _log.trace("%s lock failed", self.name)
# pass # someone else has it locked
# else:
# raise
# else:
# _log.trace("%s lock (try)", self.name)
# self.owned = kind
#
# def waitlock(self, kind=fcntl.LOCK_EX):
# assert(self.owned != kind)
# _log.trace("%s lock (wait)", self.name)
# fcntl.lockf(self.lockfile, kind, 0, 0)
# self.owned = kind
#
# def unlock(self):
# if not self.owned:
# raise Exception("can't unlock %r - we don't own it" % self.name)
# fcntl.lockf(self.lockfile, fcntl.LOCK_UN, 0, 0)
# _log.trace("%s unlock", self.name)
# self.owned = False
#
# Path: python/gup/path.py
# def resolve_base(p):
# return os.path.join(
# os.path.realpath(os.path.dirname(p)),
# os.path.basename(p)
# )
#
# Path: python/gup/var.py
# RUN_ID = os.environ['GUP_RUNID'] = str(int(time.time() * 1000))
#
# Path: python/gup/error.py
# class SafeError(Exception):
# exitcode = 10
# pass
. Output only the next line. | raise SafeError("Build script not found: %s" % (exe)) |
Continue the code snippet: <|code_start|>
# By default, no output colouring.
RED = ""
GREEN = ""
YELLOW = ""
BOLD = ""
PLAIN = ""
_want_color = os.environ.get('GUP_COLOR', 'auto')
if _want_color == '1' or (
_want_color == 'auto' and
<|code_end|>
. Use current file imports:
import os, sys
import logging
from .var import IS_WINDOWS
and context (classes, functions, or code) from other files:
# Path: python/gup/var.py
# IS_WINDOWS = sys.platform.startswith('win')
. Output only the next line. | not IS_WINDOWS and |
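Aside: the row above cuts off in the middle of the colour auto-detection check (GUP_COLOR is '1' or 'auto'), so the final condition is not visible here. A common way to finish that kind of check is to require a real terminal; which stream gup actually tests is an assumption on my part:

import os
import sys

_want_color = os.environ.get('GUP_COLOR', 'auto')
USE_COLOR = _want_color == '1' or (
    _want_color == 'auto'
    and sys.stderr.isatty()                     # assumption: colour only when attached to a tty
    and os.environ.get('TERM') != 'dumb'
)
if USE_COLOR:
    RED, GREEN, YELLOW = "\x1b[31m", "\x1b[32m", "\x1b[33m"
    BOLD, PLAIN = "\x1b[1m", "\x1b[m"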
Based on the snippet: <|code_start|> _log.trace("build_basedir: %s" % (build_basedir,))
if not self.indirect:
if target_is_builder():
_log.debug("ignoring direct builder for target %s", path)
# gupfiles & scripts can only be built by Gupfile targets, not .gup scripts
return None
return Builder(path, self.target, build_basedir, parent=None)
with open(path) as f:
try:
rules = parse_gupfile(f)
except AssertionError as e:
reason = " (%s)" % (e.message,) if e.message else ""
raise SafeError("Invalid %s: %s%s" % (GUPFILE, path, reason))
_log.trace("Parsed gupfile: %r" % rules)
match_target = self.target
# always use `/` as path sep in gupfile patterns
if os.path.sep != '/':
match_target = self.target.replace(os.path.sep, '/')
def find_matching_rule(matchfn, target, match_target):
for script, ruleset in rules:
if matchfn(ruleset, match_target):
base = os.path.realpath(build_basedir)
parent = None
if script.startswith('!'):
script = script[1:]
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import re
import itertools
import doctest
from os import path
from .whichcraft import which
from .log import getLogger
from .error import SafeError
from .var import INDENT, PY3
and context (classes, functions, sometimes code) from other files:
# Path: python/gup/whichcraft.py
# def which(cmd, mode=os.F_OK | os.X_OK, path=None):
# """Given a command, mode, and a PATH string, return the path which
# conforms to the given mode on the PATH, or None if there is no such
# file.
# `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
# of os.environ.get("PATH"), or can be overridden with a custom search
# path.
# Note: This function was backported from the Python 3 source code.
# """
# # Check that a given file can be accessed with the correct mode.
# # Additionally check that `file` is not a directory, as on Windows
# # directories pass the os.access check.
# def _access_check(fn, mode):
# return (os.path.exists(fn) and os.access(fn, mode) and
# not os.path.isdir(fn))
#
# # If we're given a path with a directory part, look it up directly
# # rather than referring to PATH directories. This includes checking
# # relative to the current directory, e.g. ./script
# if os.path.dirname(cmd):
# if _access_check(cmd, mode):
# return cmd
# return None
#
# if path is None:
# path = os.environ.get("PATH", os.defpath)
# if not path:
# return None
# path = path.split(os.pathsep)
#
# if sys.platform == "win32":
# # The current directory takes precedence on Windows.
# if os.curdir not in path:
# path.insert(0, os.curdir)
#
# # PATHEXT is necessary to check on Windows.
# pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# # See if the given file matches any of the expected path
# # extensions. This will allow us to short circuit when given
# # "python.exe". If it does match, only test that one, otherwise we
# # have to try others.
# if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
# files = [cmd]
# else:
# files = [cmd + ext for ext in pathext]
# else:
# # On other platforms you don't have things like PATHEXT to tell you
# # what file suffixes are executable, so just pass on cmd as-is.
# files = [cmd]
#
# seen = set()
# for dir in path:
# normdir = os.path.normcase(dir)
# if normdir not in seen:
# seen.add(normdir)
# for thefile in files:
# name = os.path.join(dir, thefile)
# if _access_check(name, mode):
# return name
# return None
#
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/error.py
# class SafeError(Exception):
# exitcode = 10
# pass
#
# Path: python/gup/var.py
# INDENT = os.environ.get('GUP_INDENT', '')
#
# PY3 = sys.version_info >= (3,0)
. Output only the next line. | script_path = which(script) |
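Aside: the long context block in the row above is a backport of Python 3's shutil.which, used to resolve '!script' builders against PATH. On Python 3.3+ the standard-library call behaves the same way; the command name below is hypothetical:

import shutil

script_path = shutil.which("my-build-script")   # returns None when nothing on PATH matches
if script_path is None:
    raise RuntimeError("build command not found on PATH: my-build-script")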
Given the following code snippet before the placeholder: <|code_start|>
def get_builder(self):
path = self.guppath
if not os.path.exists(path):
return None
if os.path.isdir(path):
_log.trace("skipping directory: %s", path)
return None
_log.trace("candidate exists: %s" % (path,))
def target_is_builder():
target_name = os.path.basename(self.target)
return target_name == GUPFILE or os.path.splitext(target_name)[1].lower() == '.gup'
build_basedir = os.path.join(*self._base_parts(False))
_log.trace("build_basedir: %s" % (build_basedir,))
if not self.indirect:
if target_is_builder():
_log.debug("ignoring direct builder for target %s", path)
# gupfiles & scripts can only be built by Gupfile targets, not .gup scripts
return None
return Builder(path, self.target, build_basedir, parent=None)
with open(path) as f:
try:
rules = parse_gupfile(f)
except AssertionError as e:
reason = " (%s)" % (e.message,) if e.message else ""
<|code_end|>
, predict the next line using imports from the current file:
import os
import re
import itertools
import doctest
from os import path
from .whichcraft import which
from .log import getLogger
from .error import SafeError
from .var import INDENT, PY3
and context including class names, function names, and sometimes code from other files:
# Path: python/gup/whichcraft.py
# def which(cmd, mode=os.F_OK | os.X_OK, path=None):
# """Given a command, mode, and a PATH string, return the path which
# conforms to the given mode on the PATH, or None if there is no such
# file.
# `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
# of os.environ.get("PATH"), or can be overridden with a custom search
# path.
# Note: This function was backported from the Python 3 source code.
# """
# # Check that a given file can be accessed with the correct mode.
# # Additionally check that `file` is not a directory, as on Windows
# # directories pass the os.access check.
# def _access_check(fn, mode):
# return (os.path.exists(fn) and os.access(fn, mode) and
# not os.path.isdir(fn))
#
# # If we're given a path with a directory part, look it up directly
# # rather than referring to PATH directories. This includes checking
# # relative to the current directory, e.g. ./script
# if os.path.dirname(cmd):
# if _access_check(cmd, mode):
# return cmd
# return None
#
# if path is None:
# path = os.environ.get("PATH", os.defpath)
# if not path:
# return None
# path = path.split(os.pathsep)
#
# if sys.platform == "win32":
# # The current directory takes precedence on Windows.
# if os.curdir not in path:
# path.insert(0, os.curdir)
#
# # PATHEXT is necessary to check on Windows.
# pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# # See if the given file matches any of the expected path
# # extensions. This will allow us to short circuit when given
# # "python.exe". If it does match, only test that one, otherwise we
# # have to try others.
# if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
# files = [cmd]
# else:
# files = [cmd + ext for ext in pathext]
# else:
# # On other platforms you don't have things like PATHEXT to tell you
# # what file suffixes are executable, so just pass on cmd as-is.
# files = [cmd]
#
# seen = set()
# for dir in path:
# normdir = os.path.normcase(dir)
# if normdir not in seen:
# seen.add(normdir)
# for thefile in files:
# name = os.path.join(dir, thefile)
# if _access_check(name, mode):
# return name
# return None
#
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/error.py
# class SafeError(Exception):
# exitcode = 10
# pass
#
# Path: python/gup/var.py
# INDENT = os.environ.get('GUP_INDENT', '')
#
# PY3 = sys.version_info >= (3,0)
. Output only the next line. | raise SafeError("Invalid %s: %s%s" % (GUPFILE, path, reason)) |
Predict the next line after this snippet: <|code_start|> if not self.indirect:
if target_is_builder():
_log.debug("ignoring direct builder for target %s", path)
# gupfiles & scripts can only be built by Gupfile targets, not .gup scripts
return None
return Builder(path, self.target, build_basedir, parent=None)
with open(path) as f:
try:
rules = parse_gupfile(f)
except AssertionError as e:
reason = " (%s)" % (e.message,) if e.message else ""
raise SafeError("Invalid %s: %s%s" % (GUPFILE, path, reason))
_log.trace("Parsed gupfile: %r" % rules)
match_target = self.target
# always use `/` as path sep in gupfile patterns
if os.path.sep != '/':
match_target = self.target.replace(os.path.sep, '/')
def find_matching_rule(matchfn, target, match_target):
for script, ruleset in rules:
if matchfn(ruleset, match_target):
base = os.path.realpath(build_basedir)
parent = None
if script.startswith('!'):
script = script[1:]
script_path = which(script)
if script_path is None:
<|code_end|>
using the current file's imports:
import os
import re
import itertools
import doctest
from os import path
from .whichcraft import which
from .log import getLogger
from .error import SafeError
from .var import INDENT, PY3
and any relevant context from other files:
# Path: python/gup/whichcraft.py
# def which(cmd, mode=os.F_OK | os.X_OK, path=None):
# """Given a command, mode, and a PATH string, return the path which
# conforms to the given mode on the PATH, or None if there is no such
# file.
# `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
# of os.environ.get("PATH"), or can be overridden with a custom search
# path.
# Note: This function was backported from the Python 3 source code.
# """
# # Check that a given file can be accessed with the correct mode.
# # Additionally check that `file` is not a directory, as on Windows
# # directories pass the os.access check.
# def _access_check(fn, mode):
# return (os.path.exists(fn) and os.access(fn, mode) and
# not os.path.isdir(fn))
#
# # If we're given a path with a directory part, look it up directly
# # rather than referring to PATH directories. This includes checking
# # relative to the current directory, e.g. ./script
# if os.path.dirname(cmd):
# if _access_check(cmd, mode):
# return cmd
# return None
#
# if path is None:
# path = os.environ.get("PATH", os.defpath)
# if not path:
# return None
# path = path.split(os.pathsep)
#
# if sys.platform == "win32":
# # The current directory takes precedence on Windows.
# if os.curdir not in path:
# path.insert(0, os.curdir)
#
# # PATHEXT is necessary to check on Windows.
# pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# # See if the given file matches any of the expected path
# # extensions. This will allow us to short circuit when given
# # "python.exe". If it does match, only test that one, otherwise we
# # have to try others.
# if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
# files = [cmd]
# else:
# files = [cmd + ext for ext in pathext]
# else:
# # On other platforms you don't have things like PATHEXT to tell you
# # what file suffixes are executable, so just pass on cmd as-is.
# files = [cmd]
#
# seen = set()
# for dir in path:
# normdir = os.path.normcase(dir)
# if normdir not in seen:
# seen.add(normdir)
# for thefile in files:
# name = os.path.join(dir, thefile)
# if _access_check(name, mode):
# return name
# return None
#
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/error.py
# class SafeError(Exception):
# exitcode = 10
# pass
#
# Path: python/gup/var.py
# INDENT = os.environ.get('GUP_INDENT', '')
#
# PY3 = sys.version_info >= (3,0)
. Output only the next line. | raise SafeError("Build command not found on PATH: %s\n %s(specified in %s)" % (script, INDENT, path)) |
Given the following code snippet before the placeholder: <|code_start|>
# _log = getLogger(__name__)
def resolve_base(p):
return os.path.join(
os.path.realpath(os.path.dirname(p)),
os.path.basename(p)
)
def traverse_from(base, rel, resolve_final=False):
<|code_end|>
, predict the next line using imports from the current file:
import os
import errno
from .log import getLogger
from .var import IS_WINDOWS
and context including class names, function names, and sometimes code from other files:
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/var.py
# IS_WINDOWS = sys.platform.startswith('win')
. Output only the next line. | if IS_WINDOWS: |
Given the following code snippet before the placeholder: <|code_start|> signal.alarm(0)
signal.signal(signal.SIGALRM, oldh)
return b and b or None # None means EOF
def _running(self):
"Tell if jobs are running"
return len(self.waitfds)
def start_job(self, jobfunc, donefunc):
"""
Start a job
jobfunc: executed in the child process
donefunc: executed in the parent process during a wait or wait_all call
"""
reason = 'build'
assert(self.tokens <= 1)
self._get_token(reason)
assert(self.tokens >= 1)
assert(self.tokens == 1)
self.tokens -= 1
r,w = os.pipe()
pid = os.fork()
if pid == 0:
# child
os.close(r)
rv = 201
try:
try:
rv = jobfunc() or 0
_debug('jobfunc completed (%r, %r)' % (jobfunc,rv))
<|code_end|>
, predict the next line using imports from the current file:
import tempfile
import traceback
import fcntl
import os, errno, select, signal
from .log import getLogger
from .error import SafeError, UNKNOWN_ERROR_CODE
and context including class names, function names, and sometimes code from other files:
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/error.py
# class SafeError(Exception):
# exitcode = 10
# pass
#
# UNKNOWN_ERROR_CODE = 1
. Output only the next line. | except SafeError as e: |
Given the following code snippet before the placeholder: <|code_start|>
def start_job(self, jobfunc, donefunc):
"""
Start a job
jobfunc: executed in the child process
donefunc: executed in the parent process during a wait or wait_all call
"""
reason = 'build'
assert(self.tokens <= 1)
self._get_token(reason)
assert(self.tokens >= 1)
assert(self.tokens == 1)
self.tokens -= 1
r,w = os.pipe()
pid = os.fork()
if pid == 0:
# child
os.close(r)
rv = 201
try:
try:
rv = jobfunc() or 0
_debug('jobfunc completed (%r, %r)' % (jobfunc,rv))
except SafeError as e:
_log.error("%s" % (str(e),))
rv = SafeError.exitcode
except KeyboardInterrupt:
rv = SafeError.exitcode
except Exception:
traceback.print_exc()
<|code_end|>
, predict the next line using imports from the current file:
import tempfile
import traceback
import fcntl
import os, errno, select, signal
from .log import getLogger
from .error import SafeError, UNKNOWN_ERROR_CODE
and context including class names, function names, and sometimes code from other files:
# Path: python/gup/log.py
# def getLogger(*a):
# logger = logging.getLogger(*a)
# logger.addFilter(_color_filter)
# return logger
#
# Path: python/gup/error.py
# class SafeError(Exception):
# exitcode = 10
# pass
#
# UNKNOWN_ERROR_CODE = 1
. Output only the next line. | rv = UNKNOWN_ERROR_CODE |
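Aside: the two rows above quote start_job, which forks a worker, runs jobfunc in the child, and maps SafeError / KeyboardInterrupt / unexpected exceptions to distinct exit codes. A stripped-down sketch of that fork-and-report pattern (gup's real version also threads a pipe and a job-server token through it, which this omits):

import os
import traceback

def run_in_child(jobfunc):
    """Run jobfunc in a forked child and return its exit status to the parent."""
    pid = os.fork()
    if pid == 0:                                   # child
        code = 201
        try:
            code = jobfunc() or 0
        except KeyboardInterrupt:
            code = 10                              # mirrors SafeError.exitcode in the snippets
        except Exception:
            traceback.print_exc()
            code = 1                               # mirrors UNKNOWN_ERROR_CODE
        finally:
            os._exit(code)
    _, status = os.waitpid(pid, 0)                 # parent waits and decodes the status
    return os.WEXITSTATUS(status)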
Given the following code snippet before the placeholder: <|code_start|>#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
from __future__ import division
class TestHDFS(unittest.TestCase):
def setUp(self):
wd = tempfile.mkdtemp(suffix='_%s' % UNI_CHR)
wd_bn = os.path.basename(wd)
self.local_wd = "file:%s" % wd
fs = hdfs.hdfs("default", 0)
fs.create_directory(wd_bn)
self.hdfs_wd = fs.get_path_info(wd_bn)["name"]
fs.close()
basenames = ["test_path_%d" % i for i in range(2)]
self.local_paths = ["%s/%s" % (self.local_wd, bn) for bn in basenames]
self.hdfs_paths = ["%s/%s" % (self.hdfs_wd, bn) for bn in basenames]
self.data = make_random_data(
<|code_end|>
, predict the next line using imports from the current file:
import unittest
import tempfile
import os
import stat
import pydoop.hdfs as hdfs
from pydoop.utils.py3compat import czip
from threading import Thread
from pydoop.hdfs.common import BUFSIZE
from pydoop.test_utils import UNI_CHR, make_random_data, FSTree
and context including class names, function names, and sometimes code from other files:
# Path: pydoop/hdfs/common.py
# BUFSIZE = 16384
#
# Path: pydoop/test_utils.py
# UNI_CHR = _get_special_chr()
#
# def make_random_data(size=_RANDOM_DATA_SIZE, printable=True):
# randint = random.randint
# start, stop = (32, 126) if printable else (0, 255)
# return bytes(bytearray([randint(start, stop) for _ in range(size)]))
#
# class FSTree(object):
# """
# >>> t = FSTree('root')
# >>> d1 = t.add('d1')
# >>> f1 = t.add('f1', 0)
# >>> d2 = d1.add('d2')
# >>> f2 = d2.add('f2', 0)
# >>> for x in t.walk(): print x.name, x.kind
# ...
# root 1
# d1 1
# d2 1
# f2 0
# f1 0
# """
#
# def __init__(self, name, kind=1):
# assert kind in (0, 1) # (file, dir)
# self.name = name
# self.kind = kind
# if self.kind:
# self.children = []
#
# def add(self, name, kind=1):
# t = FSTree(name, kind)
# self.children.append(t)
# return t
#
# def walk(self):
# yield self
# if self.kind:
# for c in self.children:
# for t in c.walk():
# yield t
. Output only the next line. | 4 * BUFSIZE + BUFSIZE // 2, printable=False |
Given snippet: <|code_start|># BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
from __future__ import division
class TestHDFS(unittest.TestCase):
def setUp(self):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import tempfile
import os
import stat
import pydoop.hdfs as hdfs
from pydoop.utils.py3compat import czip
from threading import Thread
from pydoop.hdfs.common import BUFSIZE
from pydoop.test_utils import UNI_CHR, make_random_data, FSTree
and context:
# Path: pydoop/hdfs/common.py
# BUFSIZE = 16384
#
# Path: pydoop/test_utils.py
# UNI_CHR = _get_special_chr()
#
# def make_random_data(size=_RANDOM_DATA_SIZE, printable=True):
# randint = random.randint
# start, stop = (32, 126) if printable else (0, 255)
# return bytes(bytearray([randint(start, stop) for _ in range(size)]))
#
# class FSTree(object):
# """
# >>> t = FSTree('root')
# >>> d1 = t.add('d1')
# >>> f1 = t.add('f1', 0)
# >>> d2 = d1.add('d2')
# >>> f2 = d2.add('f2', 0)
# >>> for x in t.walk(): print x.name, x.kind
# ...
# root 1
# d1 1
# d2 1
# f2 0
# f1 0
# """
#
# def __init__(self, name, kind=1):
# assert kind in (0, 1) # (file, dir)
# self.name = name
# self.kind = kind
# if self.kind:
# self.children = []
#
# def add(self, name, kind=1):
# t = FSTree(name, kind)
# self.children.append(t)
# return t
#
# def walk(self):
# yield self
# if self.kind:
# for c in self.children:
# for t in c.walk():
# yield t
which might include code, classes, or functions. Output only the next line. | wd = tempfile.mkdtemp(suffix='_%s' % UNI_CHR) |
Using the snippet: <|code_start|># of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
from __future__ import division
class TestHDFS(unittest.TestCase):
def setUp(self):
wd = tempfile.mkdtemp(suffix='_%s' % UNI_CHR)
wd_bn = os.path.basename(wd)
self.local_wd = "file:%s" % wd
fs = hdfs.hdfs("default", 0)
fs.create_directory(wd_bn)
self.hdfs_wd = fs.get_path_info(wd_bn)["name"]
fs.close()
basenames = ["test_path_%d" % i for i in range(2)]
self.local_paths = ["%s/%s" % (self.local_wd, bn) for bn in basenames]
self.hdfs_paths = ["%s/%s" % (self.hdfs_wd, bn) for bn in basenames]
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import tempfile
import os
import stat
import pydoop.hdfs as hdfs
from pydoop.utils.py3compat import czip
from threading import Thread
from pydoop.hdfs.common import BUFSIZE
from pydoop.test_utils import UNI_CHR, make_random_data, FSTree
and context (class names, function names, or code) available:
# Path: pydoop/hdfs/common.py
# BUFSIZE = 16384
#
# Path: pydoop/test_utils.py
# UNI_CHR = _get_special_chr()
#
# def make_random_data(size=_RANDOM_DATA_SIZE, printable=True):
# randint = random.randint
# start, stop = (32, 126) if printable else (0, 255)
# return bytes(bytearray([randint(start, stop) for _ in range(size)]))
#
# class FSTree(object):
# """
# >>> t = FSTree('root')
# >>> d1 = t.add('d1')
# >>> f1 = t.add('f1', 0)
# >>> d2 = d1.add('d2')
# >>> f2 = d2.add('f2', 0)
# >>> for x in t.walk(): print x.name, x.kind
# ...
# root 1
# d1 1
# d2 1
# f2 0
# f1 0
# """
#
# def __init__(self, name, kind=1):
# assert kind in (0, 1) # (file, dir)
# self.name = name
# self.kind = kind
# if self.kind:
# self.children = []
#
# def add(self, name, kind=1):
# t = FSTree(name, kind)
# self.children.append(t)
# return t
#
# def walk(self):
# yield self
# if self.kind:
# for c in self.children:
# for t in c.walk():
# yield t
. Output only the next line. | self.data = make_random_data( |
Based on the snippet: <|code_start|>
def lsl(self):
self.__ls(hdfs.lsl, lambda x: x["name"])
def ls(self):
self.__ls(hdfs.ls, lambda x: x)
def mkdir(self):
for wd in self.local_wd, self.hdfs_wd:
d1 = "%s/d1" % wd
d2 = "%s/d2" % d1
hdfs.mkdir(d2)
dir_list = hdfs.ls(d1)
self.assertEqual(len(dir_list), 1)
self.assertTrue(dir_list[0].endswith(d2))
def load(self):
for test_path in self.hdfs_paths[0], self.local_paths[0]:
hdfs.dump(self.data, test_path, mode="wb")
rdata = hdfs.load(test_path)
self.assertEqual(rdata, self.data)
def __make_tree(self, wd, root="d1", create=True):
"""
d1
|-- d2
| `-- f2
`-- f1
"""
d1 = "%s/%s" % (wd, root)
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
import tempfile
import os
import stat
import pydoop.hdfs as hdfs
from pydoop.utils.py3compat import czip
from threading import Thread
from pydoop.hdfs.common import BUFSIZE
from pydoop.test_utils import UNI_CHR, make_random_data, FSTree
and context (classes, functions, sometimes code) from other files:
# Path: pydoop/hdfs/common.py
# BUFSIZE = 16384
#
# Path: pydoop/test_utils.py
# UNI_CHR = _get_special_chr()
#
# def make_random_data(size=_RANDOM_DATA_SIZE, printable=True):
# randint = random.randint
# start, stop = (32, 126) if printable else (0, 255)
# return bytes(bytearray([randint(start, stop) for _ in range(size)]))
#
# class FSTree(object):
# """
# >>> t = FSTree('root')
# >>> d1 = t.add('d1')
# >>> f1 = t.add('f1', 0)
# >>> d2 = d1.add('d2')
# >>> f2 = d2.add('f2', 0)
# >>> for x in t.walk(): print x.name, x.kind
# ...
# root 1
# d1 1
# d2 1
# f2 0
# f1 0
# """
#
# def __init__(self, name, kind=1):
# assert kind in (0, 1) # (file, dir)
# self.name = name
# self.kind = kind
# if self.kind:
# self.children = []
#
# def add(self, name, kind=1):
# t = FSTree(name, kind)
# self.children.append(t)
# return t
#
# def walk(self):
# yield self
# if self.kind:
# for c in self.children:
# for t in c.walk():
# yield t
. Output only the next line. | t1 = FSTree(d1) |
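Aside: the HDFS test rows above drive pydoop.hdfs through its convenience functions. A minimal round trip using only the calls visible in those rows, assuming a reachable default filesystem; the directory name and payload are mine:

import pydoop.hdfs as hdfs

d = "pydoop_example_dir"                           # hypothetical working directory
hdfs.mkdir(d)
path = "%s/blob.bin" % d
hdfs.dump(b"\x00\x01payload", path, mode="wb")     # write raw bytes to the URI
assert hdfs.load(path) == b"\x00\x01payload"       # read them back verbatim
print(hdfs.ls(d))                                  # the new file appears in the listing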
Based on the snippet: <|code_start|>
def test_string_as_string(self):
with sercore.FileOutStream(self.fname) as s:
s.write_string(self.STRING)
with sercore.FileInStream(self.fname) as s:
self.assertEqual(s.read_string(), self.STRING)
def test_string_as_bytes(self):
with sercore.FileOutStream(self.fname) as s:
s.write_string(self.STRING)
with sercore.FileInStream(self.fname) as s:
self.assertEqual(s.read_bytes(), self.STRING.encode("utf8"))
def test_bytes_as_string(self):
with sercore.FileOutStream(self.fname) as s:
s.write_bytes(self.BYTES)
with sercore.FileInStream(self.fname) as s:
self.assertEqual(s.read_string(), self.BYTES.decode("utf8"))
def test_bytes_as_bytes(self):
with sercore.FileOutStream(self.fname) as s:
s.write_bytes(self.BYTES)
with sercore.FileInStream(self.fname) as s:
self.assertEqual(s.read_bytes(), self.BYTES)
def test_output(self):
k, v = b"key", b"value"
with sercore.FileOutStream(self.fname) as s:
s.write_output(k, v)
with sercore.FileInStream(self.fname) as s:
<|code_end|>
, predict the immediate next line with the help of imports:
import io
import os
import shutil
import struct
import tempfile
import unittest
import uuid
import pydoop.sercore as sercore
from random import randint
from pydoop.mapreduce.binary_protocol import OUTPUT, PARTITIONED_OUTPUT
and context (classes, functions, sometimes code) from other files:
# Path: pydoop/mapreduce/binary_protocol.py
# OUTPUT = 50
#
# PARTITIONED_OUTPUT = 51
. Output only the next line. | self.assertEqual(s.read_vint(), OUTPUT) |
Next line prediction: <|code_start|> def test_string_as_bytes(self):
with sercore.FileOutStream(self.fname) as s:
s.write_string(self.STRING)
with sercore.FileInStream(self.fname) as s:
self.assertEqual(s.read_bytes(), self.STRING.encode("utf8"))
def test_bytes_as_string(self):
with sercore.FileOutStream(self.fname) as s:
s.write_bytes(self.BYTES)
with sercore.FileInStream(self.fname) as s:
self.assertEqual(s.read_string(), self.BYTES.decode("utf8"))
def test_bytes_as_bytes(self):
with sercore.FileOutStream(self.fname) as s:
s.write_bytes(self.BYTES)
with sercore.FileInStream(self.fname) as s:
self.assertEqual(s.read_bytes(), self.BYTES)
def test_output(self):
k, v = b"key", b"value"
with sercore.FileOutStream(self.fname) as s:
s.write_output(k, v)
with sercore.FileInStream(self.fname) as s:
self.assertEqual(s.read_vint(), OUTPUT)
self.assertEqual(s.read_bytes(), k)
self.assertEqual(s.read_bytes(), v)
part = 1
with sercore.FileOutStream(self.fname) as s:
s.write_output(k, v, part)
with sercore.FileInStream(self.fname) as s:
<|code_end|>
. Use current file imports:
(import io
import os
import shutil
import struct
import tempfile
import unittest
import uuid
import pydoop.sercore as sercore
from random import randint
from pydoop.mapreduce.binary_protocol import OUTPUT, PARTITIONED_OUTPUT)
and context including class names, function names, or small code snippets from other files:
# Path: pydoop/mapreduce/binary_protocol.py
# OUTPUT = 50
#
# PARTITIONED_OUTPUT = 51
. Output only the next line. | self.assertEqual(s.read_vint(), PARTITIONED_OUTPUT) |
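Aside: the two sercore rows above exercise pydoop's binary stream wrappers. A compact round trip restricted to the calls those rows actually show; the temporary path is mine:

import os
import tempfile
import pydoop.sercore as sercore

fname = os.path.join(tempfile.mkdtemp(), "stream.bin")
with sercore.FileOutStream(fname) as out:
    out.write_string("hello")                      # stored UTF-8 encoded
    out.write_bytes(b"\x00\x01")
with sercore.FileInStream(fname) as ins:
    assert ins.read_string() == "hello"
    assert ins.read_bytes() == b"\x00\x01"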
Given snippet: <|code_start|># BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
TEST_MODULE_NAMES = [
'test_submit',
]
def suite(path=None):
suites = []
for module in TEST_MODULE_NAMES:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import sys
from pydoop.test_utils import get_module
and context:
# Path: pydoop/test_utils.py
# def get_module(name, path=None):
#
# fp, pathname, description = imp.find_module(name, path)
# try:
# module = imp.load_module(name, fp, pathname, description)
# return module
# finally:
# fp.close()
which might include code, classes, or functions. Output only the next line. | suites.append(get_module(module, path).suite()) |
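Aside: the test-runner rows build one big suite by importing each named module and calling its suite() helper. The same aggregation can be written with the standard library's TestLoader instead of a per-module suite() function; the module names below are placeholders:

import unittest

TEST_MODULE_NAMES = ["test_submit"]

def suite():
    loader = unittest.TestLoader()
    s = unittest.TestSuite()
    for name in TEST_MODULE_NAMES:
        s.addTests(loader.loadTestsFromName(name))  # imports the module and collects its tests
    return s

if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite())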
Given the following code snippet before the placeholder: <|code_start|>#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
def uni_last(tup):
return tup[:-1] + (tup[-1] + UNI_CHR,)
class TestSplit(unittest.TestCase):
def good(self):
cases = [
('hdfs://localhost:9000/', ('localhost', 9000, '/')),
('hdfs://localhost:9000/a/b', ('localhost', 9000, '/a/b')),
<|code_end|>
, predict the next line using imports from the current file:
import os
import unittest
import tempfile
import pydoop.hdfs as hdfs
from numbers import Number
from pydoop.hdfs.common import DEFAULT_PORT, DEFAULT_USER
from pydoop.utils.misc import make_random_str
from pydoop.test_utils import UNI_CHR
and context including class names, function names, and sometimes code from other files:
# Path: pydoop/hdfs/common.py
# DEFAULT_PORT = 8020 # org/apache/hadoop/hdfs/server/namenode/NameNode.java
#
# DEFAULT_USER = getpass.getuser()
#
# Path: pydoop/test_utils.py
# UNI_CHR = _get_special_chr()
. Output only the next line. | ('hdfs://localhost/a/b', ('localhost', DEFAULT_PORT, '/a/b')), |
Predict the next line after this snippet: <|code_start|>def uni_last(tup):
return tup[:-1] + (tup[-1] + UNI_CHR,)
class TestSplit(unittest.TestCase):
def good(self):
cases = [
('hdfs://localhost:9000/', ('localhost', 9000, '/')),
('hdfs://localhost:9000/a/b', ('localhost', 9000, '/a/b')),
('hdfs://localhost/a/b', ('localhost', DEFAULT_PORT, '/a/b')),
('hdfs:///a/b', ('default', 0, '/a/b')),
('hdfs:/', ('default', 0, '/')),
('file:///a/b', ('', 0, '/a/b')),
('file:/a/b', ('', 0, '/a/b')),
('file:///a', ('', 0, '/a')),
('file:/a', ('', 0, '/a')),
('file://temp/foo.txt', ('', 0, 'temp/foo.txt')),
('file://temp', ('', 0, 'temp')),
]
if hdfs.default_is_local():
cases.extend([
('///a/b', ('', 0, '/a/b')),
('/a/b', ('', 0, '/a/b')),
('a/b', ('', 0, 'a/b')),
])
else:
cases.extend([
('///a/b', ('default', 0, '/a/b')),
('/a/b', ('default', 0, '/a/b')),
<|code_end|>
using the current file's imports:
import os
import unittest
import tempfile
import pydoop.hdfs as hdfs
from numbers import Number
from pydoop.hdfs.common import DEFAULT_PORT, DEFAULT_USER
from pydoop.utils.misc import make_random_str
from pydoop.test_utils import UNI_CHR
and any relevant context from other files:
# Path: pydoop/hdfs/common.py
# DEFAULT_PORT = 8020 # org/apache/hadoop/hdfs/server/namenode/NameNode.java
#
# DEFAULT_USER = getpass.getuser()
#
# Path: pydoop/test_utils.py
# UNI_CHR = _get_special_chr()
. Output only the next line. | ('a/b', ('default', 0, '/user/%s/a/b' % DEFAULT_USER)), |
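Aside: the split tests above map URIs such as hdfs://localhost:9000/a/b to (host, port, path) triples, with 8020 as the default port and ('default', 0) standing for the configured filesystem. As a rough illustration of the plain hdfs:// cases only (pydoop's real splitter also handles file: URIs, relative paths and the user's home directory), urllib.parse is enough:

from urllib.parse import urlparse

DEFAULT_PORT = 8020                                # value quoted from pydoop/hdfs/common.py above

def naive_hdfs_split(uri):
    """(host, port, path) for absolute hdfs:// URIs; no relative-path or user handling."""
    u = urlparse(uri)
    if not u.hostname:
        return "default", 0, u.path
    return u.hostname, u.port if u.port is not None else DEFAULT_PORT, u.path

naive_hdfs_split("hdfs://localhost:9000/a/b")      # -> ('localhost', 9000, '/a/b')
naive_hdfs_split("hdfs:///a/b")                    # -> ('default', 0, '/a/b')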
Here is a snippet: <|code_start|>#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
TEST_MODULE_NAMES = [
'test_hadoop_utils',
'test_hadut',
'test_pydoop',
]
def suite(path=None):
suites = []
for module in TEST_MODULE_NAMES:
<|code_end|>
. Write the next line using the current file imports:
import sys
import unittest
from pydoop.test_utils import get_module
and context from other files:
# Path: pydoop/test_utils.py
# def get_module(name, path=None):
#
# fp, pathname, description = imp.find_module(name, path)
# try:
# module = imp.load_module(name, fp, pathname, description)
# return module
# finally:
# fp.close()
, which may include functions, classes, or code. Output only the next line. | suites.append(get_module(module, path).suite()) |