text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def boundary_interaction(self, **kwargs):
    """ Apply boundary interactions (shoreline, bathymetry, sea-surface)
    to a particle's movement over one timestep.

    Mutates ``ending`` in place and sets ``particle.location`` to the
    final position.  Returns None.

    Required kwargs:
        particle: the particle being moved
        starting: Location4D at the start of the timestep
        ending:   Location4D at the end of the timestep
    Optional kwargs (forwarded to the shoreline reaction):
        distance, angle, azimuth, reverse_azimuth
    """
    particle = kwargs.pop('particle')
    starting = kwargs.pop('starting')
    ending = kwargs.pop('ending')
    # shoreline
    if self.useshore:
        intersection_point = self._shoreline.intersect(start_point=starting.point, end_point=ending.point)
        if intersection_point:
            # Set the intersection point.
            # NOTE(review): starting.time + (ending.time - starting.time)
            # simplifies to ending.time; possibly meant to interpolate the
            # fraction of the timestep at which the hit occurred -- confirm.
            hitpoint = Location4D(point=intersection_point['point'], time=starting.time + (ending.time - starting.time))
            particle.location = hitpoint
            # This relies on the shoreline to put the particle in water and not on shore.
            resulting_point = self._shoreline.react(start_point=starting,
                                                    end_point=ending,
                                                    hit_point=hitpoint,
                                                    reverse_distance=self.reverse_distance,
                                                    feature=intersection_point['feature'],
                                                    distance=kwargs.get('distance'),
                                                    angle=kwargs.get('angle'),
                                                    azimuth=kwargs.get('azimuth'),
                                                    reverse_azimuth=kwargs.get('reverse_azimuth'))
            ending.latitude = resulting_point.latitude
            ending.longitude = resulting_point.longitude
            ending.depth = resulting_point.depth
            logger.debug("%s - hit the shoreline at %s. Setting location to %s." % (particle.logstring(), hitpoint.logstring(), ending.logstring()))
    # bathymetry
    if self.usebathy:
        # Settled particles stay on the bottom; skip the bounce.
        if not particle.settled:
            bintersect = self._bathymetry.intersect(start_point=starting, end_point=ending)
            if bintersect:
                pt = self._bathymetry.react(type='reverse', start_point=starting, end_point=ending)
                logger.debug("%s - hit the bottom at %s. Setting location to %s." % (particle.logstring(), ending.logstring(), pt.logstring()))
                ending.latitude = pt.latitude
                ending.longitude = pt.longitude
                ending.depth = pt.depth
    # sea-surface
    if self.usesurface:
        # Clamp particles that rose above the surface back to depth 0.
        if ending.depth > 0:
            logger.debug("%s - rose out of the water. Setting depth to 0." % particle.logstring())
            ending.depth = 0
    particle.location = ending
    return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_buildfile_path(settings):
    """Return the path a build tarball should be downloaded to."""
    filename = os.path.basename(settings.build_url)
    return os.path.join(BUILDS_ROOT, filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prior_dates(*args, **kwargs):
    """Get the prior distribution of calibrated radiocarbon dates.

    The core dates are taken from the first positional argument, or from
    the ``coredates`` keyword when no positional argument is given.
    Other kwargs: ``d_r``, ``d_std``, ``t_a``, ``t_b`` (array-like),
    ``cc`` (iterable of calibration-curve indices), optional
    ``normal_distr`` flag, and optional ``cc1``..``cc4`` curve-name
    overrides.

    Returns
    -------
    d, p
        The output of ``calibrate_dates`` (dates and their densities).
    """
    try:
        chron = args[0]
    except IndexError:
        chron = kwargs['coredates']
    d_r = np.array(kwargs['d_r'])
    d_std = np.array(kwargs['d_std'])
    t_a = np.array(kwargs['t_a'])
    t_b = np.array(kwargs['t_b'])
    # `normal_distr` is optional; default to None when absent.
    normal_distr = kwargs.get('normal_distr')
    cc_int = kwargs['cc']
    ccdict = {0: 'ConstCal', 1: 'IntCal3', 2: 'Marine13',
              3: 'SHCal13', 4: 'ConstCal'}
    # Allow callers to override calibration curve names via cc1..cc4.
    for i in range(1, 5):
        key = 'cc{}'.format(i)
        if key in kwargs:
            ccdict[i] = str(kwargs[key])
    cc = [fetch_calibcurve(ccdict[int(i)]) for i in cc_int]
    d, p = calibrate_dates(chron, calib_curve=cc, d_r=d_r, d_std=d_std,
                           t_a=t_a, t_b=t_b, normal_distr=normal_distr)
    return d, p
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prior_sediment_rate(*args, **kwargs):
    """Get the prior density of sediment rates.

    Returns
    -------
    y : ndarray
        Array giving the density.
    x : ndarray
        Array of sediment accumulation values (yr/cm) over which the
        density was evaluated.
    """
    # PlotAccPrior @ Bacon.R ln 113 -> ln 1097-1115
    # Gamma prior: alpha = acc_shape, beta = acc_shape / acc_mean
    # TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.
    mean = kwargs['acc_mean']
    shape = kwargs['acc_shape']
    x = np.linspace(0, 6 * np.max(mean), 100)
    y = stats.gamma.pdf(x, a=shape, scale=1 / (shape / mean))
    return y, x
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prior_sediment_memory(*args, **kwargs):
    """Get the prior density of sediment memory.

    Returns
    -------
    y : ndarray
        Array giving the density.
    x : ndarray
        Array of memory (ratio) values over which the density was
        evaluated.
    """
    # "plot the prior for the memory (= accumulation rate variability between neighbouring depths)"
    # PlotMemPrior @ Bacon.R ln 114 -> ln 1119 - 1141
    # Beta prior: w_a = mem_strength * mem_mean, w_b = mem_strength * (1 - mem_mean)
    # TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.
    strength = kwargs['mem_strength']  # aka. `mem_shape`
    mean = kwargs['mem_mean']
    x = np.linspace(0, 1, 100)
    y = stats.beta.pdf(x, a=strength * mean, b=strength * (1 - mean))
    return y, x
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_browser(self):
    """Log in to the CERN SSO form using a headless phantomjs browser.

    Update this everytime the CERN SSO login form is refactored.
    """
    self.browser = splinter.Browser('phantomjs')
    self.browser.visit(self.server_url)
    self.browser.find_link_by_partial_text("Sign in").click()
    # The ASP.NET field names are auto-generated by the login page; they
    # must match the current form markup exactly.
    self.browser.fill(
        'ctl00$ctl00$NICEMasterPageBodyContent$SiteContentPlaceholder$'
        'txtFormsLogin', self.user)
    self.browser.fill(
        'ctl00$ctl00$NICEMasterPageBodyContent$SiteContentPlaceholder$'
        'txtFormsPassword', self.password)
    # NOTE(review): submit is clicked twice -- presumably the first submit
    # posts credentials and a follow-up confirmation page also needs
    # submitting; confirm against the live login flow.
    self.browser.find_by_css('input[type=submit]').click()
    self.browser.find_by_css('input[type=submit]').click()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download(self, directory='~/Music', song_name='%a - %s - %A'):
    """Download a song to a directory.

    :param directory: A system file path.
    :param song_name: A name that will be formatted with :meth:`format`.
    :return: The formatted song name.
    """
    formatted = self.format(song_name)
    # os.path.join is safer than manual sep concatenation.
    path = os.path.join(os.path.expanduser(directory), formatted + '.mp3')
    # The original `try: ... except: raise` was a no-op; any exception
    # from the download or write propagates naturally.
    raw = self.safe_download()
    with open(path, 'wb') as f:
        f.write(raw)
    return formatted
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def safe_download(self):
    """Download a song while respecting Grooveshark's API protocol.

    Starts a 30-second timer that reports the stream as played for 30+
    seconds (mimicking a real player), reads the whole stream, and -- if
    the full advertised size was read -- reports the completed download
    and removes the song from the queue.

    :return: The raw song data.
    :raises ValueError: if fewer bytes than ``stream.size`` were read.
    """
    def _markStreamKeyOver30Seconds(stream):
        # Fired by the timer: tell the API we've streamed for 30+ seconds.
        self._connection.request(
            'markStreamKeyOver30Seconds',
            {'streamServerID': stream.ip,
             'artistID': self.artist.id,
             'songQueueID': self._connection.session.queue,
             'songID': self.id,
             'songQueueSongID': 1,
             'streamKey': stream.key},
            self._connection.header('markStreamKeyOver30Seconds', 'jsqueue'))
    stream = self.stream
    timer = threading.Timer(30, _markStreamKeyOver30Seconds, [stream])
    timer.start()
    raw = stream.data.read()
    if len(raw) == stream.size:
        # Complete read: the 30-second notification is no longer needed.
        timer.cancel()
        self._connection.request(
            'markSongDownloadedEx',
            {'streamServerID': stream.ip,
             'songID': self.id,
             'streamKey': stream.key},
            self._connection.header('markSongDownloadedEx', 'jsqueue'))
        self._connection.request(
            'removeSongsFromQueue',
            {'userRemoved': True,
             'songQueueID': self._connection.session.queue,
             'songQueueSongIDs': [1]},
            self._connection.header('removeSongsFromQueue', 'jsqueue'))
        return raw
    else:
        raise ValueError("Content-Length {}, but read {}"
                         .format(stream.size, len(raw)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy(self):
    """Return a new configuration holding this one's options."""
    cls = self.__class__
    return cls(options=self.__options,
               attribute_options=self.__attribute_options)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_option(self, name):
    """Return the value of the generic configuration option `name`,
    or `None` if the option was not set.
    """
    self.__validate_option_name(name)
    return self.__options.get(name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_option(self, name, value):
    """Set the generic configuration option `name` to `value`."""
    self.__validate_option_name(name)
    self.__options[name] = value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_attribute_option(self, attribute, option_name, option_value):
    """Set `option_name` to `option_value` for the given attribute."""
    self.__validate_attribute_option_name(option_name)
    key = self.__make_key(attribute)
    # Create the per-attribute option map on first use.
    self.__attribute_options.setdefault(key, {})[option_name] = option_value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_attribute_option(self, attribute, option_name):
    """Return the value of `option_name` for the given attribute.

    Raises KeyError when no options exist for the attribute; returns
    `None` when the attribute exists but the option was never set.
    """
    self.__validate_attribute_option_name(option_name)
    key = self.__make_key(attribute)
    return self.__attribute_options[key].get(option_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_attribute_options(self, attribute=None):
    """Return a copy of the mapping options for the given attribute name,
    or a copy of all mapping options if no attribute name is provided.

    All options that were not explicitly configured default to `None`.

    :param attribute: attribute name or tuple specifying an attribute
        path; may be `None` to request all attributes' options.
    :returns: mapping options dictionary (including default `None` values)
    """
    attribute_key = self.__make_key(attribute)
    # assumes __make_key(None) returns None -- TODO confirm
    if attribute_key is None:
        # All attributes: each maps to the defaults overlaid with its
        # explicitly configured options.
        opts = defaultdict(self._default_attributes_options.copy)
        for attr, mp_options in iteritems_(self.__attribute_options):
            opts[attr].update(mp_options)
    else:
        # Single attribute: defaults overlaid with its configured options.
        opts = self._default_attributes_options.copy()
        attr_opts = self.__attribute_options[attribute_key]
        opts.update(attr_opts)
    return opts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, visitor):
    """Traverse this representer configuration with the given visitor.

    :param visitor: :class:`RepresenterConfigVisitorBase` instance.
    """
    attr_option_map = self.__config.get_attribute_options()
    # Sorting the keys results in a depth-first traversal, which is just
    # what we want.
    for (key, key_attr_option_map) in sorted(iteritems_(attr_option_map)):
        # Skip keys nested deeper than the configured maximum depth.
        # (`x is not None` is the idiomatic form of `not x is None`.)
        if self.__max_depth is not None and len(key) > self.__max_depth:
            continue
        visitor.visit(key, key_attr_option_map)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def with_retry(cls, methods):
    """Wrap the named methods of `cls` with an exponential-backoff retry
    mechanism (retrying on BotoServerError).

    :param methods: iterable of method names to wrap.
    :returns: the (mutated) class, for chaining.
    """
    retry_with_backoff = retry(
        retry_on_exception=lambda e: isinstance(e, BotoServerError),
        wait_exponential_multiplier=1000,
        wait_exponential_max=10000
    )
    for method in methods:
        m = getattr(cls, method, None)
        # `collections.Callable` was removed in Python 3.10; the builtin
        # `callable()` is the portable equivalent.
        if callable(m):
            setattr(cls, method, retry_with_backoff(m))
    return cls
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_file(file_path) -> dict:
    """Load and parse a JSON file (read as UTF-8 text)."""
    with io.open(file_path, mode='r', encoding='utf-8') as stream:
        return Json.parse(stream, True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def safe_values(self, value):
    """Coerce non-string values that would not serialize to JSON.

    Dates are formatted using the app's DATETIME config (falling back to
    '%Y-%m-%d %H:%M:%S' outside an app context), bytes are decoded as
    UTF-8, and Decimals become floats; anything else passes through.
    """
    # TODO: override-able?
    if isinstance(value, datetime.date):
        try:
            return value.strftime('{0}{1}{2}'.format(
                current_app.config['DATETIME']['DATE_FORMAT'],
                current_app.config['DATETIME']['SEPARATOR'],
                current_app.config['DATETIME']['TIME_FORMAT']))
        except RuntimeError:
            # No Flask app context available; use a sane default format.
            # (The original bound the exception to an unused variable.)
            return value.strftime('%Y-%m-%d %H:%M:%S')
    if isinstance(value, bytes):
        return value.decode('utf-8')
    if isinstance(value, decimal.Decimal):
        return float(value)
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def camel_case(self, snake_case):
    """Convert a snake_case string to camelCase."""
    head, *tail = snake_case.split('_')
    return head + ''.join(part.title() for part in tail)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __find_object_children(self, obj) -> dict:
    """Convert an object into a flattened, serializable structure.

    Dispatches on the object's shape: real dicts go through
    __construct_object, sequences through __construct_list, and other
    objects are serialized from their __dict__.
    """
    # Real dicts: `items` is a builtin function type on dict instances.
    if hasattr(obj, 'items') and \
            isinstance(obj.items, types.BuiltinFunctionType):
        return self.__construct_object(obj)
    elif isinstance(obj, (list, tuple, set)):
        return self.__construct_list(obj)
    else:
        exclude_list = []
        if hasattr(obj, '_sa_instance_state'):
            # SQLAlchemy instance: load only deferred objects.
            if len(orm.attributes.instance_state(obj).unloaded) > 0:
                mapper = inspect(obj)
                for column in mapper.attrs:
                    # Bare attribute accesses trigger SQLAlchemy's lazy
                    # load; the values are intentionally discarded.
                    column.key
                    column.value
            if hasattr(obj, 'json_exclude_list'):
                # do not serialize any values in this list
                exclude_list = obj.json_exclude_list
            return self.__construct_object(vars(obj), exclude_list)
        # NOTE(review): non-SQLAlchemy objects fall through to None here --
        # confirm whether plain objects were meant to serialize too.
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __iterate_value(self, value):
    """Return a JSON-serializable representation of `value`.

    Objects with a __dict__ and dicts recurse through
    __find_object_children; sequences through __construct_list; anything
    else is coerced by safe_values.
    """
    if hasattr(value, '__dict__') or isinstance(value, dict):
        return self.__find_object_children(value)  # go through dict/class
    elif isinstance(value, (list, tuple, set)):
        return self.__construct_list(value)  # go through list
    return self.safe_values(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_xml(self):
    '''
    Writes a VocabularyKey Xml as per Healthvault schema.

    Emits a <vocabulary-key> element with a mandatory <name> child and
    optional <family>, <version> and <code-value> children; when
    `self.language` is set it becomes the element's xml:lang attribute.

    :returns: lxml.etree.Element representing a single VocabularyKey
    '''
    if self.language is not None:
        # xml:lang lives in the reserved XML namespace.
        lang = {'{http://www.w3.org/XML/1998/namespace}lang': self.language}
        key = etree.Element('vocabulary-key', attrib=lang)
    else:
        key = etree.Element('vocabulary-key')

    def _append_child(tag, text):
        # Append a simple text child element to the key.
        child = etree.Element(tag)
        child.text = text
        key.append(child)

    _append_child('name', self.name)
    # Optional children are emitted only when their value is set.
    if self.family is not None:
        _append_child('family', self.family)
    if self.version is not None:
        _append_child('version', self.version)
    if self.code_value is not None:
        _append_child('code-value', self.code_value)
    return key
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_xml(self, key_xml):
    '''
    Parse a VocabularyKey from an Xml as per Healthvault schema.

    Populates name, family, version and description from child elements
    and language from the element's xml:lang attribute.

    :param key_xml: lxml.etree.Element representing a single VocabularyKey
    '''
    xmlutils = XmlUtils(key_xml)
    self.name = xmlutils.get_string_by_xpath('name')
    self.family = xmlutils.get_string_by_xpath('family')
    self.version = xmlutils.get_string_by_xpath('version')
    self.description = xmlutils.get_string_by_xpath('description')
    self.language = xmlutils.get_lang()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_and_exit(results):
    """Print each result and stop the reactor."""
    # NOTE(review): despite the docstring, nothing here stops the reactor;
    # presumably the caller chains a reactor.stop() afterwards -- confirm.
    # `results` looks like Twisted DeferredList output: (success, value).
    for success, value in results:
        if success:
            # Python 2 print statement; encode for the console's locale.
            print value.encode(locale.getpreferredencoding())
        else:
            # Failure objects carry their own traceback printer.
            value.printTraceback()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _topological_sort(self):
    """Kahn's algorithm for Topological Sorting.

    - Finds cycles in the graph (stored in self._circular_dependencies)
    - Computes dependency weight; the sorted nodes are stored in
      self._sorted_nodes with dependencies ordered first.
    """
    sorted_graph = []
    node_map = self._graph.get_nodes()
    nodes = [NodeVisitor(node_map[node]) for node in node_map]
    def get_pointers_for_edge_nodes(visitor_decorated_node):
        # Resolve a node's edge ids to the NodeVisitor wrappers above.
        edges = []
        edge_ids = visitor_decorated_node.get_node().get_edges()
        for node in nodes:
            if node.get_id() in edge_ids:
                edges.append(node)
        return edges
    # node is initially weighted with the number of immediate dependencies
    for node in nodes:
        for edge in get_pointers_for_edge_nodes(node):
            edge.increment()
    # Start with a list of nodes who have no dependents
    resolved = [node for node in nodes if node.get_weight() == 0]
    while resolved:
        node = resolved.pop()
        sorted_graph.append(node)
        # Removing this node unblocks any node that depended on it.
        for edge in get_pointers_for_edge_nodes(node):
            edge.decrement()
            if edge.get_weight() == 0:
                resolved.append(edge)
    # Nodes still carrying weight were never resolved: part of a cycle.
    self._circular_dependencies = [
        node.get_node() for node in nodes if node.get_weight() > 0]
    self._sorted_nodes = list(reversed(
        [node.get_node() for node in sorted_graph]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_environment_vars(self):
    """Apply any MACH9_-prefixed environment variables to this config."""
    for key, value in os.environ.items():
        if not key.startswith(MACH9_PREFIX):
            continue
        # Everything after the prefix is the configuration key.
        _, config_key = key.split(MACH9_PREFIX, 1)
        self[config_key] = value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy(self, parent=None):
    """Deep-copy this structure, including all of its children."""
    duplicate = Structure(None, parent=parent)
    duplicate.key = self.key
    duplicate.type_ = self.type_
    duplicate.val_guaranteed = self.val_guaranteed
    duplicate.key_guaranteed = self.key_guaranteed
    # Children are copied recursively, re-parented to the new node.
    duplicate.children.extend(child.copy(duplicate) for child in self.children)
    return duplicate
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generation(self):
    """Return the number of ancestors that are dictionaries."""
    parent = self.parent
    if not parent:
        return 0
    # A dict parent adds one level; other containers are transparent.
    return parent.generation + (1 if parent.is_dict else 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def type_string(self):
    """Return a string describing this structure's type.

    A leading '*' marks values that are not guaranteed.
    """
    star = '' if self.val_guaranteed else '*'
    if self.is_tuple:
        inner = ', '.join(item.type_string for item in self.children)
        return '{}({})'.format(star, inner)
    if self.is_list:
        return '{}[{}]'.format(star, self.children[0].type_string)
    return '{}{}'.format(star, self.type_.__name__)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_field(obj, field_name, value):
    """Fancy setattr with debugging.

    Sets ``obj.field_name = value`` only when the comparable
    representation actually changes, and records each change in the
    list stored on the DIRTY attribute of ``obj``.
    """
    old = getattr(obj, field_name)
    field = obj._meta.get_field(field_name)
    # is_relation is Django 1.8 only
    if field.is_relation:
        # If field_name is the `_id` field, then there is no 'pk' attr and
        # old/value *is* the pk
        old_repr = None if old is None else getattr(old, 'pk', old)
        new_repr = None if value is None else getattr(value, 'pk', value)
    elif field.__class__.__name__ == 'DateTimeField':
        # Compare datetimes via a canonical representation.
        old_repr = None if old is None else datetime_repr(old)
        new_repr = None if value is None else datetime_repr(value)
    else:
        old_repr = None if old is None else str(old)
        new_repr = None if value is None else str(value)
    if old_repr != new_repr:
        setattr(obj, field_name, value)
        # Accumulate an audit trail of changes on the instance itself.
        if not hasattr(obj, DIRTY):
            setattr(obj, DIRTY, [])
        getattr(obj, DIRTY).append(dict(
            field_name=field_name,
            old_value=old_repr,
            new_value=new_repr,
        ))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def obj_update(obj, data: dict, *, update_fields=UNSET, save: bool=True) -> bool:
    """Fancy way to update `obj` with `data` dict.

    Parameters
    ----------
    obj : Django model instance
    data : dict
        The data to update ``obj`` with.
    update_fields
        Use your ``update_fields`` instead of our generated one.  If you
        need an auto_now or auto_now_add field to get updated, set this
        to ``None`` to get the default Django behavior.
    save : bool
        If save=False, then don't actually save.  This can be useful if
        you just want to utilize the verbose logging.  DEPRECATED in
        favor of the more standard ``update_fields=[]``.

    Returns
    -------
    bool
        True if data changed.
    """
    for field_name, value in data.items():
        set_field(obj, field_name, value)
    dirty_data = getattr(obj, DIRTY, None)
    if not dirty_data:
        # Nothing changed: no save, no logging.
        return False
    logger.debug(
        human_log_formatter(dirty_data),
        extra={
            'model': obj._meta.object_name,
            'pk': obj.pk,
            'changes': json_log_formatter(dirty_data),
        }
    )
    if update_fields == UNSET:
        # Default: persist only the fields that actually changed.
        update_fields = list(map(itemgetter('field_name'), dirty_data))
    if not save:
        # Deprecated escape hatch: log, but persist no fields.
        update_fields = ()
    obj.save(update_fields=update_fields)
    delattr(obj, DIRTY)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def obj_update_or_create(model, defaults=None, update_fields=UNSET, **kwargs):
    """Mimic queryset.update_or_create but using obj_update.

    Returns an ``(obj, created)`` pair like Django's update_or_create.
    """
    obj, created = model.objects.get_or_create(defaults=defaults, **kwargs)
    if created:
        logger.debug('CREATED %s %s',
                     model._meta.object_name,
                     obj.pk,
                     extra={'pk': obj.pk})
    else:
        # Existing row: apply `defaults` through obj_update, which logs
        # and saves only the changed fields.
        obj_update(obj, defaults, update_fields=update_fields)
    return obj, created
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detect_scheme(filename):
    """Detects partitioning scheme of the source.

    Args:
        filename (str): path to file or device for detection of
            partitioning scheme.

    Returns:
        SCHEME_MBR, SCHEME_GPT or SCHEME_UNKNOWN

    Raises:
        IOError: The file doesn't exist or cannot be opened for reading
    """
    logger = logging.getLogger(__name__)
    logger.info('Detecting partitioning scheme')
    with open(filename, 'rb') as source:
        # Look for MBR signature first
        source.seek(mbr.MBR_SIG_OFFSET)
        (signature,) = struct.unpack("<H", source.read(mbr.MBR_SIG_SIZE))
        if signature != mbr.MBR_SIGNATURE:
            # Something else
            logger.debug('Unknown partitioning scheme')
            return PartitionScheme.SCHEME_UNKNOWN
        # Could be MBR or GPT, look for GPT header
        source.seek(gpt.GPT_HEADER_OFFSET)
        (signature,) = struct.unpack("<8s", source.read(gpt.GPT_SIG_SIZE))
        if signature != gpt.GPT_SIGNATURE:
            logger.debug('MBR scheme detected')
            return PartitionScheme.SCHEME_MBR
        logger.debug('GPT scheme detected')
        return PartitionScheme.SCHEME_GPT
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _has_file_rolled(self):
    """Check if the file has been rolled (log rotation).

    Heuristic: if the current size is smaller than the last observed
    size, the file has probably been replaced by a fresh one.
    """
    # if the size is smaller than before, the file has
    # probably been rolled
    if self._fh:
        size = self._getsize_of_current_file()
        if size < self.oldsize:
            return True
        # Remember the size for the next check.
        self.oldsize = size
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _open_file(self, filename):
    """Open a file to be tailed.

    On POSIX a plain binary open suffices.  On Windows the file must be
    opened through the WIN32 API with share flags so the tailer does not
    lock the file against the process writing or rotating it.
    """
    if not self._os_is_windows:
        self._fh = open(filename, "rb")
        self.filename = filename
        self._fh.seek(0, os.SEEK_SET)
        self.oldsize = 0
        return
    # if we're in Windows, we need to use the WIN32 API to open the
    # file without locking it
    import win32file
    import msvcrt
    handle = win32file.CreateFile(filename,
                                  win32file.GENERIC_READ,
                                  win32file.FILE_SHARE_DELETE |
                                  win32file.FILE_SHARE_READ |
                                  win32file.FILE_SHARE_WRITE,
                                  None,
                                  win32file.OPEN_EXISTING,
                                  0,
                                  None)
    # Convert the Win32 handle into a CRT file descriptor so that the
    # regular open() machinery can wrap it.
    detached_handle = handle.Detach()
    file_descriptor = msvcrt.open_osfhandle(
        detached_handle, os.O_RDONLY)
    self._fh = open(file_descriptor, "rb")
    self.filename = filename
    self._fh.seek(0, os.SEEK_SET)
    self.oldsize = 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _filehandle(self):
    """Return a filehandle to the file being tailed.

    Reopens the file when it has been rolled; on the very first open,
    seeks to the end so that only newly appended content is tailed.
    """
    # if file is opened and it has been rolled we need to close the file
    # and then to reopen it
    if self._fh and self._has_file_rolled():
        try:
            self._fh.close()
        except Exception:
            # Best-effort close; a failed close must not stop the reopen.
            pass
        self._fh = None
    # if the file is closed (or has been closed right now), open it
    if not self._fh:
        self._open_file(self.filename)
        if not self.opened_before:
            # First ever open: skip existing content, tail from the end.
            self.opened_before = True
            self._fh.seek(0, os.SEEK_END)
    return self._fh
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_class(class_string):
    """Get a class from a dotted string, or None if it cannot be
    imported (e.g. 'package.module.ClassName' -> ClassName).
    """
    # NOTE(review): .encode('ascii') yields bytes on Python 3, which makes
    # split('.') fail with a TypeError -- this code targets Python 2.
    split_string = class_string.encode('ascii').split('.')
    import_path = '.'.join(split_string[:-1])
    class_name = split_string[-1]
    if class_name:
        try:
            if import_path:
                # Dotted path: import the module, then pull the class off it.
                mod = __import__(import_path, globals(), {}, [class_name])
                cls = getattr(mod, class_name)
            else:
                # Bare name: treat it as a top-level module/class import.
                cls = __import__(class_name, globals(), {})
            if cls:
                return cls
        except (ImportError, AttributeError):
            # Swallow lookup failures; caller gets None.
            pass
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _register_handler(event, fun, external=False):
    """Register `fun` as a handler for `event` and return it."""
    registry = (core.EXTERNAL_HANDLER_REGISTRY if external
                else core.HANDLER_REGISTRY)
    if not isinstance(event, basestring):
        # If not basestring, it is a BaseEvent subclass.
        # This occurs when class methods are registered as handlers
        event = core.parse_event_to_name(event)
    # Append to the event's handler list, creating it on first use.
    registry.setdefault(event, []).append(fun)
    return fun
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handler(param):
    """Decorator that associates a handler to an event class.

    This decorator works for both methods and functions, since it only
    registers the callable object and returns it without evaluating it.

    When `param` is a string it must be in dotted notation carrying two
    pieces of information: the django app name and the event class name.
    When used on class methods, the class *must* inherit from
    `BaseEvent`; the method is queued for later binding.
    """
    if isinstance(param, basestring):
        # String form: defer registration until the function is known.
        return lambda f: _register_handler(param, f)
    else:
        # Method form: queue for binding to its BaseEvent subclass later.
        core.HANDLER_METHOD_REGISTRY.append(param)
        return param
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log(name, data=None):
    """Entry point for the event lib that starts the logging process.

    `name` identifies the event class as "<app>.<EventClass>" (e.g. an
    "ActionLog" class declared in the app's 'events' module); an
    `EventNotFoundError` is raised if the class cannot be imported.

    `data` *must* be a dictionary (otherwise a `TypeError` is raised):
    string keys, values serializable by `json.dumps`.  Unsupported
    objects need a registered serializer -- see
    RFC-00003-serialize-registry.
    """
    data = data or {}
    data.update(core.get_default_values(data))
    # InvalidEventNameError, EventNotFoundError
    event_cls = core.find_event(name)
    event = event_cls(name, data)
    event.validate()  # ValidationError
    data = core.filter_data_values(data)
    data = ejson.dumps(data)  # TypeError
    # We don't use celery when developing
    if conf.getsetting('DEBUG'):
        core.process(name, data)
    else:
        tasks.process_task.delay(name, data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_keys(self, *keys):
    """Validation helper ensuring the given keys are present in the
    event's data; raises ValidationError naming any missing keys.

    Call this from `validate()` (run locally) rather than `clean()` so
    problems surface early and are easier to debug.  Returns True.
    """
    missing = set(keys).difference(self.data.keys())
    if missing:
        raise ValidationError(
            'One of the following keys are missing from the '
            'event\'s data: {}'.format(', '.join(missing))
        )
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addProject(gh_link):
    '''Adds a github project to the data folder, unzips it, and deletes
    the zip file.  Returns the project name and the path to the project
    folder.'''
    name = os.path.basename(gh_link)
    zipurl = gh_link + "/archive/master.zip"
    outzip = os.path.join('temp_data', name + '.zip')
    if not os.path.exists('temp_data'):
        os.makedirs('temp_data')
    downloadFile(zipurl, outzip)
    outpath = os.path.join('temp_data', name)
    # Context manager guarantees the archive is closed even if extraction
    # fails; also avoids shadowing the builtin `zip`.
    with zipfile.ZipFile(outzip, mode='r') as archive:
        archive.extractall(outpath)
    os.remove(outzip)
    return name, outpath
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cleanDir(self):
    '''Remove existing json datafiles in the target directory.'''
    if not os.path.isdir(self.outdir):
        return
    # Only these known output files are removed; anything else is kept.
    for name in ('tout.json', 'nout.json', 'hout.json'):
        path = os.path.join(self.outdir, name)
        if os.path.isfile(path):
            os.remove(path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def makeHTML(self, mustachepath, htmlpath):
    '''Write an html file by applying this ideogram's attributes to a
    mustache template.'''
    subs = {
        'has_title': bool(self.title),
        'font_size': self.font_size,
        'font_family': self.font_family,
        'colorscheme': self.colorscheme,
        'title_color': self.title_color,
        'bgcolor': self.bgcolor,
    }
    # The title key is only present when a title is set.
    if self.title:
        subs['title'] = self.title
    with open(mustachepath, 'r') as infile:
        rendered = pystache.render(infile.read(), subs)
    with open(htmlpath, 'w+') as outfile:
        outfile.write(rendered)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def age(self, **kwargs):
    """Age this particle by a single time amount.

    Exactly one of the keyword parameters should be supplied:
    ``days`` (default unit), ``hours``, ``minutes`` or ``seconds``.
    When several are given, the first in that order wins.

    :raises KeyError: if none of the supported units is given.
    """
    # Each unit with its conversion to days (the internal unit).
    converters = (
        ('days', lambda v: v),
        ('hours', lambda v: v / 24.),
        ('minutes', lambda v: v / 24. / 60.),
        ('seconds', lambda v: v / 24. / 60. / 60.),
    )
    for unit, to_days in converters:
        value = kwargs.get(unit, None)
        if value is not None:
            self._age += to_days(value)
            return
    raise KeyError("Could not age particle, please specify 'days', 'hours', 'minutes', or 'seconds' parameter")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalized_indexes(self, model_timesteps):
    """Return indexes into ``self.locations`` aligned with the model timesteps.

    Normalizes the particle's locations to the timestep of the model that
    was run, so output only contains the chosen model timesteps.  Usually
    ``model_timesteps`` and the particle's locations have the same length
    (unless the particle hits shore); when the particle carries extra
    internal timesteps, only the locations matching the model timesteps
    are kept.

    :param model_timesteps: sequence of timesteps the model was run with.
    :return: list of indexes into ``self.locations``.
    :raises ValueError: if the locations cannot be normalized, or if the
        particle has fewer locations than model timesteps.
    """
    # Clean up locations: when consecutive locations share the same time,
    # keep only the later (higher-index) one.
    clean_locs = []
    for i, loc in enumerate(self.locations):
        try:
            if loc.time == self.locations[i + 1].time:
                continue
            else:
                clean_locs.append(loc)
        except IndexError:
            # Last location has no successor; always keep it.
            # BUG FIX: was ``except StandardError`` -- a Python 2-only
            # name (NameError on Python 3) that was also broader than
            # the IndexError actually expected here.
            clean_locs.append(loc)

    if len(clean_locs) == len(model_timesteps):
        return [ind for ind, loc in enumerate(self.locations) if loc in clean_locs]
    elif len(model_timesteps) < len(clean_locs):
        # We have at least one internal timestep for this particle;
        # pull out the matching location indexes.
        indexes = [ind for ind, loc in enumerate(self.locations) if loc in clean_locs]
        if len(model_timesteps) == len(indexes):
            return indexes
        raise ValueError("Can't normalize")
    else:
        # The particle stopped before forcing for all of the model timesteps.
        raise ValueError("Particle has less locations than model timesteps")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_satisfied_by(self, candidate: Any, **kwds: Any) -> bool:
    """Return True if `candidate` satisfies the specification."""
    name = self._candidate_name
    ctx = self._context
    if ctx:
        # The candidate is injected under its reserved name, so callers
        # must not also pass it as a keyword.
        if name in kwds:
            raise ValueError(f"Candidate name '{name}' must "
                             "not be given as keyword.")
        ctx.update(kwds)
        ctx[name] = candidate
    # Compile the stored AST expression lazily and cache the code object.
    try:
        code = self._code
    except AttributeError:
        code = compile(self._ast_expr, '<str>', mode='eval')
        self._code = code
    return eval(code, ctx)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_edge(self, fr, to):
    """Add a directed edge to the graph.

    Multiple edges between the same vertices are quietly ignored (the
    adjacency containers are sets); missing vertices are created on
    demand.  N-partite graphs can be used to permit multiple edges by
    partitioning the graph into vertices and edges.

    :param fr: The name of the origin vertex.
    :param to: The name of the destination vertex.
    """
    origin = self.add_vertex(fr)
    destination = self.add_vertex(to)
    self.adjacency[origin].children.add(destination)
    self.adjacency[destination].parents.add(origin)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clone(self, source_id, backup_id, size, volume_id=None, source_host=None):
    """Create a new volume and clone a backup's contents into it.

    :param source_id: id of the source volume the backup came from.
    :param backup_id: id of the backup to restore from.
    :param size: size of the new volume.
    :param volume_id: id for the new volume; a random uuid when falsy.
    :param source_host: optional host holding the source volume.
    :return: the ``http_put`` response.
    """
    if not volume_id:
        volume_id = str(uuid.uuid4())
    payload = self.unused({
        'source_host': source_host,
        'source_volume_id': source_id,
        'backup_id': backup_id,
        'size': size
    })
    return self.http_put('/volumes/%s' % volume_id, params=payload)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self, volume_id, backup_id=None, timestamp=None):
    """Create a backup of a volume.

    :param volume_id: id of the volume to back up.
    :param backup_id: id for the new backup; a random uuid when falsy.
    :param timestamp: backup timestamp; current epoch seconds when falsy.
    :return: the ``http_put`` response.
    """
    if not backup_id:
        backup_id = str(uuid.uuid4())
    if not timestamp:
        timestamp = int(time())
    target = '/volumes/%s/backups/%s' % (volume_id, backup_id)
    return self.http_put(target, params={'timestamp': timestamp})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def boards(hwpack='arduino'):
    """Read board definitions from boards.txt.

    Boards missing a ``build`` or ``name`` property are dropped.

    :param hwpack: hardware package name, e.g. 'arduino'.
    :return: bunch mapping board id -> board properties.
    """
    bunch = read_properties(boards_txt(hwpack))
    # Iterate over a snapshot so entries can be deleted while looping.
    for board_id, props in list(bunch.items()):
        if 'build' not in props.keys() or 'name' not in props.keys():
            log.debug('invalid board found: %s', board_id)
            del bunch[board_id]
    return bunch
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def board_names(hwpack='arduino'):
    """Return the installed board ids, sorted alphabetically."""
    return sorted(boards(hwpack).keys())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_boards(hwpack='arduino', verbose=False):
    """Print boards from boards.txt.

    :param verbose: pretty-print full board properties instead of just names.
    """
    if not verbose:
        print('\n'.join(board_names(hwpack)))
    else:
        pp(boards(hwpack))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_lib_dir(root):
"""search for lib dir under root.""" |
# Normalize to a path object so the path.py helpers below are available.
root = path(root)
log.debug('files in dir: %s', root)
for x in root.walkfiles():
    log.debug(' %s', x)
# only 1 dir in root? (example: github)
if not len(root.files()) and len(root.dirs()) == 1:
    log.debug('go inside root')
    root = root.dirs()[0]
# Case 1: keywords.txt directly in root -> root itself is the lib dir.
if len(root.files('keywords.txt')):
    root = rename_root(root)
    return root, root
# Case 2: keywords.txt somewhere below root -> its parent is the lib dir.
keywords = list(root.walkfiles('keywords.txt'))
if len(keywords):
    if len(keywords) > 1:
        log.warning('more keywords.txt found. Installing only one. %s', keywords)
    lib_dir = keywords[0].parent
    lib_dir = fix_libdir(lib_dir)
    return root, lib_dir
# Case 3: no keywords.txt anywhere; guess from the header/source layout.
# header_only: no .cpp sources outside the example dirs.
header_only = len(list(noexample(root.walkfiles('*.cpp')))) == 0
log.debug('header_only: %s', header_only)
lib_dir = None
headers = list(noexample(root.walkfiles('*.h')))
for h in headers:
    cpp = h.stripext() + '.cpp'
    # A header whose directory name matches the header's own name marks
    # the lib dir; the assert guards against multiple such matches.
    if (header_only or cpp.exists()) and h.parent.name.lower() == h.namebase.lower():
        assert not lib_dir
        lib_dir = h.parent
        log.debug('found lib: %s', lib_dir)
if not lib_dir:
    # Fallback: exactly one header below (but not directly in) root.
    if len(headers) == 1 and len(list(root.files('*.h'))) == 0:
        log.debug('only 1 header, not in root')
        lib_dir = headers[0].parent
        lib_dir = rename_root(lib_dir)
if not lib_dir:
    # xxx.cpp and xxx.h in root? -> rename root dir
    root = rename_root(root)
    return root, root
# for h in root.files('*.h'):
#     cpp = h.stripext() + '.cpp'
#     if header_only or cpp.exists():
#         assert not lib_dir
#         root.rename(root.parent / h.namebase)
#         root = lib_dir = root.parent / h.namebase
assert lib_dir
return root, lib_dir
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move_examples(root, lib_dir):
    """Move example sketches found outside ``lib_dir`` into its examples dir.

    Only acts when stray sketches exist and the library itself has none.
    """
    sketches_everywhere = files_multi_pattern(root, INO_PATTERNS)
    sketches_in_lib = files_multi_pattern(lib_dir, INO_PATTERNS)
    strays = sketches_everywhere.difference(sketches_in_lib)
    if strays and not sketches_in_lib:
        log.debug(
            'examples found outside lib dir, moving them: %s', strays)
        examples = lib_dir / EXAMPLES
        examples.makedirs()
        # Each stray sketch gets its own subdirectory named after itself.
        for sketch in strays:
            target = examples / sketch.namebase
            target.makedirs()
            sketch.move(target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fix_examples_dir(lib_dir):
    """Ensure the library's examples live in a dir named ``examples``.

    Search order: a dir already matching ``examples`` (nothing to do),
    then a dir whose name contains 'example', then any dir holding
    sketch files; the first match is handed to ``_fix_dir``.
    """
    for child in lib_dir.dirs():
        # Already has a (case-insensitively) matching examples dir.
        if child.name.lower() == EXAMPLES:
            return
    # NOTE: the original repeated the loop above and called _fix_dir on an
    # exact match -- unreachable, since the first loop already returned in
    # that case; the dead loop was removed.
    for child in lib_dir.dirs():
        if 'example' in child.name.lower():
            _fix_dir(child)
            return
    for child in lib_dir.dirs():
        if len(files_multi_pattern(child, INO_PATTERNS)):
            _fix_dir(child)
            return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def install_lib(url, replace_existing=False, fix_wprogram=True):
"""install library from web or local files system. :param url: web address or file path :param replace_existing: bool :rtype: None """ |
# Work inside a fresh temporary directory.  NOTE(review): tmpdir(tmpdir())
# appears to create a unique dir nested under another -- confirm intent.
d = tmpdir(tmpdir())
f = download(url)
Archive(f).extractall(d)
clean_dir(d)
# Locate the actual library directory inside the extracted tree.
d, src_dlib = find_lib_dir(d)
move_examples(d, src_dlib)
fix_examples_dir(src_dlib)
if fix_wprogram:
    # Optionally fix WProgram references in the library files.
    fix_wprogram_in_files(src_dlib)
targ_dlib = libraries_dir() / src_dlib.name
if targ_dlib.exists():
    log.debug('library already exists: %s', targ_dlib)
    if replace_existing:
        log.debug('remove %s', targ_dlib)
        targ_dlib.rmtree()
    else:
        raise ConfduinoError('library already exists:' + targ_dlib)
log.debug('move %s -> %s', src_dlib, targ_dlib)
src_dlib.move(targ_dlib)
# Propagate the libraries dir's permission bits to everything installed.
libraries_dir().copymode(targ_dlib)
for x in targ_dlib.walk():
    libraries_dir().copymode(x)
return targ_dlib.name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _init_supervisor_rpc(self, rpc_or_port):
'''Initialize supervisor RPC.
Allow passing in an RPC connection, or a port number for
making one.
'''
if isinstance(rpc_or_port, int):
if self.username:
leader = 'http://{self.username}:{self.password}@'
else:
leader = 'http://'
tmpl = leader + '{self.name}:{port}'
url = tmpl.format(self=self, port=rpc_or_port)
self.rpc = xmlrpc_client.ServerProxy(
url, transport=TimeoutTransport())
else:
self.rpc = rpc_or_port
self.supervisor = self.rpc.supervisor |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_redis(redis_spec):
""" Return a StrictRedis instance or None based on redis_spec. redis_spec may be None, a Redis URL, or a StrictRedis instance """ |
if not redis_spec:
return
if isinstance(redis_spec, six.string_types):
return redis.StrictRedis.from_url(redis_spec)
# assume any other value is a valid instance
return redis_spec |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_base():
""" if 'deploy' resolves in this environment, use the hostname for which that name resolves. Override with 'VELOCIRAPTOR_URL' """ |
try:
name, _aliaslist, _addresslist = socket.gethostbyname_ex('deploy')
except socket.gaierror:
name = 'deploy'
fallback = 'https://{name}/'.format(name=name)
return os.environ.get('VELOCIRAPTOR_URL', fallback) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_all(cls, vr, params=None):
    """Query the API and build an instance for every object found.

    :param vr: the API client used for the query.
    :param params: optional query parameters.
    :return: list of ``cls`` instances, one per returned document.
    """
    documents = vr.query(cls.base, params)
    return [cls(vr, doc) for doc in documents]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dispatch(self, **changes):
    """Patch the swarm with ``changes`` and then trigger the swarm.

    :return: the parsed json response body, or None when the response
        body is not json.
    """
    self.patch(**changes)
    trigger_url = self._vr._build_url(self.resource_uri, 'swarm/')
    response = self._vr.session.post(trigger_url)
    response.raise_for_status()
    try:
        return response.json()
    except ValueError:
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assemble(self):
    """Assemble a build, creating it first if necessary."""
    if not self.created:
        self.create()
    # Trigger the build on the server.
    build_url = self._vr._build_url(self.resource_uri, 'build/')
    response = self._vr.session.post(build_url)
    response.raise_for_status()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _get_token(self):
'''
requests an communication token from Grooveshark
'''
# Ask the API for a fresh communication token; request() returns
# (header, result), so [1] is the token itself.
self.session.token = self.request(
    'getCommunicationToken',
    {'secretKey': self.session.secret},
    {'uuid': self.session.user,
     'session': self.session.session,
     'clientRevision': grooveshark.const.CLIENTS['htmlshark']['version'],
     'country': self.session.country,
     'privacy': 0,
     'client': 'htmlshark'})[1]
# Remember when the token was fetched so _request_token can expire it.
self.session.time = time.time()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _request_token(self, method, client):
'''
generates a request token
'''
# Refresh the communication token when it is older than the timeout.
if time.time() - self.session.time > grooveshark.const.TOKEN_TIMEOUT:
    self._get_token()
# Token = random salt + SHA1("method:session_token:client_token:salt").
random_value = self._random_hex()
return random_value + hashlib.sha1((method + ':' + self.session.token + ':' + grooveshark.const.CLIENTS[client]['token'] + ':' + random_value).encode('utf-8')).hexdigest()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def request(self, method, parameters, header):
'''
Grooveshark API request
'''
# Payload layout expected by the Grooveshark 'more.php' endpoint.
data = json.dumps({
    'parameters': parameters,
    'method': method,
    'header': header})
request = urllib.Request(
    'https://grooveshark.com/more.php?%s' % (method),
    data=data.encode('utf-8'), headers=self._json_request_header())
with contextlib.closing(self.urlopen(request)) as response:
    result = json.loads(response.read().decode('utf-8'))
    # Successful calls carry 'result'; API errors carry 'fault'.
    if 'result' in result:
        return response.info(), result['result']
    elif 'fault' in result:
        raise RequestError(result['fault']['message'],
                           result['fault']['code'])
    else:
        raise UnknownError(result)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def header(self, method, client='htmlshark'):
'''
generates Grooveshark API Json header
'''
# Standard per-call request header; the token is generated fresh for
# each (method, client) pair by _request_token.
return {'token': self._request_token(method, client),
        'privacy': 0,
        'uuid': self.session.user,
        'clientRevision': grooveshark.const.CLIENTS[client]['version'],
        'session': self.session.session,
        'client': client,
        'country': self.session.country}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def radio(self, radio):
'''
Get songs belong to a specific genre.
:param radio: genre to listen to
:rtype: a :class:`Radio` object
Genres:
This list is incomplete because there isn't an English translation for
some genres.
Please look at the sources for all possible Tags.
+-------------------------------------+-------------------------------+
| Constant | Genre |
+=====================================+===============================+
| :const:`Radio.GENRE_RNB` | R and B |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_JAZZ` | Jazz |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_ROCK` | Rock |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_CLASSICAL` | Classical |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_DUBSTEP` | Dubstep |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_BLUES` | Blues |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_FOLK` | Folk |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_ELECTRONICA` | Electronica |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_CHRISTMAS` | Christmas |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_OLDIES` | Oldies |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_COUNTRY` | Country |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_EXPERIMENTAL` | Experimental |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_POP` | Pop |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_INDIE` | Indie |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_BLUEGRASS` | Bluegrass |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_RAP` | Rap |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_AMBIENT` | Ambient |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_TRANCE` | Trance |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_REGGAE` | Reggae |
+-------------------------------------+-------------------------------+
| :const:`Radio.GENRE_METAL` | Metal |
+-------------------------------------+-------------------------------+
'''
# Fetch the artist pool for this genre tag; [1] is the result payload.
artists = self.connection.request(
    'getArtistsForTagRadio',
    {'tagID': radio},
    self.connection.header('getArtistsForTagRadio', 'jsqueue'))[1]
return Radio(artists, radio, self.connection)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def search(self, query, type=SONGS):
'''
Search for songs, artists and albums.
:param query: search string
:param type: type to search for
:rtype: a generator generates :class:`Song`, :class:`Artist` and :class:`Album` objects
Search Types:
+---------------------------------+---------------------------------+
| Constant | Meaning |
+=================================+=================================+
| :const:`Client.SONGS` | Search for songs |
+---------------------------------+---------------------------------+
| :const:`Client.ARTISTS` | Search for artists |
+---------------------------------+---------------------------------+
| :const:`Client.ALBUMS` | Search for albums |
+---------------------------------+---------------------------------+
| :const:`Client.PLAYLISTS` | Search for playlists |
+---------------------------------+---------------------------------+
'''
result = self.connection.request(
    'getResultsFromSearch',
    {'query': query, 'type': type, 'guts': 0, 'ppOverride': False},
    self.connection.header('getResultsFromSearch'))[1]['result']
# Wrap the raw result dicts in the matching model class, lazily
# (generators), keyed on the requested search type.
if type == self.SONGS:
    return (Song.from_response(song, self.connection)
            for song in result)
elif type == self.ARTISTS:
    return (Artist(artist['ArtistID'], artist['Name'], self.connection)
            for artist in result)
elif type == self.ALBUMS:
    return (self._parse_album(album) for album in result)
elif type == self.PLAYLISTS:
    return (self._parse_playlist(playlist) for playlist in result)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def popular(self, period=DAILY):
'''
Get popular songs.
:param period: time period
:rtype: a generator generates :class:`Song` objects
Time periods:
+---------------------------------+-----------------------------------+
| Constant | Meaning |
+=================================+===================================+
| :const:`Client.DAILY` | Popular songs of this day |
+---------------------------------+-----------------------------------+
| :const:`Client.MONTHLY` | Popular songs of this month |
+---------------------------------+-----------------------------------+
'''
# Chart request for the given time period; [1]['Songs'] is the song list.
songs = self.connection.request(
    'popularGetSongs',
    {'type': period},
    self.connection.header('popularGetSongs'))[1]['Songs']
return (Song.from_response(song, self.connection) for song in songs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def playlist(self, playlist_id):
'''
Get a playlist from it's ID
:param playlist_id: ID of the playlist
:rtype: a :class:`Playlist` object
'''
# Look the playlist up by id and convert it to a Playlist object.
playlist = self.connection.request(
    'getPlaylistByID',
    {'playlistID': playlist_id},
    self.connection.header('getPlaylistByID'))[1]
return self._parse_playlist(playlist)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collection(self, user_id):
    """Get the song collection of a user.

    :param user_id: ID of a user.
    :rtype: list of :class:`Song`
    """
    # TODO: further evaluation of the page param; the limit is unknown.
    method = 'userGetSongsInLibrary'
    payload = {'userID': user_id, 'page': 0}
    response = self.connection.request(
        method, payload, self.connection.header(method))
    return [Song.from_response(entry, self.connection)
            for entry in response[1]['Songs']]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hwpack_names():
    """Return installed hardware package names, 'arduino' first if present."""
    names = [entry.name for entry in hwpack_dir().listdir()]
    # 'tools' is not a hardware package.
    names = [n for n in names if n != 'tools']
    has_arduino = 'arduino' in names
    others = sorted(n for n in names if n != 'arduino')
    # 'arduino' always sorts to the front when installed.
    return (['arduino'] + others) if has_arduino else others
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_parser(self, html_parser, current_url):
""" Create the tinycss stylesheet. :param html_parser: The HTML parser. :type html_parser: hatemile.util.html.htmldomparser.HTMLDOMParser :param current_url: The current URL of page. :type current_url: str """ |
# Concatenate all page CSS: inline <style> contents plus every external
# stylesheet fetched over HTTP (resolved against the current URL).
css_code = ''
elements = html_parser.find(
    'style,link[rel="stylesheet"]'
).list_results()
for element in elements:
    if element.get_tag_name() == 'STYLE':
        css_code = css_code + element.get_text_content()
    else:
        css_code = css_code + requests.get(
            urljoin(current_url, element.get_attribute('href'))
        ).text
self.stylesheet = tinycss.make_parser().parse_stylesheet(css_code)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def readline(self, prompt=''):
u'''Try to act like GNU readline.'''
# handle startup_hook (runs once, on the first prompt only)
if self.first_prompt:
    self.first_prompt = False
    if self.startup_hook:
        try:
            self.startup_hook()
        except:
            print u'startup hook failed'
            traceback.print_exc()
c = self.console
self.l_buffer.reset_line()
self.prompt = prompt
self._print_prompt()
# The pre-input hook runs once per call and is then cleared.
if self.pre_input_hook:
    try:
        self.pre_input_hook()
    except:
        print u'pre_input_hook failed'
        traceback.print_exc()
    self.pre_input_hook = None
log(u"in readline: %s"%self.paste_line_buffer)
# A pending multi-line paste is consumed one line per readline() call;
# otherwise the line is read interactively from the keyboard.
if len(self.paste_line_buffer)>0:
    self.l_buffer=lineobj.ReadlineTextBuffer(self.paste_line_buffer[0])
    self._update_line()
    self.paste_line_buffer=self.paste_line_buffer[1:]
    c.write(u'\r\n')
else:
    self._readline_from_keyboard()
    c.write(u'\r\n')
self.add_history(self.l_buffer.copy())
log(u'returning(%s)' % self.l_buffer.get_line_text())
# GNU readline returns the line including the trailing newline.
return self.l_buffer.get_line_text() + '\n'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def history_search_backward(self, e): # ()
u'''Search backward through the history for the string of characters
between the start of the current line and the point. This is a
non-incremental search. By default, this command is unbound.'''
# Delegate to the history object, which returns the matching line buffer.
self.l_buffer=self._history.history_search_backward(self.l_buffer)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def quoted_insert(self, e): # (C-q or C-v)
u'''Add the next character typed to the line verbatim. This is how to
insert key sequences like C-q, for example.'''
# Read the next keypress raw (bypassing key bindings) and insert its
# character verbatim.
e = self.console.getkeypress()
self.insert_text(e.char)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def ipython_paste(self,e):
u'''Paste windows clipboard. If enable_ipython_paste_list_of_lists is
True then try to convert tabseparated data to repr of list of lists or
repr of array'''
if self.enable_win32_clipboard:
    txt=clipboard.get_clipboard_text_and_convert(
        self.enable_ipython_paste_list_of_lists)
    if self.enable_ipython_paste_for_paths:
        # Short single-line text without tabs is treated as a path:
        # backslashes become slashes and spaces are escaped.
        if len(txt)<300 and (u"\t" not in txt) and (u"\n" not in txt):
            txt=txt.replace(u"\\", u"/").replace(u" ", ur"\ ")
    self.insert_text(txt)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(cls, args=None):
    """Parse command-line options and report a process exit status.

    :param args: argument list; defaults to ``sys.argv[1:]``.
    :return: 0 on success, 1 on a usage error, or the ``returnCode``
        of a raised :class:`CLIError`.
    """
    if args is None:
        args = sys.argv[1:]
    try:
        options = cls()
        options.parseOptions(args)
    except usage.UsageError as e:
        print(options.getSynopsis())
        print(options.getUsage())
        print(str(e))
        return 1
    except CLIError as ce:
        print(str(ce))
        return ce.returnCode
    return 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _index_files(path):
"""Search zip file for SID PHA files""" |
with zipfile.ZipFile(path) as zf:
names = sorted(zf.namelist())
names = [nn for nn in names if nn.endswith(".tif")]
names = [nn for nn in names if nn.startswith("SID PHA")]
phasefiles = []
for name in names:
with zf.open(name) as pt:
fd = io.BytesIO(pt.read())
if SingleTifPhasics.verify(fd):
phasefiles.append(name)
return phasefiles |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def files(self):
    """List of Phasics tif file names in the input zip file (lazily indexed)."""
    cached = self._files
    if cached is None:
        # Index the archive only on first access.
        cached = SeriesZipTifPhasics._index_files(self.path)
        self._files = cached
    return cached
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify(path):
    """Verify that `path` is a zip file containing Phasics TIFF files."""
    try:
        archive = zipfile.ZipFile(path)
    except (zipfile.BadZipfile, IsADirectoryError):
        # Not a readable zip archive at all.
        return False
    candidates = [name for name in sorted(archive.namelist())
                  if name.endswith(".tif") and name.startswith("SID PHA")]
    found = False
    for name in candidates:
        with archive.open(name) as member:
            data = io.BytesIO(member.read())
        # One verified phase tif is enough.
        if SingleTifPhasics.verify(data):
            found = True
            break
    archive.close()
    return found
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chunks(stream, size=None):
    """Yield chunks read from `stream`.

    :Parameters:
        stream : file-like object
            The stream to fetch the chunks from.  Note that the stream
            will not be repositioned in any way.
        size : int | 'lines'; default: null
            An integer maximum chunk size, the string ``"lines"`` to
            yield the stream line by line, or None for the package
            default MAXBUF size (usually 8 KiB).
    """
    if size == 'lines':
        # Line mode: defer to the stream's own line iteration.
        for line in stream:
            yield line
        return
    effective = MAXBUF if size is None else size
    while True:
        block = stream.read(effective)
        if not block:
            break
        yield block
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_logfile():
    # type: () -> None
    """Write a DEBUG log file COMMAND-YYYYMMDD-HHMMSS.ffffff.log."""
    command = os.path.basename(os.path.realpath(os.path.abspath(sys.argv[0])))
    stamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S.%f')
    logname = '{}-{}.log'.format(command, stamp)
    contents = _LOGFILE_STREAM.getvalue()
    if not six.PY3:
        # Python 2: the in-memory stream holds bytes; decode permissively.
        contents = contents.decode(errors='replace')  # type: ignore
    with open(logname, 'w') as logfile:
        logfile.write(contents)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def excepthook(type, value, traceback):
# pylint: disable=unused-argument """Log exceptions instead of printing a traceback to stderr.""" |
# Re-raise and immediately catch so logging.exception sees an active
# exception and records the full traceback.  Note: 'type' here is the
# parameter (mirroring sys.excepthook's signature), shadowing the builtin.
try:
    six.reraise(type, value, traceback)
except type:
    _LOGGER.exception(str(value))
if isinstance(value, KeyboardInterrupt):
    message = "Cancelling at the user's request."
else:
    message = handle_unexpected_exception(value)
print(message, file=sys.stderr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_unexpected_exception(exc):
    # type: (BaseException) -> str
    """Return an error message and write a log file if logging was not enabled.

    Args:
        exc: The unexpected exception.

    Returns:
        A message to display to the user concerning the unexpected
        exception.
    """
    try:
        write_logfile()
        addendum = 'Please see the log file for more information.'
    except IOError:
        addendum = 'Unable to write log file.'
    try:
        message = str(exc)
        if message:
            return '{}\n{}'.format(message, addendum)
        return addendum
    except Exception:  # pylint: disable=broad-except
        return str(exc)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def enable_logging(log_level):
# type: (typing.Union[None, int]) -> None """Configure the root logger and a logfile handler. Args: log_level: The logging level to set the logger handler. """ |
root_logger = logging.getLogger()
# The root logger collects everything; handlers filter per destination.
root_logger.setLevel(logging.DEBUG)
# Always capture a full DEBUG stream in memory for write_logfile().
logfile_handler = logging.StreamHandler(_LOGFILE_STREAM)
logfile_handler.setLevel(logging.DEBUG)
logfile_handler.setFormatter(logging.Formatter(
    '%(levelname)s [%(asctime)s][%(name)s] %(message)s'))
root_logger.addHandler(logfile_handler)
# Install the SIGTERM handler only when no other handler was registered.
if signal.getsignal(signal.SIGTERM) == signal.SIG_DFL:
    signal.signal(signal.SIGTERM, _logfile_sigterm_handler)
if log_level:
    # Console output only when a level was explicitly requested.
    handler = logging.StreamHandler()
    handler.setFormatter(_LogColorFormatter())
    root_logger.setLevel(log_level)
    root_logger.addHandler(handler)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_log_level(args):
# type: (typing.Dict[str, typing.Any]) -> int """Get the log level from the CLI arguments. Removes logging arguments from sys.argv. Args: args: The parsed docopt arguments to be used to determine the logging level. Returns: The correct log level based on the three CLI arguments given. Raises: ValueError: Raised if the given log level is not in the acceptable list of values. """ |
# Only logging flags appearing *before* the subcommand belong to this
# wrapper; 'index' marks the subcommand's position inside sys.argv.
index = -1
log_level = None
if '<command>' in args and args['<command>']:
    index = sys.argv.index(args['<command>'])
if args.get('--debug'):
    log_level = 'DEBUG'
    # Strip the consumed flag from sys.argv so the subcommand never sees it.
    if '--debug' in sys.argv and sys.argv.index('--debug') < index:
        sys.argv.remove('--debug')
    elif '-d' in sys.argv and sys.argv.index('-d') < index:
        sys.argv.remove('-d')
elif args.get('--verbose'):
    log_level = 'INFO'
    if '--verbose' in sys.argv and sys.argv.index('--verbose') < index:
        sys.argv.remove('--verbose')
    elif '-v' in sys.argv and sys.argv.index('-v') < index:
        sys.argv.remove('-v')
elif args.get('--log-level'):
    # --log-level takes a value: remove both the flag and its argument.
    log_level = args['--log-level']
    sys.argv.remove('--log-level')
    sys.argv.remove(log_level)
if log_level not in (None, 'DEBUG', 'INFO', 'WARN', 'ERROR'):
    raise exceptions.InvalidLogLevelError(log_level)
return getattr(logging, log_level) if log_level else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _logfile_sigterm_handler(*_):
    """Handle exit signals and write out a log file.

    Raises:
        SystemExit: Contains the signal as the return code.
    """
    logging.error('Received SIGTERM.')
    write_logfile()
    print('Received signal. Please see the log file for more information.',
          file=sys.stderr)
    # Exit with the numeric signal value. The previous code passed the
    # `signal` *module* object to sys.exit, which printed the module repr
    # and exited with status 1 instead of the promised signal code.
    sys.exit(signal.SIGTERM)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format(self, record):
    # type: (logging.LogRecord) -> str
    """Format the log record with timestamps and level based colors.

    Args:
        record: The log record to format.

    Returns:
        The formatted log record.
    """
    level = record.levelno
    if level >= logging.ERROR:
        level_color = colorama.Fore.RED
    elif level >= logging.WARNING:
        level_color = colorama.Fore.YELLOW
    elif level >= logging.INFO:
        level_color = colorama.Fore.RESET
    else:
        level_color = colorama.Fore.CYAN
    template = (
        '{}{}%(levelname)s{} [%(asctime)s][%(name)s]{} %(message)s')
    if sys.stdout.isatty():
        # Only emit ANSI escape codes when attached to a real terminal.
        fields = (
            colorama.Style.BRIGHT,
            level_color,
            colorama.Fore.RESET,
            colorama.Style.RESET_ALL,
        )
    else:
        fields = ('', '', '', '')
    self._fmt = template.format(*fields)
    if six.PY3:
        self._style._fmt = self._fmt  # pylint: disable=protected-access
    return super(_LogColorFormatter, self).format(record)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ask_question(self, question_text, question=None):
    """Ask Watson a question via the Question and Answer API

    :param question_text: question to ask Watson
    :type question_text: str
    :param question: if question_text is not provided, a Question object
        representing the question to ask Watson
    :type question: WatsonQuestion
    :return: Answer
    """
    if question is not None:
        q = question.to_dict()
    else:
        q = WatsonQuestion(question_text).to_dict()
    r = requests.post(self.url + '/question', json={'question': q}, headers={
        'Accept': 'application/json',
        # HTTP header values must be strings; requests rejects non-str
        # header values (the previous int 30 raised an error at send time).
        'X-SyncTimeout': '30'
    }, auth=(self.username, self.password))
    try:
        response_json = r.json()
    except ValueError:
        raise Exception('Failed to parse response JSON')
    return WatsonAnswer(response_json)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_queue(queue=None, **kwargs):
    """Loops and waits on queue calling queue's `next_task` method.

    If an exception occurs, log the error, log the exception, and break.
    A ``None`` item is the sentinel that tells this worker to exit cleanly.
    """
    while True:
        item = queue.get()
        if item is None:
            # Sentinel received: acknowledge it and stop the worker loop.
            queue.task_done()
            logger.info(f"{queue}: exiting process queue.")
            break
        # `filename` was previously computed but never used -- the log
        # messages emitted the literal "(unknown)" instead of the item.
        filename = os.path.basename(item)
        try:
            queue.next_task(item, **kwargs)
        except Exception as e:
            queue.task_done()
            # Logger.warn is deprecated in favor of Logger.warning.
            logger.warning(f"{queue}: item={filename}. {e}\n")
            logger.exception(e)
            sys.stdout.write(
                style.ERROR(
                    f"{queue}. item={filename}. {e}. Exception has been logged.\n"
                )
            )
            sys.stdout.flush()
            break
        else:
            logger.info(f"{queue}: Successfully processed {filename}.\n")
            queue.task_done()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def memoize(func):
    """ Classic memoize decorator for non-class methods """
    results = {}

    @functools.wraps(func)
    def memoized(*args):
        # Positional args are flattened into a single string cache key.
        key = "__".join(str(a) for a in args)
        try:
            return results[key]
        except KeyError:
            results[key] = func(*args)
            return results[key]
    return memoized
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cached_method(func):
    """ Memoize for class methods """
    @functools.wraps(func)
    def memoized(self, *args):
        # Lazily attach a per-instance cache dict on first use.
        try:
            cache = self._cache
        except AttributeError:
            cache = self._cache = {}
        key = _argstring((func.__name__,) + args)
        try:
            return cache[key]
        except KeyError:
            cache[key] = func(self, *args)
            return cache[key]
    return memoized
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guard(func):
    """ Prevents the decorated function from parallel execution.

    Internally, this decorator creates a Lock object and transparently
    obtains/releases it when calling the function.
    """
    lock = threading.Lock()

    @functools.wraps(func)
    def serialized(*args, **kwargs):
        # `with` acquires the lock and guarantees release on any exit path,
        # exactly like the manual acquire/try/finally/release.
        with lock:
            return func(*args, **kwargs)
    return serialized
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def threadpool(num_workers=None):
    """Apply stutils.mapreduce.map to the given function"""
    def decorator(func):
        # The decorated function receives an iterable and fans the calls
        # out across `num_workers` workers via mapreduce.map.
        @functools.wraps(func)
        def mapped(data):
            return mapreduce.map(func, data, num_workers)
        return mapped
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def invalidate_all(self):
    """ Remove all files caching this function """
    # Cache files are named "<function name>.<something>".
    prefix = self.func.__name__ + "."
    for entry in os.listdir(self.cache_path):
        if entry.startswith(prefix):
            os.remove(os.path.join(self.cache_path, entry))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ctmc(data, numstates, transintv=1.0, toltime=1e-8, debug=False):
    """Estimate a Continuous Time Markov Chain from event data.

    Parameters
    ----------
    data : list of lists
        N examples (e.g. rating histories); each example pairs encoded
        state labels with the durations the state lasted since recording
        started.
    numstates : int
        Number of unique states.
    transintv : float
        Time interval over which the transition matrix is computed.
    toltime : float
        (If debug=True) raise if the aggregated duration of any state is
        smaller than this tolerance.
    debug : bool
        If True, run the datacheck/errorcheck validation functions and
        raise on malformed data. Enable on a first run over a small
        dataset; ctmc itself does not auto-correct input errors.

    Returns
    -------
    transmat : ndarray
        The estimated transition/stochastic matrix.
    genmat : ndarray
        The estimated generator matrix.
    transcount : ndarray
        Counted transitions between states.
    statetime : ndarray
        Aggregated time spent in each state.
    """
    if debug:
        # Fail fast on a malformed data object before aggregating.
        datacheck(data, numstates, toltime)
    transcount, statetime = aggregateevents(data, numstates)
    if debug:
        # Catch degenerate aggregations (e.g. near-zero state dwell time).
        errorcheck(transcount, statetime, toltime)
    genmat = generatormatrix(transcount, statetime)
    # Transition matrix over one interval: P = expm(Q * dt).
    transmat = scipy.linalg.expm(genmat * transintv)
    return transmat, genmat, transcount, statetime
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plugins(group, spec=None):
    # TODO: share this documentation with `../doc/plugin.rst`...
    '''
    Returns a `PluginSet` object for the specified setuptools-style
    entrypoint `group`. This is just a wrapper around
    `pkg_resources.iter_entry_points` that allows the plugins to sort
    and override themselves.

    The optional `spec` parameter controls how and what plugins are
    loaded. If it is ``None`` or the special value ``'*'``, then the
    normal plugin loading will occur, i.e. all registered plugins will
    be loaded and their self-declared ordering and dependencies will be
    applied.

    Otherwise, the `spec` is taken as a comma- or whitespace-separated
    list of plugins to load. In this mode, the `spec` can either specify
    an exact list of plugins to load, in the specified order, referred
    to as an "absolute" spec. Otherwise, it is a "relative" spec, which
    indicates that it only adjusts the standard registered plugin
    loading. A spec is a list of either absolute or relative
    instructions, and they cannot be mixed.

    In either mode, a plugin is identified either by name for registered
    plugins (e.g. ``foo``), or by fully-qualified Python module and
    symbol name for unregistered plugins
    (e.g. ``package.module.symbol``).

    Plugins in an absolute spec are loaded in the order specified and
    can be optionally prefixed with the following special characters:

    * ``'?'`` : the specified plugin should be loaded if available. If
      it is not registered, cannot be found, or cannot be loaded, then
      it is ignored (a DEBUG log message will be emitted, however).

    Plugins in a relative spec are always prefixed with at least one of
    the following special characters:

    * ``'-'`` : removes the specified plugin; this does not affect
      plugin ordering, it only removes the plugin from the loaded
      list. If the plugin does not exist, no error is thrown.
    * ``'+'`` : adds or requires the specified plugin to the loaded
      set. If the plugin is not a named/registered plugin, then it will
      be loaded as an asset-symbol, i.e. a Python-dotted module and
      symbol name. If the plugin does not exist or cannot be loaded,
      this will throw an error. It does not affect the plugin ordering
      of registered plugins.
    * ``'/'`` : the plugin name is taken as a regular expression that
      will be used to match plugin names and it must terminate in a
      slash. Note that this must be the **last** element in the spec
      list.

    Examples:

    * ``'*'`` : load all registered plugins.
    * ``'foo,bar'`` : load the "foo" plugin, then the "bar" plugin.
    * ``'foo,?bar'`` : load the "foo" plugin and if the "bar" plugin
      exists, load it too.
    * ``'-zig'`` : load all registered plugins except the "zig" plugin.
    * ``'+pkg.foo.bar'`` : load all registered plugins and then load
      the "pkg.foo.bar" Python symbol.
    * ``'pkg.foo.bar'`` : load only the "pkg.foo.bar" Python symbol.
    '''
    parsed = _parse_spec(spec)
    # Registered plugins first, then any unregistered (asset-symbol)
    # plugins the spec pulls in; finally apply the group ordering.
    selected = list(_get_registered_plugins(group, parsed))
    selected.extend(_get_unregistered_plugins(group, selected, parsed))
    ordered = list(_sort_plugins(group, selected, parsed, spec))
    return PluginSet(group, spec, ordered)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle(self, object, *args, **kw):
    '''
    Calls each plugin in this PluginSet with the specified object,
    arguments, and keywords in the standard group plugin order. The
    return value from each successive invoked plugin is passed as the
    first parameter to the next plugin. The final return value is the
    object returned from the last plugin.

    If this plugin set is empty (i.e. no plugins exist or matched the
    spec), then a ValueError exception is thrown.
    '''
    if not bool(self):
        # Distinguish "nothing registered" from "spec matched nothing".
        if self.spec and self.spec != SPEC_ALL:
            raise ValueError(
                'No plugins in group %r matched %r' % (self.group, self.spec))
        raise ValueError('No plugins available in group %r' % (self.group,))
    result = object
    for plugin in self.plugins:
        result = plugin.handle(result, *args, **kw)
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.