text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def config_dir(mkcustom=False):
    """Return the directory holding acorn configuration files.

    Falls back to the repository's bundled ``acorn/config`` directory when
    running in test mode, or when no user-level ``~/.acorn`` directory exists
    and none was requested. Otherwise the user directory is returned,
    creating it first when ``mkcustom`` is True.
    """
    from os import path
    from acorn.utility import reporoot
    from acorn.base import testmode

    custom = path.join(path.abspath(path.expanduser("~")), ".acorn")
    if testmode or (not mkcustom and not path.isdir(custom)):
        return path.join(reporoot, "acorn", "config")
    if mkcustom:  # pragma: no cover
        # Never reached under testmode; we don't want to clobber the
        # user's local config cache.
        from os import mkdir
        mkdir(custom)
    return custom
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _package_path(package):
    """Return the full path to the default package configuration file.

    Args:
        package (str): name of the python package to return a path for.
    """
    from os import path
    return path.join(config_dir(), "{}.cfg".format(package))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_single(parser, filepath):
"""Reads a single config file into the parser, silently failing if the file does not exist. Args: parser (ConfigParser):
parser to read the file into. filepath (str):
full path to the config file. """ |
from os import path
global packages
if path.isfile(filepath):
parser.readfp(open(filepath)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def settings(package, reload_=False):
    """Return the cached config settings for the specified package.

    Args:
        package (str): name of the python package to get settings for.
        reload_ (bool): when True, re-read the config files even when the
            package's settings are already cached.
    """
    global packages
    if package not in packages or reload_:
        # Removed an unused local "from os import path" left over from an
        # earlier revision.
        result = CaseConfigParser()
        if package != "acorn":
            # Package-specific settings first, then the global acorn defaults.
            confpath = _package_path(package)
            _read_single(result, confpath)
        _read_single(result, _package_path("acorn"))
        packages[package] = result
    return packages[package]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def descriptors(package):
    """Return a dictionary of descriptors deserialized from JSON for the
    specified package.

    Args:
        package (str): name of the python package to get settings for.

    Returns:
        dict or None: the decoded JSON database, or None when no descriptor
        file exists for the package.
    """
    from os import path
    dpath = _descriptor_path(package)
    if not path.isfile(dpath):
        return None
    import json
    with open(dpath) as dfile:
        return json.load(dfile)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_magnet(magnet_uri):
    """Return a dictionary of the parameters contained in a magnet URI.

    Returns an empty defaultdict(list) when the input is not a magnet link.
    """
    from urllib.parse import unquote  # requests.utils.unquote is this alias

    data = defaultdict(list)
    if not magnet_uri.startswith('magnet:'):
        return data
    # BUG FIX: str.strip('magnet:?') strips *characters* (and could also eat
    # legitimate trailing chars); slice the known prefix off instead.
    query = magnet_uri[len('magnet:?'):]
    for segment in query.split('&'):
        # Split on the first '=' only so values containing '=' survive.
        key, _, value = segment.partition('=')
        if key == 'dn':
            data['name'] = unquote(value).replace('+', '.')
        elif key == 'xt':
            # BUG FIX: strip('urn:btih:') could also remove hex digits such
            # as 'b', 't', 'h' from the *end* of the info hash.
            if value.startswith('urn:btih:'):
                value = value[len('urn:btih:'):]
            data['infoHash'] = value
        elif key == 'tr':
            data['trackers'].append(unquote(value))
        else:
            data[key] = value
    return data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_torrent_file(torrent):
    """Parse a local or remote torrent file.

    Args:
        torrent (str): an http/https/ftp URL or a local file path.

    Returns:
        dict or None: parsed torrent metadata, or None when *torrent* is
        neither a recognised URL nor an existing file.
    """
    # BUG FIX: the original pattern r'^(http?s|ftp)' matched "https"/"htts"
    # but never plain "http"; r'^(https?|ftp)' matches http, https and ftp.
    link_re = re.compile(r'^(https?|ftp)')
    if link_re.match(torrent):
        response = requests.get(torrent, headers=HEADERS, timeout=20)
        data = parse_torrent_buffer(response.content)
    elif os.path.isfile(torrent):
        with open(torrent, 'rb') as f:
            data = parse_torrent_buffer(f.read())
    else:
        data = None
    return data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_torrent_buffer(torrent):
    """Parse a bencoded torrent buffer into a metadata dict.

    Returns:
        dict or None: torrent metadata, or None when the buffer is not
        valid bencoded data.
    """
    md = {}
    try:
        metadata = bencode.bdecode(torrent)
    except bencode.BTL.BTFailure:
        print('Not a valid encoded torrent')
        return None
    # Collect trackers from both announce-list and announce, de-duplicated.
    md['trackers'] = []
    if 'announce-list' in metadata:
        for tracker in metadata['announce-list']:
            md['trackers'].append(tracker[0])
    if 'announce' in metadata:
        md['trackers'].append(metadata['announce'])
    md['trackers'] = list(set(md['trackers']))
    if 'name' in metadata['info']:
        md['name'] = metadata['info']['name']
    webseeds = []
    if 'httpseeds' in metadata:
        webseeds = metadata['httpseeds']
    if 'url-list' in metadata:
        # BUG FIX: the original read md['url-list'] (never set) -> KeyError.
        webseeds += metadata['url-list']
    if webseeds:
        md['webseeds'] = webseeds
    if 'created by' in metadata:
        md['creator'] = metadata['created by']
    if 'creation date' in metadata:
        utc_dt = datetime.utcfromtimestamp(metadata['creation date'])
        md['created'] = utc_dt.strftime('%Y-%m-%d %H:%M:%S')
    if 'comment' in metadata:
        md['comment'] = metadata['comment']
    md['piece_size'] = metadata['info']['piece length']
    if 'length' in metadata['info']:
        # Single-file torrent.
        md['file'] = {'path': metadata['info']['name'],
                      'length': metadata['info']['length']}
    if 'files' in metadata['info']:
        # Multi-file torrent.
        md['files'] = []
        for item in metadata['info']['files']:
            md['files'].append(
                {'path': item['path'][0], 'length': item['length']})
    # TODO check if torrent is private and encoding
    hashcontents = bencode.bencode(metadata['info'])
    digest = hashlib.sha1(hashcontents).digest()
    md['infoHash'] = hashlib.sha1(hashcontents).hexdigest()
    md['infoHash_b32'] = base64.b32encode(digest)
    md['pieces'] = _split_pieces(metadata['info']['pieces'])
    # BUG FIX: single-file torrents have 'file', not 'files'; summing over
    # md['files'] unconditionally raised KeyError for them.
    if 'files' in md:
        total = sum(item['length'] for item in md['files'])
    else:
        total = md['file']['length']
    md['total_size'] = hsize(total)
    return md
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hsize(bytes):
    """Convert a byte count to a human-readable string."""
    units = ['Bytes', 'KB', 'MB', 'GB', 'TB']
    if bytes == 0:
        return '0 Byte'
    exponent = int(math.floor(math.log(bytes) / math.log(1024)))
    scaled = round(bytes / math.pow(1024, exponent), 2)
    # NOTE: value and unit are joined with no separator, matching the
    # original output format (e.g. "1.0KB").
    return '{}{}'.format(scaled, units[exponent])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ratio(leechs, seeds):
    """Compute the torrent seed/leech ratio."""
    try:
        return float(seeds) / float(leechs)
    except ZeroDivisionError:
        # No leechers: report the raw seed count instead.
        return int(seeds)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_torrent(magnet_link):
    """Turn a magnet link into a link to a .torrent file.

    Tries a list of public torrent-cache services in order and returns the
    first URL whose HEAD response reports a torrent-like content type, or
    None when no service has the torrent.

    NOTE(review): several of these cache services (torcache, torrage) appear
    defunct — confirm availability before relying on this helper.
    """
    infoHash = parse_magnet(magnet_link)['infoHash']
    torcache = 'http://torcache.net/torrent/' + infoHash + '.torrent'
    torrage = 'https://torrage.com/torrent/' + infoHash + '.torrent'
    reflektor = 'http://reflektor.karmorra.info/torrent/' + \
        infoHash + '.torrent'
    thetorrent = 'http://TheTorrent.org/'+infoHash
    btcache = 'http://www.btcache.me/torrent/'+infoHash
    for link in [torcache, torrage, reflektor, btcache, thetorrent]:
        try:
            # Python 2 print statement; this module predates Python 3.
            print "Checking "+link
            response = requests.head(link, headers=HEADERS)
            # Torrent payloads are served as x-bittorrent or octet-stream.
            if response.headers['content-type'] in ['application/x-bittorrent',
                                                    'application/octet-stream']:
                return link
        except requests.exceptions.ConnectionError:
            # Service unreachable; fall through to the next mirror.
            pass
    return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_story():
    """Return a boilerplate Story object for testing and demos."""
    body = six.text_type('\n\n'.join(lorem_ipsum.paragraphs(6)))
    return Story(
        slug="la-data-latimes-ipsum",
        headline="This is not a headline",
        byline="This is not a byline",
        pub_date=datetime.now(),
        canonical_url="http://www.example.com/",
        kicker="This is not a kicker",
        description=lorem_ipsum.COMMON_P.split(".")[0],
        sources="This is not a source",
        credits="This is not a credit",
        content=body,
        image=get_image(900),
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_related_items(count=4):
    """Return the requested number of boilerplate related items as a list."""
    # Build the shared kwargs once so every item reuses the same image,
    # exactly as the original did.
    shared_kwargs = dict(
        headline="This is not a headline",
        url="http://www.example.com/",
        image=get_image(400, 400),
    )
    return [RelatedItem(**shared_kwargs) for _ in range(count)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_image(width, height=None, background_color="cccccc", random_background_color=False):
    """Return an Image with placeholder caption, credit, and the requested
    (possibly random) background color."""
    placeholder_url = placeholdit.get_url(
        width,
        height=height,
        background_color=background_color,
        random_background_color=random_background_color,
    )
    return Image(
        url=placeholder_url,
        credit="This is not an image credit",
        caption="This is not a caption",
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_queryset(self, value, queryset):
    """Filter the queryset to all instances matching the given attribute."""
    return queryset.filter(**{self.field_name: value})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exclude_current_instance(self, queryset):
    """If an instance is being updated, do not count that instance itself
    as a uniqueness conflict."""
    if self.instance is None:
        return queryset
    return queryset.exclude(pk=self.instance.pk)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def enforce_required_fields(self, attrs):
    """The `UniqueTogetherValidator` always forces an implied 'required'
    state on the fields it applies to; raise for any that are absent."""
    if self.instance is not None:
        # Updates may omit fields; values come from the instance instead.
        return
    missing = {}
    for field_name in self.fields:
        if field_name not in attrs:
            missing[field_name] = self.missing_message
    if missing:
        raise ValidationError(missing)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_queryset(self, attrs, queryset):
    """Filter the queryset to all instances matching the given attributes."""
    # On update, any unprovided field takes its value from the existing
    # instance so the uniqueness check covers the full field set.
    if self.instance is not None:
        for field_name in self.fields:
            attrs.setdefault(field_name, getattr(self.instance, field_name))
    filters = {field_name: attrs[field_name] for field_name in self.fields}
    return queryset.filter(**filters)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modify(self, view):
    """Add the GET item as extra context on the view."""
    key, value = self.get['name'], self.get['value']
    view.params['extra_context'][key] = value
    return view
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_bucket_list():
    """Print the name of every S3 bucket, replacing non-ASCII characters
    with spaces so the output is always printable."""
    args = parser.parse_args()
    conn = s3_conn(args.aws_access_key_id, args.aws_secret_access_key)
    for bucket in conn.get_all_buckets():
        safe_name = ''.join(ch if ord(ch) < 128 else ' ' for ch in bucket.name)
        print(safe_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_back_up_generator(frame_function, *args, **kwargs):
    """Create a generator that backs the cursor up after a frame of the
    provided animation function.

    Assumes the animation yields strings of constant width and height.

    Args:
        frame_function: A function that returns a FrameGenerator.
        args: Arguments for frame_function.
        kwargs: Keyword arguments for frame_function.
    Returns:
        a generator yielding backspace/backline characters for the
        animation function's generator.
    """
    # Measure the first frame to size the back-up sequence.
    first_frame = next(frame_function(*args, **kwargs))
    rows = first_frame.split('\n')
    if len(rows) == 1:
        return util.BACKSPACE_GEN(len(rows[0]))
    return util.BACKLINE_GEN(len(rows))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _backspaced_single_line_animation(animation_, *args, **kwargs):
    """Turn an animation into an automatically backspaced animation.

    The first frame is yielded untouched; every subsequent frame is paired
    with backspaces (sized by kwargs['width']) that erase its predecessor.

    Args:
        animation_: A function returning a generator that yields animation
            frame strings.
        args: Arguments for the animation function.
        kwargs: Keyword arguments for the animation function.
    """
    frames = animation_(*args, **kwargs)
    # No backing up before the very first frame.
    yield next(frames)
    yield from util.concatechain(util.BACKSPACE_GEN(kwargs['width']), frames)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _raise_if_annotated(self, func):
    """Raise TypeError if *func* is decorated with Annotate, as such
    functions cause visual bugs when decorated with Animate. Animate should
    be wrapped by Annotate instead.

    Args:
        func (function): Any callable.
    Raises:
        TypeError
    """
    # getattr with a default collapses the hasattr+getattr pair.
    if getattr(func, ANNOTATED, False):
        msg = ('Functions decorated with {!r} '
               'should not be decorated with {!r}.\n'
               'Please reverse the order of the decorators!'.format(
                   self.__class__.__name__, Annotate.__name__))
        raise TypeError(msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _start_print(self):
"""Print the start message with or without newline depending on the self._start_no_nl variable. """ |
if self._start_no_nl:
sys.stdout.write(self._start_msg)
sys.stdout.flush()
else:
print(self._start_msg) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
    """Reset the current animation generator."""
    frames = self._frame_function(*self._animation_args,
                                  **self._animation_kwargs)
    # Interleave frames with the back-up sequence and loop forever.
    chained = util.concatechain(frames, self._back_up_generator)
    self._current_generator = itertools.cycle(chained)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_erase_frame(self):
    """Return a frame that completely erases the current frame, and then
    backs up. Assumes that the current frame is of constant width."""
    rows = self._current_frame.split('\n')
    width, height = len(rows[0]), len(rows)
    blank_row = ' ' * width
    if height == 1:
        return blank_row + BACKSPACE * width
    return '\n'.join([blank_row] * height) + BACKLINE * (height - 1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def string(self):  # noqa: C901
    """Return a human-readable version of the decoded METAR report."""
    lines = ["station: %s" % self.station_id]
    if self.type:
        lines.append("type: %s" % self.report_type())
    if self.time:
        lines.append("time: %s" % self.time.ctime())
    if self.temp:
        lines.append("temperature: %s" % self.temp.string("C"))
    if self.dewpt:
        lines.append("dew point: %s" % self.dewpt.string("C"))
    if self.wind_speed:
        lines.append("wind: %s" % self.wind())
    if self.wind_speed_peak:
        lines.append("peak wind: %s" % self.peak_wind())
    if self.wind_shift_time:
        lines.append("wind shift: %s" % self.wind_shift())
    if self.vis:
        lines.append("visibility: %s" % self.visibility())
    if self.runway:
        lines.append("visual range: %s" % self.runway_visual_range())
    if self.press:
        lines.append(f"pressure: {self.press.string('MB')} {self.press.string('IN')} {self.press.string('MM')}")
    if self.weather:
        lines.append("weather: %s" % self.present_weather())
    if self.sky:
        lines.append("sky: %s" % self.sky_conditions("\n "))
    if self.press_sea_level:
        lines.append("sea-level pressure: %s" % self.press_sea_level.string("mb"))
    if self.max_temp_6hr:
        lines.append("6-hour max temp: %s" % str(self.max_temp_6hr))
    # BUG FIX: the min-temp lines were guarded by the *max*-temp attributes
    # (copy-paste error), so min temps were dropped or printed as "None".
    if self.min_temp_6hr:
        lines.append("6-hour min temp: %s" % str(self.min_temp_6hr))
    if self.max_temp_24hr:
        lines.append("24-hour max temp: %s" % str(self.max_temp_24hr))
    if self.min_temp_24hr:
        lines.append("24-hour min temp: %s" % str(self.min_temp_24hr))
    if self.precip_1hr:
        lines.append("1-hour precipitation: %s" % str(self.precip_1hr))
    if self.precip_3hr:
        lines.append("3-hour precipitation: %s" % str(self.precip_3hr))
    if self.precip_6hr:
        lines.append("6-hour precipitation: %s" % str(self.precip_6hr))
    if self.precip_24hr:
        lines.append("24-hour precipitation: %s" % str(self.precip_24hr))
    if self._remarks:
        lines.append("remarks:")
        lines.append("- " + self.remarks("\n- "))
    if self._unparsed_remarks:
        lines.append("- " + ' '.join(self._unparsed_remarks))
    lines.append("METAR: " + self.code)
    return "\n".join(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handlePressure(self, d):
    """ Parse an altimeter-pressure group.

    The following attributes are set:
        press    [int]
    """
    press = d['press']
    if press != '////':
        # 'O' is a common transcription error for the digit zero.
        press = float(press.replace('O', '0'))
        if d['unit']:
            if d['unit'] == 'A' or (d['unit2'] and d['unit2'] == 'INS'):
                # Altimeter setting: value is in hundredths of inches Hg.
                self.press = CustomPressure(press / 100, 'IN')
            elif d['unit'] == 'SLP':
                # Sea-level pressure: tenths of hPa with the leading
                # "10"/"9" dropped; < 500 implies the 1000 hPa range.
                if press < 500:
                    press = press / 10 + 1000
                else:
                    press = press / 10 + 900
                self.press = CustomPressure(press)
                self._remarks.append("sea-level pressure %.1fhPa" % press)
            else:
                self.press = CustomPressure(press)
        elif press > 2500:
            # No unit given: large values are assumed to be hundredths of
            # inches Hg, smaller ones hPa (CustomPressure's default unit).
            self.press = CustomPressure(press / 100, 'IN')
        else:
            self.press = CustomPressure(press)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handleSealvlPressRemark(self, d):
    """ Parse the sea-level pressure remark group. """
    # Value is reported in tenths of hPa with the leading "10"/"9" omitted.
    value = float(d['press']) / 10.0
    value += 1000 if value < 50 else 900
    if not self.press:
        self.press = CustomPressure(value)
    self.press_sea_level = CustomPressure(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tasklogger(name="TaskLogger"):
    """Get a TaskLogger object

    Parameters
    ----------
    name : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    existing = logging.getLogger(name)
    try:
        return existing.tasklogger
    except AttributeError:
        # No TaskLogger attached to this logger yet; create a fresh one.
        return logger.TaskLogger(name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_debug(msg, logger="TaskLogger"):
    """Log a DEBUG message to the named TaskLogger.

    Parameters
    ----------
    msg : str
        Message to be logged
    logger : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    task_log = get_tasklogger(logger)
    task_log.debug(msg)
    return task_log
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_info(msg, logger="TaskLogger"):
    """Log an INFO message to the named TaskLogger.

    Parameters
    ----------
    msg : str
        Message to be logged
    logger : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    task_log = get_tasklogger(logger)
    task_log.info(msg)
    return task_log
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_warning(msg, logger="TaskLogger"):
    """Log a WARNING message to the named TaskLogger.

    Parameters
    ----------
    msg : str
        Message to be logged
    logger : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    task_log = get_tasklogger(logger)
    task_log.warning(msg)
    return task_log
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_error(msg, logger="TaskLogger"):
    """Log an ERROR message to the named TaskLogger.

    Parameters
    ----------
    msg : str
        Message to be logged
    logger : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    task_log = get_tasklogger(logger)
    task_log.error(msg)
    return task_log
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_critical(msg, logger="TaskLogger"):
    """Log a CRITICAL message to the named TaskLogger.

    Parameters
    ----------
    msg : str
        Message to be logged
    logger : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    task_log = get_tasklogger(logger)
    task_log.critical(msg)
    return task_log
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_indent(indent=2, logger="TaskLogger"):
    """Set the indent size on the named TaskLogger.

    Parameters
    ----------
    indent : int, optional (default: 2)
        Number of spaces by which to indent based on the number of tasks
        currently running
    logger : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    task_log = get_tasklogger(logger)
    task_log.set_indent(indent)
    return task_log
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(miz_path):
    """ Artifact from earlier development.

    Extracts parking-spot positions from a MIZ mission file and dumps them,
    pickled, into a generated ``_parking_spots.py`` module.
    """
    from emiz.miz import Miz
    with Miz(miz_path) as m:
        mis = m.mission
    result = defaultdict(dict)
    for unit in mis.units:
        # Group names encode "<airport>#<spot number>".
        airport, spot = unit.group_name.split('#')
        spot = int(spot)
        # print(airport, int(spot), unit.unit_position)
        result[airport][spot] = unit.unit_position
    import pickle # nosec
    with open('_parking_spots.py', mode='w') as f:
        # NOTE(review): pickle.dumps returns bytes on Python 3, so this
        # writes a bytes repr into a text file — confirm that is intended.
        f.write('parkings = {}\n'.format(pickle.dumps(result)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _traverse(self, name, create_missing=False, action=None, value=NO_DEFAULT):
    """Traverse to the item specified by ``name``.

    To create missing items on the way to the ``name``d item, pass
    ``create_missing=True``; an item is then inserted for each missing
    segment in ``name`` (type and value depend on the *next* segment — see
    :meth:`_create_segment`). If a ``value`` is passed, the ``name``d item
    is set to it; if ``action`` is passed, it is applied to the final
    (container, segment) pair and its result returned; otherwise the
    ``name``d item's current value is returned.
    """
    obj = self
    segments = self._parse_path(name)
    # Pair each segment with its successor so the final segment is
    # detectable (its successor is None).
    for segment, next_segment in zip(segments, segments[1:] + [None]):
        last = next_segment is None
        if create_missing:
            self._create_segment(obj, segment, next_segment)
        try:
            next_obj = obj[segment]
        except IndexError:
            # Normalize list index errors to KeyError for a uniform API.
            raise KeyError(segment)
        if not last:
            obj = next_obj
        else:
            if action:
                value = action(obj, segment)
            elif value is not NO_DEFAULT:
                obj[segment] = value
            else:
                value = obj[segment]
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_path(self, path):
    """Parse ``path`` into segments.

    Paths must start with a WORD (i.e., a top-level Django setting name)
    and segments are separated by dots. Compound segments (names containing
    a dot) can be grouped inside parentheses, which is useful for logger
    settings, e.g.::

        LOGGING.loggers.(package.module).level = "DEBUG"

    Interpolation groups (``{{x.y}}``) are kept intact without needing
    parentheses. Any plain segment that looks like an int — and does not
    start with a leading "0" followed by more digits — is converted to int.
    """
    if not path:
        raise ValueError('path cannot be empty')
    segments = []
    # Iterate over (char, next_char) pairs; the lookahead lets us detect
    # the two-character '{{' and '}}' interpolation delimiters.
    path_iter = zip(iter(path), chain(path[1:], (None,)))
    if six.PY2:
        # zip() returns a list on Python 2
        path_iter = iter(path_iter)
    convert_name = self._convert_name
    current_segment = []
    current_segment_contains_group = False

    def append_segment():
        # Flush the accumulated characters as one segment; only plain
        # segments (no group syntax) get int conversion.
        segment = ''.join(current_segment)
        if not current_segment_contains_group:
            segment = convert_name(segment)
        segments.append(segment)
        del current_segment[:]

    for c, d in path_iter:
        if c == '.':
            append_segment()
            current_segment_contains_group = False
        elif c == '(':
            # Compound segment: consume until the matching ')', tracking
            # nesting depth so inner parens are preserved verbatim.
            nested = 0
            for c, d in path_iter:
                current_segment.append(c)
                if c == '(':
                    nested += 1
                elif c == ')':
                    if nested:
                        nested -= 1
                    else:
                        current_segment.pop()  # Remove the closing paren
                        current_segment_contains_group = True
                        break
            else:
                raise ValueError('Unclosed (...) in %s' % path)
        elif c == '{' and d == '{':
            # Interpolation group: copy characters until the closing '}}'.
            current_segment_contains_group = True
            current_segment.append(c)
            for c, d in path_iter:
                current_segment.append(c)
                if c == '}' and d == '}':
                    current_segment_contains_group = True
                    break
            else:
                raise ValueError('Unclosed {{...}} in %s' % path)
        else:
            current_segment.append(c)
    if current_segment:
        append_segment()
    return segments
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _convert_name(self, name):
"""Convert ``name`` to int if it looks like an int. Otherwise, return it as is. """ |
if re.search('^\d+$', name):
if len(name) > 1 and name[0] == '0':
# Don't treat strings beginning with "0" as ints
return name
return int(name)
return name |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def basic_consume(self, queue_name='', consumer_tag='', no_local=False, no_ack=False, exclusive=False, no_wait=False, arguments=None, wait_message=True, timeout=0):
    """Start the consumption of messages from a queue.

    Args:
        queue_name: str, the queue to receive messages from
        consumer_tag: str, optional consumer tag
        no_local: bool, if set the server will not send messages to the
            connection that published them
        no_ack: bool, if set the server does not expect acknowledgements
            for messages
        exclusive: bool, request exclusive consumer access, meaning only
            this consumer can access the queue
        no_wait: bool, if set, the server will not respond to the method
        arguments: dict, AMQP arguments to be passed to the server
        wait_message: indicates if the consumer should wait for new
            messages in the queue or simply return None when it is empty
        timeout: a timeout for waiting messages; ``wait_message`` has
            precedence over timeout

    Returns:
        an instance of ``self.CONSUMER_CLASS`` bound to the new tag.
    """
    # If a consumer tag was not passed, create one
    consumer_tag = consumer_tag or 'ctag%i.%s' % (
        self.channel_id, uuid.uuid4().hex)
    if arguments is None:
        arguments = {}
    # Build the AMQP basic.consume method frame.
    frame = amqp_frame.AmqpRequest(
        self.protocol._stream_writer, amqp_constants.TYPE_METHOD,
        self.channel_id)
    frame.declare_method(
        amqp_constants.CLASS_BASIC, amqp_constants.BASIC_CONSUME)
    request = amqp_frame.AmqpEncoder()
    request.write_short(0)  # reserved-1 (ticket) field, always 0
    request.write_shortstr(queue_name)
    request.write_shortstr(consumer_tag)
    request.write_bits(no_local, no_ack, exclusive, no_wait)
    request.write_table(arguments)
    # Per-consumer delivery queue, bounded by max_queue_size.
    self.consumer_queues[consumer_tag] = asyncio.Queue(self.max_queue_size)
    self.last_consumer_tag = consumer_tag
    consumer = self.CONSUMER_CLASS(
        self, self.consumer_queues[consumer_tag],
        consumer_tag, nowait=not wait_message,
        timeout=timeout)
    await self._write_frame_awaiting_response(
        'basic_consume', frame, request, no_wait)
    if not no_wait:
        # NOTE(review): assumes _ctag_events[consumer_tag] was created by
        # the consume-ok response handler before this point — confirm.
        self._ctag_events[consumer_tag].set()
    return consumer
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getPluginVersion():
    """Extract the plugin version from the .cdmp description file.

    Exits the process with status 1 when the file or its Version line
    cannot be found.
    """
    desc_file = os.path.join('cdmplugins', 'gc', plugin_desc_file)
    if not os.path.exists(desc_file):
        print('Cannot find the plugin description file. Expected here: ' +
              desc_file, file=sys.stderr)
        sys.exit(1)
    with open(desc_file) as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if stripped.startswith('Version'):
                return stripped.split('=')[1].strip()
    print('Cannot find a version line in the ' + desc_file,
          file=sys.stderr)
    sys.exit(1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chainCerts(data):
    """ Matches and returns any certificates found except the first match.
    Regex code copied from L{twisted.internet.endpoints._parseSSL}.
    Related ticket: https://twistedmatrix.com/trac/ticket/7732

    @param data: PEM-encoded data containing the certificates.
    @rtype: L{list} containing L{Certificate}s.
    """
    pem_blocks = re.findall(
        r'(-----BEGIN CERTIFICATE-----\n.+?\n-----END CERTIFICATE-----)',
        data,
        flags=re.DOTALL)
    certs = [Certificate.loadPEM(pem).original for pem in pem_blocks]
    # The first certificate is the leaf; only the chain follows it.
    return certs[1:]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def directories(self):
    """ Return the names of directories to be created. """
    base = self.project_name
    return [base, base + '/conf', base + '/static']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_job_resolver(self, job_resolver):
    """Remove job_resolver from the list of job resolvers.

    Keyword arguments:
    job_resolver -- Function reference of the job resolver to be removed.
    """
    # BUG FIX: the original deleted from self._job_resolvers while
    # enumerating it, which shifts indices mid-iteration and can skip or
    # remove the wrong entries when multiple matches exist. Collect the
    # matching indices first, then delete from the end backwards.
    matches = [i for i, resolver in enumerate(self.job_resolvers())
               if resolver == job_resolver]
    for i in reversed(matches):
        del self._job_resolvers[i]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_job(self, name):
"""Attempt to resolve the task name in to a job name. If no job resolver can resolve the task, i.e. they all return None, return None. Keyword arguments: name -- Name of the task to be resolved. """ |
for r in self.job_resolvers():
resolved_name = r(name)
if resolved_name is not None:
return resolved_name
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setColor(self, id, color):
""" Command 0x01 sets the color of a specific light Data: """ |
# Build the SetColor packet: [cmd][count(uint16 LE)] followed by one
# [id(uint16 LE)][color bytes] entry per light, then transmit it.
header = bytearray()
header.append(LightProtocolCommand.SetColor)
# Accept a single id/color as well as parallel lists of them.
if not isinstance(id, list):
    id = [id]
if not isinstance(color, list):
    color = [color]
header.extend(struct.pack('<H', len(id)))
i = 0
light = bytearray()
for curr_id in id:
    light.extend(struct.pack('<H', curr_id))
    # NOTE(review): assumes len(color) == len(id); IndexError otherwise
    # -- confirm callers guarantee parallel lists.
    light.extend(color[i])
    i += 1
buff = header + light
return self.send(buff)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setSeries(self, startId, length, color):
""" Command 0x07 sets all lights in the series starting from "startId" to "endId" to "color" Data: [0x07][startId][length][r][g][b] """ |
buff = bytearray()
buff.append(LightProtocolCommand.SetSeries)
buff.extend(struct.pack('<H', startId))
buff.extend(struct.pack('<H', length))
buff.extend(color)
return self.send(buff) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def v0_highlighter_post(request, response, tfidf, cid):
    '''Obtain highlights for a document POSTed as the body, which is the
    pre-design-thinking structure of the highlights API. See v1 below.
    NB: This end point will soon be deleted.
    The route for this endpoint is:
    ``POST /dossier/v0/highlighter/<cid>``.
    ``content_id`` is the id to associate with the given feature
    collection. The feature collection should be in the request
    body serialized as JSON.
    '''
    logger.info('got %r', cid)
    tfidf = tfidf or None
    # Only HTML payloads can be highlighted.
    content_type = request.headers.get('content-type', '')
    if not content_type.startswith('text/html'):
        logger.critical('content-type=%r', content_type)
        response.status = 415
        return {'error': {'code': 0, 'message': 'content_type=%r and should be text/html' % content_type}}
    # cid has the form 'prefix|<url-quoted url>'; everything after the
    # first pipe is the source URL.
    url = urllib.unquote(cid.split('|', 1)[1])
    body = request.body.read()
    if len(body) == 0:
        response.status = 420
        return {'error': {'code': 1, 'message': 'empty body'}}
    logger.info('parsing %d bytes for url: %r', len(body), url)
    fc = etl.create_fc_from_html(url, body, tfidf=tfidf)
    if fc is None:
        logger.critical('failed to get FC using %d bytes from %r', len(body), url)
        response.status = 506
        return {'error': {'code': 2, 'message': 'FC not generated for that content'}}
    highlights = dict()
    for feature_name, pretty_name in feature_pretty_names:
        # Rank each feature's phrases by normalized count; the two empty
        # lists pad the tuple to the v0 response shape.
        # NOTE(review): under Python 2, count / total floor-divides if
        # both are ints -- presumably the counter values are floats;
        # confirm.
        if feature_name not in fc: continue
        total = sum(fc[feature_name].values())
        highlights[pretty_name] = [
            (phrase, count / total, [], [])
            for phrase, count in sorted(fc[feature_name].items(), key=itemgetter(1), reverse=True)]
        logger.info('%r and %d keys', feature_name, len(highlights[pretty_name]))
    return {'highlights': highlights}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def v1_highlights_get(response, kvlclient, file_id_str, max_elapsed = 300):
    '''Obtain highlights for a document POSTed previously to this end
    point.  See documentation for v1_highlights_post for further
    details.  If the `state` is still `pending` for more than
    `max_elapsed` after the start of the `WorkUnit`, then this reports
    an error, although the `WorkUnit` may continue in the background.
    '''
    file_id = make_file_id(file_id_str)
    kvlclient.setup_namespace(highlights_kvlayer_tables)
    payload_strs = list(kvlclient.get('highlights', file_id))
    if not (payload_strs and payload_strs[0][1]):
        # Nothing (or an empty value) stored for this file_id.
        response.status = 500
        payload = {
            'state': ERROR,
            'error': {
                'code': 8,
                'message': 'unknown error'}}
        logger.critical('got bogus info for %r: %r', file_id, payload_strs)
    else:
        payload_str = payload_strs[0][1]
        try:
            payload = json.loads(payload_str)
            if payload['state'] == HIGHLIGHTS_PENDING:
                # Still pending: report elapsed time, or, past the
                # deadline, persist a timeout error for future GETs.
                elapsed = time.time() - payload.get('start', 0)
                if elapsed > max_elapsed:
                    response.status = 500
                    payload = {
                        'state': ERROR,
                        'error': {
                            'code': 8,
                            'message': 'hit timeout'}}
                    logger.critical('hit timeout on %r', file_id)
                    kvlclient.put('highlights', (file_id, json.dumps(payload)))
                else:
                    payload['elapsed'] = elapsed
            logger.info('returning stored payload for %r', file_id)
        except Exception, exc:
            # Stored value was not valid JSON.
            logger.critical('failed to decode out of %r',
                            payload_str, exc_info=True)
            response.status = 400
            payload = {
                'state': ERROR,
                'error': {
                    'code': 9,
                    'message': 'nothing known about file_id=%r' % file_id}
            }
    # only place where payload is returned
    return payload
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def create_highlights(data, tfidf):
'''compute highlights for `data`, store it in the store using
`kvlclient`, and return a `highlights` response payload.
'''
try:
fc = etl.create_fc_from_html(
data['content-location'], data['body'], tfidf=tfidf, encoding=None)
except Exception, exc:
logger.critical('failed to build FC', exc_info=True)
return {
'state': ERROR,
'error': {'code': 7,
'message': 'internal error: %s' % traceback.format_exc(exc),
}
}
if fc is None:
logger.critical('failed to get FC using %d bytes from %r',
len(body), data['content-location'])
response.status = 500
return {
'state': ERROR,
'error': {
'code': 7,
'message': 'internal error: FC not generated for that content',
},
}
try:
highlights = dict()
for feature_name, pretty_name in feature_pretty_names:
# Each type of string is
if feature_name not in fc:
continue
total = sum(fc[feature_name].values())
bow = sorted(fc[feature_name].items(), key=itemgetter(1), reverse=True)
highlights[pretty_name] = [(phrase, count / total)
for phrase, count in bow]
logger.info('%r and %d keys',
feature_name, len(highlights[pretty_name]))
highlight_objs = build_highlight_objects(data['body'], highlights)
except Exception, exc:
logger.critical('failed to build highlights', exc_info=True)
return {
'state': ERROR,
'error': {'code': 7,
'message': 'internal error: %s' % traceback.format_exc(exc),
}
}
payload = {
'highlights': highlight_objs,
'state': COMPLETED,
}
return payload |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def make_xpath_ranges(html, phrase):
    '''Given a HTML string and a `phrase`, build a regex to find offsets
    for the phrase, and then build a list of `XPathRange` objects for
    it. If this fails, return empty list.
    '''
    if not html:
        return []
    # Normalize the phrase to unicode; give up quietly if it will not
    # decode.
    if not isinstance(phrase, unicode):
        try:
            phrase = phrase.decode('utf8')
        except:
            logger.info('failed %r.decode("utf8")', exc_info=True)
            return []
    # NOTE(review): the phrase is compiled as a pattern, so regex
    # metacharacters in it are interpreted, not matched literally --
    # confirm callers pass plain text phrases.
    phrase_re = re.compile(
        phrase, flags=re.UNICODE | re.IGNORECASE | re.MULTILINE)
    spans = []
    # NOTE(review): `overlapped=` is a third-party `regex` module
    # feature, not stdlib `re` -- presumably `re` here is that module.
    for match in phrase_re.finditer(html, overlapped=False):
        spans.append(match.span())  # a list of tuple(start, end) char indexes
    # now run fancy aligner magic to get xpath info and format them as
    # XPathRange per above
    try:
        xpath_ranges = list(char_offsets_to_xpaths(html, spans))
    except:
        logger.info('failed to get xpaths', exc_info=True)
        return []
    ranges = []
    for xpath_range in filter(None, xpath_ranges):
        ranges.append(dict(
            start=dict(node=xpath_range.start_xpath,
                       idx=xpath_range.start_offset),
            end=dict(node=xpath_range.end_xpath,
                     idx=xpath_range.end_offset)))
    return ranges
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def eval_poly(uvec, nvec, Jvec):
    '''Evaluate multi-dimensional polynomials through tensor multiplication.
    :param list uvec: vector value of the uncertain parameters at which to evaluate the
    polynomial
    :param list nvec: order in each dimension at which to evaluate the polynomial
    :param list Jvec: Jacobi matrix of each dimension's 1D polynomial
    :return: poly_value - value of the polynomial evaluated at uvec
    :rtype: float
    '''
    # Coerce scalar arguments to iterables, then take the product of
    # the 1D polynomial evaluations across dimensions.
    us = _makeIter(uvec)
    ns = _makeIter(nvec)
    Js = _makeIter(Jvec)
    return np.prod([_eval_poly_1D(u, n, J) for u, n, J in zip(us, ns, Js)])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def train(self, ftrain):
    '''Trains the polynomial expansion.
    :param numpy.ndarray/function ftrain: output values corresponding to the
    quadrature points given by the getQuadraturePoints method to
    which the expansion should be trained. Or a function that should be evaluated
    at the quadrature points to give these output values.
    *Sample Usage*::
    >>> thePC = PolySurrogate(dimensions=2)
    >>> thePC.train(myFunc)
    >>> predicted_q = thePC.predict([0, 1])
    >>> thePC = PolySurrogate(dimensions=2)
    >>> U = thePC.getQuadraturePoints()
    >>> Q = [myFunc(u) for u in U]
    >>> thePC.train(Q)
    >>> predicted_q = thePC.predict([0, 1])
    '''
    # Reset the coefficient tensor before re-training.
    self.coeffs = 0*self.coeffs
    upoints, wpoints = self.getQuadraturePointsAndWeights()
    # Accept either a callable to evaluate at the quadrature points or
    # the pre-computed output values themselves.
    try:
        fpoints = [ftrain(u) for u in upoints]
    except TypeError:
        fpoints = ftrain
    for ipoly in np.arange(self.N_poly):
        inds = tuple(self.index_polys[ipoly])
        coeff = 0.0
        # Project the outputs onto this basis polynomial by Gaussian
        # quadrature: sum of P(u) * f(u) * weight over all points.
        for (u, q, w) in zip(upoints, fpoints, wpoints):
            coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
        self.coeffs[inds] = coeff
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def getQuadraturePointsAndWeights(self):
    '''Gets the quadrature points and weights for gaussian quadrature
    integration of inner products from the definition of the polynomials in
    each dimension.
    :return: (u_points, w_points) - np.ndarray of shape
    (num_polynomials, num_dimensions) and a np.ndarray of size
    (num_polynomials)
    :rtype: (np.ndarray, np.ndarray)
    '''
    qw_list, qp_list = [], []
    for ii in np.arange(len(self.J_list)):
        # Eigen-decompose each 1D Jacobi matrix: the sorted eigenvalues
        # are the quadrature points and the squared first components of
        # the matching eigenvectors are the weights (Golub-Welsch-style
        # construction -- confirm normalization convention).
        d, Q = np.linalg.eig(self.J_list[ii])
        qp, qpi = d[np.argsort(d)].reshape([d.size, 1]), np.argsort(d)
        qw = (Q[0, qpi]**2).reshape([d.size, 1])
        qw_list.append(qw)
        qp_list.append(qp)
    # Tensor-product grid of points/weights across all dimensions.
    umesh = np.meshgrid(*qp_list)
    upoints = np.vstack([m.flatten() for m in umesh]).T
    wmesh = np.meshgrid(*qw_list)
    wpoints = np.vstack([m.flatten() for m in wmesh]).T
    return upoints, wpoints
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _model_columns(ins):
""" Get columns info :type ins: sqlalchemy.orm.mapper.Mapper :rtype: list[SaColumnDoc] """ |
columns = []
for c in ins.column_attrs:
# Skip protected
if c.key.startswith('_'):
continue
# Collect
columns.append(SaColumnDoc(
key=c.key,
doc=c.doc or '',
type=str(c.columns[0].type), # FIXME: support multi-column properties
null=c.columns[0].nullable,
))
return columns |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _model_foreign(ins):
""" Get foreign keys info :type ins: sqlalchemy.orm.mapper.Mapper :rtype: list[SaForeignkeyDoc] """ |
fks = []
for t in ins.tables:
fks.extend([
SaForeignkeyDoc(
key=fk.column.key,
target=fk.target_fullname,
onupdate=fk.onupdate,
ondelete=fk.ondelete
)
for fk in t.foreign_keys])
return fks |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _model_unique(ins):
""" Get unique constraints info :type ins: sqlalchemy.orm.mapper.Mapper :rtype: list[tuple[str]] """ |
unique = []
for t in ins.tables:
for c in t.constraints:
if isinstance(c, UniqueConstraint):
unique.append(tuple(col.key for col in c.columns))
return unique |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _model_relations(ins):
""" Get relationships info :type ins: sqlalchemy.orm.mapper.Mapper :rtype: list[SaRelationshipDoc] """ |
# Describe each ORM relationship; the target model name can live in
# several places depending on how the relationship was declared.
relations = []
for r in ins.relationships:
    # Hard times with the foreign model :)
    if isinstance(r.argument, Mapper):
        model_name = r.argument.class_.__name__
    elif hasattr(r.argument, 'arg'):
        # Lazily-resolved relationship declared with a string name.
        model_name = r.argument.arg
    else:
        model_name = r.argument.__name__
    # Format each local/remote column pair as 'key' (same name on both
    # sides) or 'local=remote'.
    relations.append(SaRelationshipDoc(
        key=r.key,
        doc=r.doc or '',
        model=model_name,
        pairs=map(lambda a_b_tuple: a_b_tuple[0].key if a_b_tuple[0].key == a_b_tuple[1].key else '{}={}'.format(a_b_tuple[0].key, a_b_tuple[1].key), r.local_remote_pairs),
        uselist=r.uselist
    ))
return relations
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def doc(model):
""" Get documentation object for an SqlAlchemy model :param model: Model :type model: sqlalchemy.ext.declarative.DeclarativeBase :rtype: SaModelDoc """ |
ins = inspect(model)
return SaModelDoc(
name=model.__name__,
table=[t.name for t in ins.tables],
doc=getdoc(ins.class_),
columns=_model_columns(ins),
primary=_model_primary(ins),
foreign=_model_foreign(ins),
unique=_model_unique(ins),
relations=_model_relations(ins)
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GetModuleBaseNameFromWSDL(wsdl):
"""By default try to construct a reasonable base name for all generated modules. Otherwise return None. """ |
base_name = wsdl.name or wsdl.services[0].name
base_name = SplitQName(base_name)[1]
if base_name is None:
return None
return NCName_to_ModuleName(base_name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def condense(ss_unescaped):
""" Given multiple strings, returns a compressed regular expression just for these strings 'he(moglobin|r)?|she' """ |
# Compress a set of literal strings into one regex by factoring shared
# prefixes/suffixes, e.g. {'he','her','hemoglobin','she'} ->
# 'he(moglobin|r)?|she'.
def estimated_len(longg, short):
    # Length of the factored form '(alts)?short' for strings `longg`
    # sharing the affix `short`.
    return (3
            + len(short)
            + sum(map(len, longg))
            - len(longg)
            * (len(short) - 1)
            - 1 )
def stupid_len(longg):
    # Length of the naive alternation 'a|b|c' (content plus separators).
    return sum(map(len, longg)) + len(longg)
ss = [re.escape(s) for s in set(ss_unescaped)]
ss.sort(key=len)
# Map each string to the longer strings it prefixes ('p') or
# suffixes ('s').
short2long = defaultdict(lambda: {'p':[],'s':[]})
for short, longg in combinations(ss, 2):
    if longg.startswith(short):
        short2long[short]['p'].append(longg)
    if longg.endswith(short):
        short2long[short]['s'].append(longg)
# Process longest shared affixes first.
short2long = sorted(list(short2long.items()),
                    key=lambda x: len(x[0]),
                    reverse=True)
output = []
objs = set(ss)
for s, pre_sur in short2long:
    # NOTE(review): `ss` is rebound below, shadowing the escaped-string
    # list built above -- behavior holds since the list is no longer
    # needed, but a different name would be clearer.
    pp = set(pre_sur['p']) & objs
    ss = set(pre_sur['s']) & objs
    # Factor on whichever affix direction saves more characters.
    if ((stupid_len(pp) - estimated_len(pp, s))
        < (stupid_len(ss) - estimated_len(ss, s))):
        reg = (r'({heads})?{surfix}'
               .format(surfix=s,
                       heads='|'.join(sorted([p[:-len(s)] for p in ss],
                                             key=len,
                                             reverse=True))))
        assert len(reg) == estimated_len(ss, s)
        output.append(reg)
        objs -= (ss | set([s]))
    elif ((stupid_len(pp) - estimated_len(pp, s))
          > (stupid_len(ss) - estimated_len(ss, s))):
        reg = (r'{prefix}({tails})?'
               .format(prefix=s,
                       tails='|'.join(sorted([p[len(s):] for p in pp],
                                             key=len,
                                             reverse=True))))
        assert len(reg) == estimated_len(pp, s)
        output.append(reg)
        objs -= (pp | set([s]))
for residual in objs:
    output.append(residual)
# Collapse single-character groups: '(x)?' -> 'x?'.
return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_solid(regex):
""" Check the given regular expression is solid. True True True True False False """ |
# Replace every literal (escaped char or non-metacharacter) with '#'
# so only the regex structure remains.
shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
skeleton = shape.replace('#', '')
if len(shape) <= 1:
    return True
# A single character class, optionally quantified.
if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
    return True
# A single non-nested group, optionally quantified.
if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
    return True
# NOTE(review): skeleton has every '#' stripped, so the '#*?' here can
# only ever match empty -- confirm the intended pattern.
if re.match(r'^\(\)#*?\)\)', skeleton):
    return True
else:
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def danger_unpack(regex):
""" Remove the outermost parens 'abc' 'abc' 'abc' '[abc]' """ |
if is_packed(regex):
return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def concat(regex_list):
""" Concat multiple regular expression into one, if the given regular expression is not packed, a pair of paren will be add. (a|b)(c|d|e) """ |
output_list = []
for regex in regex_list:
output_list.append(consolidate(regex))
return r''.join(output_list) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_cache(config_fpath, cache_dir):
"""Write the cache directory to the dtool config file. :param config_fpath: path to the dtool config file :param cache_dir: the path to the dtool cache direcotory """ |
cache_dir = os.path.abspath(cache_dir)
return write_config_value_to_file(
CACHE_DIRECTORY_KEY,
cache_dir,
config_fpath
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_azure_secret_access_key(config_fpath, container, az_secret_access_key):
"""Write the ECS access key id to the dtool config file. :param config_fpath: path to the dtool config file :param container: azure storage container name :param az_secret_access_key: azure secret access key for the container """ |
key = AZURE_KEY_PREFIX + container
return write_config_value_to_file(key, az_secret_access_key, config_fpath) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_azure_containers(config_fpath):
"""List the azure storage containers in the config file. :param config_fpath: path to the dtool config file :returns: the list of azure storage container names """ |
config_content = _get_config_dict_from_file(config_fpath)
az_container_names = []
for key in config_content.keys():
if key.startswith(AZURE_KEY_PREFIX):
name = key[len(AZURE_KEY_PREFIX):]
az_container_names.append(name)
return sorted(az_container_names) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, filename, create = None, default_conf = {}):
"""Load the config file Args: filename (str):
the filename of the config, without any path create (str):
if the config file not found, and this parameter is not None, a config file will be create with content of default_conf default_conf (dict):
content of the default config data Returns: Return value of the ConfigFormatter.decode or the default_conf value Raises: ConfigFileNotFoundException: if the config file not found """ |
# Search the loader's configured locations; `tries` lists the paths
# that were checked (used in the error message below).
filenames, tries = self.__search_config_files(filename)
if len(filenames):
    # Nested loaders merge every file found; flat loaders use only the
    # first match.
    self.__loaded_config_file = filenames if self.__nested else filenames[0]
    return self.__load_config_files(filenames if self.__nested else filenames[:1])
if create is not None:
    # Not found: create a new config file with the default content and
    # return the defaults.
    self.__loaded_config_file = os.path.join(create, filename)
    self.save(default_conf)
    return default_conf
raise ConfigFileNotFoundException("Config file not found in: %s" % tries)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, data):
"""Save the config data Args: data: any serializable config data Raises: ConfigLoaderException: if the ConfigLoader.load not called, so there is no config file name, or the data is not serializable or the loader is nested """ |
# Saving is only defined for a single (non-nested) config file that
# load() has already located.
if self.__nested:
    raise ConfigLoaderException("Cannot save the config if the 'nested' paramter is True!")
if self.__loaded_config_file is None:
    raise ConfigLoaderException("Load not called yet!")
try:
    with open(self.__loaded_config_file, 'w') as f:
        f.write(self.__formatter.encode(data))
except Exception as e:
    # Any encode/write failure is reported as non-serializable data.
    raise ConfigLoaderException("Config data is not serializable: %s" % e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def metar_to_speech(metar: str) -> str: """ Creates a speakable text from a METAR Args: metar: METAR string to use Returns: speakable METAR for TTS """ |
LOGGER.info('getting speech text from METAR: %s', metar)
metar_data, metar_units = emiz.avwx.metar.parse_in(metar)
speech = emiz.avwx.speech.metar(metar_data, metar_units)
speech = str(speech).replace('Altimeter', 'Q N H')
LOGGER.debug('resulting speech: %s', speech)
return speech |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""Return rel path to a downloaded file as `include` node argument.""" |
document = self.state.document
env = document.settings.env
buildpath = env.app.outdir
link = self.arguments[0]
try:
    # Fetch the remote document and cache it under the build output's
    # _downloads directory so the stock include machinery can read it.
    r = requests.get(link)
    r.raise_for_status()
    downloadpath = os.path.join(buildpath, '_downloads')
    if not os.path.isdir(downloadpath):
        os.makedirs(downloadpath)
    rstfile = os.path.join(downloadpath, os.path.basename(link))
    with open(rstfile, 'w') as f:
        f.write(r.text)
    # Rewrite to a path relative to the current source document, as the
    # `include` directive expects.
    rstfile = os.path.relpath(rstfile, os.path.dirname(env.doc2path
                              (env.docname)))
    self.arguments = [rstfile]
    return super(RemoteInclude, self).run()
except Exception:
    # Any failure (network or filesystem) degrades to a build warning.
    err = 'Unable to resolve ' + link
    return [document.reporter.warning(str(err), line=self.lineno)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize(self, body):
""" Invoke the JSON API normalizer Perform the following: * add the type as a rtype property * flatten the payload * add the id as a rid property ONLY if present We don't need to vet the inputs much because the Parser has already done all the work. :param body: the already vetted & parsed payload :return: normalized dict """ |
resource = body['data']
data = {'rtype': resource['type']}
if 'attributes' in resource:
attributes = resource['attributes']
attributes = self._normalize_attributes(attributes)
data.update(attributes)
if 'relationships' in resource:
relationships = resource['relationships']
relationships = self._normalize_relationships(relationships)
data.update(relationships)
if resource.get('id'):
data['rid'] = resource['id']
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_attributes(self, attributes):
""" Ensure compliance with the spec's attributes section Specifically, the attributes object of the single resource object. This contains the key / values to be mapped to the model. :param attributes: dict JSON API attributes object """ |
# Spec link included in failure responses for this section.
link = 'jsonapi.org/format/#document-resource-object-attributes'
if not isinstance(attributes, dict):
    self.fail('The JSON API resource object attributes key MUST '
              'be a hash.', link)
elif 'id' in attributes or 'type' in attributes:
    # `id` and `type` belong on the resource object itself, never
    # inside the attributes hash.
    self.fail('A field name of `id` or `type` is not allowed in '
              'the attributes object. They should be top-level '
              'keys.', link)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_relationships(self, relationships):
""" Ensure compliance with the spec's relationships section Specifically, the relationships object of the single resource object. For modifications we only support relationships via the `data` key referred to as Resource Linkage. :param relationships: dict JSON API relationships object """ |
# Spec link included in failure responses for this section.
link = 'jsonapi.org/format/#document-resource-object-relationships'
if not isinstance(relationships, dict):
    self.fail('The JSON API resource object relationships key MUST '
              'be a hash & comply with the spec\'s resource linkage '
              'section.', link)
for key, val in relationships.items():
    if not isinstance(val, dict) or 'data' not in val:
        self.fail('Relationship key %s MUST be a hash & contain '
                  'a `data` field compliant with the spec\'s '
                  'resource linkage section.' % key, link)
    elif isinstance(val['data'], dict):
        # To-one linkage: must carry string `id` and `type` fields.
        data = val['data']
        rid = isinstance(data.get('id'), unicode)
        rtype = isinstance(data.get('type'), unicode)
        if not rid or not rtype:
            self.fail('%s relationship\'s resource linkage MUST '
                      'contain `id` & `type` fields. Additionally, '
                      'they must both be strings.' % key, link)
    elif isinstance(val['data'], list):
        # To-many linkage: deliberately rejected for writes.
        abort(exceptions.ModificationDenied(**{
            'detail': 'Modifying the %s relationship or any to-many '
                      'relationships for that matter are is not '
                      'currently supported. Instead, modify the '
                      'to-one side directly.' % key,
            'links': link,
        }))
    elif val['data']:
        # `data` is neither hash, list, nor null -> unintelligible.
        self.fail('The relationship key %s is malformed & impossible '
                  'for us to understand your intentions. It MUST be '
                  'a hash & contain a `data` field compliant with '
                  'the spec\'s resource linkage section or null if '
                  'you want to unset the relationship.' % key, link)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_resource(self, resource):
""" Ensure compliance with the spec's resource objects section :param resource: dict JSON API resource object """ |
# Spec link included in failure responses for this section.
link = 'jsonapi.org/format/#document-resource-objects'
rid = isinstance(resource.get('id'), unicode)
rtype = isinstance(resource.get('type'), unicode)
# `type` is always required; `id` additionally required on PATCH.
if not rtype or (self.req.is_patching and not rid):
    self.fail('JSON API requires that every resource object MUST '
              'contain a `type` top-level key. Additionally, when '
              'modifying an existing resource object an `id` '
              'top-level key is required. The values of both keys '
              'MUST be strings. Your request did not comply with '
              'one or more of these 3 rules', link)
elif 'attributes' not in resource and 'relationships' not in resource:
    self.fail('Modifiying or creating resources require at minimum '
              'an attributes object and/or relationship object.', link)
elif rid and self.req.is_posting:
    # Client-generated IDs on create are rejected outright.
    abort(exceptions.ModificationDenied(**{
        'detail': 'Our API does not support client-generated ID\'s '
                  'when creating NEW resources. Instead, our API '
                  'will generate one for you & return it in the '
                  'response.',
        'links': 'jsonapi.org/format/#crud-creating-client-ids',
    }))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_top_level(self, body):
""" Ensure compliance with the spec's top-level section """ |
# Spec link included in failure responses for this section.
link = 'jsonapi.org/format/#document-top-level'
try:
    # EAFP: a missing `data` key or a non-hash value both fail the
    # same way.
    if not isinstance(body['data'], dict):
        raise TypeError
except (KeyError, TypeError):
    self.fail('JSON API payloads MUST be a hash at the most '
              'top-level; rooted at a key named `data` where the '
              'value must be a hash. Currently, we only support '
              'JSON API payloads that comply with the single '
              'Resource Object section.', link)
if 'errors' in body:
    self.fail('JSON API payloads MUST not have both `data` & '
              '`errors` top-level keys.', link)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, body):
""" Invoke the JSON API spec compliant parser Order is important. Start from the request body root key & work your way down so exception handling is easier to follow. :return: the parsed & vetted request body """ |
# Validate from the document root downward so failures surface in a
# predictable order: top-level, then the resource object, then its
# attribute and relationship sections.
self._parse_top_level(body)
resource = body['data']
self._parse_resource(resource)
for section, checker in (('attributes', self._parse_attributes),
                         ('relationships', self._parse_relationships)):
    if section in resource:
        checker(resource[section])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_signer_by_version(digest, ver):
"""Returns a new signer object for a digest and version combination. Keyword arguments: digest -- a callable that may be passed to the initializer of any Signer object in this library. The callable must return a hasher object when called with no arguments. ver -- the version of the signature. This may be any value convertible to an int. """ |
if int(ver) == 1:
return V1Signer(digest)
elif int(ver) == 2:
return V2Signer(digest)
else:
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def identify(self, header):
"""Identifies a signature and returns the appropriate Signer object. This is done by reading an authorization header and matching it to signature characteristics. None is returned if the authorization header does not match the format of any signature identified by this identifier. Keyword arguments: header -- the Authorization header of a request. """ |
for ver, signer in self.signers.items():
if signer.matches(header):
return signer
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_transforms(transforms):
""" Load transform modules and return instance of transform class. Parameters transforms : [str] or [[str]] array of transform module name, or nested array of transform module name with argv to load Returns ------- array of transform instance """ |
from . import Transform
import inspect
# normalize arguments to form as [(name, [option, ...]), ...]
transforms_with_argv = map(lambda t: (t[0], t[1:]) if isinstance(t, list) else (t, []),
                           transforms)
def instantiate_transform(module_name, argv):
    # Bare names (no dot) are resolved inside the transform package.
    tr_module = __import__(
        module_name if module_name.count('.') > 0 else TRANSFORM_MODULE_PREFIX + module_name,
        fromlist=['dummy'])
    # The module must expose exactly one Transform subclass; the
    # `c if inspect.isclass(c) else None.__class__` dance keeps
    # issubclass from raising on non-class members.
    tr_classes = inspect.getmembers(
        tr_module,
        lambda c: issubclass(c if inspect.isclass(c) else None.__class__,
                             Transform))
    if len(tr_classes) != 1:
        raise TypeError('Transform module must have only one subclass of Transform')
    tr_class = tr_classes[0]
    # getmembers yields (name, class) pairs; instantiate with argv.
    return tr_class[1](argv)
return [instantiate_transform(tr[0], tr[1])
        for tr in transforms_with_argv]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def date_tuple(ovls):
""" We should have a list of overlays from which to extract day month year. """ |
# Fold day/month/year information out of the overlays; a 'date'
# overlay only fills components that are still unset (falsy).
day = month = year = 0
for o in ovls:
    if 'day' in o.props:
        day = o.value
    if 'month' in o.props:
        month = o.value
    if 'year' in o.props:
        year = o.value
    if 'date' in o.props:
        # NOTE(review): the comprehension variable `o` shadows the loop
        # variable; zip() is evaluated first so behavior holds, but a
        # different name would be safer.
        day, month, year = [(o or n) for o, n in zip((day, month,
                                                      year), o.value)]
return (day, month, year)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def longest_overlap(ovls):
""" From a list of overlays if any overlap keep the longest. """ |
# Ovls know how to compare to each other.
ovls = sorted(ovls)
# Quadratic scan is acceptable; ovls won't be more than 50 or so.
for i, s in enumerate(ovls):
    passing = True
    for l in ovls[i + 1:]:
        # Drop `s` when either of its endpoints falls inside a later
        # overlay's inclusive range.
        if s.start in Rng(l.start, l.end, rng=(True, True)) or \
           s.end in Rng(l.start, l.end, rng=(True, True)):
            passing = False
            break
    if passing:
        yield s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GetSchema(component):
    """Convenience function for finding the parent XMLSchema instance. """
    node = component
    # walk the parent chain until the schema root is reached
    while True:
        if isinstance(node, XMLSchema):
            return node
        node = node._parent()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getXMLNS(self, prefix=None):
    """dereference prefix or by default xmlns, returns namespace. """ |
    # the reserved 'xml' prefix always maps to the XML namespace
    if prefix == XMLSchemaComponent.xml:
        return XMLNS.XML
    parent = self
    # check this component's own xmlns declarations first
    ns = self.attributes[XMLSchemaComponent.xmlns].get(prefix or\
            XMLSchemaComponent.xmlns_key)
    # walk up the parent chain until a declaration is found
    while not ns:
        parent = parent._parent()
        ns = parent.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                XMLSchemaComponent.xmlns_key)
        # reaching the WSDL adapter means the prefix was never declared
        if not ns and isinstance(parent, WSDLToolsAdapter):
            if prefix is None:
                # an undeclared default namespace resolves to the empty string
                return ''
            raise SchemaError, 'unknown prefix %s' %prefix
    return ns
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getAttribute(self, attribute):
    """return requested attribute value or None """ |
    # a (namespace, name) pair addresses a namespace-qualified attribute
    if type(attribute) in (list, tuple):
        if len(attribute) != 2:
            raise LookupError, 'To access attributes must use name or (namespace,name)'
        ns_dict = self.attributes.get(attribute[0])
        if ns_dict is None:
            return None
        return ns_dict.get(attribute[1])
    # a plain string addresses an unqualified attribute
    return self.attributes.get(attribute)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __setAttributeDefaults(self):
    """Looks for default values for unset attributes. If class variable representing attribute is None, then it must be defined as an instance variable. """ |
    # class-level attribute map declares the defaults; only fill in
    # attributes the parsed element did not set explicitly
    for k,v in self.__class__.attributes.items():
        if v is not None and self.attributes.has_key(k) is False:
            # a callable default is computed lazily against this instance
            if isinstance(v, types.FunctionType):
                self.attributes[k] = v(self)
            else:
                self.attributes[k] = v
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isQualified(self):
    """ Local elements can be qualified or unqualifed according to the attribute form, or the elementFormDefault. By default local elements are unqualified. """ |
    # NOTE(review): 'form' is presumably defaulted from elementFormDefault
    # by the attribute-defaulting machinery -- confirm against schema class
    form = self.getAttribute('form')
    if form == 'qualified':
        return True
    if form == 'unqualified':
        return False
    # any other value is a schema error, not a silent default
    raise SchemaError, 'Bad form (%s) for element: %s' %(form, self.getItemTrace())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_title_tag(context, is_og=False):
    """ Returns the title as string or a complete open graph meta tag. """
    request = context['request']
    content = ''
    # a context object (e.g. from a DetailView) has first priority
    page_object = context.get('object')
    if page_object:
        try:
            content = page_object.get_meta_title()
        except AttributeError:
            pass
    elif context.get('meta_tagger'):
        content = context['meta_tagger'].get('title')
    if not content:
        # fall back to the cms page: `page_title` first, then `title`
        try:
            current_page = request.current_page
            content = current_page.get_page_title() or current_page.get_title()
        except (AttributeError, NoReverseMatch):
            pass
    if is_og:
        return mark_safe('<meta property="og:title" content="{content}">'.format(content=content))
    return content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_description_meta_tag(context, is_og=False):
    """ Returns the description as meta or open graph tag. """
    request = context['request']
    content = ''
    # a context object (e.g. from a DetailView) has first priority
    page_object = context.get('object')
    if page_object:
        try:
            content = page_object.get_meta_description()
        except AttributeError:
            pass
    elif context.get('meta_tagger'):
        content = context['meta_tagger'].get('description')
    if not content:
        # fall back to the meta description of the cms page
        try:
            content = request.current_page.get_meta_description()
        except (AttributeError, NoReverseMatch):
            pass
    if not content:
        return ''
    attr_name = 'property' if is_og else 'name'
    tag_name = 'og:description' if is_og else 'description'
    return mark_safe('<meta {attr_name}="{tag_name}" content="{content}">'.format(
        attr_name=attr_name,
        tag_name=tag_name,
        content=content
    ))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_robots_meta_tag(context):
    """ Returns the robots meta tag.
    The index/follow flags are resolved in priority order: the context
    object, the `meta_tagger` context dict, the cms page extension and
    finally a default of True for both. Domains outside the whitelist
    always render noindex, nofollow.
    """
    request = context['request']
    robots_indexing = None
    robots_following = None
    # Prevent indexing any unwanted domains (e.g. staging).
    # BUG FIX: use the request extracted above; `context.request` raises
    # AttributeError on a plain dict context (the sibling template tags
    # consistently read `context['request']`).
    if request.get_host() in settings.META_TAGGER_ROBOTS_DOMAIN_WHITELIST:
        # Try to get the flags from the context object (e.g. DetailViews).
        if context.get('object'):
            try:
                robots_indexing = context['object'].get_robots_indexing()
                robots_following = context['object'].get_robots_following()
            except AttributeError:
                pass
        elif context.get('meta_tagger'):
            robots_indexing = context['meta_tagger'].get('robots_indexing', robots_indexing)
            robots_following = context['meta_tagger'].get('robots_following', robots_following)
        # Try fetching the robots values of the cms page; default to True.
        if robots_indexing is None:
            try:
                robots_indexing = request.current_page.metatagpageextension.robots_indexing
            except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist):
                robots_indexing = True
        if robots_following is None:
            try:
                robots_following = request.current_page.metatagpageextension.robots_following
            except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist):
                robots_following = True
    return mark_safe('<meta name="robots" content="{robots_indexing}, {robots_following}">'.format(
        robots_indexing='index' if robots_indexing else 'noindex',
        robots_following='follow' if robots_following else 'nofollow'
    ))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_buckets(self):
    '''
        a method to retrieve a list of buckets on s3
    :return: list of buckets
    '''
    title = '%s.list_buckets' % self.__class__.__name__
    # send request to s3
    try:
        response = self.connection.list_buckets()
    except:
        raise AWSConnectionError(title)
    # cache the bucket names extracted from the response
    self.bucket_list = [bucket['Name'] for bucket in response['Buckets']]
    return self.bucket_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def delete_bucket(self, bucket_name):
    '''
        a method to delete a bucket in s3 and all its contents
    :param bucket_name: string with name of bucket
    :return: string with status of method
    '''
    title = '%s.delete_bucket' % self.__class__.__name__
    # validate inputs
    input_fields = {
        'bucket_name': bucket_name
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)
    # check for existence of bucket (a missing bucket is reported, not raised)
    if not bucket_name in self.bucket_list:
        if not bucket_name in self.list_buckets():
            status_msg = 'S3 bucket "%s" does not exist.' % bucket_name
            self.iam.printer(status_msg)
            return status_msg
    # retrieve list of records in bucket
    # NOTE: list_versions returns every object version, so versioned
    # buckets are emptied completely before the bucket itself is removed
    record_keys = []
    record_list, next_key = self.list_versions(bucket_name)
    for record in record_list:
        details = {
            'Key': record['key'],
            'VersionId': record['version_id']
        }
        record_keys.append(details)
    # delete first batch of records in bucket
    kw_args = {
        'Bucket': bucket_name,
        'Delete': { 'Objects': record_keys }
    }
    if record_keys:
        try:
            response = self.connection.delete_objects(**kw_args)
        except:
            raise AWSConnectionError(title)
    # continue deleting objects in bucket until empty, one page at a time
    if next_key:
        while next_key:
            record_keys = []
            record_list, next_key = self.list_versions(bucket_name, starting_key=next_key['key'], starting_version=next_key['version_id'])
            for record in record_list:
                details = {
                    'Key': record['key'],
                    'VersionId': record['version_id']
                }
                record_keys.append(details)
            kw_args = {
                'Bucket': bucket_name,
                'Delete': { 'Objects': record_keys }
            }
            try:
                response = self.connection.delete_objects(**kw_args)
            except:
                raise AWSConnectionError(title)
    # send delete bucket request (bucket must be empty at this point)
    try:
        self.connection.delete_bucket( Bucket=bucket_name )
    except:
        raise AWSConnectionError(title)
    # report result and return status message
    status_msg = 'S3 bucket "%s" deleted.' % bucket_name
    self.iam.printer(status_msg)
    return status_msg
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def read_headers(self, bucket_name, record_key, record_version='', version_check=False):
    '''
        a method for retrieving the headers of a record from s3
    :param bucket_name: string with name of bucket
    :param record_key: string with key value of record
    :param record_version: [optional] string with aws id of version of record
    :param version_check: [optional] boolean to enable current version check
    :return: dictionary with headers of record
    '''
    title = '%s.read_headers' % self.__class__.__name__
    from datetime import datetime
    from dateutil.tz import tzutc
    # validate inputs
    input_fields = {
        'bucket_name': bucket_name,
        'record_key': record_key,
        'record_version': record_version
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    # verify existence of bucket (refreshing the cached list on a miss)
    if not bucket_name in self.bucket_list:
        if not bucket_name in self.list_buckets():
            raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name))
    # create key word argument dictionary
    headers_kwargs = {
        'Bucket': bucket_name,
        'Key': record_key
    }
    if record_version:
        headers_kwargs['VersionId'] = record_version
    # create metadata default
    metadata_details = {}
    # send request for record header
    try:
        record = self.connection.head_object(**headers_kwargs)
    except Exception as err:
        # on failure, probe general connectivity: if the network is up the
        # failure is treated as "no headers" and the empty default returned;
        # if the network is down, surface a connection error instead
        try:
            import requests
            requests.get('https://www.google.com')
            return metadata_details
        except:
            raise AWSConnectionError(title, captured_error=err)
    # create metadata from response
    metadata_details = {
        'key': record_key,
        'version_id': '',
        'current_version': True,
        'content_type': '',
        'content_encoding': '',
        'metadata': {}
    }
    metadata_details = self.iam.ingest(record, metadata_details)
    # convert last_modified to epoch seconds
    # NOTE(review): fromtimestamp(0) is local time but is tagged tzutc();
    # on non-UTC hosts the offset may be skewed -- confirm intended behavior
    epoch_zero = datetime.fromtimestamp(0).replace(tzinfo=tzutc())
    metadata_details['last_modified'] = (metadata_details['last_modified'] - epoch_zero).total_seconds()
    if 'response_metadata' in metadata_details.keys():
        del metadata_details['response_metadata']
    # determine current version from version id
    if record_version and version_check:
        version_kwargs = {
            'Bucket': bucket_name,
            'Prefix': record_key
        }
        try:
            version_check = self.connection.list_object_versions(**version_kwargs)
            for version in version_check['Versions']:
                if version['VersionId'] == metadata_details['version_id']:
                    metadata_details['current_version'] = version['IsLatest']
                    break
        except:
            raise AWSConnectionError(title)
    return metadata_details
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_record(self, bucket_name, record_key, record_version=''):
    '''
        a method for deleting an object record in s3
    :param bucket_name: string with name of bucket
    :param record_key: string with key value of record
    :param record_version: [optional] string with aws id of version of record
    :return: dictionary with status of delete request
    '''
    title = '%s.delete_record' % self.__class__.__name__
    # validate inputs
    input_fields = {
        'bucket_name': bucket_name,
        'record_key': record_key,
        'record_version': record_version
    }
    for field_name, field_value in input_fields.items():
        if field_value:
            object_title = '%s(%s=%s)' % (title, field_name, str(field_value))
            self.fields.validate(field_value, '.%s' % field_name, object_title)
    # verify existence of bucket (refreshing the cached list on a miss)
    if not bucket_name in self.bucket_list:
        if not bucket_name in self.list_buckets():
            raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name))
    # assemble the request arguments
    delete_kwargs = {
        'Bucket': bucket_name,
        'Key': record_key
    }
    if record_version:
        delete_kwargs['VersionId'] = record_version
    # send request to delete record
    try:
        response = self.connection.delete_object(**delete_kwargs)
    except:
        raise AWSConnectionError(title)
    # summarize the response, stripping the raw aws response metadata
    response_details = self.iam.ingest(response, { 'version_id': '' })
    response_details.pop('response_metadata', None)
    return response_details
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def import_records(self, bucket_name, import_path='', overwrite=True):
    '''
        a method to import records from local files to a bucket
    :param bucket_name: string with name of bucket
    :param import_path: [optional] string with path to root directory of files
    :param overwrite: [optional] boolean to overwrite existing records matching files
    :return: True
    '''
    title = '%s.import_records' % self.__class__.__name__
    # validate inputs
    input_fields = {
        'bucket_name': bucket_name,
        'import_path': import_path
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    # validate path (default to the working directory)
    from os import path
    if not import_path:
        import_path = './'
    if not path.exists(import_path):
        raise ValueError('%s(import_path="%s") is not a valid path.' % (title, import_path))
    elif not path.isdir(import_path):
        raise ValueError('%s(import_path="%s") must be a directory.' % (title, import_path))
    # verify existence of bucket (refreshing the cached list on a miss)
    if not bucket_name in self.bucket_list:
        if not bucket_name in self.list_buckets():
            raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name))
    # create records from walk of local path
    self.iam.printer('Importing records from path "%s" to bucket "%s".' % (import_path, bucket_name), flush=True)
    from labpack.platforms.localhost import localhostClient
    localhost_client = localhostClient()
    import_path = path.abspath(import_path)
    for file_path in localhost_client.walk(import_path):
        relative_path = path.relpath(file_path, import_path)
        try:
            # BUG FIX: read through a context manager so the file handle is
            # closed promptly (the old bare open().read() leaked handles)
            with open(file_path, 'rb') as file_handle:
                byte_data = file_handle.read()
            self.create_record(bucket_name, relative_path, byte_data, overwrite=overwrite)
            self.iam.printer('.', flush=True)
        except ValueError as err:
            # records that already exist are skipped, not fatal
            if str(err).find('already contains') > -1:
                self.iam.printer('.\n%s already exists. Record skipped. Continuing.' % relative_path, flush=True)
            else:
                raise
        except:
            raise AWSConnectionError(title)
    # report completion and return true
    self.iam.printer(' done.')
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, record_key, record_data, overwrite=True, secret_key=''):
    '''
        a method to create a file in the collection folder on S3
    :param record_key: string with name to assign to record (see NOTES below)
    :param record_data: byte data for record body
    :param overwrite: [optional] boolean to overwrite records with same name
    :param secret_key: [optional] string with key to encrypt data
    :return: string with name of record
    NOTE:   record_key may only contain alphanumeric, /, _, . or -
            characters and may not begin with the . or / character.
    NOTE:   using one or more / characters splits the key into
            separate segments. these segments will appear as a
            sub directories inside the record collection and each
            segment is used as a separate index for that record
            when using the list method
            eg. lab/unittests/1473719695.2165067.json is indexed:
            [ 'lab', 'unittests', '1473719695.2165067', '.json' ]
    '''
    title = '%s.save' % self.__class__.__name__
    # validate inputs
    input_fields = {
        'record_key': record_key,
        'secret_key': secret_key
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    # validate byte data
    if not isinstance(record_data, bytes):
        raise ValueError('%s(record_data=b"...") must be byte data.' % title)
    # encrypt data
    if secret_key:
        from labpack.encryption import cryptolab
        record_data, secret_key = cryptolab.encrypt(record_data, secret_key)
    # define keyword arguments
    from time import time
    create_kwargs = {
        'bucket_name': self.bucket_name,
        'record_key': record_key,
        'record_data': record_data,
        'overwrite': overwrite,
        'record_metadata': { 'last_modified': str(time()) }
    }
    # add encryption metadata
    if secret_key:
        create_kwargs['record_metadata']['encryption'] = 'lab512'
    # add record mimetype and encoding
    import mimetypes
    guess_mimetype, guess_encoding = mimetypes.guess_type(record_key)
    if not guess_mimetype:
        # BUG FIX: str.find returns -1 (truthy) on a miss, so the old
        # `if record_key.find('.yaml')` tests matched nearly every key and
        # tagged unrecognized records as x-drep; test the real extension
        if record_key.endswith(('.yaml', '.yml')):
            guess_mimetype = 'application/x-yaml'
        elif record_key.endswith('.drep'):
            guess_mimetype = 'application/x-drep'
    if guess_mimetype:
        create_kwargs['record_mimetype'] = guess_mimetype
    if guess_encoding:
        create_kwargs['record_encoding'] = guess_encoding
    # create record
    self.s3.create_record(**create_kwargs)
    return record_key
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, record_key, secret_key=''):
    '''
        a method to retrieve byte data of an S3 record
    :param record_key: string with name of record
    :param secret_key: [optional] string used to decrypt data
    :return: byte data for record body
    '''
    title = '%s.load' % self.__class__.__name__
    # validate inputs
    for field_name, field_value in (('record_key', record_key), ('secret_key', secret_key)):
        if field_value:
            object_title = '%s(%s=%s)' % (title, field_name, str(field_value))
            self.fields.validate(field_value, '.%s' % field_name, object_title)
    # retrieve record data from s3
    record_data, record_metadata = self.s3.read_record(self.bucket_name, record_key)
    # enforce the secret key for records this client encrypted; records
    # with an unrecognized encryption method are returned undecrypted
    error_msg = '%s(secret_key="...") required to decrypt record "%s"' % (title, record_key)
    stored_metadata = record_metadata['metadata']
    if 'encryption' in stored_metadata:
        if stored_metadata['encryption'] == 'lab512':
            if not secret_key:
                raise Exception(error_msg)
        else:
            self.s3.iam.printer('[WARNING]: %s uses unrecognized encryption method. Decryption skipped.' % record_key)
            secret_key = ''
    # decrypt (if necessary)
    if secret_key:
        from labpack.encryption import cryptolab
        record_data = cryptolab.decrypt(record_data, secret_key)
    return record_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(self, prefix='', delimiter='', filter_function=None, max_results=1, previous_key=''):
    '''
        a method to list keys in the collection
    :param prefix: string with prefix value to filter results
    :param delimiter: string with value results must not contain (after prefix)
    :param filter_function: (positional arguments) function used to filter results
    :param max_results: integer with maximum number of results to return
    :param previous_key: string with key in collection to begin search after
    :return: list of key strings
    NOTE:   each key string can be divided into one or more segments
            based upon the / characters which occur in the key string as
            well as its file extension type. if the key string represents
            a file path, then each directory in the path, the file name
            and the file extension are all separate indexed values.
            eg. lab/unittests/1473719695.2165067.json is indexed:
            [ 'lab', 'unittests', '1473719695.2165067', '.json' ]
            it is possible to filter the records in the collection according
            to one or more of these path segments using a filter_function.
    NOTE:   the filter_function must be able to accept an array of positional
            arguments and return a value that can evaluate to true or false.
            while searching the records, list produces an array of strings
            which represent the directory structure in relative path of each
            key string. if a filter_function is provided, this list of strings
            is fed to the filter function. if the function evaluates this input
            and returns a true value the file will be included in the list
            results.
    '''
    title = '%s.list' % self.__class__.__name__
    # validate input
    input_fields = {
        'prefix': prefix,
        'delimiter': delimiter,
        'max_results': max_results,
        'record_key': previous_key
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    # construct default response
    results_list = []
    # handle filter function filter
    if filter_function:
        # validate filter function accepts positional path segments
        try:
            path_segments = [ 'lab', 'unittests', '1473719695.2165067', '.json' ]
            filter_function(*path_segments)
        except:
            err_msg = '%s(filter_function=%s)' % (title, filter_function.__class__.__name__)
            raise TypeError('%s must accept positional arguments.' % err_msg)
        # construct keyword arguments
        list_kwargs = {
            'bucket_name': self.bucket_name,
            'prefix': prefix,
            'delimiter': delimiter
        }
        # determine starting key (resume one record past previous_key)
        starting_key = '1'
        if previous_key:
            previous_kwargs = {}
            previous_kwargs.update(**list_kwargs)
            previous_kwargs['max_results'] = 1
            previous_kwargs['starting_key'] = previous_key
            # BUG FIX: query with previous_kwargs (was list_kwargs), otherwise
            # previous_key was silently ignored
            search_list, next_key = self.s3.list_records(**previous_kwargs)
            list_kwargs['starting_key'] = next_key
        # iterate filter over collection
        import os
        while starting_key:
            search_list, starting_key = self.s3.list_records(**list_kwargs)
            # BUG FIX: advance the page cursor, otherwise the same page
            # was requested forever on multi-page results
            if starting_key:
                list_kwargs['starting_key'] = starting_key
            for record in search_list:
                record_key = record['key']
                # NOTE(review): docstring promises '/' and extension splitting,
                # but the code splits on os.sep only -- confirm intent
                path_segments = record_key.split(os.sep)
                if filter_function(*path_segments):
                    results_list.append(record_key)
                if len(results_list) == max_results:
                    return results_list
    # handle other filters
    else:
        # construct keyword arguments
        list_kwargs = {
            'bucket_name': self.bucket_name,
            'prefix': prefix,
            'delimiter': delimiter,
            'max_results': max_results
        }
        # determine starting key (resume one record past previous_key)
        if previous_key:
            previous_kwargs = {}
            previous_kwargs.update(**list_kwargs)
            previous_kwargs['max_results'] = 1
            previous_kwargs['starting_key'] = previous_key
            # BUG FIX: query with previous_kwargs (was list_kwargs), otherwise
            # previous_key was silently ignored
            search_list, starting_key = self.s3.list_records(**previous_kwargs)
            list_kwargs['starting_key'] = starting_key
        # retrieve results
        search_list, starting_key = self.s3.list_records(**list_kwargs)
        # construct result list
        for record in search_list:
            results_list.append(record['key'])
    return results_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, record_key):
    ''' a method to delete a record from S3
    :param record_key: string with key of record
    :return: string reporting outcome
    '''
    title = '%s.delete' % self.__class__.__name__
    # validate inputs
    object_title = '%s(record_key=%s)' % (title, str(record_key))
    self.fields.validate(record_key, '.record_key', object_title)
    # delete record; a failure caused by a missing record is reported
    # as an outcome message rather than raised
    try:
        self.s3.delete_record(self.bucket_name, record_key)
    except:
        if not self.exists(record_key):
            return '%s does not exist.' % record_key
        raise
    return '%s has been deleted.' % record_key
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.