text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k β |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean_up(group, identifier, date):
    """Delete all of a group's local mbox, index, and state files.

    :type group: str
    :param group: group name
    :type identifier: str
    :param identifier: the identifier for the given group
    :type date: str
    :param date: date component of the mbox file names
    :rtype: bool
    :returns: True
    """
    # Both clean-up passes are identical except for the glob pattern,
    # so drive them from a single loop.
    patterns = (
        '{g}.{d}.mbox*'.format(g=group, d=date),
        '{id}_state.json'.format(id=identifier),
    )
    for pattern in patterns:
        for path in glob(pattern):
            try:
                os.remove(path)
            except OSError:
                # Best effort: skip files that vanished or are locked.
                continue
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def utf8_encode_str(string, encoding='UTF-8'):
    """Attempt to detect the native encoding of `string`, and re-encode
    to utf-8.

    :type string: str
    :param string: The string to be encoded.
    :type encoding: str
    :param encoding: Target encoding (default UTF-8).
    :rtype: str
    :returns: A utf-8 encoded string ('' for empty/None input).
    """
    if not string:
        return ''
    src_enc = chardet.detect(string)['encoding']
    try:
        return string.decode(src_enc).encode(encoding)
    except (UnicodeError, LookupError, TypeError):
        # FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit. TypeError covers chardet
        # returning None for the encoding; LookupError covers unknown
        # codec names; UnicodeError covers decode failures.
        return string.decode('ascii', errors='replace').encode(encoding)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inline_compress_chunk(chunk, level=1):
    """Compress a string using gzip.

    :type chunk: str
    :param chunk: The string to be compressed.
    :type level: int
    :param level: gzip compression level (1 = fastest).
    :rtype: str
    :returns: `chunk` compressed.
    """
    buf = cStringIO.StringIO()
    writer = gzip.GzipFile(fileobj=buf, mode='wb', compresslevel=level)
    writer.write(chunk)
    # GzipFile must be closed before reading: close() flushes and
    # writes the gzip trailer into the underlying buffer.
    writer.close()
    compressed = buf.getvalue()
    buf.close()
    return compressed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_list_of_git_directories():
    """Return a sorted list of paths of git repos under the current directory."""
    repos = []
    for dirpath, _dirnames, _filenames in os.walk('.'):
        # A repo is any directory that directly contains a '.git' dir;
        # strip the trailing '/.git' to get the repo path itself.
        if dirpath.endswith('.git'):
            repos.append('/'.join(dirpath.split('/')[:-1]))
    return sorted(repos)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_git_concurrently(base_dir):
    """Run 'git status' and 'git pull' for every git repo under `base_dir`
    in parallel worker threads and report the results in a pretty table.

    :param base_dir: directory to scan (recursively) for git repos.
    """
    os.chdir(base_dir)
    git_dirs = get_list_of_git_directories()
    print("Processing %d git repos: %s" % (len(git_dirs), ', '.join(git_dirs)))
    widgets = [Percentage(),
               ' ', Bar(),
               ' ', Counter(),
               ' ', AdaptiveETA()]
    pbar = ProgressBar(widgets=widgets, maxval=len(git_dirs))
    pbar.start()
    # One GitPuller worker thread per repository, keyed by repo path.
    threads = {git_dir:GitPuller(git_dir) for git_dir in git_dirs}
    for thread in threads.values():
        thread.start()
    # Poll until every worker finishes; the progress bar shows the
    # number of completed (no longer alive) threads.
    while True:
        pbar.update(len([t for t in threads.values() if not t.is_alive()]))
        if all([not t.is_alive() for t in threads.values()]):
            break
        time.sleep(0.2)
    table = PrettyTable(["repo", "local", "pull"])
    table.align["repo"] = "l"
    table.align["local"] = "l"
    table.align["pull"] = "l"
    for git_dir in sorted(threads):
        thread = threads[git_dir]
        # Column 2: local working-tree status, as reported by the worker.
        if thread.local_ok:
            if thread.has_uncommitted_changes:
                local_changes_text = colored(
                    'Uncommitted changes', 'green', attrs=['bold'])
            else:
                local_changes_text = colored('OK', 'green')
        else:
            local_changes_text = colored('Problem', 'red')
        # Column 3: 'git pull' outcome.
        if thread.git_pull_ok:
            if thread.is_up_to_date:
                pull_text = colored('OK', 'green')
            else:
                pull_text = colored('Changed', 'green', attrs=['bold'])
        else:
            pull_text = colored('Problem', 'red')
        table.add_row([git_dir, local_changes_text, pull_text])
    print(table)
    # Dump the raw pull output for any repo whose pull failed.
    # NOTE(review): Python 2 print statements below — this module is py2.
    for git_dir in sorted(threads):
        if not threads[git_dir].git_pull_ok:
            thread = threads[git_dir]
            print colored('%s: ' % git_dir, 'red')
            print thread.git_pull_output
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_search(request, response, visid_to_dbid, config,
              search_engines, filters, cid, engine_name):
    '''Search feature collections.

    The route for this endpoint is:
    ``/dossier/v1/<content_id>/search/<search_engine_name>``.

    ``content_id`` can be any *profile* content identifier (it must
    start with ``p|``). ``engine_name`` selects the search strategy;
    available engines can be listed via the ``v1_search_engines``
    endpoint.

    Returns a JSON object with a single key, ``results``: a list of
    objects each carrying ``content_id`` and ``fc`` (a JSON serialized
    feature collection).

    Query parameters: **limit** caps the number of results; **filter**
    sets the filtering function (default ``already_labeled``).
    '''
    db_cid = visid_to_dbid(cid)
    try:
        search_engine = search_engines[engine_name]
    except KeyError as e:
        # NOTE(review): `e.message` is Python 2 only — fine here, this
        # module is py2 throughout.
        bottle.abort(404, 'Search engine "%s" does not exist.' % e.message)
    # GET carries parameters in the query string; other methods in the body.
    query = request.query if request.method == 'GET' else request.forms
    search_engine = (config.create(search_engine)
                     .set_query_id(db_cid)
                     .set_query_params(query))
    # FIX: renamed the loop variables so they no longer shadow the
    # `filter` builtin.
    for filter_name, filter_obj in filters.items():
        search_engine.add_filter(filter_name, config.create(filter_obj))
    return search_engine.respond(response)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_fc_get(visid_to_dbid, store, cid):
    '''Retrieve a single feature collection.

    Route: ``GET /dossier/v1/feature-collections/<content_id>``.
    Returns a JSON serialization of the feature collection identified
    by ``content_id``; aborts with ``404`` when it does not exist.
    '''
    feature_collection = store.get(visid_to_dbid(cid))
    if feature_collection is None:
        bottle.abort(404, 'Feature collection "%s" does not exist.' % cid)
    return util.fc_to_json(feature_collection)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_random_fc_get(response, dbid_to_visid, store):
    '''Retrieve a random feature collection from the database.

    Route: ``GET /dossier/v1/random/feature-collection``.
    Returns ``[content_id, feature_collection]`` for one randomly
    sampled item (not necessarily uniformly random), or aborts with
    ``404`` when the store is empty.
    '''
    # A full `store.scan()` would be obscenely slow here; stream-sample
    # a single id from the id scan instead.
    sampled = streaming_sample(store.scan_ids(), 1, 1000)
    if not sampled:
        bottle.abort(404, 'The feature collection store is empty.')
    db_id = sampled[0]
    return [dbid_to_visid(db_id), util.fc_to_json(store.get(db_id))]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_label_put(request, response, visid_to_dbid, config, label_hooks,
                 label_store, cid1, cid2, annotator_id):
    '''Store a single label.

    Route:
    ``PUT /dossier/v1/labels/<content_id1>/<content_id2>/<annotator_id>``.
    The request body holds the coreference value: ``-1`` (not
    coreferent), ``0`` (unknown) or ``1`` (coreferent). Optional query
    parameters ``subtopic_id1``/``subtopic_id2`` scope the label to
    subtopics of the respective content ids. Responds with ``201`` on
    success; existing labels with the same ids are overwritten.
    '''
    # NOTE(review): `label_hooks` is accepted but unused here — confirm
    # whether hooks are applied elsewhere.
    value = CorefValue(int(request.body.read()))
    label = Label(visid_to_dbid(cid1),
                  visid_to_dbid(cid2),
                  annotator_id,
                  value,
                  subtopic_id1=request.query.get('subtopic_id1'),
                  subtopic_id2=request.query.get('subtopic_id2'))
    label_store.put(label)
    response.status = 201
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_label_direct(request, response, visid_to_dbid, dbid_to_visid,
                    label_store, cid, subid=None):
    '''Return directly connected labels.

    Routes: ``/dossier/v1/label/<cid>/direct`` and
    ``/dossier/v1/label/<cid>/subtopic/<subid>/direct``.
    Returns a paginated JSON list of label dictionaries (keys:
    ``content_id1``, ``content_id2``, ``subtopic_id1``,
    ``subtopic_id2``, ``annotator_id``, ``epoch_ticks``, ``value``)
    for all labels directly connected to ``cid`` (or ``(cid, subid)``
    when a subtopic id is given).
    '''
    ident = make_ident(visid_to_dbid(cid), subid)
    connected = label_store.directly_connected(ident)
    as_json = imap(partial(label_to_json, dbid_to_visid), connected)
    return list(paginate(request, response, as_json))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_label_negative_inference(request, response,
                                visid_to_dbid, dbid_to_visid,
                                label_store, cid):
    '''Return inferred negative labels.

    Route: ``/dossier/v1/label/<cid>/negative-inference``.
    Negative labels are inferred by traversing the connected components
    of ``cid`` and each content id joined to it by a negative label.
    Returns a paginated JSON list of label dictionaries.
    '''
    # Subtopics are not supported by negative inference yet.
    inferred = label_store.negative_inference(visid_to_dbid(cid))
    as_json = imap(partial(label_to_json, dbid_to_visid), inferred)
    return list(paginate(request, response, as_json))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_folder_list(request, kvlclient):
    '''Retrieve the list of folders for the current user.

    Route: ``GET /dossier/v1/folder``. (Temporarily, the "current user"
    can be set via the ``annotator_id`` query parameter.)
    Returns a sorted list of folder identifiers.
    '''
    entries = new_folders(kvlclient, request).list('/')
    folder_names = (entry.name for entry in entries if entry.is_folder())
    return sorted(folder_names)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_folder_add(request, response, kvlclient, fid):
    '''Add a folder belonging to the current user.

    Route: ``PUT /dossier/v1/folder/<fid>``. Responds with ``201`` when
    the folder was added. (Temporarily, the "current user" can be set
    via the ``annotator_id`` query parameter.)
    '''
    folder_id = urllib.unquote(fid)
    new_folders(kvlclient, request).put_folder(folder_id)
    response.status = 201
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_subfolder_list(request, response, kvlclient, fid):
    '''Retrieve the list of subfolders in a folder for the current user.

    Route: ``GET /dossier/v1/folder/<fid>/subfolder``.
    Returns a sorted list of subfolder identifiers, or an empty list
    with status ``404`` when the folder does not exist.
    '''
    folder_id = urllib.unquote(fid)
    try:
        # Keep the listing *and* its consumption inside the try:
        # a missing folder surfaces as KeyError.
        entries = new_folders(kvlclient, request).list(folder_id)
        return sorted(entry.name for entry in entries if entry.is_folder())
    except KeyError:
        response.status = 404
        return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_subfolder_add(request, response, kvlclient,
                     fid, sfid, cid, subid=None):
    '''Add a subtopic to a subfolder for the current user.

    Route:
    ``PUT /dossier/v1/folder/<fid>/subfolder/<sfid>/<cid>/<subid>``.
    ``fid``/``sfid`` are the folder/subfolder identifiers;
    ``cid``/``subid`` are the content id and (optional) subtopic id of
    the item being filed. A missing subfolder is created automatically
    (an empty subfolder cannot exist). Responds with ``201`` on success.
    '''
    if subid is not None:
        # '@' is the separator between cid and subid in stored names.
        assert '@' not in subid
    item_name = cid if subid is None else cid + '@' + subid
    path = '/'.join([urllib.unquote(fid), urllib.unquote(sfid), item_name])
    new_folders(kvlclient, request).put(path)
    response.status = 201
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_subtopic_list(request, response, kvlclient, fid, sfid):
    '''Retrieve the list of items in a subfolder.

    Route: ``GET /dossier/v1/folder/<fid>/subfolder/<sfid>``.
    Returns a list of ``[content_id, subtopic_id]`` pairs
    (``subtopic_id`` is ``None`` for items stored without one), or an
    empty list with status ``404`` when the subfolder does not exist.
    '''
    path = '/'.join([urllib.unquote(fid), urllib.unquote(sfid)])
    try:
        results = []
        # Iterate inside the try: a missing subfolder raises KeyError.
        for entry in new_folders(kvlclient, request).list(path):
            if '@' in entry.name:
                results.append(entry.name.split('@'))
            else:
                results.append((entry.name, None))
        return results
    except KeyError:
        response.status = 404
        return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_folder_delete(request, response, kvlclient,
                     fid, sfid=None, cid=None, subid=None):
    '''Delete a folder, subfolder or item.

    Routes:
    * ``DELETE /dossier/v1/folder/<fid>``
    * ``DELETE /dossier/v1/folder/<fid>/subfolder/<sfid>``
    * ``DELETE /dossier/v1/folder/<fid>/subfolder/<sfid>/<cid>``
    * ``DELETE /dossier/v1/folder/<fid>/subfolder/<sfid>/<cid>/<subid>``

    Responds with ``204`` on success.
    '''
    target = make_path(fid, sfid, cid, subid)
    new_folders(kvlclient, request).delete(target)
    response.status = 204
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_folder_rename(request, response, kvlclient,
                     fid_src, fid_dest, sfid_src=None, sfid_dest=None):
    '''Rename a folder or a subfolder.

    Routes:
    * ``POST /dossier/v1/<fid_src>/rename/<fid_dest>``
    * ``POST /dossier/v1/<fid_src>/subfolder/<sfid_src>/rename/
      <fid_dest>/subfolder/<sfid_dest>``
    '''
    source = make_path(fid_src, sfid_src)
    destination = make_path(fid_dest, sfid_dest)
    new_folders(kvlclient, request).move(source, destination)
    response.status = 200
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_query_param(url, param, value):
    '''Return a new URL with the query parameter ``param`` set to ``value``.

    ``value`` may be a list (each element becomes a repeated parameter).
    '''
    parts = urlparse.urlsplit(url)
    query = urlparse.parse_qs(parts.query)
    query[param] = value
    # doseq=True expands list values into repeated key=value pairs.
    encoded = urllib.urlencode(query, doseq=True)
    return urlparse.urlunsplit(
        (parts.scheme, parts.netloc, parts.path, encoded, parts.fragment))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_status(self):
    """Utility method to get the status of a slicing job resource.

    Also used to initialize slice objects by location. Terminal states
    ("processed", "error") are returned from the cache without a network
    round trip; otherwise the resource is re-fetched and `_state` and
    `slice_time` are refreshed.

    :returns: the current state string.
    """
    if self._state in ["processed", "error"]:
        # Terminal states never change; skip the HTTP request.
        return self._state
    get_resp = requests.get(self.location, cookies={"session": self.session})
    # FIX: parse the JSON payload once instead of re-parsing it for
    # each field (the original called get_resp.json() twice).
    payload = get_resp.json()
    self._state = payload["status"]
    self.slice_time = payload["slice_time"]
    return self._state
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _to_json(self):
    """Return a dict of this object's attributes, minus the
    non-serializable 'server' reference, for dumping to the client."""
    return {key: val
            for key, val in self.__dict__.iteritems()
            if key != 'server'}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def change_name(self, username):
    """Change this client's username to `username`.

    On failure the previous name is re-registered and the exception is
    re-raised.

    :raises UsernameInUseException: if `username` is already taken.
    """
    self.release_name()
    try:
        self.server.register_name(username)
    except UsernameInUseException:
        # FIX: logging.log() requires a level as its first positional
        # argument; the original passed the message string as the level,
        # which raises on this error path. Log at ERROR instead.
        logging.error(', '.join(self.server.registered_names))
        self.server.register_name(self.name)
        raise
    self.name = username
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_name(self, username):
    """Register `username`.

    :raises UsernameInUseException: if the name is already registered.
    """
    if not self.is_username_used(username):
        self.registered_names.append(username)
        return
    raise UsernameInUseException(
        'Username {username} already in use!'.format(username=username))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def release_name(self, username):
    """Release `username` and remember it in the temp list."""
    self.temp_names.append(username)
    if not self.is_username_used(username):
        return
    self.registered_names.remove(username)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _do_exit(self, cmd, args):
"""\ Exit shell. exit | C-D Exit to the parent shell. exit root | end Exit to the root shell. exit all Exit to the command line. """ |
if cmd == 'end':
if not args:
return 'root'
else:
self.stderr.write(textwrap.dedent('''\
end: unrecognized arguments: {}
''')).format(args)
# Hereafter, cmd == 'exit'.
if not args:
return True
if len(args) > 1:
self.stderr.write(textwrap.dedent('''\
exit: too many arguments: {}
''')).format(args)
exit_directive = args[0]
if exit_directive == 'root':
return 'root'
if exit_directive == 'all':
return 'all'
self.stderr.write(textwrap.dedent('''\
exit: unrecognized arguments: {}
''')).format(args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _complete_exit(self, cmd, args, text):
"""Find candidates for the 'exit' command.""" |
if args:
return
return [ x for x in { 'root', 'all', } \
if x.startswith(text) ] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _do_history(self, cmd, args):
    """\
    Display history.
        history             Display history.
        history clear       Clear history.
        history clearall    Clear history for all shells.
    """
    if args and args[0] == 'clear':
        # Wipe the in-memory history and persist the now-empty file.
        readline.clear_history()
        readline.write_history_file(self.history_fname)
    elif args and args[0] == 'clearall':
        # Remove every shell's history directory and recreate it empty.
        readline.clear_history()
        shutil.rmtree(self._temp_dir, ignore_errors = True)
        os.makedirs(os.path.join(self._temp_dir, 'history'))
    else:
        # Flush in-memory history to disk first so the dump is current.
        readline.write_history_file(self.history_fname)
        with open(self.history_fname, 'r', encoding = 'utf8') as f:
            self.stdout.write(f.read())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _complete_history(self, cmd, args, text):
"""Find candidates for the 'history' command.""" |
if args:
return
return [ x for x in { 'clear', 'clearall' } \
if x.startswith(text) ] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __dump_stack(self):
    """Dump the shell stack in a human friendly way.

    Writes the root prompt on line 0, then one indented tree line per
    nested mode showing its prompt, command and arguments.
    """
    maxdepth = len(self._mode_stack)
    maxdepth_strlen = len(str(maxdepth))
    # Index column width: the depth's digit count rounded up to a
    # multiple of 4, plus 4 more for padding.
    index_width = 4 - (-maxdepth_strlen) % 4 + 4
    index_str = lambda i: '{:<{}d}'.format(i, index_width)
    self.stdout.write(index_str(0) + self.root_prompt)
    self.stdout.write('\n')
    # NOTE(review): 'βββ ' looks like a mis-encoded box-drawing prefix
    # (probably intended as a tree branch glyph) — confirm the intended
    # characters before changing it; kept byte-identical here.
    tree_prefix = 'βββ '
    for i in range(maxdepth):
        index_prefix = index_str(i + 1)
        # One tree-prefix width of indent per nesting level.
        whitespace_prefix = ' ' * len(tree_prefix) * i
        mode = self._mode_stack[i]
        line = index_prefix + whitespace_prefix + \
                tree_prefix + mode.prompt + \
                ': {}@{}'.format(mode.cmd, mode.args)
        self.stdout.write(line)
        self.stdout.write('\n')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _do_help(self, cmd, args):
    """Display doc strings of the shell and its commands."""
    print(self.doc_string())
    print()
    # Build (command names, doc string) rows for the table.
    data_unsorted = []
    cls = self.__class__
    for name in dir(cls):
        obj = getattr(cls, name)
        if iscommand(obj):
            # FIX: the inner loop variable no longer shadows the `cmd`
            # parameter; the manual accumulate-then-sort loop is folded
            # into a single sorted()/join().
            cmd_str = ','.join(sorted(getcommands(obj)))
            doc_str = textwrap.dedent(obj.__doc__).strip() if obj.__doc__ else \
                    '(no doc string available)'
            data_unsorted.append([cmd_str, doc_str])
    data_sorted = sorted(data_unsorted, key = lambda x: x[0])
    data = [['COMMANDS', 'DOC STRING']] + data_sorted
    # Render the commands table.
    table_banner = 'List of Available Commands'
    table = terminaltables.SingleTable(data, table_banner)
    table.inner_row_border = True
    table.inner_heading_row_border = True
    print(table.table)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hazeDriver():
    """Process the command line arguments and run the appropriate haze
    subcommand.

    Git-style handoff: for `haze aws foo bar`, if the executable
    `haze-aws-foo` exists it is called with the argument `bar`.
    Arguments are passed through untouched; subcommands are responsible
    for their own argument parsing.
    """
    try:
        (command, args) = findSubCommand(sys.argv)
        # If we can't construct a subcommand from sys.argv, it'll still be able
        # to find this haze driver script, and re-running ourself isn't useful.
        if os.path.basename(command) == "haze":
            print "Could not find a subcommand for %s" % " ".join(sys.argv)
            sys.exit(1)
    except StandardError:
        print "Could not find a subcommand for %s" % " ".join(sys.argv)
        sys.exit(1)
    # Hand off; check_call raises CalledProcessError on non-zero exit.
    check_call([command] + args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def quoted_split(string, sep, quotes='"'):
    """Split `string` on `sep`, respecting quoted sections.

    Separators inside a quoted section are not split on; backslash
    escapes the next character inside quotes. Returns an iterator over
    the (unaltered) pieces, quotes included.

    :param string: The string to split.
    :param sep: The character separating sections of the string.
    :param quotes: A string specifying all legal quote characters.
    :returns: An iterator over the separated elements.
    """
    segment_start = None   # index where the current piece began
    in_escape = False      # previous char was a backslash inside quotes
    active_quote = False   # the quote char we are inside, or False
    for idx, ch in enumerate(string):
        # Remember where this piece starts.
        if segment_start is None:
            segment_start = idx
        if in_escape:
            # The escaped character is consumed verbatim.
            in_escape = False
        elif active_quote:
            if ch == '\\':
                in_escape = True
            elif ch == active_quote:
                active_quote = False
        elif ch == sep:
            yield string[segment_start:idx]
            segment_start = None
        elif ch in quotes:
            active_quote = ch
    # Emit the trailing piece, if any.
    if segment_start is not None:
        yield string[segment_start:]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_ctype(ctype):
    """Parse a content type with parameters.

    :param ctype: The content type header value, with parameters.
    :returns: ``(content_type, params)`` where ``params`` maps parameter
        names to values and also stores the content type itself under
        the ``'_'`` key.
    """
    parsed_type = None
    params = {}
    for piece in quoted_split(ctype, ';'):
        # The first piece is the content type itself.
        if parsed_type is None:
            parsed_type = piece
            params['_'] = piece
            continue
        # Remaining pieces are 'key' or 'key="value"'.
        eq_idx = piece.find('=')
        if eq_idx > 0 and piece.find('"', 0, eq_idx) < 0:
            params[piece[:eq_idx]] = unquote(piece[eq_idx + 1:])
        else:
            # An '=' preceded by '"' is seriously malformed; be liberal
            # and treat the whole piece as a bare flag.
            params[piece] = True
    # No content type parsed at all: return an empty one.
    if parsed_type is None:
        parsed_type = ''
    return parsed_type, params
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _match_mask(mask, ctype):
""" Determine if a content type mask matches a given content type. :param mask: The content type mask, taken from the Accept header. :param ctype: The content type to match to the mask. """ |
# Handle the simple cases first
if '*' not in mask:
return ctype == mask
elif mask == '*/*':
return True
elif not mask.endswith('/*'):
return False
mask_major = mask[:-2]
ctype_major = ctype.split('/', 1)[0]
return ctype_major == mask_major |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def best_match(requested, allowed):
    """ Determine the best content type to use for the request.

    :param requested: The value of an Accept-style header: a
        comma-separated list of content type masks, each optionally
        carrying a 'q' quality parameter.
    :param allowed: A list of the available content types.

    :returns: A tuple of the best match content type and the parameters
              for that content type ('' and {} when nothing matches).
    """
    requested = [parse_ctype(ctype) for ctype in quoted_split(requested, ',')]
    best_q = -1
    best_ctype = ''
    best_params = {}
    # NOTE: this local deliberately tracks the best *mask* seen so far;
    # it shadows the function name, so no recursion is possible here.
    best_match = '*/*'
    # Walk the list of content types
    for ctype in allowed:
        # Compare to the accept list
        for ctype_mask, params in requested:
            try:
                q = float(params.get('q', 1.0))
            except ValueError:
                # Bad quality value
                continue
            if q < best_q:
                # Not any better
                continue
            elif best_q == q:
                # Tie on quality: only accept a strictly more specific
                # mask (fewer '*' wildcards) than the current best.
                if best_match.count('*') <= ctype_mask.count('*'):
                    continue
            # OK, see if we have a match
            if _match_mask(ctype_mask, ctype):
                best_q = q
                best_ctype = ctype
                best_params = params
                best_match = ctype_mask
    # Return the best match
    return best_ctype, best_params
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_key(log_prefix, result_dict, key, value, desc="parameter"):
    """ Helper to set a key value in a dictionary.

    Warns (but allows the overwrite) if the key has already been set.
    Warns and returns without setting if the value is not surrounded by
    matching quote characters. Used to eliminate duplicated code from
    the rule parsers.

    :param log_prefix: A prefix for log messages (the configuration key).
    :param result_dict: The dictionary to insert the key/value into.
    :param key: The dictionary key to insert.
    :param value: The value to insert; must be quoted, e.g. '"text"'.
    :param desc: Description of the dictionary's entries, used in log
                 messages (default "parameter"; _parse_type_rule also
                 uses "token type").
    """
    if key in result_dict:
        LOG.warn("%s: Duplicate value for %s %r" %
                 (log_prefix, desc, key))
        # Allow the overwrite
    # Demand the value be quoted: a matching '"' or "'" on both ends.
    # Note len(value) <= 2 also rejects an empty quoted value ('""').
    if len(value) <= 2 or value[0] not in ('"', "'") or value[0] != value[-1]:
        LOG.warn("%s: Invalid value %r for %s %r" %
                 (log_prefix, value, desc, key))
        return
    # Save the value with its surrounding quotes stripped
    result_dict[key] = value[1:-1]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_version_rule(loader, version, verspec):
    """ Parse a version rule.

    The first token of `verspec` names the application implementing that
    API version; the remaining tokens are key="quoted value" parameter
    pairs (ignored by AVersion, but available to the application).

    :param loader: An object with a get_app() method, used to load the
                   actual applications.
    :param version: The version name.
    :param verspec: The version rule text, described above.

    :returns: A dictionary with keys "app" (the application), "name"
              (the version identification string) and "params"
              (a dictionary of parameters).
    :raises ImportError: If no application token was present.
    """
    result = dict(name=version, params={})
    for token in quoted_split(verspec, ' ', quotes='"\''):
        if not token:
            continue
        # Convert the application: the first non-empty token.
        if 'app' not in result:
            result['app'] = loader.get_app(token)
            continue
        # What remains is key="quoted value" pairs...
        key, _eq, value = token.partition('=')
        # Set the parameter key
        _set_key('version.%s' % version, result['params'], key, value)
    # Make sure we have an application
    if 'app' not in result:
        raise ImportError("Cannot load application for version %r" % version)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_alias_rule(alias, alias_spec):
    """ Parse an alias rule.

    The first token of `alias_spec` is the canonical version name; the
    remaining tokens are key="quoted value" parameter pairs (ignored by
    AVersion, but available to the application).

    :param alias: The alias name.
    :param alias_spec: The alias rule text, described above.

    :returns: A dictionary with keys "alias" (the alias name),
              "version" (the canonical version string) and "params"
              (a dictionary of parameters).
    :raises KeyError: If no canonical version token was present.
    """
    parsed = dict(alias=alias, params={})
    for token in quoted_split(alias_spec, ' ', quotes='"\''):
        if not token:
            continue
        if 'version' not in parsed:
            # First non-empty token is the canonical version name.
            parsed['version'] = token
        else:
            # Remaining tokens are key="quoted value" pairs.
            key, _eq, value = token.partition('=')
            _set_key('alias.%s' % alias, parsed['params'], key, value)
    if 'version' not in parsed:
        raise KeyError("Cannot determine canonical version for alias %r" %
                       alias)
    return parsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_ctype(self, ctype, orig_ctype=None):
    """Record the selected content type.

    Has no effect if a content type has already been determined.

    :param ctype: The content type string to set.
    :param orig_ctype: The original content type, as found in the
                       configuration.
    """

    if self.ctype is not None:
        # Already determined; never override
        return
    self.ctype = ctype
    self.orig_ctype = orig_ctype
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process(self, request, result=None):
""" Process the rules for the request. :param request: The Request object provided by WebOb. :param result: The Result object to store the results in. If None, one will be allocated. :returns: A Result object, containing the selected version and content type. """ |
# Allocate a result and process all the rules
result = result if result is not None else Result()
self._proc_uri(request, result)
self._proc_ctype_header(request, result)
self._proc_accept_header(request, result)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _proc_uri(self, request, result):
""" Process the URI rules for the request. Both the desired API version and desired content type can be determined from those rules. :param request: The Request object provided by WebOb. :param result: The Result object to store the results in. """ |
if result:
# Result has already been fully determined
return
# First, determine the version based on the URI prefix
for prefix, version in self.uris:
if (request.path_info == prefix or
request.path_info.startswith(prefix + '/')):
result.set_version(version)
# Update the request particulars
request.script_name += prefix
request.path_info = request.path_info[len(prefix):]
if not request.path_info:
request.path_info = '/'
break
# Next, determine the content type based on the URI suffix
for format, ctype in self.formats.items():
if request.path_info.endswith(format):
result.set_ctype(ctype)
# Update the request particulars
request.path_info = request.path_info[:-len(format)]
break |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _proc_ctype_header(self, request, result):
""" Process the Content-Type header rules for the request. Only the desired API version can be determined from those rules. :param request: The Request object provided by WebOb. :param result: The Result object to store the results in. """ |
if result:
# Result has already been fully determined
return
try:
ctype = request.headers['content-type']
except KeyError:
# No content-type header to examine
return
# Parse the content type
ctype, params = parse_ctype(ctype)
# Is it a recognized content type?
if ctype not in self.types:
return
# Get the mapped ctype and version
mapped_ctype, mapped_version = self.types[ctype](params)
# Update the content type header and set the version
if mapped_ctype:
request.environ['aversion.request_type'] = mapped_ctype
request.environ['aversion.orig_request_type'] = ctype
request.environ['aversion.content_type'] = \
request.headers['content-type']
if self.overwrite_headers:
request.headers['content-type'] = mapped_ctype
if mapped_version:
result.set_version(mapped_version) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _proc_accept_header(self, request, result):
""" Process the Accept header rules for the request. Both the desired API version and content type can be determined from those rules. :param request: The Request object provided by WebOb. :param result: The Result object to store the results in. """ |
if result:
# Result has already been fully determined
return
try:
accept = request.headers['accept']
except KeyError:
# No Accept header to examine
return
# Obtain the best-match content type and its parameters
ctype, params = best_match(accept, self.types.keys())
# Is it a recognized content type?
if ctype not in self.types:
return
# Get the mapped ctype and version
mapped_ctype, mapped_version = self.types[ctype](params)
# Set the content type and version
if mapped_ctype:
result.set_ctype(mapped_ctype, ctype)
if mapped_version:
result.set_version(mapped_version) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_available_hashes():
    """ Returns a tuple of the available hashes """
    version = sys.version_info
    if version >= (3, 2):
        # Modern API: set of algorithm names usable with hashlib.new()
        return hashlib.algorithms_available
    if (2, 7) <= version < (3, 0):
        return hashlib.algorithms
    # Very old interpreters: fall back to the guaranteed algorithms
    return 'md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_defaults_for(session, user, only_for=None, detail_values=None):
    """ Create a sizable amount of defaults for a new user.

    For each messaging context (email/irc/sse) this builds a Preference
    with three stock filters: "packages I own", "events referring to my
    username", and "critical taskotron tasks on my packages".

    :param session: database session used for all model creation calls.
    :param user: the new user object; must have an openid ending in
        ``.fedoraproject.org`` or nothing is created.
    :param only_for: optional context object; when given, defaults are
        created only for that context instead of all three.
    :param detail_values: optional dict mapping context name to the
        delivery detail value (e.g. an email address) for the Preference.
    """
    detail_values = detail_values or {}
    if not user.openid.endswith('.fedoraproject.org'):
        # Defaults below are built around FAS nicks, which we can only
        # derive from fedoraproject.org openids.
        log.warn("New user not from fedoraproject.org. No defaults set.")
        return

    # the openid is of the form USERNAME.id.fedoraproject.org
    nick = user.openid.split('.')[0]

    # TODO -- make the root here configurable.
    valid_paths = fmn.lib.load_rules(root='fmn.rules')

    def rule_maker(path, **kw):
        """ Shorthand function, used inside loops below. """
        # NOTE(review): rule_maker appears to be unused below -- confirm.
        return fmn.lib.models.Rule.create_from_code_path(
            session, valid_paths, path, **kw)

    def contexts():
        # Yield the Context objects defaults should be created for.
        names = ['email', 'irc', 'sse']
        if only_for:
            names = [only_for.name]

        for name in names:
            context = fmn.lib.models.Context.get(session, name)
            if context:
                yield context
            else:
                log.warn("No such context %r is in the DB." % name)

    # For each context, build one little and two big filters
    for context in contexts():
        # Reuse an existing Preference for this (user, context) if present
        pref = fmn.lib.models.Preference.load(session, user, context)
        if not pref:
            value = detail_values.get(context.name)
            pref = fmn.lib.models.Preference.create(
                session, user, context, detail_value=value)

        # Add a filter that looks for packages of this user
        filt = fmn.lib.models.Filter.create(
            session, "Events on packages that I own")
        filt.add_rule(session, valid_paths,
                      "fmn.rules:user_package_filter", fasnick=nick)

        # If this is a message about a package of mine, **and** i'm responsible
        # for it, then don't trigger this filter.  Rely on the previous one.
        filt.add_rule(session, valid_paths,
                      "fmn.rules:user_filter",
                      fasnick=nick,
                      negated=True)

        # Right off the bat, ignore all messages from non-primary kojis.
        filt.add_rule(session, valid_paths,
                      "fmn.rules:koji_instance",
                      instance="ppc,s390,arm",
                      negated=True)

        # And furthermore, exclude lots of message types
        for code_path in exclusion_packages + exclusion_mutual:
            filt.add_rule(
                session, valid_paths, "fmn.rules:%s" % code_path, negated=True)

        pref.add_filter(session, filt, notify=True)
        # END "packages I own"

        # Add a filter that looks for this user
        filt = fmn.lib.models.Filter.create(
            session, "Events referring to my username")
        filt.add_rule(session, valid_paths,
                      "fmn.rules:user_filter", fasnick=nick)

        # Right off the bat, ignore all messages from non-primary kojis.
        filt.add_rule(session, valid_paths,
                      "fmn.rules:koji_instance",
                      instance="ppc,s390,arm",
                      negated=True)

        # And furthermore exclude lots of message types
        for code_path in exclusion_username + exclusion_mutual:
            filt.add_rule(
                session, valid_paths, "fmn.rules:%s" % code_path, negated=True)

        pref.add_filter(session, filt, notify=True)
        # END "events references my username"

        # Add a taskotron filter
        filt = fmn.lib.models.Filter.create(
            session, "Critical taskotron tasks on my packages")

        filt.add_rule(session, valid_paths,
                      "fmn.rules:user_package_filter",
                      fasnick=nick)

        filt.add_rule(session, valid_paths,
                      "fmn.rules:taskotron_release_critical_task")

        filt.add_rule(session, valid_paths,
                      "fmn.rules:taskotron_task_particular_or_changed_outcome",
                      outcome='FAILED')

        pref.add_filter(session, filt, notify=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_queryset(self):
    """Use the same ordering as TreeManager."""
    # DROP_WITH_DJANGO15: before Django 1.6 the method was get_query_set
    opts = self.model._mptt_meta
    ordering = (opts.tree_id_attr, opts.left_attr)
    parent = super(SectionManager, self)
    if django.VERSION < (1, 6):
        base = parent.get_query_set()
    else:
        base = parent.get_queryset()
    return base.order_by(*ordering)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def item_related_name(self):
    """The ManyToMany field on the item class pointing to this class.

    If there is more than one such field, this value will be None.
    """
    if not hasattr(self, '_item_related_name'):
        rels = get_section_many_to_many_relations(self.__class__)
        # Only meaningful when exactly one relation exists
        self._item_related_name = (
            rels[0].field.name if len(rels) == 1 else None)
    return self._item_related_name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_item(self, item, field_name=None):
    """ Add the item to the specified section.

    Intended for use with items of
    settings.ARMSTRONG_SECTION_ITEM_MODEL.  Behavior on other items is
    undefined.
    """
    chosen = self._choose_field_name(field_name)
    # Attach this section through the item's related manager
    getattr(item, chosen).add(self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_item(self, item, field_name=None):
    """ Remove the item from the specified section.

    Intended for use with items of
    settings.ARMSTRONG_SECTION_ITEM_MODEL.  Behavior on other items is
    undefined.
    """
    chosen = self._choose_field_name(field_name)
    # Detach this section through the item's related manager
    getattr(item, chosen).remove(self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def toggle_item(self, item, test_func, field_name=None):
    """ Toggles the section based on test_func.

    test_func takes an item and returns a boolean.  If it returns True,
    the item will be added to the given section; it will be removed from
    the section otherwise.

    Intended for use with items of
    settings.ARMSTRONG_SECTION_ITEM_MODEL.  Behavior on other items is
    undefined.
    """
    if not test_func(item):
        self.remove_item(item, field_name)
        return False
    self.add_item(item, field_name)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def determine_ticks(low, high):
    """The function used to auto-generate ticks for an axis, based on its
    range of values.

    :param Number low: The lower bound of the axis.
    :param Number high: The upper bound of the axis.

    :rtype: ``tuple``"""

    span = high - low
    # Tick spacing is the power of ten that gives a sensible tick count
    step = 10 ** math.floor(math.log10(span / 1.25))
    # Snap the first tick to a multiple of the spacing, at or above low
    start = math.floor(low / step) * step
    if start < low:
        start += step
    ticks = [start]
    while ticks[-1] + step <= high:
        ticks.append(ticks[-1] + step)
    return tuple(ticks)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def x_ticks(self, *ticks):
    r"""The points on the x-axis for which there are markers and grid
    lines.

    There are default ticks, but you can pass values to this method to
    override the defaults.  Otherwise the method will return the ticks.

    :param \*ticks: if given, these will be the chart's x-ticks.

    :rtype: ``tuple``"""

    if not ticks:
        # Getter: explicit ticks win, otherwise auto-generate
        if self._x_ticks:
            return self._x_ticks
        return determine_ticks(self.x_lower_limit(), self.x_upper_limit())

    # Setter: validate every tick, then store them in ascending order
    for candidate in ticks:
        if not is_numeric(candidate):
            raise TypeError("'%s' is not a numeric tick" % str(candidate))
    self._x_ticks = tuple(sorted(ticks))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def y_ticks(self, *ticks):
    r"""The points on the y-axis for which there are markers and grid
    lines.

    There are default ticks, but you can pass values to this method to
    override the defaults.  Otherwise the method will return the ticks.

    :param \*ticks: if given, these will be the chart's y-ticks.

    :rtype: ``tuple``"""

    if not ticks:
        # Getter: explicit ticks win, otherwise auto-generate
        if self._y_ticks:
            return self._y_ticks
        return determine_ticks(self.y_lower_limit(), self.y_upper_limit())

    # Setter: validate every tick, then store them in ascending order
    for candidate in ticks:
        if not is_numeric(candidate):
            raise TypeError("'%s' is not a numeric tick" % str(candidate))
    self._y_ticks = tuple(sorted(ticks))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def x_grid(self, grid=None):
    """The horizontal lines that run across the chart from the x-ticks.

    If a boolean value is given, these gridlines will be turned on or
    off.  Otherwise, the method will return their current state.

    :param bool grid: Turns the gridlines on or off.

    :rtype: ``bool``"""

    if grid is None:
        # Getter
        return self._x_grid
    # Setter: accept genuine booleans only
    if not isinstance(grid, bool):
        raise TypeError("grid must be boolean, not '%s'" % grid)
    self._x_grid = grid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def y_grid(self, grid=None):
    """The vertical lines that run across the chart from the y-ticks.

    If a boolean value is given, these gridlines will be turned on or
    off.  Otherwise, the method will return their current state.

    :param bool grid: Turns the gridlines on or off.

    :rtype: ``bool``"""

    if grid is None:
        # Getter
        return self._y_grid
    # Setter: accept genuine booleans only
    if not isinstance(grid, bool):
        raise TypeError("grid must be boolean, not '%s'" % grid)
    self._y_grid = grid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grid(self, grid):
    """Turns all gridlines on or off.

    :param bool grid: turns the gridlines on if ``True``, off if
        ``False``"""

    if not isinstance(grid, bool):
        raise TypeError("grid must be boolean, not '%s'" % grid)
    # Apply the same state to both axes
    self._x_grid = grid
    self._y_grid = grid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_event_loop(self):
    """Get the event loop.

    This may be None or an instance of EventLoop.
    """
    on_main_thread = threading.current_thread().name == 'MainThread'
    if self._event_loop is None and on_main_thread:
        # Lazily create a loop, but only on the main thread
        self._event_loop = self.new_event_loop()
    return self._event_loop
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_event_loop(self, event_loop):
    """Set the event loop to *event_loop* (None or an AbstractEventLoop)."""
    # Guard against plugging in something that is not a real loop
    assert (event_loop is None or
            isinstance(event_loop, AbstractEventLoop))
    self._event_loop = event_loop
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def patch_datasette():
    """ Monkey patching for original Datasette.

    Replaces ``Datasette.inspect`` and ``Datasette.execute`` with
    connector-aware versions, keeping the originals reachable as
    ``original_inspect`` / ``original_execute``.  Files that sqlite3
    cannot read are handed to the ``connectors`` registry instead.
    """

    def inspect(self):
        " Inspect the database and return a dictionary of table metadata "
        # Cached after the first call
        if self._inspect:
            return self._inspect

        _inspect = {}
        files = self.files

        for filename in files:
            # Narrow self.files to one entry so original_inspect only
            # sees the file currently being probed.
            self.files = (filename,)
            path = Path(filename)
            name = path.stem
            if name in _inspect:
                raise Exception("Multiple files with the same stem %s" % name)
            try:
                # Try the stock sqlite3-based inspection first
                _inspect[name] = self.original_inspect()[name]
            except sqlite3.DatabaseError:
                # Not a sqlite3 file; fall back to a pluggable connector
                tables, views, dbtype = connectors.inspect(path)
                _inspect[name] = {
                    "hash": inspect_hash(path),
                    "file": str(path),
                    "dbtype": dbtype,
                    "tables": tables,
                    "views": views,
                }

        # Restore the full file tuple before caching the result
        self.files = files
        self._inspect = _inspect
        return self._inspect

    datasette.app.Datasette.original_inspect = datasette.app.Datasette.inspect
    datasette.app.Datasette.inspect = inspect

    async def execute(self, db_name, sql, params=None, truncate=False, custom_time_limit=None, page_size=None):
        """Executes sql against db_name in a thread"""
        page_size = page_size or self.page_size

        def is_sqlite3_conn():
            # No cached connection yet: decide from inspect() metadata
            conn = getattr(connections, db_name, None)
            if not conn:
                info = self.inspect()[db_name]
                return info.get('dbtype', 'sqlite3') == 'sqlite3'
            else:
                return isinstance(conn, sqlite3.Connection)

        def sql_operation_in_thread():
            conn = getattr(connections, db_name, None)
            if not conn:
                # Open (and cache per-thread) a connector-backed connection
                info = self.inspect()[db_name]
                conn = connectors.connect(info['file'], info['dbtype'])
                setattr(connections, db_name, conn)

            rows, truncated, description = conn.execute(
                sql,
                params or {},
                truncate=truncate,
                page_size=page_size,
                max_returned_rows=self.max_returned_rows,
            )
            return Results(rows, truncated, description)

        if is_sqlite3_conn():
            # sqlite3 databases keep the stock code path
            return await self.original_execute(db_name, sql, params=params, truncate=truncate, custom_time_limit=custom_time_limit, page_size=page_size)
        else:
            # Connector query runs in the executor so the loop stays free
            return await asyncio.get_event_loop().run_in_executor(
                self.executor, sql_operation_in_thread
            )

    datasette.app.Datasette.original_execute = datasette.app.Datasette.execute
    datasette.app.Datasette.execute = execute
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_fas(config):
    """ Return a fedora.client.fas2.AccountSystem object if the provided
    configuration contains a FAS username and password.
    """
    global _FAS

    # Reuse the already-constructed client, if any
    if _FAS is not None:
        return _FAS

    # In some development environments, having fas_credentials around is a
    # pain.. so, let things proceed here, but emit a warning.
    try:
        creds = config['fas_credentials']
    except KeyError:
        log.warn("No fas_credentials available.  Unable to query FAS.")
        return None

    base_url = creds.get(
        'base_url', 'https://admin.fedoraproject.org/accounts/')
    _FAS = AccountSystem(
        base_url,
        username=creds['username'],
        password=creds['password'],
        cache_session=False,
        insecure=creds.get('insecure', False),
    )

    return _FAS
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_packagers_of_package(config, package):
    """ Retrieve the list of users who have commit on a package.

    :arg config: a dict containing the fedmsg config
    :arg package: the package you are interested in.
    :return: a set listing all the fas usernames that have some ACL on
        package.
    """
    if not _cache.is_configured:
        _cache.configure(**config['fmn.rules.cache'])

    key = cache_key_generator(get_packagers_of_package, package)

    def creator():
        # Only hit pkgdb2 on a cache miss
        return _get_pkgdb2_packagers_for(config, package)

    return _cache.get_or_create(key, creator)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_packages_of_user(config, username, flags):
    """ Retrieve the list of packages where the specified user has some acl.

    :arg config: a dict containing the fedmsg config
    :arg username: the fas username of the packager whose packages are of
        interest.
    :arg flags: ACL flags forwarded to the pkgdb2 query.
    :return: a set listing all the packages where the specified user has
        some ACL.
    """
    if not _cache.is_configured:
        _cache.configure(**config['fmn.rules.cache'])

    packages = set()
    groups = get_groups_of_user(config, get_fas(config), username)
    owners = [username] + ['group::' + group for group in groups]

    for owner in owners:
        key = cache_key_generator(get_packages_of_user, owner)
        # Bind ``owner`` eagerly via a default argument: a plain closure
        # would late-bind the loop variable and could query the wrong
        # owner if the cache backend defers calling the creator.
        creator = lambda owner=owner: \
            _get_pkgdb2_packages_for(config, owner, flags)
        packages.update(_cache.get_or_create(key, creator))

    return packages
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_user_of_group(config, fas, groupname):
    ''' Return the list of users in the specified group.

    :arg config: a dict containing the fedmsg config
    :arg fas: a fedora.client.fas2.AccountSystem object instantiated and
        logged into FAS.
    :arg groupname: the name of the group for which we want to retrieve
        the members.
    :return: a set of FAS usernames belonging to the specified group.
    '''
    if not _cache.is_configured:
        _cache.configure(**config['fmn.rules.cache'])

    key = cache_key_generator(get_user_of_group, groupname)

    def creator():
        # Without a FAS client we cannot resolve membership at all
        if not fas:
            return set()
        members = fas.group_members(groupname)
        return set(member.username for member in members)

    return _cache.get_or_create(key, creator)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delist(target):
    ''' For any "list" found, replace with a single entry if the list has
    exactly one entry.  Empty lists become None; dicts are walked in
    place.  Anything else is returned unchanged. '''
    if type(target) is dict:
        # Collapse each value in place and hand back the same dict
        for key in target:
            target[key] = delist(target[key])
        return target
    if type(target) is list:
        if not target:
            return None
        if len(target) == 1:
            return delist(target[0])
        return [delist(element) for element in target]
    return target
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_schema_coverage(doc, schema):
    '''
    FORWARD CHECK OF DOCUMENT

    This routine looks at each element in the doc, and makes sure there
    is a matching 'name' in the schema at that level.  Unmatched entries
    are reported as errors and removed from the document; matched entries
    are recursed into.

    :param doc: document node (supports list_tuples/seq_delete/indexing).
    :param schema: schema node describing the allowed names at this level.
    :return: list of (severity, source, seq, message) tuples.
    '''
    error_list = []
    to_delete = []
    for entry in doc.list_tuples():
        (name, value, index, seq) = entry
        # NOTE(review): schema_match_up appears to resolve the schema
        # subsection applicable at this doc level -- confirm.
        temp_schema = schema_match_up(doc, schema)
        if not name in temp_schema.list_values("name"):
            error_list.append( ("[error]", "doc", seq, "a name of '{}' not found in schema".format(name)) )
            to_delete.append(seq)
        else:
            # check subs
            el = check_schema_coverage(doc[name, value, index], temp_schema["name", name])
            error_list.extend(el)
    # Delete only after the walk, so the document is not mutated while
    # it is being iterated.
    for seq in to_delete:
        doc.seq_delete(seq)
    return error_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sub_schema_raises(doc, schema):
    '''
    Look for "raise_error", "raise_warning", and "raise_log" directives
    in the schema matched against this document node, then recurse into
    every child entry that the schema knows about.

    :param doc: document node being checked.
    :param schema: schema node describing this level.
    :return: list of (severity, source, seq, message) tuples.
    '''
    error_list = []
    # Resolve the schema subsection that applies to this doc level
    temp_schema = schema_match_up(doc, schema)
    for msg in temp_schema.list_values("raise_error"):
        error_list.append( ("[error]", "doc", doc.seq, "'{}'".format(msg)) )
    for msg in temp_schema.list_values("raise_warning"):
        error_list.append( ("[warning]", "doc", doc.seq, "'{}'".format(msg)) )
    for msg in temp_schema.list_values("raise_log"):
        error_list.append( ("[log]", "doc", doc.seq, "'{}'".format(msg)) )
    # Recurse only into entries the schema recognizes by name
    for entry in doc:
        if temp_schema.has(("name", entry.name)):
            el = sub_schema_raises(entry, temp_schema["name", entry.name])
            error_list.extend(el)
    return error_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_valid_row(cls, row):
    """Indicates whether or not the given row contains valid data.

    A row is valid when none of its column values is None.  Access goes
    through ``keys()`` and indexing so sqlite3.Row objects work too.
    """
    return not any(row[key] is None for key in row.keys())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cursor(cls):
    """Return a message list cursor that returns sqlite3.Row objects"""
    connection = SqliteConnection.get()
    # Rows come back as sqlite3.Row so columns are addressable by name
    connection.row_factory = sqlite3.Row
    return connection.cursor()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_header(self, header_string):
""" Parses the header and determines the column type and its column index. """ |
header_content = header_string.strip().split('\t')
if len(header_content) != self._snv_enum.HEADER_LEN.value:
raise MTBParserException(
"Only {} header columns found, {} expected!"
.format(len(header_content), self._snv_enum.HEADER_LEN.value))
counter = 0
for column in header_content:
for enum_type in self._snv_enum:
if column == enum_type.value:
self._header_to_column_mapping[enum_type.name] = counter
continue
counter+=1
if len(self._header_to_column_mapping) != self._snv_enum.HEADER_LEN.value:
debug_string = self._header_to_column_mapping.keys()
raise MTBParserException("Parsing incomplete: Not all columns have been "
"matched to speficied column types. Identified {} columns, but expected {}. {}"
.format(len(self._header_to_column_mapping), self._snv_enum.HEADER_LEN.value, debug_string)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_content(self, snv_entries):
""" Parses SNV entries to SNVItems, objects representing the content for every entry, that can be used for further processing. """ |
if len(snv_entries) == 1:
return
for line in snv_entries[1:]:
info_dict = self._map_info_to_col(line)
self._snv_list.append(SNVItem(**info_dict)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def actions(obj, **kwargs):
    """ Return actions available for an object """
    if 'exclude' in kwargs:
        # Template callers pass a comma-separated string; the model API
        # wants a list.
        kwargs['exclude'] = kwargs['exclude'].split(',')
    available = obj.get_actions(**kwargs)
    if isinstance(available, dict):
        available = available.values()
    markup = "".join("%s" % action.render() for action in available)
    return '<div class="actions">%s</div>' % markup
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_models(self, limit=-1, offset=-1):
    """Get a list of models in the registry.

    Parameters
    ----------
    limit : int
        Limit number of items in the result set
    offset : int
        Set offset in list (order as defined by object store)

    Returns
    -------
    list(ModelHandle)
    """
    # Pure delegation to the backing registry
    registry = self.registry
    return registry.list_models(limit=limit, offset=offset)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_model(self, model_id, properties, parameters, outputs, connector):
    """Register a new model with the engine.

    Expects connection information for RabbitMQ to submit model run
    requests to workers.  Raises ValueError if the given model identifier
    is not unique.

    Parameters
    ----------
    model_id : string
        Unique model identifier
    properties : Dictionary
        Dictionary of model specific properties.
    parameters : list(scodata.attribute.AttributeDefinition)
        List of attribute definitions for model run parameters
    outputs : ModelOutputs
        Description of model outputs
    connector : dict
        Connection information to communicate with model workers.
        Expected to contain at least the connector name 'connector'.

    Returns
    -------
    ModelHandle
    """
    # Ensure connector information is usable before touching the registry
    self.validate_connector(connector)
    try:
        handle = self.registry.register_model(
            model_id, properties, parameters, outputs, connector)
    except DuplicateKeyError as ex:
        # Surface duplicate identifiers as ValueError, per the contract
        raise ValueError(str(ex))
    return handle
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_model(self, model_run, run_url):
    """Execute the given model run.

    Throws a ValueError if the given run specifies an unknown model or if
    the model connector is invalid.  An EngineException is thrown if
    running the model (i.e., communication with the backend) fails.

    Parameters
    ----------
    model_run : ModelRunHandle
        Handle to model run
    run_url : string
        URL for model run information
    """
    # Look the model up to confirm it exists and to obtain its connector
    model = self.get_model(model_run.model_id)
    if model is None:
        raise ValueError('unknown model: ' + model_run.model_id)
    # By now there is only one connector.  Use the buffered connector to
    # avoid closed connection exceptions.
    RabbitMQConnector(model.connector).run_model(model_run, run_url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_connector(self, connector):
    """Validate a given connector.

    Raises ValueError if the connector is not valid.

    Parameters
    ----------
    connector : dict
        Connection information
    """
    if 'connector' not in connector:
        raise ValueError('missing connector name')
    name = connector['connector']
    if name != CONNECTOR_RABBITMQ:
        raise ValueError('unknown connector: ' + str(name))
    # Delegate the connector-specific checks; raises ValueError when the
    # given connector information is invalid.
    RabbitMQConnector.validate(connector)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_model(self, model_run, run_url):
    """Run model by sending message to RabbitMQ queue containing the run
    and experiment identifier.  Messages are persistent to ensure that a
    worker will process the run request at some point.

    Throws an EngineException if communication with the server fails.

    Parameters
    ----------
    model_run : ModelRunHandle
        Handle to model run
    run_url : string
        URL for model run information
    """
    # Open connection to RabbitMQ server. Will raise an exception if the
    # server is not running. In this case we raise an EngineException to
    # allow caller to delete model run.
    try:
        credentials = pika.PlainCredentials(self.user, self.password)
        con = pika.BlockingConnection(pika.ConnectionParameters(
            host=self.host,
            port=self.port,
            virtual_host=self.virtual_host,
            credentials=credentials
        ))
        channel = con.channel()
        # durable=True so the queue survives a broker restart
        channel.queue_declare(queue=self.queue, durable=True)
    except pika.exceptions.AMQPError as ex:
        err_msg = str(ex)
        if err_msg == '':
            # Some AMQP errors stringify empty; build a useful message
            err_msg = 'unable to connect to RabbitMQ: ' + self.user + '@'
            err_msg += self.host + ':' + str(self.port)
            err_msg += self.virtual_host + ' ' + self.queue
        raise EngineException(err_msg, 500)
    # Create model run request
    request = RequestFactory().get_request(model_run, run_url)
    # Send request
    channel.basic_publish(
        exchange='',
        routing_key=self.queue,
        body=json.dumps(request.to_dict()),
        properties=pika.BasicProperties(
            delivery_mode = 2, # make message persistent
        )
    )
    con.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_model(self, model_run, run_url):
    """Create entry in run request buffer.

    Parameters
    ----------
    model_run : ModelRunHandle
        Handle to model run
    run_url : string
        URL for model run information
    """
    # Build the run request for this model run
    request = RequestFactory().get_request(model_run, run_url)
    # Persist the request together with the connector details so a
    # worker can pick it up later.
    document = {
        'connector': self.connector,
        'request': request.to_dict(),
    }
    self.collection.insert_one(document)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_request(self, model_run, run_url):
    """Create request object to run model.  Requests are handled by SCO
    worker implementations.

    Parameters
    ----------
    model_run : ModelRunHandle
        Handle to model run
    run_url : string
        URL for model run information

    Returns
    -------
    ModelRunRequest
        Object representing model run request
    """
    run_id = model_run.identifier
    experiment_id = model_run.experiment_id
    return ModelRunRequest(run_id, experiment_id, run_url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def submit_error(url, user, project, area, description, extra=None, default_message=None):
    """Celery task: submit an error to BugzScout asynchronously.

    :param url: string URL for bugzscout
    :param user: string fogbugz user to designate when submitting via bugzscout
    :param project: string fogbugz project to designate for cases
    :param area: string fogbugz area to designate for cases
    :param description: string description for error
    :param extra: string details for error
    :param default_message: string default message to return in responses
    """
    LOG.debug('Creating new BugzScout instance.')
    scout = bugzscout.BugzScout(url, user, project, area)
    LOG.debug('Submitting BugzScout error.')
    scout.submit_error(description, extra=extra,
                       default_message=default_message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_depth_first(nodes, func, depth=0, as_dict=False, parents=None):
    '''
    Given a structure such as the application menu layout described above, we
    may want to apply an operation to each entry to create a transformed
    version of the structure.
    For example, let's convert all entries in the application menu layout from
    above to upper-case:
    >>> pprint(apply_depth_first(menu_actions, lambda node, parents, nodes: node.upper()))
    [('FILE',
    ['LOAD', 'SAVE', ('QUIT', ['QUIT WITHOUT SAVING', 'SAVE AND QUIT'])]),
    ('EDIT', ['COPY', 'PASTE', ('FILL', ['DOWN', 'SERIES'])])]
    Here we used the `apply_depth_first` function to apply a `lambda` function
    to each entry to compute the upper-case value corresponding to each node/key.
    `as_dict`
    ---------
    To make traversing the structure easier, the output may be expressed as a
    nested `OrderedDict` structure. For instance, let's apply the upper-case
    transformation from above, but this time with `as_dict=True`:
    >>> result = apply_depth_first(menu_actions, as_dict=True, \
    ... func=lambda node, parents, nodes: node.upper())
    >>> type(result)
    <class 'collections.OrderedDict'>
    Here we see that the result is an ordered dictionary. Moreover, we can
    look up the transformed `"File"` entry based on the original key/node
    value. Since an entry may contain children, each entry is wrapped as a
    `namedtuple` with `item` and `children` attributes.
    >>> type(result['File'])
    <class 'nested_structures.Node'>
    >>> result['File'].item
    'FILE'
    >>> type(result['File'].children)
    <class 'collections.OrderedDict'>
    If an entry has children, the `children` attribute is an `OrderedDict`.
    Otherwise, the `children` is set to `None`.
    Given the information from above, we can look up the `"Load"` child entry
    of the `"File"` entry.
    >>> result['File'].children['Load']
    Node(item='LOAD', children=None)
    Similarly, we can look up the `"Save and quit"` child entry of the `"Quit"`
    entry.
    >>> result['File'].children['Quit'].children['Save and quit']
    Node(item='SAVE AND QUIT', children=None)
    Note that this function *(i.e., `apply_depth_first`)* could be used to,
    e.g., create a menu GUI item for each entry in the structure. This would
    decouple the description of the layout from the GUI framework used.
    '''
    # NOTE(review): the example lambdas above accept only (node, parents,
    # nodes), but `func` is invoked below with six positional arguments
    # (node, parents, nodes, first, last, depth) -- confirm the intended
    # callback signature; the doctests appear stale.
    if as_dict:
        items = OrderedDict()
    else:
        items = []
    if parents is None:
        parents = []
    node_count = len(nodes)
    for i, node in enumerate(nodes):
        # Positional flags passed on to the callback.
        first = (i == 0)
        last = (i == (node_count - 1))
        if isinstance(node, tuple):
            # A tuple entry is (node, children).  This rebinds the `nodes`
            # parameter, which is safe because node_count was captured above.
            node, nodes = node
        else:
            nodes = []
        item = func(node, parents, nodes, first, last, depth)
        item_parents = parents + [node]
        if nodes:
            # Recurse into the children, one level deeper.
            children = apply_depth_first(nodes, func,
                                         depth=depth + 1,
                                         as_dict=as_dict,
                                         parents=item_parents)
        else:
            children = None
        if as_dict:
            items[node] = Node(item, children)
        elif nodes:
            # Entries with children keep the (item, children) tuple shape.
            items.append((item, children))
        else:
            items.append(item)
    return items
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_dict_depth_first(nodes, func, depth=0, as_dict=True, parents=None, pre=None, post=None):
    '''
    This function is similar to the `apply_depth_first` except that it operates
    on the `OrderedDict`-based structure returned from `apply_depth_first` when
    `as_dict=True`.
    Note that if `as_dict` is `False`, the result of this function is given in
    the entry/tuple form.
    '''
    if as_dict:
        items = OrderedDict()
    else:
        items = []
    if parents is None:
        parents = []
    node_count = len(nodes)
    # Python 2 API: `nodes` maps key -> Node(item, children).
    for i, (k, node) in enumerate(nodes.iteritems()):
        first = (i == 0)
        last = (i == (node_count - 1))
        if pre is not None:
            # Optional hook invoked before this node is processed.
            pre(k, node, parents, first, last, depth)
        item = func(k, node, parents, first, last, depth)
        item_parents = parents + [(k, node)]
        if node.children is not None:
            # Recurse into the child OrderedDict, one level deeper.
            children = apply_dict_depth_first(node.children, func,
                                              depth=depth + 1,
                                              as_dict=as_dict,
                                              parents=item_parents,
                                              pre=pre, post=post)
        else:
            children = None
        if post is not None:
            # Optional hook invoked after the whole subtree is processed.
            post(k, node, parents, first, last, depth)
        if as_dict:
            items[k] = Node(item, children)
        elif children:
            items.append((item, children))
        else:
            items.append(item)
    return items
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collect(nested_nodes, transform=None):
    '''
    Flatten the supplied nested-nodes structure into a list, applying the
    `transform` function to each entry.

    The `transform` callable is passed the following arguments:

     - `node`: The node/key of the entry.
     - `parents`: The node/key of the parents as a `list`.
     - `nodes`: The children of the entry.

    By default, `transform` simply returns the node/key, resulting in a
    flattened version of the original nested nodes structure.
    '''
    if transform is None:
        transform = lambda node, parents, nodes, *args: node
    collected = []

    def _append_entry(node, parents, nodes, first, last, depth):
        # Accumulate the transformed value for every visited entry.
        collected.append(transform(node, parents, nodes, first, last, depth))

    apply_depth_first(nested_nodes, _append_entry)
    return collected
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wrap_split_big_content(func_, *args, **kwargs):
    """Transparently chunk oversized content into smaller binary blobs
    before inserting.

    :param func_: the wrapped insert function
    :param args: positional args; ``args[0]`` is the object dict
    :param kwargs: forwarded to ``func_``
    :return: <dict> RethinkDB dict from insert
    """
    obj_dict = args[0]
    needs_chunking = len(obj_dict[CONTENT_FIELD]) >= MAX_PUT
    if needs_chunking:
        return _perform_chunking(func_, *args, **kwargs)
    # Small enough to store inline: mark as a non-chunk record.
    obj_dict[PART_FIELD] = False
    return func_(*args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _only_if_file_not_exist(func_, *args, **kwargs):
    """Run ``func_`` only when no record with the same primary key exists.

    NOTE: check-then-act, so this is horribly non-atomic.

    :param func_: the wrapped function
    :param args: positional args; ``args[1]`` is the object dict and
                 ``args[-1]`` the connection
    :param kwargs: forwarded to ``func_``
    :return: result of ``func_`` or an error dict on duplicate key
    """
    obj_dict = args[1]
    conn = args[-1]
    key = obj_dict[PRIMARY_FIELD]
    try:
        RBF.get(key).pluck(PRIMARY_FIELD).run(conn)
    except r.errors.ReqlNonExistenceError:
        # No existing record -- safe to proceed with the wrapped call.
        return func_(*args, **kwargs)
    # Lookup succeeded, so the key is already taken.
    return {'errors': 1,
            'first_error': "Duplicate primary key `Name`: {}".format(key)}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _perform_chunking(func_, *args, **kwargs):
    """Split oversized content into MAX_PUT-sized chunk records.

    Internal function called only by ``wrap_split_big_content``; performs
    the actual chunking.  Each chunk is inserted as its own record (named
    via ``CHUNK_POSTFIX`` from the parent key plus a zero-padded index),
    then the parent record is rewritten with empty content and the list of
    chunk names.

    :param func_: the wrapped insert function
    :param args: positional args; ``args[0]`` is the object dict,
                 ``args[1]`` the connection
    :param kwargs: forwarded to ``func_``
    :return: <dict> aggregated RethinkDB result counters from all inserts
    """
    obj_dict = args[0]
    content = obj_dict[CONTENT_FIELD]
    resp_dict = Counter({})
    file_list = []
    start_point = 0
    file_count = 0
    while start_point < len(content):
        file_count += 1
        # Build a fresh dict for every chunk.  Previously one dict was
        # mutated and re-submitted each iteration; if func_ ever defers or
        # batches the write, all chunks would alias the last chunk's data.
        chunk_dict = {
            PRIMARY_FIELD: CHUNK_POSTFIX.format(
                obj_dict[PRIMARY_FIELD],
                str(file_count).zfill(CHUNK_ZPAD)),
            CONTENTTYPE_FIELD: obj_dict[CONTENTTYPE_FIELD],
            TIMESTAMP_FIELD: obj_dict[TIMESTAMP_FIELD],
            PART_FIELD: True,
            PARENT_FIELD: obj_dict[PRIMARY_FIELD],
        }
        end_point = file_count * MAX_PUT
        chunk_dict[CONTENT_FIELD] = content[start_point:end_point]
        file_list.append(chunk_dict[PRIMARY_FIELD])
        start_point = end_point
        resp_dict += Counter(func_(chunk_dict, args[1], **kwargs))
    # Rewrite the parent record: no inline content, just chunk references.
    obj_dict[CONTENT_FIELD] = b""
    obj_dict[PARTS_FIELD] = file_list
    obj_dict[PART_FIELD] = False
    resp_dict += Counter(func_(obj_dict, args[1], **kwargs))
    return resp_dict
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def import_class(import_path):
    """Import and return a class from a full dotted import path.

    :param import_path: Python-style path, e.g. ``"package.module.ClassName"``
    :raises IncorrectImportPath: if the path contains no dot, the module
        cannot be imported, or the class is missing from the module
    :return: the imported class object
    """
    if '.' not in import_path:
        raise IncorrectImportPath(
            "Invalid Python-style import path provided: {0}.".format(
                import_path
            )
        )
    # Everything before the last dot is the module; the rest is the class.
    mod_path, klass_name = import_path.rsplit('.', 1)
    try:
        mod = importlib.import_module(mod_path)
    except ImportError:
        raise IncorrectImportPath(
            "Could not import module '{0}'.".format(mod_path)
        )
    try:
        klass = getattr(mod, klass_name)
    except AttributeError:
        raise IncorrectImportPath(
            "Imported module '{0}' but could not find class '{1}'.".format(
                mod_path,
                klass_name
            )
        )
    return klass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_hfs_accounts(netid):
    """Fetch the HFS accounts resource for the given uwnetid.

    :return: a restclients.models.hfs.HfsAccounts object
    """
    response = get_resource(ACCOUNTS_URL.format(uwnetid=netid))
    return _object_from_json(response)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _timedelta_from_elements(elements):
    """Return a timedelta built from a dict of date elements.

    Accepts a dict containing any of the following keys (each optional,
    defaulting to 0):

     - years
     - months
     - days
     - hours
     - minutes
     - seconds

    If years and/or months are provided, a naive calculation of 365 days
    per year and 30 days per month is used.
    """
    # Every element is optional per the contract above, so use .get()
    # consistently -- including 'days', which previously raised KeyError
    # when omitted.
    days = sum((
        elements.get('days', 0),
        _months_to_days(elements.get('months', 0)),
        _years_to_days(elements.get('years', 0))
    ))
    return datetime.timedelta(days=days,
                              hours=elements.get('hours', 0),
                              minutes=elements.get('minutes', 0),
                              seconds=elements.get('seconds', 0))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def next(self):
    """Return the next not-yet-consumed record of this batch.

    :raises: `StopIteration` when every record in this batch has already
        been returned
    """
    position = self._records_iter
    if position >= len(self._records):
        raise StopIteration
    self._records_iter = position + 1
    return self._records[position]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def formatted_str(self, format):
    """Render every record in this batch as one formatted string.

    :param format: one of 'json', 'csv' are supported
    """
    assert(format in ('json', 'csv'))
    ret_str_list = []
    for rec in self._records:
        if format == 'json':
            # One JSON object per record, terminated by os.linesep.
            ret_str_list.append('{')
            for i in xrange(len(rec)):  # Python 2: xrange
                # Pair each value with its column name from the record
                # definition (self._rdef).
                colname, colval = self._rdef[i].name, rec[i]
                # NOTE(review): only double quotes are escaped; backslashes
                # or control characters in values would yield invalid JSON
                # -- confirm inputs are pre-sanitised.
                ret_str_list.append('"%s":"%s"' % (colname, str(colval).replace('"', r'\"')))
                ret_str_list.append(',')
            ret_str_list.pop()  # drop last comma
            ret_str_list.append('}%s' % (os.linesep))
        elif format == 'csv':
            # Every field quoted; no column names emitted.
            for i in xrange(len(rec)):
                colval = rec[i]
                ret_str_list.append('"%s"' % (str(colval).replace('"', r'\"')))
                ret_str_list.append(',')
            ret_str_list.pop()  # drop last comma
            ret_str_list.append('%s' % (os.linesep))
        else:
            # Unreachable: guarded by the assert above.
            assert(False)
    return ''.join(ret_str_list)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def envify(app=None, add_repo_to_path=True):
    """Activate the OpenShift virtualenv and return the WSGI app unchanged.

    Intended for use in the wsgi.py or app.py of an OpenShift python web
    app::

        from shiftpy.wsgi_utils import envify
        from myproject import app

        # wsgi expects an object named 'application'
        application = envify(app)

    :param app: the WSGI application object to return
    :param add_repo_to_path: also append the OpenShift repo dir to sys.path
    """
    # Only act when running on OpenShift (HOMEDIR env var is present).
    if getvar('HOMEDIR'):
        if add_repo_to_path:
            sys.path.append(os.path.join(getvar('REPO_DIR')))
        sys.path.insert(0, os.path.dirname(__file__) or '.')
        virtenv = getvar('PYTHON_DIR') + '/virtenv/'
        virtualenv = os.path.join(virtenv, 'bin/activate_this.py')
        exec_namespace = dict(__file__=virtualenv)
        try:
            if sys.version_info >= (3, 0):
                # Python 3: execfile() is gone; compile and exec manually.
                with open(virtualenv, 'rb') as f:
                    code = compile(f.read(), virtualenv, 'exec')
                exec(code, exec_namespace)
            else:
                execfile(virtualenv, exec_namespace)  # noqa
        except IOError:
            # Missing activate_this.py: not a virtualenv deployment, so
            # silently skip activation.
            pass
    return app
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_file(self, filename):
"""Return the lines from the given file, ignoring lines that start with comments""" |
result = []
with open(filename, 'r') as f:
lines = f.read().split('\n')
for line in lines:
nocomment = line.strip().split('#')[0].strip()
if nocomment:
result.append(nocomment)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def order_by(self, field, orientation='ASC'):
    """Register a field and its sort direction for ordering.

    :param field: a column name, or a pre-built ``[field, orientation]`` pair
    :param orientation: sort direction, 'ASC' (default) or 'DESC'
    :returns: self, so calls can be chained
    """
    entry = field if isinstance(field, list) else [field, orientation]
    self.raw_order_by.append(entry)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_or_update_issue(self, title, body, culprit, labels, **kwargs):
    '''Create a new issue, or comment on the newest matching one.

    :params title: title for the issue
    :params body: body, the content of the issue
    :params culprit: string used to identify the cause of the issue,
        also used for aggregation
    :params labels: (optional) list of labels attached to the issue
    :returns: issue object
    :rtype: :class:`exreporter.stores.github.GithubIssue`
    '''
    matches = self.search(q=culprit, labels=labels)
    # Pull the throttling knobs out of kwargs before they get forwarded.
    self.time_delta = kwargs.pop('time_delta')
    self.max_comments = kwargs.pop('max_comments')
    if not matches:
        return self.create_issue(title=title, body=body, **kwargs)
    newest = matches.pop(0)
    return self.handle_issue_comment(
        issue=newest, title=title, body=body, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(self, q, labels, state='open,closed', **kwargs):
    """Search for issues in Github.

    :param q: query string to search
    :param labels: labels associated with the issues
    :param state: state of the issue
    :returns: list of issue objects, empty when nothing matched
    :rtype: list
    """
    # NOTE(review): `labels` is accepted but never forwarded to
    # github_request.search -- confirm whether it should be folded into
    # the query or passed through explicitly.
    search_result = self.github_request.search(q=q, state=state, **kwargs)
    if search_result['total_count'] > 0:
        return list(
            map(lambda issue_dict: GithubIssue(
                github_request=self.github_request, **issue_dict),
                search_result['items'])
        )
    # Previously fell through returning None; return an empty list so the
    # documented list return type always holds (callers only truth-test).
    return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_issue_comment(self, issue, title, body, **kwargs):
    """Comment on an existing issue, or create a new one instead.

    A comment is added to `issue` only while the issue was updated
    recently enough (`self.time_delta`) AND it still has room for
    comments (`self.max_comments`).  In every other case a fresh issue
    is created.

    :param issue: issue on which the comment is to be added
    :param title: title of the issue if a new one is to be created
    :param body: body of the issue/comment to be created
    :returns: the commented-on issue, or the newly created one
    :rtype: :class:`exreporter.stores.github.GithubIssue`
    """
    if (self._is_time_delta_valid(issue.updated_time_delta)
            and issue.comments_count < self.max_comments):
        issue.comment(body=body)
        return issue
    # Previously the "recent but comments maxed out" case fell through and
    # returned None without creating anything; now it creates a new issue,
    # matching the documented contract.
    return self.create_issue(title=title, body=body, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_issue(self, title, body, labels=None):
    """Create a brand-new issue in Github.

    :params title: title of the issue to be created
    :params body: body of the issue to be created
    :params labels: (optional) list of labels for the issue
    :returns: the newly created issue
    :rtype: :class:`exreporter.stores.github.GithubIssue`
    """
    issue_fields = self.github_request.create(
        title=title, body=body, labels=labels)
    return GithubIssue(github_request=self.github_request, **issue_fields)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updated_time_delta(self):
    """Return the number of seconds since the issue was last updated.

    :returns: whole seconds between `self.updated_at` and now
    :rtype: int
    """
    # GitHub timestamps are ISO-8601 UTC, e.g. "2014-05-03T17:05:36Z".
    # Use an explicit %H:%M:%S -- the previous %X directive is
    # locale-dependent and only happens to match under the C locale.
    local_timezone = tzlocal()
    updated_at = datetime.datetime.strptime(
        self.updated_at, '%Y-%m-%dT%H:%M:%SZ')
    updated_at_utc = pytz.utc.localize(updated_at)
    updated_at_local = updated_at_utc.astimezone(local_timezone)
    delta = datetime.datetime.now(local_timezone) - updated_at_local
    return int(delta.total_seconds())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open_issue(self):
    """Reopen this issue by changing its state to 'open'.

    Updates the issue via the Github API first, then mirrors the new
    state on this local object.
    """
    self.github_request.update(issue=self, state='open')
    self.state = 'open'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def comment(self, body):
    """Add a comment to the issue, reopening it if it was closed.

    :params body: body, content of the comment
    :returns: issue object
    :rtype: :class:`exreporter.stores.github.GithubIssue`
    """
    self.github_request.comment(issue=self, body=body)
    was_closed = self.state == 'closed'
    if was_closed:
        self.open_issue()
    return self
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.