text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_book(self, name):
    """Create a new book.

    :param name: book name; leading/trailing whitespace is stripped.
        Must be non-blank and must not contain a comma.
    """
    name = name.strip()
    if not name:
        self.error("Cannot have a blank book name")
    # Commas stay reserved in case a comma-separated multi-book syntax
    # is added later.
    if "," in name:
        self.error("Cannot have a ',' in a book name")
    existing = self.list_books()
    nexisting = len(existing)
    if name in existing:
        self.error("Already have a book named '%s'" % name)
    try:
        self.cur.execute("INSERT INTO book (number, name) VALUES(?, ?);",
                         (nexisting, name))
        self.con.commit()
    except Exception:
        # Was a bare 'except:'; narrowed so KeyboardInterrupt/SystemExit
        # still propagate.
        self.fyi("Error adding a book named '%s'" % name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def initialize(self, author=""):
''' Initialize the database. This is dangerous since it removes any
existing content.'''
# NOTE(review): the 'author' parameter is unused here and no author row is
# inserted; also no DROP statements are issued, so re-initializing an
# existing database would fail with "table already exists" -- confirm.
# Record the application version so later code can detect old schemas.
self.cur.execute("CREATE TABLE version(major, minor);")
self.cur.execute("INSERT INTO version(major, minor) VALUES (?,?);",
(self.appversion[0], self.appversion[1]))
# Schema history: the 'in_trash' flag (commented line, dated 2015-03-14)
# was replaced by a 'book' column; book 0 acts as the trash.
#20150314 self.cur.execute("CREATE TABLE note(noteId integer primary key autoincrement, authorId, date, modified, due, title, content, hash, privacy DEFAULT 0, in_trash DEFAULT 0);")
self.cur.execute("CREATE TABLE note(noteId integer primary key autoincrement, authorId, date, modified, due, title, content, hash, privacy DEFAULT 0, book DEFAULT 1);")
self.cur.execute("CREATE TABLE author(authorId integer primary key autoincrement, name, nickname);")
self.cur.execute("CREATE TABLE alias(aliasId integer primary key autoincrement, item, alias);")
self.cur.execute("CREATE TABLE keyword(keywordId integer primary key autoincrement, keyword);")
# Many-to-many join table linking notes to keywords.
self.cur.execute("CREATE TABLE notekeyword(notekeywordId integer primary key autoincrement, noteid, keywordid);")
self.cur.execute("CREATE TABLE book(bookId integer primary key autoincrement, number, name DEFAULT '');")
# Books 0 and 1 are reserved: 0 holds trashed notes, 1 is the default book.
self.cur.execute("INSERT INTO book(number, name) VALUES (0, 'Trash');")
self.cur.execute("INSERT INTO book(number, name) VALUES (1, 'Default');")
self.con.commit() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def keyword_hookup(self, noteId, keywords):
'''
Re-link a note to a list of keywords.

Deletes the existing notekeyword cross-links for noteId, then hooks up
one link per entry of keywords, inserting rows into the keyword table
on demand.
'''
try:
self.cur.execute("DELETE FROM notekeyword WHERE noteid=?", [noteId])
except:
# NOTE(review): bare 'except' also swallows KeyboardInterrupt/SystemExit.
self.error("ERROR: cannot unhook previous keywords")
# Now hook up the new entries, one by one.
for keyword in keywords:
# Assumes keywords arrive as UTF-8 encoded bytes (Python 2 era code) --
# TODO confirm target version; on Python 3 str has no .decode.
keyword = keyword.decode('utf-8')
self.fyi(" inserting keyword:", keyword)
# Make sure the keyword table contains the word in question.
keywordId = self.con.execute("SELECT keywordId FROM keyword WHERE keyword = ?;", [keyword]).fetchone()
try:
if keywordId:
self.fyi(" (existing keyword with id: %s)" % keywordId)
keywordId = keywordId[0]
else:
self.fyi(" (new keyword)")
self.cur.execute("INSERT INTO keyword(keyword) VALUES (?);", [keyword])
keywordId = self.cur.lastrowid
# Finally, do the actual hookup for this word.
self.con.execute("INSERT INTO notekeyword(noteId, keywordID) VALUES(?, ?)", [noteId, keywordId])
except:
self.error("error hooking up keyword '%s'" % keyword)
self.con.commit() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_keywords(self):
    '''Return the de-duplicated list of keywords.

    :returns: list of keyword strings, sorted case-insensitively.
    '''
    names = []
    try:
        for row in self.cur.execute("SELECT keyword FROM keyword;").fetchall():
            # Strip leading/trailing whitespace (can be an artifact of old
            # data) and drop entries that are empty after stripping.
            keyword = row[0].strip()
            if keyword:
                names.append(keyword)
    except Exception:
        # Was a bare 'except:'; narrowed so system exits still propagate.
        self.error("ERROR: cannot find database table 'keyword'")
    # Remove duplicates, then sort case-insensitively.
    return sorted(set(names), key=lambda s: s.lower())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_recent(self, nrecent=4):
    '''Find the most recent non-trashed notes.

    :param nrecent: maximum number of notes to return (default 4)
    :returns: list of dicts with keys noteId, date, title, keywords,
        content, hash and book, newest first.
    '''
    rows = []
    try:
        # Book 0 is the Trash book, so 'book > 0' excludes trashed notes.
        # int() guards the %-interpolation into the SQL string.
        rows = self.cur.execute(
            "SELECT noteId FROM note WHERE book > 0 ORDER BY date DESC LIMIT %d;"
            % int(nrecent)).fetchall()
    except Exception:
        self.error("nota.find_recent() cannot look up note list")
    # Possibly save time by finding IDs first.
    noteIds = [row[0] for row in rows]
    self.fyi("noteIds: %s" % noteIds)
    rval = []
    for n in noteIds:
        try:
            note = self.cur.execute(
                "SELECT noteId, date, title, content, hash, book FROM note WHERE noteId = ?;",
                [n]).fetchone()
        except Exception:
            self.warning("Problem extracting note %s from database for recent-list" % n)
            continue  # original had a bare 'next', which is a no-op expression
        if note:
            keywordIds = list(self.con.execute(
                "SELECT keywordid FROM notekeyword WHERE notekeyword.noteid=?;", [n]))
            keywords = [self.cur.execute(
                "SELECT keyword FROM keyword WHERE keywordId=?;", k).fetchone()[0]
                for k in keywordIds]
            rval.append({"noteId": note[0], "date": note[1], "title": note[2],
                         "keywords": keywords, "content": note[3],
                         "hash": note[4], "book": note[5]})
    return rval
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cosmoid_request(self, resource, cosmoid, **kwargs):
    """Generic request helper for API methods whose only required
    parameter is ``cosmoid``.

    Extra keyword arguments are merged into the request parameters and,
    if present, may override ``cosmoid`` itself.
    """
    request_params = {'cosmoid': cosmoid}
    request_params.update(kwargs)
    return self.make_request(resource, request_params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def season_info(self, cosmoid, season, **kwargs):
    """Return information about a season of a TV series.

    Maps to the `season info
    <http://prod-doc.rovicorp.com/mashery/index.php/V1.MetaData.VideoService.Video:Season>`_
    API method.

    :param cosmoid: COSMO ID of the series
    :param season: season number
    """
    resource = 'season/%d/info' % season
    return self._cosmoid_request(resource, cosmoid, **kwargs)
    # Removed: an unreachable copy-pasted params/make_request tail that
    # followed the return statement in the original.
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def episode_info(self, cosmoid, season, episode, **kwargs):
    """Return information about a single episode in a television series.

    Maps to the `episode info
    <http://prod-doc.rovicorp.com/mashery/index.php/V1.MetaData.VideoService.Video:SeasonEpisode>`_
    API method; extra keyword arguments are forwarded to the request.
    """
    return self._cosmoid_request(
        'season/%d/episode/%d/info' % (season, episode), cosmoid, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_zip_wav(zfile, offset=0, count=None):
"""Load a wav file into an array from frame start to frame end :param zfile: ZipExtFile file-like object from where to load the audio :param offset: First sample to load :param count: Maximum number of samples to load :return: The audio samples in a numpy array of floats """ |
# NOTE(review): StringIO.StringIO is Python 2 only; under Python 3 this
# would need io.BytesIO -- confirm the project's target version.
buf = StringIO.StringIO(zfile.read())
sample_rate, audio = wavfile.read(buf)
# Slice after decoding: drop the first `offset` samples, then cap length.
audio = audio[offset:]
if count:
audio = audio[:count]
return sample_rate, audio |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_edit_scripts(pron_a, pron_b, edit_costs=(1.0, 1.0, 1.0)):
"""Get the edit scripts to transform between two given pronunciations. :param pron_a: Source pronunciation as list of strings, each string corresponding to a phoneme :param pron_b: Target pronunciation as list of strings, each string corresponding to a phoneme :param edit_costs: Costs of insert, replace and delete respectively :return: List of edit scripts. Each edit script is represented as a list of operations, where each operation is a dictionary. """ |
# Cost functions for the aligner: insert/replace/delete costs come from the
# edit_costs triple; a 'match' is free only when the phonemes are equal.
op_costs = {'insert': lambda x: edit_costs[0],
'match': lambda x, y: 0 if x == y else edit_costs[1],
'delete': lambda x: edit_costs[2]}
distance, scripts, costs, ops = edit_distance.best_transforms(pron_a, pron_b, op_costs=op_costs)
# Expand each minimal-cost transform into a full edit script.
return [full_edit_script(script.to_primitive()) for script in scripts] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _what_default(self, pronunciation):
"""Provide the default prediction of the what task. This function is used to predict the probability of a given pronunciation being reported for a given token. :param pronunciation: The list or array of confusion probabilities at each index """ |
token_default = self['metadata']['token_default']['what']
# 2*len+1 positions: even indices appear to be insertion slots between
# phonemes and odd indices align with the presented phonemes -- TODO
# confirm against the task definition.
index_count = 2*len(pronunciation) + 1
predictions = {}
for i in range(index_count):
index_predictions = {}
if i % 2 == 0:
# Even position: copy the generic '0' defaults verbatim.
index_predictions.update(token_default['0'])
else:
# Odd position: defaults keyed on the phoneme presented there;
# '=' / '*' / '' presumably encode match / substitution / deletion --
# verify against the metadata format.
presented_phoneme = pronunciation[int((i-1)/2)]
index_predictions[presented_phoneme] = token_default['1']['=']
index_predictions['*'] = token_default['1']['*']
index_predictions[''] = token_default['1']['']
# Index keys are JSON-style strings.
predictions['{}'.format(i)] = index_predictions
return predictions |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def where_task(self, token_id, presented_pronunciation, confusion_probability):
    """Provide the prediction of the where task.

    Registers the per-index confusion probabilities for ``token_id``.
    The entry is first seeded with the default prediction derived from
    ``presented_pronunciation``; when ``confusion_probability`` is not
    None it overwrites that default.

    :param token_id: The token for which the prediction is being provided
    :param confusion_probability: list/array of confusion probabilities
        at each index, or None to keep the default
    """
    token_entry = self['tokens'].setdefault(token_id, {})
    token_entry.setdefault('where', self._where_default(presented_pronunciation))
    if confusion_probability is not None:
        token_entry['where'] = list(confusion_probability)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def what_task(self, token_id, presented_pronunciation, index, phonemes, phonemes_probability, warn=True, default=True):
"""Provide the prediction of the what task. This function is used to predict the probability of a given phoneme being reported at a given index for a given token. :param token_id: The token for which the prediction is provided :param index: The index of the token for which the prediction is provided :param phonemes: The phoneme or phoneme sequence for which the prediction is being made (as a space separated string) :param phonemes_probability: The probability of the phoneme or phoneme sequence :param warn: Set to False in order to avoid warnings about 0 or 1 probabilities :param default: Set to False in order to avoid generating the default probabilities """ |
# Degenerate probabilities (0 or 1) make the log-likelihood -Infinity.
if phonemes_probability is not None and not 0. < phonemes_probability < 1. and warn:
logging.warning('Setting a probability of [{}] to phonemes [{}] for token [{}].\n '
'Using probabilities of 0.0 or 1.0 '
'may lead to likelihoods of -Infinity'.format(phonemes_probability,
phonemes,
token_id))
# Seed the token's 'what' entry with the defaults derived from the
# presented pronunciation (empty dict when default=False).
default_preds = self._what_default(presented_pronunciation) if default else {}
self['tokens'].setdefault(token_id, {}) \
.setdefault('what', default_preds)
if index is not None:
self['tokens'][token_id]['what'].setdefault(str(index), {})
if phonemes is not None:
if phonemes_probability is not None and index is not None:
# Normal case: record the supplied probability at this index.
self['tokens'][token_id]['what'][str(index)][phonemes] = phonemes_probability
else:
if index is not None:
# No probability supplied: restore this phoneme's default, or
# remove the entry when there is no default for it.
if phonemes in default_preds[str(index)]:
self['tokens'][token_id]['what'][str(index)][phonemes] = default_preds[str(index)][phonemes]
else:
# NOTE(review): .pop(phonemes) raises KeyError when the entry
# is absent; .pop(phonemes, None) may be intended -- confirm.
self['tokens'][token_id]['what'][str(index)].pop(phonemes)
else:
# No phonemes given: reset or drop the whole index entry.
if str(index) in default_preds:
self['tokens'][token_id]['what'][str(index)] = default_preds[str(index)]
else:
self['tokens'][token_id]['what'].pop(str(index)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def full_task(self, token_id, presented_pronunciation, pronunciation, pronunciation_probability, warn=True, default=True):
"""Provide the prediction of the full task. This function is used to predict the probability of a given pronunciation being reported for a given token. :param token_id: The token for which the prediction is provided :param pronunciation: The pronunciation for which the prediction is being made (as a list of strings or space separated string) :param pronunciation_probability: The probability of the pronunciation for the given token :param warn: Set to False in order to avoid warnings about 0 or 1 probabilities :param default: Set to False in order to avoid generating the default probabilities """ |
# Degenerate probabilities (0 or 1) make the log-likelihood -Infinity.
if pronunciation_probability is not None and not 0. < pronunciation_probability < 1. and warn:
logging.warning('Setting a probability of [{}] to pronunciation [{}] for token [{}].\n '
'Using probabilities of 0.0 or 1.0 '
'may lead to likelihoods of -Infinity'.format(pronunciation_probability,
pronunciation,
token_id))
# Accept either a space-separated string or a list of phoneme strings;
# lists are normalised to the string form used as the dictionary key.
key = pronunciation
if isinstance(key, list):
# NOTE(review): 'basestring' exists only on Python 2 -- confirm the
# project's target version.
if not all([isinstance(phoneme, basestring) for phoneme in key]):
raise ValueError('The pronunciation must be of type string (a sequence of space separated phonemes) '
'or of type list (containing phonemes of type strings).'
'User supplied: {}'.format(key))
key = ' '.join(pronunciation)
# Seed the token's 'full' entry with the defaults derived from the
# presented pronunciation (empty dict when default=False).
default_preds = self._full_default(presented_pronunciation) if default else {}
self['tokens'].setdefault(token_id, {}) \
.setdefault('full', default_preds)
if key is not None:
if pronunciation_probability is not None:
self['tokens'][token_id]['full'][key] = pronunciation_probability
else:
# No probability supplied: restore the default for this
# pronunciation, or remove the entry entirely.
if key in default_preds:
self['tokens'][token_id]['full'][key] = default_preds[key]
else:
# NOTE(review): .pop(key) raises KeyError when the entry is
# absent; .pop(key, None) may be intended -- confirm.
self['tokens'][token_id]['full'].pop(key) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(fileobj):
    """Load a submission from a file-like object.

    The first gzipped line holds the metadata; every subsequent line is a
    ``[token_id, token]`` JSON pair.

    :param fileobj: File-like object
    :return: the loaded submission
    """
    with gzip.GzipFile(fileobj=fileobj, mode='r') as archive:
        result = Submission(metadata=json.loads(archive.readline()))
        for record in archive:
            identifier, token = json.loads(record)
            result['tokens'][identifier] = token
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_metadata(fileobj):
    """Read only the metadata (the first gzipped line) of a saved submission.

    :param fileobj: file-like object holding the gzipped submission
    :return: the metadata dictionary
    """
    with gzip.GzipFile(fileobj=fileobj, mode='r') as archive:
        first_line = archive.readline()
    return json.loads(first_line)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def submit(self, password=''):
"""Submits the participation to the web site. The passwords is sent as plain text. :return: the evaluation results. """ |
url = '{}/api/submit'.format(BASE_URL)
try:
# HTTP basic auth with the metadata email; the password travels in
# clear text unless BASE_URL uses https.
r = requests.post(url,
data=self.dumps(),
headers={'content-type': 'application/json'},
auth=(self['metadata']['email'], password))
response = r.json()
except requests.exceptions.HTTPError as e:
# NOTE(review): requests raises HTTPError only from raise_for_status(),
# which is never called here; r.json() can also raise ValueError on a
# non-JSON body and is not caught -- confirm the intended handling.
logging.error('Error while submitting the participation. {}'.format(e))
return Job()
if 'error' in response:
# Server-side failure reported inside the JSON payload.
logging.error('Error while processing the participation. {}'.format(response['error']))
return Job()
return Job(response) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def evaluate(self, password=''):
"""Evaluates the development set. The passwords is sent as plain text. :return: the evaluation results. """ |
# Make a copy only keeping the development set
dev_submission = self
if self['metadata'].get('evaluation_setting', {}).get('development_set', None):
# Deep-copy so the caller's full token set is left untouched, then
# keep only the tokens named in the development set.
dev_submission = copy.deepcopy(self)
dev_submission['tokens'] = {token_id: token for token_id, token in self['tokens'].items()
if token_id in self['metadata']['evaluation_setting']['development_set']}
url = '{}/api/evaluate'.format(BASE_URL)
try:
# HTTP basic auth; the password travels in clear text unless BASE_URL
# uses https.
r = requests.post(url,
data=dev_submission.dumps(),
headers={'content-type': 'application/json'},
auth=(dev_submission['metadata']['email'], password))
response = r.json()
except requests.exceptions.HTTPError as e:
# NOTE(review): requests raises HTTPError only from raise_for_status(),
# which is never called here; r.json() can also raise ValueError and
# is not caught -- confirm the intended handling.
logging.error('Error while submitting the participation. {}'.format(e))
return Job()
if 'error' in response:
# Server-side failure reported inside the JSON payload.
logging.error('Error while processing the participation. {}'.format(response['error']))
return Job()
return Job(response) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def author_mail_from_git(self):
""" Get the author mail from git information.

Runs 'git config --get user.email' and stores the output in
self.author_mail when the command produces any; always returns the
current value of self.author_mail.
""" |
try:
# launch git command and get answer
cmd = Popen(["git", "config", "--get", "user.email"], stdout=PIPE)
# communicate() returns (stdout, stderr); stderr is None here since it
# was not redirected.
stdoutdata = cmd.communicate()
if (stdoutdata[0]):
# NOTE(review): on Python 3 stdout is bytes, so rstrip(os.linesep)
# (a str) would raise TypeError -- presumably Python 2 era code;
# confirm the target version.
self.author_mail = stdoutdata[0].rstrip(os.linesep)
except ImportError:
# NOTE(review): nothing in the try block raises ImportError; this
# handler appears vestigial.
pass
except CalledProcessError:
pass
except OSError:
# Raised when the git executable is missing from PATH.
pass
return self.author_mail |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def author_mail_from_system(self):
    """Guess the author mail address from the local system.

    Combines the login name with the host name; often inaccurate, but a
    reasonable fallback when no git configuration is available.
    """
    guessed = '{}@{}'.format(getpass.getuser(), socket.gethostname())
    self.author_mail = guessed
    return guessed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def content_negotiation(formats, default_type='text/html'):
""" Provides basic content negotiation and returns a view method based on the best match of content types as indicated in formats. :param formats: dictionary of content types and corresponding methods :param default_type: string the decorated method is the return type for. Example usage:: def rdf_view(request, arg):
return RDF_RESPONSE @content_negotiation({'application/rdf+xml': rdf_view}) def html_view(request, arg):
return HTML_RESPONSE The above example would return the rdf_view on a request type of ``application/rdf+xml`` and the normal view for anything else. Any :class:`django.http.HttpResponse` returned by the view method chosen by content negotiation will have a 'Vary: Accept' HTTP header added. **NOTE:** Some web browsers do content negotiation poorly, requesting ``application/xml`` when what they really want is ``application/xhtml+xml`` or ``text/html``. When this type of Accept request is detected, the default type will be returned rather than the best match that would be determined by parsing the Accept string properly (since in some cases the best match is ``application/xml``, which could return non-html content inappropriate for display in a web browser). """ |
def _decorator(view_method):
@wraps(view_method)
def _wrapped(request, *args, **kwargs):
# Changed this to be a value passed as a method argument defaulting
# to text/html instead so it's more flexible.
# default_type = 'text/html' # If not specified assume HTML request.
# Add text/html for the original method if not already included.
# NOTE(review): this mutates the caller's `formats` dict in place;
# if the same dict is shared between decorators the entry leaks
# across them -- confirm this is acceptable.
if default_type not in formats:
formats[default_type] = view_method
try:
req_type = request.META['HTTP_ACCEPT']
# If this request is coming from a browser like that, just
# give them our default type instead of honoring the actual best match
# (see note above for more detail)
if '*/*' in req_type:
req_type = default_type
except KeyError:
# No Accept header at all: fall back to the default type.
req_type = default_type
# Get the best match for the content type requested.
content_type = mimeparse.best_match(formats.keys(),
req_type)
# Return the view matching content type or the original view
# if no match.
if not content_type or content_type not in formats:
response = view_method(request, *args, **kwargs)
else:
response = formats[content_type](request, *args, **kwargs)
# set a Vary header to indicate content may vary based on Accept header
if isinstance(response, HttpResponse): # views should return HttpResponse objects, but check to be sure
# note: using the same utility method used by django's vary_on_headers decorator
patch_vary_headers(response, ['Accept'])
return response
return _wrapped
return _decorator |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def animation(frame_function: types.FrameFunction) -> types.Animation:
    """Turn a FrameFunction into an Animation.

    Args:
        frame_function: A function that returns a FrameGenerator.
    Returns:
        an Animation decorator function.
    """
    wrapped_animation = core.Animation(frame_function)

    @functools.wraps(frame_function)
    def wrapper(*args, **kwargs):
        # Delegate every call to the underlying Animation instance.
        return wrapped_animation(*args, **kwargs)

    return wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def multiline_frame_function(frame_function: types.FrameFunction, height: int, offset: int = 0, *args, **kwargs) -> types.FrameGenerator:
    """Stack a single-line frame function into a multiline generator.

    Creates ``height`` independent generators from ``frame_function`` and
    advances the i-th one by ``i * offset`` frames, so successive rows are
    phase-shifted relative to each other. The rows are then joined with
    newlines.

    Args:
        frame_function: A function that returns a singleline FrameGenerator.
        height: The amount of frame generators to stack vertically.
        offset: Frame offset applied to each successive generator.
    Returns:
        a multiline version of the generator returned by frame_function
    """
    rows = []
    for row_index in range(height):
        generator = frame_function(*args, **kwargs)
        for _ in range(row_index * offset):
            next(generator)  # phase-shift this row's animation
        rows.append(generator)
    yield from concatechain(*rows, separator='\n')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def region_by_identifier(self, identifier):
    """Return region of interest corresponding to the supplied identifier.

    :param identifier: positive integer corresponding to the segment of
        interest
    :raises ValueError: if the identifier is negative, non-integral or zero
    :returns: `jicbioimage.core.region.Region`
    """
    if identifier < 0:
        raise ValueError("Identifier must be a positive integer.")
    is_integral = np.equal(np.mod(identifier, 1), 0)
    if not is_integral:
        raise ValueError("Identifier must be a positive integer.")
    if identifier == 0:
        raise ValueError("0 represents the background.")
    return Region.select_from_array(self, identifier)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_regions(self, id1, id2):
"""Merge two regions into one. The merged region will take on the id1 identifier. :param id1: region 1 identifier :param id2: region 2 identifier """ |
# Look up the region currently labelled id2 ...
region2 = self.region_by_identifier(id2)
# ... and relabel all of its elements as id1 (presumably a Region acts as
# an index mask over this array-like object, as with numpy boolean
# indexing -- confirm against jicbioimage.core.region.Region).
self[region2] = id1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def task2ics():
"""Command line tool to convert from Taskwarrior to iCalendar""" |
from argparse import ArgumentParser, FileType
from sys import stdout
parser = ArgumentParser(description='Converter from Taskwarrior to iCalendar syntax.')
# Both arguments are optional: input defaults to ~/.task, output to stdout.
parser.add_argument('indir', nargs='?', help='Input Taskwarrior directory (default to ~/.task)', default=expanduser('~/.task'))
parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
help='Output iCalendar file (default: stdout)')
args = parser.parse_args()
# Wrap the Taskwarrior data directory and serialise all tasks as iCalendar.
task = IcsTask(args.indir)
args.outfile.write(task.to_vobject().serialize()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ics2task():
"""Command line tool to convert from iCalendar to Taskwarrior""" |
from argparse import ArgumentParser, FileType
from sys import stdin
parser = ArgumentParser(description='Converter from iCalendar to Taskwarrior syntax.')
# Input defaults to stdin, output Taskwarrior directory to ~/.task.
parser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin,
help='Input iCalendar file (default: stdin)')
parser.add_argument('outdir', nargs='?', help='Output Taskwarrior directory (default to ~/.task)', default=expanduser('~/.task'))
args = parser.parse_args()
# Parse the iCalendar input and convert every VTODO component it contains
# into a Taskwarrior task.
vobject = readOne(args.infile.read())
task = IcsTask(args.outdir)
for todo in vobject.vtodo_list:
task.to_task(todo) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update(self):
"""Reload Taskwarrior files if the mtime is newer""" |
update = False
with self._lock:
# Compare the mtimes of the two Taskwarrior data files against the
# cached newest value; only re-export when something changed.
for fname in ['pending.data', 'completed.data']:
data_file = join(self._data_location, fname)
if exists(data_file):
mtime = getmtime(data_file)
if mtime > self._mtime:
self._mtime = mtime
update = True
if update:
# Rebuild the project -> {uuid: task} cache from 'task export'.
# rc.hooks=off skips user hooks; rc.verbose=nothing keeps the
# output pure JSON.
self._tasks = {}
tasklist = loads(run(['task', 'rc.verbose=nothing', 'rc.hooks=off', 'rc.data.location={self._data_location}'.format(**locals()), 'export'], stdout=PIPE).stdout.decode('utf-8'))
for task in tasklist:
# Tasks without a project are grouped under 'unaffiliated'.
project = task['project'] if 'project' in task else 'unaffiliated'
if project not in self._tasks:
self._tasks[project] = {}
self._tasks[project][task['uuid']] = task |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_vobjects(self, filename, uids=None):
"""Return iCal objects and etags of all Taskwarrior entries in uids filename -- the Taskwarrior project uids -- the UIDs of the Taskwarrior tasks (all if None) """ |
self._update()
if not uids:
uids = self.get_uids(filename)
# The virtual "file" name is the project name.
project = basename(filename)
items = []
for uid in uids:
vtodos = iCalendar()
# UIDs look like '<uuid>@<host>'; the cache key is the uuid part.
uuid = uid.split('@')[0]
self._gen_vtodo(self._tasks[project][uuid], vtodos.add('vtodo'))
# The quoted 'modified' timestamp doubles as the etag.
items.append((uid, vtodos, '"%s"' % self._tasks[project][uuid]['modified']))
return items |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_vobject(self, project=None, uid=None):
"""Return vObject object of Taskwarrior tasks If filename and UID are specified, the vObject only contains that task. If only a filename is specified, the vObject contains all events in the project. Otherwise the vObject contains all all objects of all files associated with the IcsTask object. project -- the Taskwarrior project uid -- the UID of the task """ |
self._update()
vtodos = iCalendar()
if uid:
# UIDs look like '<uuid>@<host>'; the cache key is the uuid part.
uid = uid.split('@')[0]
if not project:
# No project given: search every project for this uuid.
for p in self._tasks:
if uid in self._tasks[p]:
project = p
break
self._gen_vtodo(self._tasks[basename(project)][uid], vtodos.add('vtodo'))
elif project:
# Whole project: one VTODO per task in the project.
for task in self._tasks[basename(project)].values():
self._gen_vtodo(task, vtodos.add('vtodo'))
else:
# Neither given: serialise every task of every project.
for project in self._tasks:
for task in self._tasks[project].values():
self._gen_vtodo(task, vtodos.add('vtodo'))
return vtodos |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_filesnames(self):
"""Return a list of all Taskwarrior projects as virtual files in the data directory""" |
self._update()
# Always expose the two built-in pseudo-projects alongside real ones.
projects = set(list(self._tasks.keys()) + self._task_projects + ['all_projects', 'unaffiliated'])
# NOTE(review): p.split()[0] truncates a project name at its first
# whitespace -- confirm multi-word project names cannot occur here.
return [join(self._data_location, p.split()[0]) for p in projects] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move_vobject(self, uuid, from_project, to_project):
"""Update the project of the task with the UID uuid""" |
# Silently ignore moves to unknown target projects.
if to_project not in self.get_filesnames():
return
# UIDs look like '<uuid>@<host>'; Taskwarrior wants the uuid part.
uuid = uuid.split('@')[0]
with self._lock:
# NOTE(review): unlike _update(), hooks are not disabled here (no
# rc.hooks=off) and the command's exit status is not checked --
# confirm whether that is deliberate.
run(['task', 'rc.verbose=nothing', 'rc.data.location={self._data_location}'.format(**locals()), 'rc.confirmation=no', uuid, 'modify', 'project:{}'.format(basename(to_project))]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(self, obj):
    """Merge another object's attribute values into this instance.

    Every attribute of ``obj`` whose name does not contain a double
    underscore is copied onto ``self``, overwriting any existing value.

    :param obj: An object to be merged with into this layer
    :type obj: object
    """
    for name in dir(obj):
        if '__' not in name:
            setattr(self, name, getattr(obj, name))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_attributes(self):
    """Walk the layers from top to bottom and collect attribute names.

    :returns: A list of all the (unique) attribute names found on any layer
    :rtype: list
    """
    attributes = []
    # Iterate the top-most layer first (reverse stacking order), gathering
    # the names of all non-dunder instance attributes.
    # Uses range() instead of the original Python-2-only xrange(), which is
    # a NameError on Python 3; behaviour is identical on both versions.
    for layer in reversed(self.layers):
        attributes.extend(name for name in layer.__dict__
                          if not name.startswith('__') and not name.endswith('__'))
    return list(set(attributes))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_layer_from_env(self):
"""This function creates a new layer, gets a list of all the current attributes, and attempts to find matching environment variables with the prefix of FJS\_. If matches are found it sets those attributes in the new layer. """ |
self.add_layer()
# For every known attribute, look for an FJS_<name> environment variable
# and, when present, set its value on the freshly added layer.
for attribute in self.get_attributes():
env_attribute = os.environ.get('FJS_{}'.format(attribute))
if env_attribute:
# NOTE(review): falsy values such as '' are skipped, and values are
# always strings -- confirm downstream code expects string types.
setattr(self, attribute, env_attribute)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contrast(colour1, colour2):
    r"""Determine the contrast ratio between two colours.

    The ratio ``(max_lum + 0.05) / (min_lum + 0.05)`` ranges from 1:1 (a
    colour on itself) to 21:1 (white on black). WCAG accessibility levels
    use 3:1 (A), 4.5:1 (AA) and 7:1 (AAA) as minimums for regular text.

    Args:
        colour1 (colourettu.Colour): a colour, or a value accepted by Colour()
        colour2 (colourettu.Colour): a second colour, or such a value

    Returns:
        float: the contrast ratio between the two colours

    Raises:
        TypeError: if either argument cannot be interpreted as a Colour
    """
    def _as_colour(value, label):
        # accept Colour (and subclass) instances directly, otherwise try to
        # coerce; a narrow `except Exception` replaces the original bare
        # except that also swallowed KeyboardInterrupt/SystemExit
        if isinstance(value, Colour):
            return value
        try:
            return Colour(value)
        except Exception:
            raise TypeError("%s must be a colourettu.colour" % label)

    mycolour1 = _as_colour(colour1, 'colour1')
    mycolour2 = _as_colour(colour2, 'colour2')
    lum1 = mycolour1.luminance()
    lum2 = mycolour2.luminance()
    return (max(lum1, lum2) + 0.05) / (min(lum1, lum2) + 0.05)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hex(self):
    """Return this colour as an upper-case HTML-style hex string.

    Returns:
        str: the colour in ``#RRGGBB`` form
    """
    channels = (self._r, self._g, self._b)
    return '#' + ''.join('{:02x}'.format(value) for value in channels).upper()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalized_rgb(self):
    r"""Return the normalized (r, g, b) channels of this colour.

    Each 0-255 channel is linearized with the sRGB gamma expansion used by
    the WCAG relative-luminance definition:

    .. math::
        c_{norm} = c/255/12.92 \text{ if } c/255 \le 0.03928
        \text{ else } ((c/255 + 0.055)/1.055)^{2.4}

    Returns:
        tuple: the (r, g, b) values, each normalized to the range 0.0-1.0
    """
    def _linearize(channel):
        # sRGB gamma expansion of one 0-255 channel value; the single
        # helper replaces three identical copies of the same branch
        scaled = channel / 255
        if scaled <= 0.03928:
            return scaled / 12.92
        return math.pow((scaled + 0.055) / 1.055, 2.4)

    return (_linearize(self._r), _linearize(self._g), _linearize(self._b))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy_tree(src, dst):
    """Recursively copy the directory tree rooted at ``src`` into ``dst``.

    Missing destination directories are created as needed; files are
    copied with :func:`shutil.copy`.

    :param src: source directory to walk
    :param dst: destination root directory
    """
    for root, _subdirs, files in os.walk(src):
        # Map the current directory onto its destination counterpart.
        # os.path.relpath is used instead of root.replace(src, dst), which
        # replaced EVERY occurrence of the src substring anywhere in the
        # path and corrupted destinations like /tmp/a/x/tmp/a.
        rel = os.path.relpath(root, src)
        current_dest = dst if rel == os.curdir else os.path.join(dst, rel)
        if not os.path.exists(current_dest):
            os.makedirs(current_dest)
        for name in files:
            shutil.copy(os.path.join(root, name),
                        os.path.join(current_dest, name))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def conda_info(prefix):
    """Return conda metadata for the installation at ``prefix``.

    Runs ``<prefix>/bin/conda info --json`` and returns the parsed result.

    :param prefix: root of the conda installation
    :returns: the parsed conda info mapping
    """
    import json
    cmd = [join(prefix, 'bin', 'conda'), 'info', '--json']
    output = check_output(cmd)
    # ``--json`` guarantees JSON output, so parse with the stdlib json
    # module instead of yaml.load, which is unsafe/deprecated without an
    # explicit Loader argument (JSON is a strict subset of YAML).
    return json.loads(output)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def install(self, update=False):
    """Install the conda environment, its packages and pip requirements.

    :param update: when True, force offline mode for every step
    :returns: an empty tuple
    """
    offline_mode = self.offline or update
    for step in (self.create_env, self.install_pkgs, self.install_pip):
        step(offline_mode)
    return ()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_fields(self, **fields):
    """Set several fields at once, via each field's proxy setter.

    :param fields: mapping of field name to the value to set
    """
    for name, value in iteritems(fields):
        getattr(self, name).proxy_set(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_queue(cls, name, priority=0, **fields_if_new):
    """Get, or create, and return the wanted queue.

    If the queue has to be created, the fields in ``fields_if_new`` are
    set on the new queue. Retrieval is retried up to 10 times to cope
    with transient redis race conditions.

    :param name: the queue name
    :param priority: the queue priority (default 0)
    :returns: the existing or newly created queue
    """
    queue_kwargs = {'name': name, 'priority': priority}
    retries = 0
    while retries < 10:
        retries += 1
        try:
            queue, created = cls.get_or_connect(**queue_kwargs)
        except IndexError:
            # Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP
            # => retry
            continue
        except ValueError:
            # more than one (race condition https://github.com/yohanboniface/redis-limpyd/issues/82 ?)
            try:
                # arbitrarily keep the first matching queue
                queue = cls.collection(**queue_kwargs).instances()[0]
            except IndexError:
                # but no more now ?!
                # => retry
                continue
            else:
                created = False
        # ok we have our queue, stop now
        break
    # NOTE(review): if all 10 retries fail, 'queue'/'created' are unbound and
    # a NameError escapes here -- confirm this is the intended failure mode
    if created and fields_if_new:
        # initial field values are only applied to a brand-new queue
        queue.set_fields(**fields_if_new)
    return queue
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_waiting_keys(cls, names):
    """Return the waiting-list keys of all matching queues, for use with blpop.

    :param names: queue names to look up (ordered by priority)
    :returns: list of redis keys of the queues' waiting lists
    """
    keys = []
    for queue in cls.get_all_by_priority(names):
        keys.append(queue.waiting.key)
    return keys
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count_waiting_jobs(cls, names):
    """Return the total number of jobs waiting in the named queues.

    :param names: queue names to inspect
    :returns: the summed length of all matching waiting lists
    """
    return sum(queue.waiting.llen() for queue in cls.get_all(names))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count_delayed_jobs(cls, names):
    """Return the total number of delayed jobs in the named queues.

    :param names: queue names to inspect
    :returns: the summed cardinality of all matching delayed zsets
    """
    return sum(queue.delayed.zcard() for queue in cls.get_all(names))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def requeue_delayed_jobs(self):
    """Put all delayed jobs that are now ready back in the waiting list.

    Only one worker at a time may requeue a given queue (guarded by a
    short-lived redis lock). Jobs whose delay target is still in the
    future are left alone; the delayed zset is sorted by date, so the
    scan can stop at the first not-yet-ready entry.

    :returns: list of ``(job_ident, error_message)`` failure tuples
    """
    lock_key = self.make_key(
        self._name,
        self.pk.get(),
        "requeue_all_delayed_ready_jobs",
    )
    connection = self.get_connection()
    if connection.exists(lock_key):
        # if locked, a worker is already on it, don't wait and exit
        return []
    with Lock(connection, lock_key, timeout=60):
        # stop here if we know we have nothing
        first_delayed_time = self.first_delayed_time
        if not first_delayed_time:
            return []
        # get when we are :)
        now_timestamp = datetime_to_score(datetime.utcnow())
        # the first job will be ready later, and so the other ones too, then
        # abort
        if float(first_delayed_time) > now_timestamp:
            return []
        failures = []
        while True:
            # get the first entry
            first_entry = self.first_delayed
            # no first entry, another worker took all from us !
            if not first_entry:
                break
            # split into vars for readability
            job_ident, delayed_until = first_entry
            # if the date of the job is in the future, another work took the
            # job we wanted, so we let this job here and stop the loop as we
            # know (its a zset sorted by date) that no other jobs are ready
            if delayed_until > now_timestamp:
                break
            # remove the entry we just got from the delayed ones
            self.delayed.zrem(job_ident)
            # and add it to the waiting queue
            try:
                job = Job.get_from_ident(job_ident)
                # only flip DELAYED jobs to WAITING; other statuses pass through
                if job.status.hget() == STATUSES.DELAYED:
                    job.status.hset(STATUSES.WAITING)
                self.enqueue_job(job)
            except Exception as e:
                # record the failure but keep draining the remaining entries
                failures.append((job_ident, '%s' % e))
        return failures
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_from_ident(self, ident):
    """Resolve an ident string back into a job instance.

    The ident is ``"<model repr>:<pk>"`` as produced by ``get_ident``.

    :param ident: the combined model-representation/pk string
    :returns: the job instance loaded from its model class
    """
    model_repr, job_pk = ident.split(':', 1)
    model_class = import_class(model_repr)
    return model_class.get(job_pk)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_queue_name(cls, queue_name=None):
""" Return the given queue_name if defined, else the class's one. If both are None, raise an Exception """ |
if queue_name is None and cls.queue_name is None:
raise LimpydJobsException("Queue's name not defined")
if queue_name is None:
return cls.queue_name
return queue_name |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def duration(self):
    """Return the job's run time as a timedelta, or None if unavailable.

    The 'start' and 'end' hash fields are parsed as datetimes; when either
    is missing or unparseable, None is returned.
    """
    try:
        start, end = self.hmget('start', 'end')
        return parse(end) - parse(start)
    except (TypeError, ValueError):
        # TypeError: a field is None (not set yet); ValueError: the stored
        # string is not a valid datetime. The original bare `except:` also
        # hid genuine bugs (NameError, redis errors) -- narrowed on purpose.
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def requeue(self, queue_name=None, priority=None, delayed_for=None, delayed_until=None, queue_model=None):
    """Requeue the job in the given queue if it has previously failed.

    Only jobs currently in the ERROR status may be requeued; the timing
    fields of the failed run are cleared first.

    :param queue_name: target queue (defaults to the class queue name)
    :param priority: new priority; if None the job keeps its current one
    :param delayed_for: relative delay, combined into an absolute datetime
    :param delayed_until: absolute datetime before which the job must wait
    :param queue_model: Queue model class to use
    :raises LimpydJobsException: if the job is not in the ERROR status
    """
    queue_name = self._get_queue_name(queue_name)
    # we can only requeue a job that raised an error
    if self.status.hget() != STATUSES.ERROR:
        raise LimpydJobsException('Job cannot be requeued if not in ERROR status')
    # drop the previous run's start/end timestamps
    self.hdel('start', 'end')
    if priority is None:
        priority = self.priority.hget()
    # merge the relative/absolute delay arguments into one datetime
    delayed_until = compute_delayed_until(delayed_for, delayed_until)
    self.enqueue_or_delay(queue_name, priority, delayed_until, queue_model=queue_model)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def enqueue_or_delay(self, queue_name=None, priority=None, delayed_until=None, prepend=False, queue_model=None):
    """Enqueue the job now, or delay it when ``delayed_until`` is in the future.

    :param queue_name: target queue (defaults to the class queue name)
    :param priority: new priority; if None the job keeps its current one
    :param delayed_until: datetime before which the job must not run
    :param prepend: if True, put the job at the front of the waiting list
    :param queue_model: Queue model class (defaults to ``self.queue_model``)
    """
    queue_name = self._get_queue_name(queue_name)
    fields = {'queued': '1'}
    if priority is not None:
        fields['priority'] = priority
    else:
        priority = self.priority.hget()
    # the job is delayed only if the target moment is strictly after utcnow
    in_the_future = delayed_until and delayed_until > datetime.utcnow()
    if in_the_future:
        fields['delayed_until'] = str(delayed_until)
        fields['status'] = STATUSES.DELAYED
    else:
        # clear any stale delay marker before marking the job as waiting
        self.delayed_until.delete()
        fields['status'] = STATUSES.WAITING
    self.hmset(**fields)
    if queue_model is None:
        queue_model = self.queue_model
    queue = queue_model.get_queue(queue_name, priority)
    if in_the_future:
        queue.delay_job(self, delayed_until)
    else:
        queue.enqueue_job(self, prepend)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collection_for_job(cls, job):
    """Return the collection of errors recorded for the given job.

    :param job: the job whose errors are wanted
    :returns: the filtered error collection
    """
    identifier = getattr(job, '_cached_identifier', job.identifier.hget())
    return cls.collection(job_model_repr=job.get_model_repr(),
                          identifier=identifier)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_batch(self, data):
    """Store multiple documents.

    Args:
        data <dict>: documents to store, keyed by document id. Existing
            documents get their current ``_rev`` injected so the update
            does not conflict.

    Returns:
        revs <dict>: new revisions indexed by document id (only the
            documents that were stored successfully).
    """
    # fetch existing documents to get current revisions; materialize the
    # key view into a list so the client library receives a plain sequence
    rows = self.bucket.view("_all_docs", keys=list(data.keys()),
                            include_docs=True)
    for row in rows:
        key = row.id
        # dict.has_key() was removed in Python 3 -- use `in` / `not in`
        if key and "_rev" not in data[key]:
            data[key]["_rev"] = row.doc["_rev"]
    # stamp each document with its own id before the bulk update
    for doc_id, item in data.items():
        item["_id"] = doc_id
    revs = {}
    for success, docid, rev_or_exc in self.bucket.update(list(data.values())):
        if not success and self.logger:
            self.logger.error("Document update conflict (batch) '%s', %s" % (docid, rev_or_exc))
        elif success:
            revs[docid] = rev_or_exc
    return revs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_design(self, design_name):
    """Return the design document with the matching name as a dict.

    :param design_name: name of the design document (without ``_design/``)
    :returns: the parsed design document
    :raises RESTException: if CouchDB reports an error (via ``self.result``)
    """
    # the original wrapped this in `try: ... except: raise`, which is a
    # no-op (and a bare except) -- removed
    response = requests.request(
        "GET",
        "%s/%s/_design/%s" % (
            self.host,
            self.database_name,
            design_name
        ),
        auth=self.auth
    )
    return self.result(response.text)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def del_design(self, design_name):
    """Delete the specified design document.

    The design's current ``_rev`` is fetched first so CouchDB accepts
    the deletion.

    :param design_name: name of the design document to remove
    :raises RESTException: if the design does not exist or CouchDB errors
    """
    # the original `try: ... except: raise` wrapper was a no-op -- removed
    design = self.get_design(design_name)
    response = requests.request(
        "DELETE",
        "%s/%s/_design/%s" % (
            self.host,
            self.database_name,
            design_name
        ),
        params={"rev": design.get("_rev")},
        auth=self.auth
    )
    return self.result(response.text)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put_design(self, design_name, design, verbose=False):
    """Create or update a design document on the loaded database.

    If a previous revision exists its ``_rev`` is carried over so CouchDB
    accepts the update; when the stored ``version`` matches the new one,
    the update is skipped entirely.

    :param design_name: name of the design document
    :param design: JSON string of the design document
    :param verbose: when True, report a skipped no-change update
    """
    try:
        # check if there is a previous revision of the specified design;
        # if so, take its _rev and apply it to the new version
        existing = self.get_design(design_name)
        design = json.loads(design)
        if design.get("version") and existing.get("version") == design.get("version"):
            if verbose:
                # print() works on both Python 2 and 3; the original
                # Python-2-only print statement was a SyntaxError on 3
                print("No change in design... skipping update!")
            return
        design["_rev"] = existing["_rev"]
        design = json.dumps(design)
    except RESTException:
        # no existing design document: create it from scratch
        pass
    r = requests.request(
        "PUT",
        "%s/%s/_design/%s" % (
            self.host,
            self.database_name,
            design_name
        ),
        auth=self.auth,
        data=design,
        headers={"content-type": "application/json"}
    )
    return self.result(r.text)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def result(self, couchdb_response_text):
    """Interpret a CouchDB REST response.

    :param couchdb_response_text: the raw JSON response body
    :returns: True when CouchDB reports ``ok``; otherwise the parsed payload
    :raises RESTException: when CouchDB reports an ``error``
    """
    parsed = json.loads(couchdb_response_text)
    if parsed.get("ok"):
        return True
    if parsed.get("error"):
        raise RESTException(
            "%s: %s" % (parsed.get("error"), parsed.get("reason"))
        )
    return parsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default_child_path(path):
    """Resolve ``path`` to its deepest configured default child.

    If ``parent`` maps to ``parent/child`` in ``DEFAULT_CHILDREN`` and that
    maps further to ``parent/child/grandchild``, the grandchild path is
    returned. A path with no default child resolves to itself.

    Keyword arguments:
    path -- the parent path to resolve to its deepest default child
    """
    # iterative form of the original recursion: follow the mapping until a
    # lookup fails (either the path has no child, or the config key itself
    # is missing -- both surface as KeyError, matching the original)
    while True:
        try:
            path = current_app.config['DEFAULT_CHILDREN'][path]
        except KeyError:
            return path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def class_parameters(decorator):
    """Class-decorator factory: wrap every callable class attribute.

    Returns a decorator that, applied to a class, replaces each callable
    attribute found in the class's own ``__dict__`` with
    ``decorator(attribute)`` and returns the (mutated) class.

    :param decorator: the wrapper applied to each callable attribute
    :raises TypeError: if the decorated object is not a class
    """
    def decorate(the_class):
        if not isclass(the_class):
            raise TypeError(
                'class_parameters(the_class=%s) you must pass a class' % (
                    the_class
                )
            )
        for name in the_class.__dict__:
            attribute = getattr(the_class, name)
            if callable(attribute):
                setattr(the_class, name, decorator(attribute))
        return the_class
    return decorate
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dir_backup():
    """Parse command-line arguments and retrieve a directory backup from S3.

    Delegates to ``s3_get_dir_backup`` with the AWS credentials, bucket and
    folder options defined on the module-level argparse ``parser``.
    """
    args = parser.parse_args()
    # assumes the module-level parser declares all of these options -- confirm
    s3_get_dir_backup(
        args.aws_access_key_id,
        args.aws_secret_access_key,
        args.bucket_name,
        args.s3_folder,
        args.zip_backups_dir, args.project)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lazy_send(chainlet, chunks):
    """Canonical version of ``chainlet_send`` taking and returning iterables.

    :param chainlet: the chainlet to receive and return data
    :type chainlet: chainlink.ChainLink
    :param chunks: the stream slice of data to pass to ``chainlet``
    :type chunks: iterable
    :return: the resulting stream slice of data returned by ``chainlet``
    :rtype: iterable
    """
    fork = chainlet.chain_fork
    join = chainlet.chain_join
    # pick the 1/n -> 1/m handler matching the chainlet's fork/join flags
    if fork:
        handler = _send_n_get_m if join else _lazy_send_1_get_m
    else:
        handler = _lazy_send_n_get_1 if join else _lazy_send_1_get_1
    return handler(chainlet, chunks)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eager_send(chainlet, chunks):
    """Eager version of ``lazy_send``, evaluating the result immediately.

    :note: The value returned by an ``n`` to ``m`` link is considered
           fully evaluated already and is passed through untouched.

    :param chainlet: the chainlet to receive and return data
    :type chainlet: chainlink.ChainLink
    :param chunks: the stream slice of data to pass to ``chainlet``
    :type chunks: iterable
    :return: the resulting stream slice of data returned by ``chainlet``
    :rtype: iterable
    """
    fork = chainlet.chain_fork
    join = chainlet.chain_join
    if fork and join:
        return _send_n_get_m(chainlet, chunks)
    if fork:
        return tuple(_lazy_send_1_get_m(chainlet, chunks))
    if join:
        return tuple(_lazy_send_n_get_1(chainlet, chunks))
    return tuple(_lazy_send_1_get_1(chainlet, chunks))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(fname):
    """Return the text content of ``fname``, resolved relative to SCRIPTDIR.

    :param fname: file name relative to the script directory
    :returns: the file's content as text
    """
    path = os.path.join(SCRIPTDIR, fname)
    if PY3:
        handle = open(path, 'r', encoding='utf8')
    else:
        handle = open(path, 'r')
    # the context manager guarantees the handle is closed even if read()
    # raises; the original leaked the handle on any exception
    with handle:
        return handle.read()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_function(self):
    """Resolve the dotted path in ``self.function`` to a callable and cache it.

    :returns: the resolved function object
    :raises ProcessorConfigurationError: when the function cannot be resolved
    """
    if not hasattr(self, '_function'):
        try:
            modname, funcname = self.function.rsplit('.', 1)
            mod = import_module(modname)
            self._function = getattr(mod, funcname)
        except (ImportError, AttributeError, ValueError) as err:
            # `except (...), err` is Python-2-only syntax and a SyntaxError
            # on Python 3; `as err` works on both 2.6+ and 3
            raise ProcessorConfigurationError(err)
    return self._function
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_file(self, path, dryrun):
    """Scan a file for lines matching this processor's expression.

    :param path: path of the file to scan
    :param dryrun: when True, return the path without opening the file
    :returns: the path (dryrun), the list of matching lines, or None when
              nothing matched
    """
    # in dryrun mode just report the file path
    if dryrun:
        return path
    # collect every line matching the configured expression
    with open(path, "r") as handle:
        matches = [line for line in handle if re.search(self.__exp, line)]
    return matches if matches else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def concatenate(input_files, output_file):
    """Build a compiler entry concatenating ``input_files`` into ``output_file``.

    In debug mode the concatenation step prefixes each file's contents
    with a comment naming the file.

    :raises RuntimeError: if ``input_files`` is not a list/tuple
    """
    from .modules import utils, concat
    if not isinstance(input_files, (list, tuple)):
        raise RuntimeError('Concatenate takes a list of input files.')
    spec = {
        'dependencies_fn': utils.no_dependencies,
        'compiler_fn': concat.concatenate_input_files,
        'input': input_files,
        'output': output_file,
        'kwargs': {},
    }
    return spec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy_files(src_dir, dst_dir, filespec='*', recursive=False):
    """Build a compiler entry copying files matching ``filespec``.

    When ``recursive`` is True, matching subdirectories are copied too.

    :raises RuntimeError: if src equals dst, or src does not exist
    """
    import os
    from .modules import copyfiles
    if src_dir == dst_dir:
        raise RuntimeError('copy_files() src and dst directories must be different.')
    if not os.path.isdir(src_dir):
        raise RuntimeError('copy_files() src directory "{}" does not exist.'.format(src_dir))
    options = {'filespec': filespec, 'recursive': recursive}
    return {
        'dependencies_fn': copyfiles.list_files,
        'compiler_fn': copyfiles.copy_files,
        'input': src_dir,
        'output': dst_dir,
        'kwargs': options,
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def minify_js(input_files, output_file):
    """Build a compiler entry minifying javascript files into ``output_file``.

    The output may equal one of the inputs to minify in place; in debug
    mode files are only concatenated, not minified.

    :raises RuntimeError: if ``input_files`` is not a list/tuple
    """
    from .modules import minify, utils
    if not isinstance(input_files, (list, tuple)):
        raise RuntimeError('JS minifier takes a list of input files.')
    spec = {
        'dependencies_fn': utils.no_dependencies,
        'compiler_fn': minify.minify_js,
        'input': input_files,
        'output': output_file,
        'kwargs': {},
    }
    return spec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split_css_for_ie_selector_limit(input_file, output_file):
    """Build a compiler entry splitting a large CSS file for old IE.

    Each resulting file stays under IE's 4096-selector limit.

    :raises RuntimeError: if ``input_file`` is not a single string path
    """
    from .modules import bless, utils
    if not isinstance(input_file, str):
        raise RuntimeError('CSS splitter takes only a single input file.')
    spec = {
        'dependencies_fn': utils.no_dependencies,
        'compiler_fn': bless.bless_css,
        'input': input_file,
        'output': output_file,
        'kwargs': {},
    }
    return spec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile_less(input_file, output_file):
    """Build a compiler entry for a LESS source file.

    The output is minified in release mode.

    :raises RuntimeError: if ``input_file`` is not a single string path
    """
    from .modules import less
    if not isinstance(input_file, str):
        raise RuntimeError('LESS compiler takes only a single input file.')
    spec = {
        'dependencies_fn': less.less_dependencies,
        'compiler_fn': less.less_compile,
        'input': input_file,
        'output': output_file,
        'kwargs': {},
    }
    return spec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile_sass(input_file, output_file):
    """Build a compiler entry for a SASS source file.

    The output is minified in release mode.

    :raises RuntimeError: if ``input_file`` is not a single string path
    """
    from .modules import sass
    if not isinstance(input_file, str):
        raise RuntimeError('SASS compiler takes only a single input file.')
    spec = {
        'dependencies_fn': sass.sass_dependencies,
        'compiler_fn': sass.sass_compile,
        'input': input_file,
        'output': output_file,
        'kwargs': {},
    }
    return spec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def browserify_node_modules(module_name_list, output_file, babelify=False):
    """Build a compiler entry browserifying node modules into one JS file.

    Source maps are generated in debug mode; the output is minified in
    release mode. Modules may also be given as relative paths such as
    ``./path/to/module`` or ``./path/to/module/file.js``.

    :raises RuntimeError: if ``module_name_list`` is not a list/tuple
    """
    from .modules import browserify
    if not isinstance(module_name_list, (list, tuple)):
        raise RuntimeError('Browserify Node Modules compiler takes a list of node module names as input.')
    spec = {
        'dependencies_fn': browserify.browserify_deps_node_modules,
        'compiler_fn': browserify.browserify_compile_node_modules,
        'input': module_name_list,
        'output': output_file,
        'kwargs': {'babelify': babelify},
    }
    return spec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def browserify_libs(lib_dirs, output_file, babelify=False):
    """Build a compiler entry browserifying library dirs into one JS file.

    The final directory name of each entry in ``lib_dirs`` becomes the
    library name used for ``require()``. Source maps in debug mode,
    minified output in release mode.

    :raises RuntimeError: if ``lib_dirs`` is not a list/tuple
    """
    from .modules import browserify
    if not isinstance(lib_dirs, (list, tuple)):
        raise RuntimeError('Browserify Libs compiler takes a list of library directories as input.')
    spec = {
        'dependencies_fn': browserify.browserify_deps_libs,
        'compiler_fn': browserify.browserify_compile_libs,
        'input': lib_dirs,
        'output': output_file,
        'kwargs': {'babelify': babelify},
    }
    return spec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def browserify_file(entry_point, output_file, babelify=False, export_as=None):
    """Build a compiler entry browserifying one JS entry point.

    Non-external dependencies are bundled in. When ``export_as`` is set,
    the entry point's module exports become ``require()``-able under that
    name; otherwise nothing is exposed.

    :raises RuntimeError: if ``entry_point`` is not a single string path
    """
    from .modules import browserify
    if not isinstance(entry_point, str):
        raise RuntimeError('Browserify File compiler takes a single entry point as input.')
    spec = {
        'dependencies_fn': browserify.browserify_deps_file,
        'compiler_fn': browserify.browserify_compile_file,
        'input': entry_point,
        'output': output_file,
        'kwargs': {'babelify': babelify, 'export_as': export_as},
    }
    return spec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def custom_function(func, input_files, output_file):
    """Build a compiler entry delegating to a user-supplied function.

    ``func`` must create the output file itself; it is called with
    ``input_files``, ``output_file`` and a boolean ``release`` flag.
    """
    from .modules import utils
    spec = {
        'dependencies_fn': utils.no_dependencies,
        'compiler_fn': func,
        'input': input_files,
        'output': output_file,
        'kwargs': {},
    }
    return spec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_lxc(name, template='ubuntu', service=None):
    """Factory for a generic LXC container.

    :param name: name of the container to create
    :param template: LXC template to build from (default 'ubuntu')
    :param service: service implementation (defaults to LXCService)
    :returns: the newly created LXC instance with saved metadata
    """
    service = service or LXCService
    service.create(name, template=template)
    metadata = LXCMeta(initial=dict(type='LXC'))
    return LXC.with_meta(name, service, metadata, save=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_lxc_with_overlays(name, base, overlays, overlay_temp_path=None, service=None):
    """Create an LXC using overlays.

    This is fast compared to ``LXC.create`` because no real data is copied.

    :param name: name of the new container
    :param base: name of the base container to overlay
    :param overlays: non-empty list of overlay specifications
    :param overlay_temp_path: unused here -- presumably consumed elsewhere; confirm
    :param service: LXC service implementation (defaults to LXCService)
    :raises TypeError: if ``overlays`` is empty
    """
    service = service or LXCService
    # Check that overlays has content
    if not overlays:
        raise TypeError("Argument 'overlays' must have at least one item")
    # Get the system's LXC path
    lxc_path = service.lxc_path()
    # Calculate base LXC's path
    base_path = os.path.join(lxc_path, base)
    # Calculate the new LXC's path
    new_path = os.path.join(lxc_path, name)
    # Create the new directory if it doesn't exist
    # NOTE(review): os.mkdir fails if lxc_path itself is missing -- confirm
    if not os.path.exists(new_path):
        os.mkdir(new_path)
    overlay_group = OverlayGroup.create(new_path, base_path, overlays)
    initial_meta = dict(type='LXCWithOverlays',
                        overlay_group=overlay_group.meta())
    meta = LXCMeta(initial=initial_meta)
    return LXCWithOverlays.with_meta(name, service, meta, overlay_group,
                                     save=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
    """Start this LXC.

    :raises LXCAlreadyStarted: if the container is already running
    """
    if self.status == 'RUNNING':
        raise LXCAlreadyStarted(self.name)
    self._service.start(self.name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def destroy(self, force=False):
    """UnmanagedLXC destructor.

    Refuses to run unless ``force`` is True, because destroying an
    unmanaged container might not work.

    :param force: must be True to actually destroy the container
    :raises UnmanagedLXCError: when called without ``force=True``
    """
    if not force:
        raise UnmanagedLXCError('Destroying an unmanaged LXC might not '
                                'work. To continue please call this method with force=True')
    super(UnmanagedLXC, self).destroy()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(self):
    """Instantiate and return an object for every LXC known to the service.

    :returns: list of LXC instances, one per known container name
    """
    return [self.get(name) for name in self._service.list_names()]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, name):
    """Retrieve a single LXC by name, loaded from its stored metadata.

    :param name: name of the container to load
    :returns: the loaded LXC instance
    """
    meta_path = self._service.lxc_path(name,
                                       constants.LXC_META_FILENAME)
    metadata = LXCMeta.load_from_file(meta_path)
    return self._loader.load(name, metadata)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init(req, model):
    # pylint: disable=unused-argument
    """Determine the pagination preference from query parameters.

    ``page[limit]``/``page[offset]`` must be non-negative integers and may
    each appear only once.

    :return: Paginator object
    :raises InvalidQueryParams: on malformed pagination parameters
    """
    limit = req.get_param('page[limit]') or goldman.config.PAGE_LIMIT
    offset = req.get_param('page[offset]') or 0
    try:
        return Paginator(limit, offset)
    except ValueError:
        detail = ('The page[\'limit\'] & page[\'offset\'] query '
                  'params may only be specified once each & must '
                  'both be an integer >= 0.')
        raise InvalidQueryParams(**{
            'detail': detail,
            'links': 'jsonapi.org/format/#fetching-pagination',
            'parameter': 'page',
        })
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def first(self):
    """Generate query parameters for the first page, or None when
    pagination is unnecessary (no total, or one page holds everything)."""
    if not self.total or self.limit >= self.total:
        return None
    return {'page[offset]': 0, 'page[limit]': self.limit}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def last(self):
    """Generate query parameters for the last page.

    Returns None when one page holds everything (limit > total) or the
    current offset is already at/past the end.
    """
    if self.limit > self.total:
        return None
    if self.offset >= self.total:
        return None
    # Offset of the final page: floor the last valid index to a multiple
    # of the limit. Uses integer division (//): the original `/` produced
    # a float under Python 3, and `(total // limit) * limit` overshot by
    # one full page whenever total was an exact multiple of limit.
    offset = ((self.total - 1) // self.limit) * self.limit
    return {'page[offset]': offset, 'page[limit]': self.limit}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prev(self):
    """Generate query parameters for the previous page, or None when
    there is no total or no earlier page to step back to."""
    if not self.total:
        return None
    two_pages_back = self.offset - self.limit - self.limit
    if two_pages_back < 0:
        return self.first
    return {'page[offset]': self.offset - self.limit,
            'page[limit]': self.limit}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cast_page(val):
    """Coerce a page limit/offset to a non-negative int.

    Raises ValueError for anything that is not convertible to an int
    or that converts to a negative number.
    """
    try:
        number = int(val)
    except (TypeError, ValueError):
        raise ValueError
    if number < 0:
        raise ValueError
    return number
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
    """Represent the Paginator's page links as a plain dict.

    Note the 'next' key is backed by the ``more`` attribute.
    """
    links = {}
    links['current'] = self.current
    links['first'] = self.first
    links['last'] = self.last
    links['next'] = self.more
    links['prev'] = self.prev
    return links
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_zipped_file(url, encoding_error_opt='ignore'):
    """Download a CO-TRACER zip archive and return its first member's text.

    The archive at the given URL is read fully into memory, the first
    file inside it is extracted (official CO-TRACER archives contain a
    single file), and its contents are decoded with the supplied
    error-handling strategy.

    @param url: The URL to download the archive from.
    @type url: str
    @return: The contents of the first file found in the archive.
    @rtype: str
    """
    archive_bytes = cStringIO.StringIO(urllib2.urlopen(url).read())
    archive = zipfile.ZipFile(archive_bytes)
    first_member = archive.namelist()[0]
    return unicode(archive.read(first_member), errors=encoding_error_opt)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_report_raw(year, report_type):
    """Download and extract a CO-TRACER report as an uninterpreted reader.

    Builds the report URL, downloads the archive, extracts the CSV, and
    wraps it in a csv.DictReader. Fields (dates, floats, booleans) are
    left as raw strings.

    @param year: The year for which data should be downloaded.
    @type year: int
    @param report_type: One of the strings in constants.REPORT_TYPES.
    @type report_type: str
    @return: A DictReader over the raw report rows.
    @rtype: csv.DictReader
    """
    if not is_valid_report_type(report_type):
        raise ValueError('%s is not a valid report type.' % report_type)
    contents = get_zipped_file(get_url(year, report_type))
    return csv.DictReader(cStringIO.StringIO(contents))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_report_interpreted(year, report_type):
    """Download, extract, and interpret a CO-TRACER report.

    Like get_report_raw, but the rows are passed through the
    TRACER-specific interpreter for the report type, so dates, floats,
    and booleans are converted from strings.

    @param year: The year for which data should be downloaded.
    @type year: int
    @param report_type: One of the strings in constants.REPORT_TYPES.
    @type report_type: str
    @return: Interpreted report rows.
    @rtype: Iterable over dict
    """
    if not is_valid_report_type(report_type):
        raise ValueError('%s is not a valid report type.' % report_type)
    interpreter = REPORT_TYPE_INTERPRETERS[report_type]
    return interpreter(get_report_raw(year, report_type))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_report(year, report_type=None):
    """Download, extract, and interpret one or all CO-TRACER reports.

    @param year: The year to retrieve a report or reports for.
    @type year: int
    @keyword report_type: One of the strings in constants.REPORT_TYPES,
        or None to download every report type for the year. Defaults to
        None.
    @type report_type: str or None
    @return: Interpreted rows for the single report, or a dict mapping
        each report type in constants.REPORT_TYPES to its interpreted
        rows when report_type is None.
    @rtype: dict or Iterable over dict
    """
    # `is None`, not `== None`: identity comparison is the PEP 8 idiom
    # and avoids surprises with objects overriding __eq__.
    if report_type is None:
        report_sections = [
            get_report_interpreted(year, report)
            for report in constants.REPORT_TYPES
        ]
        return dict(zip(constants.REPORT_TYPES, report_sections))
    return get_report_interpreted(year, report_type)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def evalPatternInArray(pattern, arr):
    """Score how well ``pattern``, repeated cyclically, matches ``arr``.

    Returns an accumulated dissimilarity: 0 means an identical
    repetition and larger values mean increasingly different sequences.
    NOTE(review): the original docstring claimed the score is scaled to
    0-1, but its own example outputs (0.0909..., 162.2...) show it is an
    unbounded accumulated error -- treat 0 as identical, larger as more
    different.

    Values that are "close enough" (relative difference < 0.15) advance
    both cursors; otherwise the smaller side is merged with its next
    element (a surplus or missing line) and the mismatch is added to the
    score. Terminates only when ``arr`` is exhausted.
    """
    l = len(pattern)   # pattern length (pattern index wraps at this)
    ll = len(arr)      # array length (loop ends when arr is consumed)
    # print l, ll
    mx_additions = 3   # merge-step counter ceiling; see final `if` below
    sim = 0            # accumulated dissimilarity (the return value)
    i = 0              # cursor into arr
    j = 0              # cursor into pattern (cyclic)
    c = 0              # consecutive merge-step counter
    p = pattern[j]     # current (possibly merged) pattern value
    v = arr[i]         # current (possibly merged) array value
    while True:
        # relative difference:
        # d in (-1, 1) when p and v share a sign; NOTE(review): under
        # Python 2 integer inputs this is floor division -- presumably
        # floats are expected, as in the docstring example. Confirm.
        if p == v:
            d = 0
        elif v + p == 0:
            # degenerate case: p == -v, relative formula would divide
            # by zero, so fall back to the raw array value
            d = v
        else:
            d = (p - v) / (v + p)
        # print d
        if abs(d) < 0.15:
            # close enough: accept the pair and advance both cursors
            c = mx_additions
            j += 1
            i += 1
            if j == l:
                j = 0
            if i == ll:
                # print sim, v, p,a
                return sim
            p = pattern[j]
            v = arr[i]
        elif d < 0:
            # surplus line
            # pattern value is smaller: merge in the next pattern value
            # and penalize by the mismatch
            c += 1
            j += 1
            if j == l:
                j = 0
            p += pattern[j]
            sim += abs(d)
        else:
            # line missing
            # array value is smaller: merge in the next array value
            # and penalize by the mismatch
            c += 1
            i += 1
            if i == ll:
                return sim
            v += arr[i]
            sim += abs(d)
        # NOTE(review): since `c` is set to mx_additions on every match,
        # this adds an extra penalty right after each accepted pair (and
        # after exactly mx_additions merges) -- intent unclear, verify.
        if c == mx_additions:
            sim += abs(d)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def version():
    """Return a human-readable version string.

    Official releases follow a semver style (e.g. ``1.2.7``). Dev
    versions append hyphenated qualifiers (e.g. ``1.2.7-dev``).
    """
    bits = [str(bit) for bit in __version__]
    release = '.'.join(bits[:3])
    return '-'.join([release] + bits[3:])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup(self, puller: bool=None, subscriptions: Dict[str, Any]=None):
    """Sets up this Node with the specified Interfaces before it is run.

    Args:
        puller: Indication if a Puller Interface should be created.
        subscriptions: Collection of the Subscriber Interfaces to be
            created and their Slots. Defaults to no subscriptions.
    """
    # NOTE: the default used to be a mutable `{}` literal, which Python
    # shares across all calls; None is the safe equivalent and behaves
    # identically since the dict is only ever read, never mutated.
    if puller:
        puller = self._zmq.socket(zmq.PULL)
        ip, port, host = self.rslv('rcv')
        puller.bind('tcp://{}:{}'.format(host or ip, port))
        self.poll(puller)
    if subscriptions:
        for publisher in subscriptions:  # type: str
            sub = subscriptions[publisher]
            self.add(publisher, sub.get('slots'), sub.get('buffer-length'))
        logger.info('Listening to %s', {
            k: (1 if subscriptions[k].get('slots') is None else len(subscriptions[k].get('slots')))
            for k in subscriptions
        })
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getFileCategory(self, fileInfo):
    """Extract the category name from a file-info HTML fragment.

    Walks the markup by successive substring searches: from "Category",
    past a fixed-width <FONT ...> tag (47 chars), to the text between
    '">' and "</A></B>".
    """
    chunk = fileInfo[fileInfo.find("Category"):]
    chunk = chunk[chunk.find("<FONT ") + 47:]
    chunk = chunk[chunk.find('">') + 2:]
    return chunk[:chunk.find("</A></B>")]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getFileAuthor(self, fileInfo):
    """Extract the author's name and e-mail from a file-info HTML fragment.

    Only a single author is handled, mirroring the original behaviour.
    Result is formatted as ``Name (email)``.
    """
    chunk = fileInfo[fileInfo.find("Author"):]
    chunk = chunk[chunk.find("<FONT ") + 47:]
    chunk = chunk[chunk.find('<B>') + 3:]
    mail = chunk[chunk.find("mailto:") + 7:]
    mail = mail[:mail.find('"')]
    name = chunk[:chunk.find("</B></A>")]
    return name + " (" + mail + ")"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getNumDownloads(self, fileInfo):
    """Extract the download count from a file-info HTML fragment.

    Files flagged "not included in ranking" report "0"; otherwise the
    count is the text between "</A> with " and ".<BR>" in the
    FILE INFORMATION section.
    """
    if fileInfo.find("not included in ranking") != -1:
        return "0"
    marker = "</A> with "
    section = fileInfo[fileInfo.find("FILE INFORMATION"):]
    section = section[:section.find(".<BR>")]
    return section[section.find(marker) + len(marker):]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def napoleon_to_sphinx(docstring, **config_params):
    """Convert a napoleon docstring to a plain sphinx string.

    Args:
        docstring (str): Docstring in napoleon format.
        **config_params (dict): Any napoleon doc configuration; unless
            given, ``napoleon_use_param`` and ``napoleon_use_rtype``
            default to False.

    Returns:
        str: Sphinx string.
    """
    config_params.setdefault("napoleon_use_param", False)
    config_params.setdefault("napoleon_use_rtype", False)
    return str(GoogleDocstring(docstring, Config(**config_params)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(cls, row, reader):
    """Invoke the CSV parser on an individual row.

    ``row`` must already be a dict produced by the CSV reader. The
    ``reader`` itself is passed in so the parsers can reference the CSV
    document headers and the current line number when generating errors.
    """
    # Keys first, then relationships; both report errors against the
    # reader's current line number.
    cls._parse_keys(row, reader.line_num)
    cls._parse_relationships(row, reader.line_num)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.