text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_command_orig(cmd):
    """Run *cmd* through the shell and return its captured stdout.

    :param cmd: shell command string to execute.
    :raises BadRCError: if the command exits with a non-zero return code
        (message includes the rc, the command, and combined output).
    :return: the command's stdout (bytes).
    """
    process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() waits for the process to exit and reaps it, so no extra
    # cleanup is needed. The original success branch called
    # os.killpg(os.getpgid(pro.pid), ...) where `pro` was undefined (NameError),
    # and killing the group of an already-reaped child would fail anyway.
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        raise BadRCError("Bad rc (%s) for cmd '%s': %s" % (process.returncode, cmd, stdout + stderr))
    return stdout
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def page_count(self):
    """
    Get count of total pages needed to display this topic's posts.

    :return: int number of pages (a partially filled final page counts).
    """
    postcount = self.post_set.count()
    per_page = get_paginate_by()
    # Ceiling division. The original used "/", which returns a float under
    # Python 3 and would make max_pages a float page count.
    return -(-postcount // per_page)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_image(self):
    """
    Return the image of the first post in this topic that has one,
    or None when no post carries an image.
    """
    candidates = self.post_set.filter(image__gt='')
    if not candidates:
        return None
    return candidates[0].image
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post_url(self):
    """
    Determine which page this post lives on within its topic and return
    a link to the anchor within that page.

    :return: str URL of form "<topic short url>page<N>/#post-<id>".
    """
    topic = self.topic
    earlier_posts = topic.post_set.filter(id__lt=self.id).count()
    # Floor division: the original "/" yields a float page number on Python 3,
    # producing URLs like "page2.0/".
    topic_page = earlier_posts // get_paginate_by() + 1
    return "{0}page{1}/#post-{2}".format(topic.get_short_url(), topic_page, self.id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def next(self):
    """Return the columns of the current record one at a time.

    :raises StopIteration: once every column has been consumed; the
        column cursor is reset to 0 before raising so iteration can restart.
    """
    try:
        column = self._rec[self._cur_col]
    except IndexError:
        self._cur_col = 0
        raise StopIteration
    self._cur_col += 1
    return column
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_summaryRecordSysNumber(summaryRecordSysNumber):
    """Try to parse a vague, not machine-readable description and return
    the first token which contains enough digits (more than 3).

    :param summaryRecordSysNumber: free-form string to scan.
    :return: the first digit-rich token, or "" when none qualifies.
    """
    def number_of_digits(token):
        # count digit characters; the original used len(filter(...)),
        # which breaks on Python 3 where filter() returns an iterator
        return sum(1 for ch in token if ch.isdigit())

    cleaned_tokens = (
        remove_hairs(tok, r" .,:;<>(){}[]\/")
        for tok in summaryRecordSysNumber.split()
    )
    # pick the first token that contains more than 3 digits
    for token in cleaned_tokens:
        if number_of_digits(token) > 3:
            return token
    return ""
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commandify(use_argcomplete=False, exit=True, *args, **kwargs):
    '''Turn decorated functions into command line args.

    Finds the main_command and all commands and generates command line
    arguments from them, then dispatches to the selected command.

    :param use_argcomplete: enable shell completion via argcomplete.
    :param exit: when True, exit(0) after dispatching; otherwise return
        the dispatch result.
    '''
    parser = CommandifyArgumentParser(*args, **kwargs)
    parser.setup_arguments()
    if use_argcomplete:
        try:
            import argcomplete
        except ImportError:
            print('argcomplete not installed, please install it.')
            parser.exit(status=2)
        # Must happen between setup_arguments() and parse_args().
        argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if not exit:
        return parser.dispatch_commands()
    parser.dispatch_commands()
    parser.exit(0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _get_command_args(self, command, args):
'''Work out the command arguments for a given command'''
command_args = {}
command_argument_names =\
command.__code__.co_varnames[:command.__code__.co_argcount]
for varname in command_argument_names:
if varname == 'args':
command_args['args'] = args
elif varname in self.provide_args:
command_args[varname] = self.provide_args[varname]
else:
command_args[varname] = getattr(args, varname)
return command_args |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def types(*args, **kwargs):
    """Quick alias for the Types annotation using only args and kwargs.

    :param tuple args: may contain rtype as its first element.
    :param dict kwargs: may contain ptypes.
    """
    return Types(rtype=first(args), ptypes=kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _checkretry(self, mydelay, condition, tries_remaining, data):
    """Check if input parameters allow retrying the wrapped execution.

    Returns the next delay (current delay scaled by self.backoff) after
    sleeping when a retry is allowed, re-raises ``data`` when the condition
    is ON_ERROR and no tries remain, and returns None otherwise so the
    callee stops retrying.

    :param float mydelay: waiting delay between two executions.
    :param int condition: condition flag checked against self.condition.
    :param int tries_remaining: tries remaining.
    :param data: data to hook (the raised exception when on error).
    """ |
    result = mydelay
    # bitwise test: retry only if this condition is enabled and tries remain
    if self.condition & condition and tries_remaining > 0:
        # hook data with tries_remaining and mydelay
        if self.hook is not None:
            self.hook(data, condition, tries_remaining, mydelay)
        # wait mydelay seconds
        sleep(mydelay)
        result *= self.backoff # increment mydelay with this backoff
    elif condition is Retries.ON_ERROR:
        # NOTE(review): identity test -- assumes Retries.ON_ERROR is a singleton
        raise data # raise data if no retries and on_error
    else: # else Nonify mydelay to prevent callee function to stop
        result = None
    return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getkey(self, args, kwargs):
"""Get hash key from args and kwargs. args and kwargs must be hashable. :param tuple args: called vargs. :param dict kwargs: called keywords. :return: hash(tuple(args) + tuple((key, val) for key in sorted(kwargs)). :rtype: int.""" |
values = list(args)
keys = sorted(list(kwargs))
for key in keys:
values.append((key, kwargs[key]))
result = hash(tuple(values))
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getparams(self, result):
    """Return the (args, kwargs) that were registered with a cached result.

    :param result: cached result to look up.
    :raises ValueError: if result is not cached.
    :rtype: tuple
    """
    for args, kwargs, cached_result in self._cache.values():
        if cached_result == result:
            return args, kwargs
    raise ValueError('Result is not cached')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_entailed_by(self, other):
    """Return True iff `other` is as specific as, or more specific than, self.

    An unknown value (U) is entailed by anything; otherwise the values
    must match exactly.
    """
    other = BoolCell.coerce(other)
    return self.value == U or other.value == self.value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(self, other):
    """ Merges two BoolCells.

    Returns self, mutated to the more specific of the two values when
    necessary.

    :raises Contradiction: when the two cells hold opposing known values.
    :raises Exception: when the cells cannot be related at all.
    """ |
    other = BoolCell.coerce(other)
    if self.is_equal(other):
        # pick among dependencies
        return self
    elif other.is_entailed_by(self):
        # self is already at least as specific: keep it unchanged
        return self
    elif self.is_entailed_by(other):
        # other is strictly more specific: adopt its value
        self.value = other.value
    elif self.is_contradictory(other):
        raise Contradiction("Cannot merge T and F")
    else:
        raise Exception
    return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _makeResult(self):
""" instantiates the result class reporters """ |
return [reporter(self.stream, self.descriptions, self.verbosity) for reporter in self.resultclass] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def module_can_run_parallel(test_module: unittest.TestSuite) -> bool: """ Checks if a given module of tests can be run in parallel or not :param test_module: the module to run :return: True if the module can be run on parallel, False otherwise """ |
for test_class in test_module:
# if the test is already failed, we just don't filter it
# and let the test runner deal with it later.
if hasattr(unittest.loader, '_FailedTest'): # import failure in python 3.4.5+
# noinspection PyProtectedMember
if isinstance(test_class, unittest.loader._FailedTest):
continue
if not isinstance(test_class, collections.Iterable): # likely an import failure in python 3.4.4-
# before python 3.4.5, test import failures were not serializable.
# We are unable to be sure that this is a module import failure, but it very likely is
# if this is the case, we'll just run this locally and see
raise TestClassNotIterable()
for test_case in test_class:
return not getattr(sys.modules[test_case.__module__], "__no_parallel__", False) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def class_can_run_parallel(test_class: unittest.TestSuite) -> bool:
    """Check if a given class of tests can be run in parallel or not.

    :param test_class: the class suite to inspect
    :return: True if the class can run in parallel, False otherwise
        (None when the class suite is empty)
    """
    # the first test case's __no_parallel__ flag decides for the whole class
    for first_case in test_class:
        return not getattr(first_case, "__no_parallel__", False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_summary(self, result, time_taken):
    """Write the run summary (test count, timing, status, detail counts).

    :param result: result object to summarise
    :param time_taken: the time all tests took to run, in seconds
    """
    if hasattr(result, "separator2"):
        self.stream.writeln(result.separator2)
    run_count = result.testsRun
    self.stream.writeln("Ran {number_of_tests} test{s} in {time:.3f}s\n".format(
        number_of_tests=run_count, s="s" if run_count != 1 else "", time=time_taken
    ))
    details = []
    if result.wasSuccessful():
        self.stream.write("OK")
    else:
        self.stream.write("FAILED")
        if result.failures:
            details.append("failures={}".format(len(result.failures)))
        if result.errors:
            details.append("errors={}".format(len(result.errors)))
    if result.skipped:
        details.append("skipped={}".format(len(result.skipped)))
    if result.expectedFailures:
        details.append("expected failures={}".format(len(result.expectedFailures)))
    if result.unexpectedSuccesses:
        details.append("unexpected successes={}".format(len(result.unexpectedSuccesses)))
    if details:
        self.stream.writeln(" ({})".format(", ".join(details)))
    else:
        self.stream.write("\n")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, test: unittest.TestSuite):
    """ Given a TestSuite, create one process per parallelizable suite,
    run them concurrently (bounded by self.process_number), run the
    non-parallel suites locally, then wait for and collect all results.

    :param test: the TestSuite to run
    :return: the ResultCollector summarising the whole run
    """ |
    start_time = time.time()
    process = []  # worker Process objects (note: misnamed singular)
    resource_manager = multiprocessing.Manager()
    results_queue = resource_manager.Queue()
    # bounds how many worker processes may run at once
    tasks_running = resource_manager.BoundedSemaphore(self.process_number)
    test_suites, local_test_suites = self.collect_tests(test)
    results_collector = ResultCollector(
        self.stream, self.descriptions, self.verbosity,
        result_queue=results_queue, test_results=self._makeResult(),
        tests=test_suites
    )
    results_collector.start()
    for index, suite in enumerate(test_suites):
        # acquired here; released by the worker when it finishes,
        # throttling process creation to self.process_number
        tasks_running.acquire()
        x = self.Process(index, suite, results_queue, tasks_running)
        x.start()
        process.append(x)
    # suites flagged as non-parallel run in this process meanwhile
    local_test_suites.run(results_collector)
    for i in process:
        i.join()
    results_queue.join()
    results_collector.end_collection()
    results_collector.join()
    results_collector.printErrors()
    self.print_summary(results_collector, time.time() - start_time)
    return results_collector |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_templates(path: Path) -> List[str]:
    '''List all files under the ``templates`` directory, recursively.

    Files whose name starts with ``_`` are skipped. The resulting list
    contains UNIX-like relative paths starting with ``templates``.
    '''
    return [
        entry.relative_to(path.parent).as_posix()
        for entry in path.glob('**/*')
        if entry.is_file() and not entry.name.startswith('_')
    ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clone_with_new_elements(self, new_elements, drop_keywords=None, rename_dict=None, extra_kwargs=None):
    """Create another Collection of the same class and state but with
    possibly different entries.

    The keyword-control parameters exist because derived classes have
    different constructors than the base class.

    :param new_elements: elements for the clone
    :param drop_keywords: iterable of constructor keywords to omit
    :param rename_dict: mapping of old keyword name -> new keyword name
    :param extra_kwargs: extra keyword arguments for the constructor
    """
    # The original used mutable defaults (set([]) / {}); None sentinels
    # avoid any risk of state shared across calls.
    drop_keywords = set() if drop_keywords is None else drop_keywords
    rename_dict = {} if rename_dict is None else rename_dict
    extra_kwargs = {} if extra_kwargs is None else extra_kwargs
    kwargs = dict(
        elements=new_elements,
        distinct=self.distinct,
        sort_key=self.sort_key,
        sources=self.sources)
    for name in drop_keywords:
        kwargs.pop(name)
    for old_name, new_name in rename_dict.items():
        kwargs[new_name] = kwargs.pop(old_name)
    kwargs.update(extra_kwargs)
    return self.__class__(**kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def source(self):
    """Return the single source name of this collection when unique.

    :raises ValueError: if the collection has zero or multiple sources.
    """
    count = len(self.sources)
    if count == 0:
        raise ValueError("No source associated with %s" % self.__class__.__name__)
    if count > 1:
        raise ValueError("Multiple sources for %s" % self.__class__.__name__)
    return next(iter(self.sources))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filenames(self):
    """Assuming sources are paths to VCF or MAF files, strip their
    directory components and return just the file names (empty/falsy
    sources are skipped)."""
    names = []
    for source in self.sources:
        if source:
            names.append(os.path.basename(source))
    return names
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def short_string(self):
    """Compact string representation which omits the collection elements."""
    if self.sources:
        origin = " from '%s'" % ",".join(self.sources)
    else:
        origin = ""
    return "<%s%s with %d elements>" % (self.__class__.__name__, origin, len(self))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def multi_groupby(self, key_fn):
    """Like groupby, but key_fn may return multiple keys per element;
    the element is added to every returned key's group."""
    grouped = defaultdict(list)
    for element in self:
        for key in key_fn(element):
            grouped[key].append(element)
    # wrap each group in the same Collection type as this one
    return {
        key: self.clone_with_new_elements(members)
        for key, members in grouped.items()
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_above_threshold(self, key_fn, value_dict, threshold, default_value=0.0):
    """Keep only elements whose looked-up value exceeds a threshold.

    Parameters
    ----------
    key_fn : callable
        Given an element of this collection, returns a key into `value_dict`.
    value_dict : dict
        Maps keys returned by `key_fn` to float values.
    threshold : float
        Only keep elements whose value in `value_dict` is above this.
    default_value : float
        Value used for elements whose key is not in `value_dict`.
    """
    return self.filter(
        lambda element: value_dict.get(key_fn(element), default_value) > threshold)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_any_above_threshold(self, multi_key_fn, value_dict, threshold, default_value=0.0):
    """Like filter_above_threshold, but `multi_key_fn` returns several keys
    and an element is kept if ANY of them maps to a value above threshold.

    Parameters
    ----------
    multi_key_fn : callable
        Given an element, returns multiple keys into `value_dict`.
    value_dict : dict
        Maps keys to float values.
    threshold : float
        Keep elements with at least one value above this.
    default_value : float
        Value used for keys missing from `value_dict`.
    """
    def keep(element):
        return any(
            value_dict.get(key, default_value) > threshold
            for key in multi_key_fn(element))
    return self.filter(keep)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def curl(url, params=None, auth=None, req_type='GET', data=None, headers=None, timeout=None, use_gzip=True, use_stream=False):
    """ Make a request to a web resource.

    :param url: URL of the endpoint
    :param params: dict of query-string params appended after "?"
    :param auth: authorization tokens (CURLAuth)
    :param req_type: HTTP method of the request (GET/POST/PUT/DELETE)
    :param data: data to be posted (POST/PUT only)
    :param headers: extra headers sent with the request
    :param timeout: request timeout in seconds
    :param use_gzip: accept gzip and deflate responses from the server
    :param use_stream: do not parse the response content; stream it via raw
    :return: CURLResponse object
    :raises IOError: if req_type is not a supported method
    :raises HTTPError: if the server answered with an HTTP error status
    :raises TimeoutError: for any other URLError -- NOTE(review): this folds
        DNS/connection failures into "timeout"; confirm intended
    """ |
    post_req = ["POST", "PUT"]
    get_req = ["GET", "DELETE"]
    if params is not None:
        url += "?" + urlencode(params)
    if req_type not in post_req + get_req:
        raise IOError("Wrong request column_type \"%s\" passed" % req_type)
    _headers = {}
    handler_chain = []
    req_args = {
        "headers": _headers
    }
    # process content: serialize the body and merge its content headers
    if req_type in post_req and data is not None:
        _data, __header = __parse_content(data)
        _headers.update(__header)
        _headers["Content-Length"] = len(_data)
        req_args["data"] = _data
    # process gzip and deflate: advertise compressed encodings,
    # extending any caller-supplied Accept-Encoding rather than clobbering it
    if use_gzip:
        if "Accept-Encoding" in _headers:
            if "gzip" not in _headers["Accept-Encoding"]:
                _headers["Accept-Encoding"] += ", gzip, x-gzip, deflate"
        else:
            _headers["Accept-Encoding"] = "gzip, x-gzip, deflate"
    # non-forced auth: let urllib negotiate basic auth on a 401 challenge
    if auth is not None and auth.force is False:
        manager = HTTPPasswordMgrWithDefaultRealm()
        manager.add_password(None, url, auth.user, auth.password)
        handler_chain.append(HTTPBasicAuthHandler(manager))
    # forced auth: send the credential headers up-front
    if auth is not None and auth.force:
        _headers.update(auth.headers)
    if headers is not None:
        _headers.update(headers)
    director = build_opener(*handler_chain)
    req = Request(url, **req_args)
    # urllib infers the method from data presence; override it explicitly
    req.get_method = lambda: req_type
    try:
        if timeout is not None:
            return CURLResponse(director.open(req, timeout=timeout), is_stream=use_stream)
        else:
            return CURLResponse(director.open(req), is_stream=use_stream)
    except URLError as e:
        if isinstance(e, HTTPError):
            raise e
        else:
            raise TimeoutError |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_str(string):
    """Generate an `AddEvent` from a line of the form ``ADD <name>``.

    :raises EventParseError: if the line does not match.
    """
    match = re.match(r'^ADD (\w+)$', string)
    if not match:
        raise EventParseError
    return AddEvent(match.group(1))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_str(string):
    """Generate a `SetReadingEvent` from a line of the form
    ``START READING <name> FROM <word> <number>``.

    :raises EventParseError: if the line does not match.
    """
    match = re.match(r'^START READING (\w+) FROM \w+ (\d+)$', string)
    if not match:
        raise EventParseError
    return SetReadingEvent(match.group(1), int(match.group(2)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_str(string):
    """Generate a `ReadEvent` from a line of the form
    ``READ <name> FOR <number> <unit>S``.

    :raises EventParseError: if the line does not match.
    """
    match = re.match(r'^READ (\w+) FOR (\d+) \w+S$', string)
    if not match:
        raise EventParseError
    return ReadEvent(match.group(1), int(match.group(2)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def field_default(colx, table_name, tables_dict):
    "Return the default value for a column (sqparse2.ColX) of table_name."
    if colx.coltp.type.lower() == 'serial':
        # serial columns: next value is max(col)+1, starting at 0
        query = sqparse2.parse('select coalesce(max(%s),-1)+1 from %s' % (colx.name, table_name))
        return sqex.run_select(query, tables_dict, Table)[0]
    if colx.not_null:
        raise NotImplementedError('todo: not_null error')
    return toliteral(colx.default)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_defaults(self, row, tables_dict):
    "Apply column defaults to missing cells of a row that's being inserted."
    cast_row = []
    for colx, value in zip(self.fields, row):
        if value is Missing:
            value = field_default(colx, self.name, tables_dict)
        cast_row.append(emergency_cast(colx, value))
    return cast_row
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_youtube_url(youtube_url, no_controls, autoplay):
    """Convert a YouTube playlist URL into the embed form Helium needs.

    :param youtube_url: the URL of the youtube playlist video.
    :type youtube_url: str
    :param no_controls: whether or not to hide controls in the Helium app.
    :type no_controls: bool
    :param autoplay: whether or not to play the next video in the playlist
        after the current video finishes.
    :type autoplay: bool
    :raises ValueError: if the URL carries no ``list=`` parameter
        (the original raised NameError here).
    :return: the new correct youtube embed URL.
    :rtype: str
    """
    playlist_id = None
    for section in youtube_url.split('&'):
        if 'list' in section:
            playlist_id = section.split('list=')[1]
            break
    if playlist_id is None:
        raise ValueError('no playlist id ("list=...") found in URL: %r' % youtube_url)
    return (
        'https://www.youtube.com/embed/videoseries?{0}&{1}&'
        'loop=1&html5=1&showinfo=0&listType=playlist&list={2}'.format(
            # BUG FIX: the condition was inverted ('' if autoplay else 'autoplay=1'),
            # disabling autoplay exactly when it was requested
            'autoplay=1' if autoplay else '',
            'controls=0' if no_controls else '',
            str(playlist_id)
        )
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def submit_error(self, description, extra=None, default_message=None):
    """Send an error occurrence to the BugzScout endpoint for this instance.

    FogBugz groups cases by *description*: when a case with the **same**
    description exists, only its occurrence count is bumped. Keep
    identifying details (account ids, emails, request ids, ...) out of the
    description and put them in *extra* so occurrence counting stays useful.

    :param description: string description for the error (grouping key)
    :param extra: string details for the error
    :param default_message: string default message to return in responses
    """
    req_data = {'ScoutUserName': self.user,
                'ScoutProject': self.project,
                'ScoutArea': self.area,
                # When this matches, cases are grouped together.
                'Description': description,
                'Extra': extra,
                # 1 forces a new bug to be created.
                'ForceNewBug': 0,
                'ScoutDefaultMessage': default_message,
                # 0 sends XML response, 1 sends HTML response.
                'FriendlyResponse': 0,
                }
    LOG.debug('Making bugzscout request to {0} with body {1}'.format(
        self.url, req_data))
    resp = requests.post(self.url, data=req_data)
    LOG.debug('Response from bugzscout request: {0} body:\n{1}'.format(
        resp, resp.content))
    if resp.ok:
        LOG.info('Successfully submitted error to bugzscout.')
    else:
        # Logger.warn() is a deprecated alias; warning() is the supported name
        LOG.warning('Failed to submit error to bugzscout: {0}'.format(
            resp.reason))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_logging(stream=sys.stderr, filepath=None, format='%(asctime).19s [%(levelname)s] %(name)s: %(message)s'):
    """ Setup logging for the microcache module, but only do it once!

    :param stream: stream to log to (defaults to sys.stderr)
    :param filepath: path to a file to log to as well (defaults to None)
    :param format: override the default format with whatever you like
    """
    # only proceed while the module's lone NullHandler is still in place
    if not (len(logger.handlers) == 1 and isinstance(logger.handlers[0], logging.NullHandler)):
        # Logger.warn() is a deprecated alias; warning() is the supported name
        logger.warning('logging has already been initialized, refusing to do it again')
        return
    formatter = logging.Formatter(format)
    if stream is not None:
        handler = logging.StreamHandler(stream=stream)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    if filepath is not None:
        handler = logging.FileHandler(filename=filepath)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.info('successfully initialized logger')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def this(func, cache_obj=CACHE_OBJ, key=None, ttl=None, *args, **kwargs):
    """Run `func` once and serve later invocations from the cache.

    The cache key normally encodes the function name plus all arguments, so
    distinct invocations are cached separately. Passing an explicit `key`
    stores every invocation under that one key -- be careful! When the cache
    is disabled the function simply runs normally.

    :param func: (expensive?) function to memoize
    :param cache_obj: cache object to use (defaults to the global cache)
    :param key: optional key to store the value under
    :param ttl: optional expiry to apply to the cached value
    :param args: positional arguments forwarded to `func`
    :param kwargs: keyword arguments forwarded to `func`
    """
    cache_key = key or (func.__name__ + str(args) + str(kwargs))
    if cache_obj.has(cache_key):
        return cache_obj.get(cache_key)
    computed = func(*args, **kwargs)
    cache_obj.upsert(cache_key, computed, ttl)
    return computed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has(self, key):
    """Return whether `key` is cached and not expired.

    Returns CACHE_DISABLED if the cache is disabled.

    :param key: key to search for
    """
    if not self.options.enabled:
        return CACHE_DISABLED
    # membership test directly on the dict; `in self._dict.keys()` built
    # an intermediate view for no benefit
    ret = key in self._dict and not self._dict[key].is_expired()
    logger.debug('has({}) == {}'.format(repr(key), ret))
    return ret
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upsert(self, key, value, ttl=None):
    """ Perform an upsert on the cache: insert or replace the entry.

    Returns CACHE_DISABLED if the cache is disabled,
    True on successful operation.

    :param key: key to store the value under
    :param value: value to cache
    :param ttl: optional expiry in seconds (defaults to None)
    """ |
    if not self.options.enabled:
        return CACHE_DISABLED
    logger.debug('upsert({}, {}, ttl={})'.format(repr(key), repr(value), ttl))
    # wrap the value so its expiry can be tracked
    self._dict[key] = MicrocacheItem(value, ttl)
    return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, key, default=CACHE_MISS):
    """Get a value out of the cache.

    Returns CACHE_DISABLED if the cache is disabled.

    :param key: key to search for
    :param default: value returned when the key is not found
        (defaults to CACHE_MISS)
    """
    if not self.options.enabled:
        return CACHE_DISABLED
    if self.has(key):
        value = self._dict[key].value
    else:
        value = default
    logger.debug('get({}, default={}) == {}'.format(repr(key), repr(default), repr(value)))
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear(self, key=None):
    """Clear a cache entry, or the entire cache if no key is given.

    Returns CACHE_DISABLED if the cache is disabled,
    True on successful operation (including a no-op on a missing key).

    :param key: optional key to limit the clear operation to (defaults to None)
    """
    if not self.options.enabled:
        return CACHE_DISABLED
    logger.debug('clear(key={})'.format(repr(key)))
    # `in self._dict` replaces the `in self._dict.keys()` anti-idiom
    if key is not None and key in self._dict:
        del self._dict[key]
        logger.info('cache cleared for key: ' + repr(key))
    elif not key:
        # dict.clear() replaces the copy-then-delete loop
        self._dict.clear()
        logger.info('cache cleared for ALL keys')
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def disable(self, clear_cache=True):
    """ Disable the cache and (by default) clear its contents.

    :param clear_cache: clear the cache contents as well as disabling
        (defaults to True)
    """ |
    logger.debug('disable(clear_cache={})'.format(clear_cache))
    # clear before flipping the flag: clear() is a no-op once disabled
    if clear_cache:
        self.clear()
    self.options.enabled = False
    logger.info('cache disabled') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flatten_dict(d, prefix='', sep='.'):
    """In-place dict flattening: nested mappings and lists are collapsed
    into separator-joined top-level keys, e.g. ``{'a': {'b': 1}}`` ->
    ``{'a.b': 1}`` and ``{'l': [{'x': 1}]}`` -> ``{'l.0.x': 1}``.
    Colliding keys get a numeric suffix (key, key.2, key.3, ...).

    :param d: the dict to flatten (mutated and returned)
    :param prefix: key prefix used during recursion
    :param sep: separator between key path components
    """
    def apply_and_resolve_conflicts(dest, item, prefix):
        # merge the flattened `item` into `dest`, suffixing colliding keys
        for k, v in flatten_dict(item, prefix=prefix, sep=sep).items():
            new_key = k
            i = 2
            while new_key in dest:
                new_key = '{key}{sep}{index}'.format(key=k, sep=sep, index=i)
                i += 1
            dest[new_key] = v

    for key in list(d.keys()):
        # `if prefix:` replaces the Python 2-only `any(unicode(prefix))`
        if prefix:
            new_key = '{p}{sep}{key}'.format(p=prefix, key=key, sep=sep)
        else:
            new_key = key
        value = d[key]
        # collections.Mapping / six.string_types were Python 2 spellings
        if isinstance(value, collections.abc.Mapping):
            apply_and_resolve_conflicts(d, d.pop(key), new_key)
        elif isinstance(value, str):
            d[new_key] = d.pop(key)
        elif isinstance(value, list):
            array = d.pop(key)
            # the original built index keys from the unprefixed key and had
            # an index-skipping `while ...: i += 1` bug; enumerate instead
            # and let apply_and_resolve_conflicts handle collisions
            for i, item in enumerate(array):
                index_key = '{key}{sep}{i}'.format(key=new_key, sep=sep, i=i)
                apply_and_resolve_conflicts(d, item, index_key)
        else:
            d[new_key] = d.pop(key)
    return d
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean_url(url, replacement='_'):
    """Strip the protocol prefix and trailing slash from a URL and replace
    special characters with the given replacement string.

    :param url: The url of the request.
    :param replacement: A string that is used to replace special characters.
    """
    stripped = re.sub(r'https?://', '', url)
    stripped = re.sub(r'/$', '', stripped)
    # one C-level pass over the string instead of chained .replace() calls
    table = {ord(character): replacement for character in '/ _ ? & : ; %'.split()}
    return stripped.translate(table)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(resource, previous=None, migrations_path=None):
    """Create an empty migration for a resource and report the new file."""
    kwargs = {}
    if migrations_path:
        kwargs['package'] = migrations_path
    file_path = migrate.create(resource, previous_version=previous, **kwargs)
    click.secho('Created migration file: ' + file_path, fg='green')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_address_save(self, sender, instance, **kwargs):
    """ Custom handler for address save """
    # Re-index every project/organization tied to this address.
    for associated in self.find_associated_with_address(instance):
        self.handle_save(associated.__class__, associated)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_address_delete(self, sender, instance, **kwargs):
    """ Custom handler for address delete """
    associated = self.find_associated_with_address(instance)
    # Normally unreachable: django cascades the delete to the associated
    # project/address, which triggers handle_delete directly.
    for item in associated:  # pragma: no cover
        self.handle_delete(item.__class__, item)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_job_and_work_save(self, sender, instance, **kwargs):
    """ Custom handler for job and work save """
    # Saving a job/work re-indexes its parent project.
    project = instance.project
    self.handle_save(project.__class__, project)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_job_and_work_delete(self, sender, instance, **kwargs):
    """ Custom handler for job and work delete """
    # Deleting a job/work re-indexes its parent project.
    project = instance.project
    self.handle_delete(project.__class__, project)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_profile_save(self, sender, instance, **kwargs):
    """ Custom handler for user profile save """
    # Saving a profile re-indexes the owning user.
    user = instance.user
    self.handle_save(user.__class__, user)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_profile_delete(self, sender, instance, **kwargs):
    """ Custom handler for user profile delete """
    user = instance.user
    try:
        # Deleting a profile re-indexes (saves) the user rather than
        # removing it from the index.
        self.handle_save(user.__class__, user)
    except (get_profile_model().DoesNotExist):
        pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_m2m(self, sender, instance, **kwargs):
    """ Handle many to many relationships """
    self.handle_save(type(instance), instance)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_m2m_user(self, sender, instance, **kwargs):
    """ Handle many to many relationships for user field """
    user = instance.user
    self.handle_save(type(user), user)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_associated_with_address(self, instance):
    """ Returns list with projects and organizations associated with given address """
    projects = list(Project.objects.filter(address=instance))
    organizations = list(Organization.objects.filter(address=instance))
    return projects + organizations
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def refresh_token(token, session=None):
    """Refresh Google OAuth token.

    :param OAuthToken token: the token to refresh
    :param requests.Session session: Optional `requests` session to use.
    :return: a new ``OAuthToken`` with the refreshed access token.
    :raises OAuthTokenExpiredError: when the OAuth endpoint reports an error.
    """
    session = session or HTTP_SESSION
    refresh_data = dict(
        refresh_token=token.refresh_token,
        client_id=token.consumer_key,
        client_secret=token.consumer_secret,
        grant_type='refresh_token'
    )
    resp = session.post(REFRESH_TOKEN_URL, data=refresh_data)
    resp_json = resp.json()
    if 'error' in resp_json:
        message = resp_json['error']
        description = resp_json.get('error_description', '')
        # Idiom fix: the original used `any(description)`, iterating the
        # string character by character; a truthiness test is equivalent.
        if description:
            message = u'{}: {}'.format(message, description)
        raise OAuthTokenExpiredError(message)
    # The endpoint only returns a fresh access token; keep the original
    # refresh token and client credentials.
    return OAuthToken(
        access_token=resp_json['access_token'],
        refresh_token=token.refresh_token,
        consumer_key=token.consumer_key,
        consumer_secret=token.consumer_secret
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_tex_table(inputlist, outputfile, close=False, fmt=None, **kwargs):
    """Write *inputlist* as LaTeX table rows to *outputfile*.

    Args:
        inputlist: list of rows; each row is an iterable of cell values.
        outputfile: writable file object receiving the .tex output.
        close: when True, close *outputfile* after writing.
        fmt: dict mapping column index (int, starting at 0) to a format
            string, e.g. ``"{:g}"``.
        **kwargs:
            nonestring: text used for ``None`` cells (default ``"None"``).

    Returns:
        None
    """
    if fmt is None:
        fmt = {}
    rendered = []
    for row in inputlist:
        cells = []
        for col, val in enumerate(row):
            if val is None:
                cells.append(r'\text{{{}}}'.format(
                    str(kwargs.get("nonestring", "None"))
                ))
            elif np.isscalar(val):
                # Wrap scalars in siunitx \num{...}; '+' signs are stripped
                # (e.g. from exponents like 1e+05).
                cell_fmt = "$\\num{{" + fmt.get(col, "{:g}") + "}}$"
                cells.append(cell_fmt.format(val).replace("+", ""))
            else:
                cells.append(fmt.get(col, "{}").format(val).replace("+", ""))
        # Bug fix: the original appended None cells without a trailing '&',
        # producing malformed rows; joining applies separators uniformly.
        rendered.append("&".join(cells) + "\\\\\n")
    outputfile.write("".join(rendered))
    if close:
        outputfile.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_root_logger_from_verbosity(verbosity=0):
    """Configure root logger according to both application settings and
    verbosity level.
    """
    kwargs = {}
    # -v maps to INFO, -vv (or more) to DEBUG; otherwise keep defaults.
    if verbosity > 1:
        kwargs['level'] = logging.DEBUG
    elif verbosity == 1:
        kwargs['level'] = logging.INFO
    set_root_logger(**kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_loggers_from_config(config=None):
"""Set loggers configuration according to the `logging` section of Docido configuration file. :param nameddict config: overrides Docido configuration """ |
    # Fall back to the application-wide Docido configuration when no
    # override mapping is supplied.
    config = config or app_config.logging
    # NOTE(review): dict.iteritems() is Python 2 only -- confirm the target
    # runtime before porting this module to Python 3.
    for lname, lconfig in config.get('loggers', {}).iteritems():
        if 'level' in lconfig:
            # Resolve the symbolic level name (e.g. "DEBUG") to its numeric
            # value on the logging module.
            level = getattr(logging, lconfig.level)
            assert isinstance(level, int)
            logger = logging.getLogger(lname)
            logger.setLevel(level)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def data(self):
    """Return parsed data structure."""
    if self._data is not None:
        return self._data
    # Re-initialise to discard any state left over from a previous
    # (possibly failed) parse, then parse from scratch.
    self.__init__(self.tokens)
    return self._parse()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _increment(self, n=1):
"""Move forward n tokens in the stream.""" |
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _skip_whitespace(self):
"""Increment over whitespace, counting characters.""" |
    # Count skipped whitespace tokens; callers use the count as the
    # indentation level of the current line.
    i = 0
    while self._cur_token['type'] is TT.ws and not self._finished:
        self._increment()
        i += 1
    return i
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _skip_newlines(self):
"""Increment over newlines.""" |
    # Advance past consecutive line-break tokens.
    while self._cur_token['type'] is TT.lbreak and not self._finished:
        self._increment()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse(self):
"""Parse the token stream into a nice dictionary data structure.""" |
    # Skip any leading whitespace and blank lines before the first value.
    while self._cur_token['type'] in (TT.ws, TT.lbreak):
        self._skip_whitespace()
        self._skip_newlines()
    # Cache the result so the `data` property can return it directly.
    self._data = self._parse_value()
    return self._data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_value(self):
"""Parse the value of a key-value pair.""" |
    # Count leading whitespace to establish this value's indentation level.
    indent = 0
    while self._cur_token['type'] is TT.ws:
        indent = self._skip_whitespace()
        self._skip_newlines()
    if self._cur_token['type'] is TT.id:
        # An identifier starts a key/value mapping.
        return self._parse_key(indent)
    elif self._cur_token['type'] is TT.hyphen:
        self._increment()
        if self._cur_token['type'] is TT.hyphen:
            # Two consecutive hyphens denote an empty list.
            self._increment()
            return []
        else:
            return self._parse_object_list()
    else:
        # TODO: single comma gives empty list
        return self._parse_literal_list(indent)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_key(self, indent):
"""Parse a series of key-value pairs.""" |
    data = {}
    new_indent = indent
    # Consume sibling keys while they stay at the same indentation level.
    while not self._finished and new_indent == indent:
        self._skip_whitespace()
        cur_token = self._cur_token
        if cur_token['type'] is TT.id:
            key = cur_token['value']
            next_token = self._nth_token()
            if next_token['type'] is TT.colon:
                self._increment(2)  # move past the ':'
                # whitespace before a newline is not important
                # whitespace after a newline is important
                self._skip_whitespace()
                self._skip_newlines()
                data[key] = self._parse_value()
            else:
                raise ParseError("':'", next_token)
        else:
            if cur_token['type'] is TT.hyphen:
                # A hyphen ends this mapping; the caller handles the list.
                return data
            else:
                raise ParseError("identifier or '-'", cur_token)
        if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
            # skip whitespace at the end of the line
            self._skip_whitespace()
            self._skip_newlines()
        # find next indentation level without incrementing
        new_indent = 0
        temp_position = self._cur_position
        while (
            temp_position < self.num_tokens-1 and
            self.tokens[temp_position]['type'] is TT.ws
        ):
            temp_position += 1
            new_indent += 1
    if indent == 0 or new_indent < indent:
        return data
    else:
        raise Exception(
            "Parser screwed up, increase of indent on line {} should "
            "have been caught by _parse_value().".format(
                cur_token['line']
            )
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_object_list(self):
"""Parse a list of data structures.""" |
    array = []
    indent = 0
    while not self._finished:
        self._skip_newlines()
        if self._cur_token['type'] is TT.ws:
            # Track the indentation level of the upcoming object.
            while self._cur_token['type'] is TT.ws:
                indent = self._skip_whitespace()
                self._skip_newlines()
        elif self._cur_token['type'] is TT.id:
            array.append(self._parse_key(indent))
        elif self._cur_token['type'] is TT.hyphen:
            self._increment()
            # A single hyphen terminates the list; a double hyphen
            # separates consecutive objects.
            if self._cur_token['type'] is not TT.hyphen or self._finished:
                return array
            else:
                self._increment()
        else:
            raise ParseError('something different', self._cur_token)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_literal_list(self, indent):
"""Parse a list of literals.""" |
    if self._cur_token['type'] not in self._literals:
        raise Exception(
            "Parser failed, _parse_literal_list was called on non-literal"
            " {} on line {}.".format(
                repr(self._cur_token['value']), self._cur_token['line']
            )
        )
    # find next token after whitespace without incrementing
    temp_position = self._cur_position
    while (
        temp_position < self.num_tokens-1 and (
            self.tokens[temp_position]['type'] is TT.ws or
            self.tokens[temp_position]['type'] in self._literals
        )
    ):
        temp_position += 1
    next_token = self.tokens[temp_position]
    # end of stream
    if next_token['type'] is TT.ws:
        return self._cur_token['value']
    elif next_token['type'] is TT.comma:
        return self._parse_comma_list()
    elif next_token['type'] is TT.lbreak:
        # Peek past blank lines/indentation to see whether the list
        # continues on the following line(s).
        while (
            temp_position < self.num_tokens-1 and
            self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
        ):
            temp_position += 1
        if self.tokens[temp_position]['type'] in self._literals:
            return self._parse_newline_list(indent)
        else:
            # Single literal on its own line.
            rval = self._cur_token['value']
            self._increment()
            return rval
    else:
        rval = self._cur_token['value']
        self._increment()
        return rval
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_comma_list(self):
"""Parse a comma seperated list.""" |
    if self._cur_token['type'] not in self._literals:
        raise Exception(
            "Parser failed, _parse_comma_list was called on non-literal"
            " {} on line {}.".format(
                repr(self._cur_token['value']), self._cur_token['line']
            )
        )
    array = []
    # Collect literals separated by commas until a newline or end of stream.
    while self._cur_token['type'] in self._literals and not self._finished:
        array.append(self._cur_token['value'])
        self._increment()
        self._skip_whitespace()
        if self._cur_token['type'] is TT.comma:
            self._increment()
            self._skip_whitespace()
        elif (
            not self._finished and
            self._cur_token['type'] not in (TT.ws, TT.lbreak)
        ):
            raise ParseError('comma or newline', self._cur_token)
    return array
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_newline_list(self, indent):
"""Parse a newline seperated list.""" |
    if self._cur_token['type'] not in self._literals:
        raise Exception(
            "Parser failed, _parse_newline_list was called on non-literal"
            " {} on line {}.".format(
                repr(self._cur_token['value']), self._cur_token['line']
            )
        )
    array = []
    new_indent = indent
    while not self._finished:
        if new_indent < indent:
            # Dedent: this (sub-)list is complete.
            break
        elif new_indent == indent:
            while self._cur_token['type'] is TT.lbreak:
                self._skip_newlines()
                self._skip_whitespace()
            # look ahead to see if it's a comma seperated list
            temp_position = self._cur_position
            while (
                temp_position < self.num_tokens-1 and
                (
                    self.tokens[temp_position]['type'] is TT.ws or
                    self.tokens[temp_position]['type'] in self._literals
                )
            ):
                temp_position += 1
            if self.tokens[temp_position]['type'] is TT.comma:
                array.append(self._parse_comma_list())
            else:
                if self._cur_token['type'] is not TT.hyphen:
                    array.append(self._cur_token['value'])
                elif self._nth_token()['type'] is TT.hyphen:
                    # two consecutive '-'s
                    array.append([])
                    self._increment()
                self._increment()
        else:  # new_indent > indent
            while self._cur_token['type'] is TT.lbreak:
                self._skip_newlines()
                self._skip_whitespace()
            # Deeper indentation starts a nested list.
            array.append(self._parse_newline_list(new_indent))
        self._skip_whitespace()
        if (
            not self._finished and
            self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
        ):
            raise ParseError('newline', self._cur_token)
        # Recompute the indentation of the next non-blank line without
        # consuming tokens.
        temp_position = self._cur_position
        new_indent = 0
        while (
            temp_position < self.num_tokens-1 and
            self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
        ):
            if self.tokens[temp_position]['type'] is TT.lbreak:
                new_indent = 0
            else:
                new_indent += 1
            temp_position += 1
    return array
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_help(self, env, args):
    """Handle showing help information for the arguments provided.

    `env`
        Runtime ``Environment`` instance.
    `args`
        List of argument strings passed.

    Returns ``False`` if nothing handled.

    * Raises ``HelpBanner`` exception if valid subcommand provided.
    """
    if not args:
        return False
    # command help (focus help [command]): look up the command plugin
    # registered for the given command name.
    plugin_obj = registration.get_command_hook(args[0], env.task.active)
    if plugin_obj:
        parser = self._get_plugin_parser(plugin_obj)
        raise HelpBanner(parser.format_help(), code=0)
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_command(self, command, env, args):
""" Handles calling appropriate command plugin based on the arguments provided. `command` Command string. `env` Runtime ``Environment`` instance. `args` List of argument strings passed. Returns ``False`` if nothing handled. * Raises ``HelpBanner`` exception if mismatched command arguments. """ |
    # get command plugin registered for command
    # note, we're guaranteed to have a command string by this point
    plugin_obj = registration.get_command_hook(command, env.task.active)
    # check if plugin is task-specific or has option hooks implying
    # task-specific behavior; outside a task such plugins are unavailable
    if plugin_obj and not env.task.active:
        if plugin_obj.task_only or plugin_obj.options:
            plugin_obj = None
    if plugin_obj:
        # plugin needs root, setup root access via sudo
        if plugin_obj.needs_root:
            registration.setup_sudo_access(plugin_obj)
        # parse arguments (raises HelpBanner on mismatched arguments)
        parser = self._get_plugin_parser(plugin_obj)
        parsed_args = parser.parse_args(args)
        # run plugin
        plugin_obj.execute(env, parsed_args)
        return True
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_parser(self, env):
""" Creates base argument parser. `env` Runtime ``Environment`` instance. * Raises ``HelpBanner`` exception when certain conditions apply. Returns ``FocusArgumentParser`` object. """ |
    version_str = 'focus version ' + __version__
    usage_str = 'focus [-h] [-v] [--no-color] <command> [<args>]'
    # setup parser
    parser = FocusArgParser(description=("Command-line productivity tool "
                                         "for improved task workflows."),
                            epilog=("See 'focus help <command>' for more "
                                    "information on a specific command."),
                            usage=usage_str)
    parser.add_argument('-v', '--version', action='version',
                        version=version_str)
    parser.add_argument('--no-color', action='store_true',
                        help='disables colors')
    # fetch command plugins
    commands = []
    active = env.task.active
    command_hooks = registration.get_registered(command_hooks=True,
                                                task_active=active)
    # extract command name and docstrings as help text
    for plugin in command_hooks:
        help_text = (plugin.__doc__ or '').strip().rstrip('.').lower()
        commands.append((plugin.command, help_text))
    commands.sort(key=lambda x: x[0])  # command ordered
    # install subparsers
    subparsers = parser.add_subparsers(title='available commands')
    # install 'help' subparser
    help_parser = subparsers.add_parser('help', add_help=False)
    help_parser.set_defaults(func=self._handle_help)
    # install 'version' subparser
    version_parser = subparsers.add_parser('version', add_help=False)
    def _print_version(env, args):
        env.io.write(version_str)
        return True
    version_parser.set_defaults(func=_print_version)
    # install command subparsers based on registered command plugins.
    # this allows for focus commands (e.g. focus on [...])
    for command, help_ in commands:
        cmd_parser = subparsers.add_parser(command, help=help_,
                                           add_help=False)
        # use wrapper to bind command value and passthru to _handle_command
        # when executed later
        def _run(command):
            # factory binds `command` now, avoiding the late-binding
            # closure pitfall of referencing the loop variable directly
            def _wrapper(env, args):
                return self._handle_command(command, env, args)
            return _wrapper
        cmd_parser.set_defaults(func=_run(command))
    return parser
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_plugin_parser(self, plugin_obj):
    """Create an argument parser for the given plugin.

    `plugin_obj`
        ``Plugin`` object.

    Returns ``FocusArgParser`` object.
    """
    description = (plugin_obj.__doc__ or '').strip()
    parser = FocusArgParser(prog='focus ' + plugin_obj.command,
                            description=description)
    plugin_obj.setup_parser(parser)
    return parser
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute(self, env):
""" Executes basic flags and command plugins. `env` Runtime ``Environment`` instance. * Raises ``FocusError`` exception when certain conditions apply. """ |
    # parse args (unknown args are collected and handed to the subcommand)
    parser = self._get_parser(env)
    parsed_args, cmd_args = parser.parse_known_args(env.args)
    # disable colors
    if parsed_args.no_color:
        env.io.set_colored(False)
    # run command handler passing any remaining args; `func` was bound by
    # the matched subparser's set_defaults()
    if not parsed_args.func(env, cmd_args):
        raise HelpBanner(parser.format_help())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _split_chglog(path, title):
"""Split a RST file text in two parts. The title argument determine the split point. The given title goes in the bottom part. If the title is not found everything goes in the top part. Return a tuple with the top and bottom parts. """ |
with path.open() as f:
doc = f.readlines()
has_title = False
for idx, curr_line in enumerate(doc):
if title in curr_line:
prev_line = doc[idx - 1] if idx - 1 < len(doc) else "\n"
next_line = doc[idx + 1] if idx + 1 < len(doc) else None
if is_title(prev_line, curr_line, next_line):
idx = idx if prev_line == "\n" else idx - 1
has_title = True
break
if has_title:
top, bottom = doc[:idx], doc[idx:]
else:
top, bottom = doc, []
return "".join(top), "".join(bottom) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loads(content):
"""Load a Procfile from a string.""" |
    # Logical lines keep their original index (i) for error reporting.
    lines = _group_lines(line for line in content.split('\n'))
    lines = [
        (i, _parse_procfile_line(line))
        for i, line in lines if line.strip()
    ]
    errors = []
    # Reject files with duplicate process types (no sane default).
    duplicates = _find_duplicates(((i, line[0]) for i, line in lines))
    for i, process_type, j in duplicates:
        errors.append(''.join([
            'Line %d: duplicate process type "%s": ',
            'already appears on line %d.',
        ]) % (i + 1, process_type, j + 1)
        )
    # Reject commands with duplicate variables (no sane default).
    for i, line in lines:
        process_type, env = line[0], line[2]
        duplicates = _find_duplicates(((0, var[0]) for var in env))
        for _, variable, _ in duplicates:
            errors.append(''.join([
                'Line %d: duplicate variable "%s" ',
                'for process type "%s".',
            ]) % (i + 1, variable, process_type)
            )
    # Done!
    if errors:
        raise ValueError(errors)
    return {k: {'cmd': cmd, 'env': dict(env)} for _, (k, cmd, env) in lines}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_email_enabled(email):
    """ Emails are activated by default. Returns false if an email has been disabled in settings.py """
    email_settings = get_settings(string="OVP_EMAILS").get(email, {})
    return not email_settings.get("disabled", False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_email_subject(email, default):
    """ Allows for email subject overriding from settings.py """
    email_settings = get_settings(string="OVP_EMAILS").get(email, {})
    # Translate whichever subject wins (override or default).
    return _(email_settings.get("subject", default))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def md_to_pdf(input_name, output_name):
    """Convert an input MarkDown file to a PDF of the given output name.

    Parameters
    ==========
    input_name : String
        Relative file location of the input file to where this function is
        being called.
    output_name : String
        Relative file location of the output file; the .pdf extension may
        be omitted.
    """
    # NOTE(review): the command is built from raw strings and run through a
    # shell; paths containing spaces or metacharacters will break. Consider
    # subprocess.run with an argument list.
    target = output_name if output_name.endswith('.pdf') else output_name + '.pdf'
    os.system("pandoc " + input_name + " -o " + target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def docx_to_md(input_name, output_name):
    """Convert an input docx file to a MarkDown file of the given output name.

    Parameters
    ==========
    input_name : String
        Relative file location of the input .docx file to where this
        function is being called.
    output_name : String
        Relative file location of the output file; the .md extension may
        be omitted.
    """
    # Bug fix: the output of a docx -> markdown conversion is a .md file;
    # the original (copy-pasted from md_to_pdf) checked for and appended
    # '.docx', asking pandoc for a docx-to-docx conversion.
    if output_name.endswith('.md'):
        os.system("pandoc " + input_name + " -o " + output_name)
    else:
        os.system("pandoc " + input_name + " -o " + output_name + ".md")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def report_failures(error=False, display=True, clear=True):
""" Print details of logged failures in expect function If no failures are detected, None is returned by the function. Parameters error:bool If true, will raise an Expectation of type 'FaliedValidationError' instead of printing to console display: bool If True, will print the failure report to console as well as returning it as a string. If error = True do nothing. clear: bool If True, all logged failured will be cleared after being reported. Returns ------- string The string formated failure report. list of dict The failed expectations. Each dictionary contains the keys: idx - the number of the failed expectation in the list starting at one, expression - Code that is evaluated file - the file name where the validation function was defined, funcname - the name of the validation function, line - the line of the validation function that the expression was on msg - the error message associated with the expression, if there was one. """ |
    global _failed_expectations
    output = []
    # Copy as failures are returned
    all_failed_expectations = _failed_expectations[:]
    if all_failed_expectations:
        output.append('\nFailed Expectations: %s\n\n' % len(all_failed_expectations))
        for i, failure in enumerate(all_failed_expectations, start=1):
            report_line = '{idx}: File {file}, line {line}, in {funcname}()\n    "{expression}" is not True\n'
            if failure['msg']:
                report_line += '    -- {msg}\n'
            report_line += '\n'
            failure['idx'] = i
            output.append(report_line.format(**failure))
        if clear:
            # Only clear after the failures were captured above.
            _failed_expectations = []
    else:
        output.append("All expectations met.")
    if error:
        # NOTE(review): with error=True and zero logged failures this still
        # raises, carrying "All expectations met." -- confirm intended.
        raise FailedValidationError("\n" + ''.join(output))
    elif display:
        print(''.join(output))
    if all_failed_expectations:
        return (''.join(output), all_failed_expectations)
    else:
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _log_failure(arg_num, msg=None):
""" Retrace stack and log the failed expresion information """ |
    # stack() returns a list of frame records
    # 0 is the _log_failure() function
    # 1 is the expect() function
    # 2 is the function that called expect(), that's what we want
    #
    # a frame record is a tuple like this:
    # (frame, filename, line, funcname, contextlist, index)
    # we're only interested in the first 4.
    frame, filename, file_lineno, funcname = inspect.stack()[2][:4]
    # Note that a frame object should be deleted once used to be safe and stop possible
    # memory leak from circular referencing
    try:
        frame_source_lines, frame_start_lineno = (inspect.getsourcelines(frame))
    finally:
        del frame
    filename = os.path.basename(filename)
    # Build abstract syntax tree from source of frame
    source_ast = ast.parse(''.join(frame_source_lines))
    # Locate the executed expect function
    func_body = source_ast.body[0].body
    map_lineno_to_node = {}
    for idx, node in enumerate(func_body):
        map_lineno_to_node[node.lineno] = node
    # Translate the absolute file line to a line relative to the frame.
    last_lineno = file_lineno - frame_start_lineno + 1
    # The expect() call is the last statement at or before that line.
    element_idx = [x for x in map_lineno_to_node.keys() if x <= last_lineno]
    element_idx = max(element_idx)
    expect_function_ast = map_lineno_to_node[element_idx]
    # Return the source code of the numbered argument
    arg = expect_function_ast.value.args[arg_num]
    line = arg.lineno
    if isinstance(arg, (ast.Tuple, ast.List)):
        expr = astor.to_source(arg.elts[0])
    else:
        expr = astor.to_source(arg)
    filename = os.path.basename(filename)  # redundant: basename already applied above
    failure_info = {'file': filename, 'line': line, 'funcname': funcname, 'msg': msg, 'expression': expr}
    _failed_expectations.append(failure_info)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, path):
    """Add a path to the overlay filesystem.

    Any filesystem operation involving this path or any sub-path of it
    will be transparently redirected to the temporary root dir.

    @path: An absolute path string.
    """
    if not path.startswith(os.sep):
        raise ValueError("Non-absolute path '{}'".format(path))
    current = path.rstrip(os.sep)
    # Register the path and every ancestor up to (but excluding) the root.
    while True:
        self._paths[current] = None
        current, _ = os.path.split(current)
        if current == os.sep:
            break
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fchown(self, real, fileno, uid, gid):
"""Run fake fchown code if fileno points to a sub-path of our tree. The ownership set with this fake fchown can be inspected by looking at the self.uid/self.gid dictionaries. """ |
path = self._fake_path(self._path_from_fd(fileno))
self._chown_common(path, uid, gid) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_grammatically_correct_vocabulary_subset(self, text, sent_filter='combined'):
""" Returns a subset of a given vocabulary based on whether its terms are "grammatically correct". """ |
    tokens = word_tokenize(text)
    sent_tokens = get_partial_sentence(tokens)
    if not sent_tokens:
        # No usable sentence fragment: fall back to the full vocabulary.
        return self.vocabulary
    if sent_filter == 'combined':
        if len(sent_tokens) < 2:
            # POS/trigram filters need at least two tokens of context.
            return self.get_bigram_filtered_vocab(sent_tokens)
        combined_filters = self.get_pos_filtered_vocab(sent_tokens) + \
            self.get_trigram_filtered_vocab(sent_tokens) + \
            self.get_bigram_filtered_vocab(sent_tokens)
        return combined_filters
    if sent_filter == 'pos' and len(sent_tokens) > 1:
        return self.get_pos_filtered_vocab(sent_tokens)
    elif sent_filter == 'bigram' or len(sent_tokens) < 2:
        return self.get_bigram_filtered_vocab(sent_tokens)
    elif sent_filter == 'trigram':
        return self.get_trigram_filtered_vocab(sent_tokens)
    # NOTE(review): an unrecognised sent_filter with 2+ tokens falls
    # through and returns None -- confirm callers handle that.
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_queryset(self, *args, **kwargs):
    """ Ensures that this manager always returns nodes in tree order. """
    qs = super(TreeManager, self).get_queryset(*args, **kwargs)
    ordered = qs.order_by(self.tree_id_attr, self.left_attr)
    # Restrict operations to pages on the current site if needed.
    if settings.PAGES_HIDE_SITES and settings.PAGES_USE_SITE_ID:
        return ordered.filter(sites=settings.SITE_ID)
    return ordered
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def center_start(r, window_size):
    """Center a region on its start and expand it to window_size bases.

    :return: the new region.
    """
    centered = copy.copy(r)
    half = window_size / 2
    centered.end = centered.start + half
    centered.start = centered.end - window_size
    return centered
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def center_end(r, window_size):
    """
    Center a region on its end and expand it to window_size bases.

    Floor division keeps coordinates integral under Python 3; under
    Python 2 int `/` already floored, so behaviour is unchanged there.

    :param r: region to transform; not modified (a shallow copy is returned).
    :param window_size: size, in bases, of the resulting region.
    :return: the new region.
    """
    res = copy.copy(r)
    res.start = res.end - window_size // 2
    res.end = res.start + window_size
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def center_middle(r, window_size):
    """
    Center a region on its middle and expand it to window_size bases.

    Floor division keeps coordinates integral under Python 3; under
    Python 2 int `/` already floored, so behaviour is unchanged there.

    :param r: region to transform; must support len(); not modified (a
              shallow copy is returned).
    :param window_size: size, in bases, of the resulting region.
    :return: the new region.
    """
    res = copy.copy(r)
    mid = res.start + (len(res) // 2)
    res.start = mid - (window_size // 2)
    res.end = res.start + window_size
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform_locus(region, window_center, window_size):
    """
    Transform an input genomic region, in place, into one suitable for the
    profile: re-centered on <window_center> and resized to window_size.

    :param region: input region to transform; it is MODIFIED IN PLACE
                   (the original docstring wrongly claimed a new interval
                   was returned).
    :param window_center: which part of the input region to center on;
                          only CENTRE is currently supported.
    :param window_size: how large the resultant region should be.
    :return: None; the region is modified in place.
    :raise ValueError: if window_center is not a supported transformation.
    """
    if window_center == CENTRE:
        region.transform_center(window_size)
    else:
        # str() so a non-string window_center still yields a readable error
        # instead of a TypeError while building the message.
        raise ValueError("Don't know how to do this transformation: " +
                         str(window_center))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pid(col, ignore_gaps=False):
    """
    Compute the percent identity (PID) of an alignment column.

    PID is defined as the frequency of the most frequent nucleotide in the
    column: max(count) / total.

    :param col: an alignment column; a dictionary where keys are seq. names
                and values are the nucleotide in the column for that
                sequence.
    :param ignore_gaps: if True, do not count gaps towards the total number
                        of sequences in the column (i.e. the denominator of
                        the fraction); if False, gaps count towards the
                        denominator but never towards the histogram.
    :return: PID as a float.
    :raise ValueError: if the column contains only gaps.
    """
    hist = {}              # nucleotide -> occurrence count (gaps excluded)
    total = 0              # denominator: sequences counted for this column
    found_non_gap = False
    for v in col.values():
        if v == sequence.GAP_CHAR:
            if ignore_gaps:
                continue   # gap skipped entirely: no histogram, no total
            else:
                total += 1  # gap inflates the denominator only
        else:
            found_non_gap = True
            if v not in hist:
                hist[v] = 0
            hist[v] += 1
            total += 1
    if not found_non_gap:
        raise ValueError("Cannot determine PID of column with only gaps")
    # float() forces true division under Python 2 as well.
    return max(hist.values()) / float(total)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def conservtion_profile_pid(region, genome_alignment,
                            mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS,
                            species=None):
    """
    Build a conservation profile for the given region using the genome
    alignment; scores are the percent of bases identical to the reference.

    Loci are walked 5' to 3' relative to the region's strand. Columns that
    cannot be resolved (missing or non-unique) yield None.

    :param mi_seqs: how to treat sequences with no actual sequence data for
                    the column.
    :return: a list of the same length as the region where each entry is the
             PID at the corresponding locus, or None when unavailable.
    """
    forward = region.isPositiveStrand()
    first = region.start if forward else region.end - 1
    last = region.end if forward else region.start - 1
    step = 1 if forward else -1
    profile = []
    for locus in range(first, last, step):
        try:
            column = genome_alignment.get_column(region.chrom, locus,
                                                 mi_seqs, species)
            profile.append(pid(column))
        except (NoSuchAlignmentColumnError, NoUniqueColumnError):
            profile.append(None)
    return profile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_profile(mean_profile, new_profile):
    """
    Fold a new list of values into a list of rolling means, in place.

    Entries of new_profile that are None are skipped; the corresponding
    rolling mean is left untouched. Only the first len(mean_profile)
    entries of new_profile are considered.
    """
    for idx, value in enumerate(new_profile[:len(mean_profile)]):
        if value is not None:
            mean_profile[idx].add(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def processBED(fh, genome_alig, window_size, window_centre,
               mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS,
               species=None, verbose=False):
    """
    Process a BED file and produce a conservation profile using a
    whole-genome alignment.

    :param fh: BED input (handle or filename, as accepted by BEDIterator).
    :param genome_alig: the whole-genome alignment used to compute
                        conservation scores.
    :param window_size: length of the profile.
    :param window_centre: which part of each interval to place at the center
                          of the profile.
    :param mi_seqs: how to treat sequences with no actual sequence data for
                    a column.
    :param species: restrict columns to these species, if given.
    :param verbose: if True, output progress messages to stderr.
    :return: list of window_size mean conservation values.
    """
    mean_profile = [RollingMean() for _ in range(window_size)]
    for interval in BEDIterator(fh, verbose=verbose, scoreType=float,
                                sortedby=ITERATOR_SORTED_START):
        # re-center and resize the interval, then fold its PID profile
        # into the running means.
        transform_locus(interval, window_centre, window_size)
        merge_profile(mean_profile,
                      conservtion_profile_pid(interval, genome_alig,
                                              mi_seqs, species))
    return [rm.mean for rm in mean_profile]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_error(context: RunContext) -> int:
    """
    Run after a sub-process exits: inspect the return code.

    On failure (non-zero return code) the buffered result is logged as an
    error (including the command string when the process was muted earlier),
    and unless "failure_ok" is set the application exits. On success the
    buffered result is logged at info level.

    :param context: run context
    :type context: _RunContext
    :return: process return code
    :rtype: int
    """
    # Muted runs already buffered the command string; unmuted ones need it
    # prepended now.
    label = '' if context.mute else context.cmd_as_string
    if context.return_code == 0:
        context.result_buffer += f'{label}: success: {context.return_code}'
        _LOGGER_PROCESS.info(context.result_buffer)
    else:
        context.result_buffer += f'{label}: command failed: {context.return_code}'
        _LOGGER_PROCESS.error(context.result_buffer)
        _LOGGER_PROCESS.error(repr(context))
        if not context.failure_ok:
            _exit(context)
    return context.return_code
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(cmd: str,
        *paths: str,
        cwd: str = '.',
        mute: bool = False,
        filters: typing.Optional[typing.Union[typing.Iterable[str], str]] = None,
        failure_ok: bool = False,
        timeout: float = _DEFAULT_PROCESS_TIMEOUT,
        ) -> typing.Tuple[str, int]:
    """
    Execute a command and return its output and return code.

    Args:
        cmd: command to execute
        paths: paths to search the executable in
        cwd: working directory (defaults to ".")
        mute: if true, output will not be printed
        filters: partial strings to filter out of the output (stdout/stderr)
        failure_ok: if False (default), a non-zero return code exits the
            application
        timeout: sub-process timeout

    Returns:
        tuple of (command output, return code)
    """
    executable, arguments = _parse_cmd(cmd, *paths)
    context = RunContext(  # type: ignore
        exe_path=executable,
        args_list=arguments,
        capture=sarge.Capture(),
        paths=paths,
        cwd=cwd,
        mute=mute,
        failure_ok=failure_ok,
        timeout=timeout,
        filters=_sanitize_filters(filters),
    )
    if mute:
        # Buffer the command string now; check_error appends the outcome.
        context.result_buffer += f'{context.cmd_as_string}'
    else:
        _LOGGER_PROCESS.info('%s: running', context.cmd_as_string)
    context.start_process()
    monitor_running_process(context)
    check_error(context)
    return context.process_output_as_str, context.return_code
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cli_certify_complex_dict(
        config,
        schema,
        key_certifier,
        value_certifier,
        allow_extra,
        include_collections,
        value,
):
    """Console script for certify_dict."""
    def _load(blob):
        # deserialize a JSON/pickle payload with the shared config
        return load_json_pickle(blob, config)

    # Resolve the schema and certifiers up front, in the same order as the
    # original implementation.
    loaded_schema = _load(schema)
    loaded_key_certifier = create_certifier(_load(key_certifier))
    loaded_value_certifier = create_certifier(_load(value_certifier))
    execute_cli_command(
        'dict',
        config,
        _load,
        certify_dict,
        value,
        allow_extra=allow_extra,
        include_collections=include_collections,
        key_certifier=loaded_key_certifier,
        required=config['required'],
        schema=loaded_schema,
        value_certifier=loaded_value_certifier,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_text_csv_file(self, filename, separator=',', **kwargs):
    """
    Load a CSV text file into a pair RDD.

    Each line becomes a (key, dict) pair: the key is taken from the first
    column, and the remaining columns are stored in a dict keyed by their
    1-based column index as a string ("1", "2", ...). The key itself is
    also stored under "0".

    :param filename: path/URI of the text file, loaded via SparkContext.
    :param separator: field delimiter used when parsing each line.
        BUG FIX: this parameter was previously ignored — the parser always
        used ',' regardless of what the caller passed.
    :param kwargs: accepted for interface compatibility; unused.
    :return: a pair RDD of (key, {str(index): value, ...}) records.
    """
    rdd_input = self.sc.textFile(filename)

    def load_csv_record(line):
        # csv.reader accepts any iterable of lines, so feed it the single
        # line directly (no StringIO needed; also works on Python 3).
        reader = csv.reader([line], delimiter=separator)
        # key in first column, remaining columns 1..n become dict key values
        payload = next(reader)
        key = payload[0]
        rest = payload[1:]
        # generate dict of "1": first value, "2": second value, ...
        record = {str(i): cell for i, cell in enumerate(rest, start=1)}
        # just in case, add "0": key
        record["0"] = key
        return (key, record)

    return rdd_input.map(load_csv_record)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_config(config_spec):
    """
    Load a JSON configuration from a URL or a local file.

    NOTE(review): the original docstring claimed this "does not parse
    result as JSON", but the code has always parsed with json.load; the
    docstring is corrected here to match the behaviour.

    :param config_spec: either an "http..." URL (fetched with
                        urllib.urlopen) or a local file name.
    :return: the parsed JSON content.
    """
    if config_spec.startswith("http"):
        # URL: fetch it
        config_file = urllib.urlopen(config_spec)
    else:
        # string: open file with that name
        config_file = open(config_spec)
    try:
        return json.load(config_file)
    finally:
        # Close the handle even when parsing fails; tolerate handles that
        # do not support close().
        try:
            config_file.close()
        except Exception:
            pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
    """Restore all counter settings to their sensible defaults."""
    # start counting at 1, one step at a time; presumably count/end of
    # None mean "unbounded" — TODO confirm against the rest of the class.
    self.start, self.stride = 1, 1
    self.count = None
    self.end = None
    self.format = "%d"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.