text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def applied():
    """Command for showing all upgrades already applied."""
    upgrader = InvenioUpgrader()
    logger = upgrader.get_logger()
    try:
        history = upgrader.get_history()
        if not history:
            logger.info("No upgrades have been applied.")
            return
        logger.info("Following upgrade(s) have been applied:")
        for upgrade_id, applied_at in history:
            logger.info(" * %s (%s)" % (upgrade_id, applied_at))
    except RuntimeError as exc:
        # Surface every message carried by the error, then re-raise.
        for msg in exc.args:
            logger.error(unicode(msg))
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def release(path, repository):
    """Create a new release upgrade recipe, for developers.

    :param path: package import path handed to :func:`recipe`.
    :param repository: repository name for the generated recipe.
    :raises click.Abort: when no upgrade endpoints are found.
    """
    upgrader = InvenioUpgrader()
    logger = upgrader.get_logger()
    try:
        endpoints = upgrader.find_endpoints()
        if not endpoints:
            logger.error("No upgrades found.")
            # Bug fix: click.Abort() was instantiated but never raised,
            # so execution silently continued with no endpoints.
            raise click.Abort()
        # The release recipe depends on every repository's endpoint upgrades.
        depends_on = []
        for repo, upgrades in endpoints.items():
            depends_on.extend(upgrades)
        # Bug fix: the original passed output_path=output_path, but
        # `output_path` was never defined here (guaranteed NameError);
        # rely on recipe()'s default instead.
        return recipe(path,
                      repository=repository,
                      depends_on=depends_on,
                      release=True)
    except RuntimeError as e:
        for msg in e.args:
            logger.error(unicode(msg))
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def recipe(package, repository=None, depends_on=None, release=False, output_path=None, auto=False, overwrite=False, name=None):
    """Create a new upgrade recipe, for developers.

    :param package: import path of the package to create the recipe in.
    :param repository: repository name; derived from the package if omitted.
    :param depends_on: upgrade ids this recipe depends on; defaults to the
        latest applied upgrade, or a ``CHANGE_ME`` placeholder.
    :param release: if True, use the release recipe filename scheme.
    :param output_path: directory to write the recipe to, overriding the
        package's default ``upgrades`` folder.
    :param auto: if True, auto-generate upgrade operations from the models.
    :param overwrite: allow replacing an existing recipe file.
    :param name: base name of the recipe (defaults to ``rename_me``).
    :raises RuntimeError: if the target path is invalid, the generated
        repository name cannot be parsed back, or the file already exists.
    """
    upgrader = InvenioUpgrader()
    logger = upgrader.get_logger()
    try:
        # Locate the package's upgrades folder and its repository name.
        path, found_repository = _upgrade_recipe_find_path(package)
        if output_path:
            path = output_path
        if not repository:
            repository = found_repository
        if not os.path.exists(path):
            raise RuntimeError("Path does not exists: %s" % path)
        if not os.path.isdir(path):
            raise RuntimeError("Path is not a directory: %s" % path)
        # Generate upgrade filename
        if release:
            filename = "%s_release_x_y_z.py" % repository
        else:
            filename = "%s_%s_%s.py" % (repository,
                                        date.today().strftime("%Y_%m_%d"),
                                        name or 'rename_me')
        # Check if generated repository name can be parsed back out of the
        # filename; otherwise upgrade discovery would mis-attribute it.
        test_repository = upgrader._parse_plugin_id(filename[:-3])
        if repository != test_repository:
            raise RuntimeError(
                "Generated repository name cannot be parsed. "
                "Please override it with --repository option."
            )
        upgrade_file = os.path.join(path, filename)
        if os.path.exists(upgrade_file) and not overwrite:
            raise RuntimeError(
                "Could not generate upgrade - %s already exists."
                % upgrade_file
            )
        # Determine latest installed upgrade (fall back to a placeholder
        # the developer must edit).
        if depends_on is None:
            depends_on = ["CHANGE_ME"]
            u = upgrader.latest_applied_upgrade(repository=repository)
            if u:
                depends_on = [u]
        # Write upgrade template file
        _write_template(
            upgrade_file, name or 'rename_me',
            depends_on, repository, auto=auto)
        logger.info("Created new upgrade %s" % upgrade_file)
    except RuntimeError as e:
        for msg in e.args:
            logger.error(unicode(msg))
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _write_template(upgrade_file, name, depends_on, repository, auto=False):
    """Write template to upgrade file."""
    if auto:
        # Ensure all models are loaded before producing operations
        from invenio_db import models
        list(models)
        template_args = produce_upgrade_operations()
        operations_str = template_args['upgrades']
        import_str = template_args['imports']
    else:
        operations_str = " pass"
        import_str = ""
    # Turn a snake_case recipe name into a CamelCase class name.
    class_name = ''.join(part.capitalize() or '_' for part in name.split('_'))
    template_context = {
        'depends_on': depends_on,
        'repository': repository,
        'year': date.today().year,
        'operations': operations_str,
        'imports': import_str,
        'cls': class_name,
    }
    with open(upgrade_file, 'w') as handle:
        handle.write(UPGRADE_TEMPLATE % template_context)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _upgrade_recipe_find_path(import_str, create=True):
    """Determine repository name and path for new upgrade.

    It is based on package import path.
    """
    try:
        # Import package
        module = import_string(import_str)
        # Reject plain modules: upgrades live inside a package directory.
        if module.__package__ is not None and module.__package__ != module.__name__:
            raise RuntimeError(
                "Expected package but found module at '%s'." % import_str
            )
        # Create upgrade directory if it does not exist yet.
        upgrades_dir = os.path.join(os.path.dirname(module.__file__), "upgrades")
        if create and not os.path.exists(upgrades_dir):
            os.makedirs(upgrades_dir)
        # Create init file if it does not exist yet.
        init_file = os.path.join(upgrades_dir, "__init__.py")
        if create and not os.path.exists(init_file):
            open(init_file, 'a').close()
        repository = module.__name__.split(".")[-1]
        return (upgrades_dir, repository)
    except ImportError:
        raise RuntimeError("Could not find module '%s'." % import_str)
    except SyntaxError:
        raise RuntimeError("Module '%s' has syntax errors." % import_str)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def youtube_meta(client, channel, nick, message, match):
    """Return meta information about a video.

    :param match: regex match whose first group is the video identifier.
    :returns: formatted metadata string, or an error message.
    """
    if not API_KEY:
        return 'You must set YOUTUBE_DATA_API_KEY in settings!'
    identifier = match[0]
    params = {
        'id': identifier,
        'key': API_KEY,
        'part': 'snippet,statistics,contentDetails',
    }
    response = requests.get(API_ROOT, params=params)
    if response.status_code != 200:
        return 'Error in response, ' + str(response.status_code) + ' for identifier: ' + identifier
    try:
        data = response.json()['items'][0]
    except (ValueError, KeyError, IndexError):
        # Bug fix: the original bare `except` fell through without
        # returning, so the code below crashed with NameError on `data`.
        print('Exception requesting info for identifier: ' + identifier)
        traceback.print_exc()
        return 'Could not fetch info for identifier: ' + identifier
    response_dict = {
        'title': data['snippet']['title'],
        'poster': data['snippet']['channelTitle'],
        'date': str(parse_date(data['snippet']['publishedAt'])),
        'views': data['statistics']['viewCount'],
        'likes': data['statistics']['likeCount'],
        'dislikes': data['statistics']['dislikeCount'],
        'duration': parse_duration(data['contentDetails']['duration']),
    }
    return RESPONSE_TEMPLATE.format(**response_dict).encode('utf-8').strip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_duration(duration):
    """Parse and prettify a duration from the youtube (ISO-8601) format.

    :param duration: duration string, e.g. ``PT1H2M3S``.
    :returns: human-readable ``H:MM:SS`` string via ``timedelta``.
    """
    duration_dict = re.search(DURATION_REGEX, duration).groupdict()
    converted_dict = {}
    # Convert all matched components to ints, dropping absent (None) groups.
    # Bug fix: dict.iteritems() is Python 2 only; use items().
    for unit, raw in duration_dict.items():
        if raw is not None:
            converted_dict[unit] = int(NON_DECIMAL.sub('', raw))
    return str(timedelta(**converted_dict))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exists_course_list(curriculum_abbr, course_number, section_id, quarter, year, joint=False):
    """Return True if the corresponding mailman list exists for the course."""
    list_name = get_course_list_name(
        curriculum_abbr, course_number, section_id, quarter, year, joint)
    return exists(list_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def get_launches(self):
    """Get launch information.

    Fetches the launch feed, extracts one entry per launch (missing
    fields become None), and stores the result sorted by start time
    on ``self._launches``.
    """
    from .common import CommonFunctions
    common = CommonFunctions(self.loop, self.session)
    data = await common.api_call(BASE_URL)
    if data is None:
        LOGGER.error('Error getting launch information')
        return

    def pick(getter):
        """Evaluate getter(), mapping lookup failures to None (logged)."""
        try:
            return getter()
        except (LaunchesError, IndexError, KeyError, TypeError) as error:
            LOGGER.debug('Error getting launch information, %s', error)
            return None

    all_launches = []
    for launch in data['launches']:
        entry = {}
        # 'start' needs the async ISO conversion, so it cannot go through
        # the synchronous pick() helper.
        try:
            entry['start'] = await common.iso(launch['wsstamp'])
        except (LaunchesError, IndexError, KeyError, TypeError) as error:
            entry['start'] = None
            LOGGER.debug('Error getting launch information, %s', error)
        entry['wsstamp'] = pick(lambda: launch['wsstamp'])
        entry['name'] = pick(lambda: launch['name'])
        entry['agency'] = pick(
            lambda: launch['missions'][0]['agencies'][0]['name'])
        entry['agency_country_code'] = pick(
            lambda: launch['missions'][0]['agencies'][0]['countryCode'])
        entry['stream'] = pick(lambda: launch['vidURLs'][0])
        all_launches.append(entry)
    self._launches = await common.sort_data(all_launches, 'start')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def web_search(self, query, start=0, limit=100, max_tries=3):
    '''
    encapsulates urllib retrieval for fetching JSON results from
    Google's Custom Search API. Returns a deserialized result set.

    NOTE(review): Python 2 only code (``unicode``, ``urllib2``,
    ``StringIO``, ``except Exception, exc`` syntax); will not run
    under Python 3 as-is.

    :param query: search query (unicode is UTF-8 encoded first).
    :param start: result offset passed to the API.
    :param limit: upper bound on results; at most 10 per request.
    :param max_tries: retries before giving up via sys.exit.
    '''
    tries = 0
    # The API URL wants UTF-8 bytes, not a unicode object.
    if isinstance(query, unicode):
        query = query.encode('utf8')
    url = self.url % dict(key=self.api_key,
                          query=urllib.quote(query.strip()),
                          num=min(10, limit - start),
                          start=start)
    logger.info("fetching: %s" % url)
    while 1:
        try:
            request = urllib2.Request(url)
            request.add_header('Accept-encoding', 'gzip')
            # Why do we set this? Remnant from pre-API version?
            request.add_header('User-Agent', USER_AGENT)
            opener = urllib2.build_opener()
            fh = opener.open(request, timeout=60)
            data = fh.read()
            # Transparently inflate gzip-compressed responses.
            if fh.headers.get('Content-Encoding') == 'gzip':
                compressedstream = StringIO.StringIO(data)
                fh = gzip.GzipFile(fileobj=compressedstream)
                data = fh.read()
            return json.loads(data)
        except Exception, exc:
            logger.info(traceback.format_exc(exc))
            if tries >= max_tries:
                # Hard exit after exhausting retries.
                sys.exit("failed %d times to fetch %s" % (max_tries, url))
            else:
                logger.info("failed to fetch\n\t%s\nwill try "
                            "%d more times" % (url, max_tries - tries))
            tries += 1
            # Exponential backoff between attempts.
            time.sleep(2 ** tries)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def can_solve(cls, filter_):
    """Tell if the solver is able to resolve the given filter.

    Arguments
    ---------
    filter_ : subclass of dataql.resources.BaseFilter
        The filter instance to check against this solver class.

    Returns
    -------
    boolean
        ``True`` if the current solver class can solve the given filter,
        ``False`` otherwise.
    """
    # Solvable iff the filter is an instance of any declared filter class.
    return any(isinstance(filter_, candidate) for candidate in cls.solvable_filters)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solve(self, value, filter_):
    """Return an attribute of ``value``, or the result of calling a function.

    Arguments
    ---------
    value : ?
        A value to solve in combination with the given filter.
    filter_ : dataql.resource.Filter
        An instance of ``Filter`` to solve with the given value.

    Returns
    -------
    The attribute named by the filter, or the result of calling the
    registered source with the filter's args/kwargs.
    """
    call_args, call_kwargs = filter_.get_args_and_kwargs()
    source = self.registry[value]
    return source.solve(value, filter_.name, call_args, call_kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solve(self, value, filter_):
    """Get a slice or an entry defined by an index from the given value.

    Arguments
    ---------
    value : ?
        A value to solve in combination with the given filter.
    filter_ : dataql.resource.SliceFilter
        An instance of ``SliceFilter`` to solve with the given value.

    Returns
    -------
    The sliced/indexed value, or ``None`` for an out-of-range index.
    """
    # A falsy slice falls back to the plain index.
    key = filter_.slice or filter_.index
    try:
        return value[key]
    except IndexError:
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize_args(f, *args, **kwargs):
    """Normalize call arguments into keyword form and varargs.

    args can only be non-empty if there is *args in the argument
    specification.

    :param f: the callable whose signature drives normalization.
    :returns: a ``NormalizedArgs`` with ``varargs`` (the *args tuple),
        ``normargs`` (all-keyword mapping) and ``callargs`` (the raw
        ``inspect.getcallargs`` result).
    """
    callargs = inspect.getcallargs(f, *args, **kwargs)
    original_callargs = callargs.copy()
    try:
        # getargspec raises ValueError on functions using keyword-only
        # args or annotations (and is removed entirely in Python 3.11).
        argspec = inspect.getargspec(f)
    except ValueError:
        argspec = inspect.getfullargspec(f)
    else:
        # Convert the legacy argspec into a fullargspec-like object.
        argspec = fullargspec_from_argspec(argspec)
    if hasattr(argspec, 'varkw'):
        if argspec.varkw:
            # Fold the **kwargs entry back into the flat keyword mapping.
            kwargs = callargs.pop(argspec.varkw, {})
            callargs.update(kwargs)
    if argspec.varargs:
        varargs = callargs.pop(argspec.varargs, ())
    else:
        varargs = ()
    # now callargs is all keywords
    return NormalizedArgs(varargs=varargs,
                          normargs=callargs,
                          callargs=original_callargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, *args, **kwargs):
    """Update self with new content.

    Accepts the same arguments as ``dict.update``; each resulting pair is
    assigned through ``self[key] = value`` so subclass item-setting applies.
    """
    staging = {}
    staging.update(*args, **kwargs)
    for key in staging:
        self[key] = staging[key]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def asdict(self):
    """Return a recursive dict representation of self.

    Nested ``Struct`` values are converted via their own ``asdict``.
    """
    result = dict(self._odict)
    for key in result:
        if isinstance(result[key], Struct):
            result[key] = result[key].asdict()
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _extract_authenticity_token(data):
"""Don't look, I'm hideous!""" |
# Super-cheap Python3 hack.
if not isinstance(data, str):
data = str(data, 'utf-8')
pos = data.find("authenticity_token")
# Super-gross.
authtok = str(data[pos + 41:pos + 41 + 88])
return authtok |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self):
    """Authenticate the session.

    Performs a form-based login: fetches the login page, scrapes its
    authenticity token, then POSTs credentials to the session URL.
    The default postdata layout and session URL target GitHub.
    """
    postdata = self.authentication_postdata
    # Start from a fresh cookie jar so stale session cookies don't leak in.
    jar = requests.cookies.cookielib.CookieJar()
    self.cookies = jar
    resp = self.get(self.authentication_base_url)
    authtok = _extract_authenticity_token(resp.content)
    if postdata is None:
        # This works for GitHub
        postdata = {"login": self.oauth2_username,
                    "password": self._oauth2_password,
                    "authenticity_token": authtok,
                    "commit": "Sign+in",
                    "utf8": u"\u2713",
                    } # pylint: disable=bad-continuation
        self.authentication_postdata = postdata
    if self.authentication_session_url is None:
        # This is also for GitHub
        authentication_session_url = "https://github.com/session"
        self.authentication_session_url = authentication_session_url
    self.post(self.authentication_session_url, data=postdata)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cli(ctx, config, debug):
    """SnakTeX command line interface - write LaTeX faster through templating."""
    # Stash the config path and a ready engine on the click context object.
    engine = stex.SnakeTeX(config_file=config, debug=debug)
    ctx.obj['config'] = config
    ctx.obj['engine'] = engine
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def zip(self, destination: typing.Union[str, Path] = None, encode: bool = True) -> str:
    """Write mission, dictionary etc. to a MIZ file.

    Args:
        destination: target MIZ file (if None, defaults to the source MIZ
            path with an ``_EMIZ`` suffix)
        encode: re-encode the mission content before zipping

    Returns:
        destination file path as a string
    """
    if encode:
        self._encode()
    if destination is None:
        destination_path = self.miz_path.parent.joinpath(f'{self.miz_path.stem}_EMIZ.miz')
    else:
        destination_path = elib.path.ensure_file(destination, must_exist=False)
    LOGGER.debug('zipping mission to: %s', destination_path)
    # NOTE(review): dummy_miz is written first and immediately replaced by
    # the ZipFile below — presumably a placeholder; confirm it is needed.
    destination_path.write_bytes(dummy_miz)
    # compression=8 is zipfile.ZIP_DEFLATED.
    with ZipFile(str(destination_path), mode='w', compression=8) as zip_file:
        # Mirror the unpacked temp dir into the archive, keeping entries
        # relative to the temp dir root.
        for root, _, items in os.walk(self.temp_dir.absolute()):
            for item in items:
                item_abs_path = Path(root, item).absolute()
                item_rel_path = Path(item_abs_path).relative_to(self.temp_dir)
                zip_file.write(item_abs_path, arcname=item_rel_path)
    return str(destination_path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def config_dict(data):
    """Parse newline-separated ``key = value`` pairs into a dict.

    :param data: string of '='-separated pairs, one pair per line.
    :returns: dict mapping each key to its value.
    """
    lines = config_list(data)
    # Raw string fix: '\s' is an invalid escape sequence in a plain
    # literal (SyntaxWarning since Python 3.12).
    return dict(re.split(r'\s*=\s*', value) for value in lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Forget(self, obj):
    '''Forget we've seen this object.
    '''
    ident = _get_idstr(obj)
    try:
        self.memo.remove(ident)
    except ValueError:
        # Object was never memoized; nothing to do.
        pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    '''Invoke all the callbacks, and close off the SOAP message.

    Idempotent: subsequent calls are no-ops once closed.
    '''
    if self.closed:
        return
    for func, arglist in self.callbacks:
        # Bug fix: apply() was removed in Python 3; unpack the
        # argument list directly.
        func(*arglist)
    self.closed = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup(config_root=''):
    """Service configuration and logging setup.

    Configuration defined in ``gordon-janitor-user.toml`` will overwrite
    ``gordon-janitor.toml``.

    Args:
        config_root (str): where configuration should load from,
            defaults to current working directory.
    Returns:
        A dict for Gordon service configuration.
    """
    config = _load_config(root=config_root)
    log_settings = config.get('core', {}).get('logging', {})
    ulogger.setup_logging(
        progname='gordon-janitor',
        level=log_settings.get('level', 'INFO').upper(),
        handlers=log_settings.get('handlers') or ['syslog'])
    return config
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def by_value(self, value, default=None):
    """Return the key for the given value.

    :param value: value to look up.
    :param default: returned when no key maps to ``value`` (a ``None``
        default means raise instead).
    :raises ValueError: when no key matches and no default is given.
    """
    matches = (k for k, v in self.items() if v == value)
    try:
        return next(matches)
    except StopIteration:
        if default is not None:
            return default
        raise ValueError('%s' % value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
    """Add a recursive folder scan using linux-style patterns.

    :param pattern: pattern or list of patterns to match.
    :param root: root to start from (default to '.')
    :param depth: if provided will be depth limit. 0 = first level only.
    :param source_type: what to return; files only, folders only, or both.
    :return: self, for chaining.
    """
    source = PatternSource(pattern, root, depth, **source_type)
    self.add_source(source)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
    """Add a folder source to scan recursively, with a regex filter on directories.

    :param regex: regex string to filter folders by.
    :param depth: if provided will be depth limit. 0 = first level only.
    :param source_type: what to return; files only, folders only, or both.
    :return: self, for chaining.
    """
    source = FilteredFolderSource(path, regex, depth, **source_type)
    self.add_source(source)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_filter(self, files_filter, filter_type=DefaultFilterType):
    """Add a files filter to this iterator.

    For a file to be processed it must match ALL filters (they compose
    with AND, not OR).

    :param files_filter: filter to apply; must inherit filters.FilterAPI.
    :param filter_type: filter behavior, see FilterType for details.
    :return: self, for chaining.
    """
    entry = (files_filter, filter_type)
    self.__filters.append(entry)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
    """Add a files filter by linux-style pattern to this iterator.

    :param pattern: linux-style files pattern (or list of patterns).
    :return: self, for chaining.
    """
    pattern_filter = FilterPattern(pattern)
    self.add_filter(pattern_filter, filter_type)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
    """Add a files filter by regex to this iterator.

    :param regex_expression: regex string to apply.
    :return: self, for chaining.
    """
    regex_filter = FilterRegex(regex_expression)
    self.add_filter(regex_filter, filter_type)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
    """Add a files filter by extensions to this iterator.

    :param extensions: single extension or list of extensions to filter by.
    :return: self, for chaining.
    """
    extension_filter = FilterExtension(extensions)
    self.add_filter(extension_filter, filter_type)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def next(self, dryrun=False):
    """Iterate over files in all sources.

    Use this if you want to iterate files externally.

    :param dryrun: if true, only yields filenames without processing them,
        eg "process_file" results are still consulted but flagged as dryrun.
    """
    # call the start hook
    self.on_start(dryrun)
    # track the directory we are currently walking, to fire on_enter_dir
    curr_dir = ""
    for src in self.__sources:
        # call the start_source hook
        self.on_start_source(src, dryrun)
        for filename in src.next():
            # make sure file passes all filters
            if not self.match_filters(filename):
                continue
            # fire the directory-enter hook when crossing into a new dir
            new_curr_dir = os.path.dirname(filename)
            if new_curr_dir != curr_dir:
                self.on_enter_dir(new_curr_dir, dryrun)
                curr_dir = new_curr_dir
            # process file; a non-None result is yielded to the caller
            curr = self.process_file(filename, dryrun)
            if curr is not None:
                yield curr
        # call the end-source hook
        self.on_end_source(src, dryrun)
    # call the end iteration hook, then finish.
    # Bug fix: `raise StopIteration` inside a generator is a RuntimeError
    # since Python 3.7 (PEP 479); a plain return ends the generator.
    self.on_end(dryrun)
    return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_filters(self, path):
    """Return True if the file passes all filters and should be processed.

    :param path: path to check.
    :return: True if it passes the filters, False otherwise.
    """
    # tracks whether every "Required" filter seen so far matched
    required_ok = True
    for files_filter, behavior in self.__filters:
        if behavior == self.FilterType.Required:
            # only probe while still passing; a single miss is final
            if required_ok and not files_filter.match(path):
                required_ok = False
        elif behavior == self.FilterType.Include:
            # a matching include filter short-circuits to acceptance
            if files_filter.match(path):
                return True
        elif behavior == self.FilterType.Exclude:
            # a matching exclude filter short-circuits to rejection
            if files_filter.match(path):
                return False
    # no include/exclude fired; verdict is whether all required matched
    return required_ok
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def SetPyclassMetaclass(option, opt, value, parser, *args, **kwargs):
    """set up pyclass metaclass for complexTypes"""
    from pyremotevbox.ZSI.generate.containers import ServiceHeaderContainer,\
        TypecodeContainerBase, TypesHeaderContainer
    TypecodeContainerBase.metaclass = kwargs['metaclass']
    # Both header containers need the same metaclass import line.
    import_line = 'from %(module)s import %(metaclass)s' % kwargs
    TypesHeaderContainer.imports.append(import_line)
    ServiceHeaderContainer.imports.append(import_line)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def averageValues(self):
    """ return the averaged values in the grid """
    assert self.opts['record_density'] and self.opts['method'] == 'sum'
    out = self.values.copy()
    # cells with density > 1 hold sums of multiple samples: average them
    fully_filled = self.density > 1
    out[fully_filled] /= self.density[fully_filled]
    # dont increase value of partly filled cells (density 0..1)
    # ONLY AS OPTION??
    out[~fully_filled] *= self.density[~fully_filled]
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deny_access(self, request, **kwargs):
    """Standard failure behaviour.

    Returns HTTP 403 (Forbidden) for non-GET requests. For GET requests,
    returns HTTP 302 (Redirect) pointing at either a URL specified in the
    class's unauthorised_redirect attribute, if one exists, or / if not.
    Also adds a (translated) message if one is defined.
    """
    if request.method != 'GET':
        # POST/DELETE etc. are refused outright with a 403.
        return HttpResponseForbidden()
    message = self.get_access_denied_message(request)
    if message:
        messages.info(request, _(message))
    # GET: redirect to the login flow at the unauthorised redirect URL.
    return redirect_to_login(
        request.get_full_path(),
        login_url=self.get_unauthorised_redirect_url(request))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_request(self, request):
    """Middleware entry point, called on all incoming requests.

    Returns None (middleware no-op) when the resource is unprotected or
    access is allowed; otherwise delegates to deny_access, which
    implements the failure behaviour.
    """
    if self.is_resource_protected(request) and self.deny_access_condition(request):
        return self.deny_access(request)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_cwd():
    """Use workdir.options.path as a temporary working directory.

    Generator for a context manager: chdirs into options.path
    (user-expanded) around the yield, then restores the original
    directory.
    """
    _set_log_level()
    owd = os.getcwd()
    logger.debug('entering working directory: ' + options.path)
    os.chdir(os.path.expanduser(options.path))
    try:
        yield
    finally:
        # Bug fix: without try/finally, an exception raised inside the
        # with-body left the process stuck in options.path.
        logger.debug('returning to original directory: ' + owd)
        os.chdir(owd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _gitignore_entry_to_regex(entry):
""" Take a path that you might find in a .gitignore file and turn it into a regex """ |
ret = entry.strip()
ret = ret.replace('.', '\.')
ret = ret.replace('*', '.*')
return ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sync(sourcedir=None, exclude_gitignore_entries=None, exclude_regex_list=None):
    """Create and populate workdir.options.path, memoized so that it only runs once.

    :param sourcedir: directory to copy from; defaults to
        ``options.sync_sourcedir`` or the current working directory.
    :param exclude_gitignore_entries: when True, translate the source's
        ``.gitignore`` entries into exclusion regexes.
    :param exclude_regex_list: extra regexes to exclude from the sync.
    """
    _set_log_level()
    sourcedir = sourcedir or options.sync_sourcedir or os.getcwd()
    if exclude_gitignore_entries is None:
        exclude_gitignore_entries = options.sync_exclude_gitignore_entries
    # Copy the default list so the module-level option is never mutated.
    exclude_regex_list = exclude_regex_list or copy.copy(options.sync_exclude_regex_list)
    gitignore_path = os.path.join(sourcedir, '.gitignore')
    if exclude_gitignore_entries and os.path.isfile(gitignore_path):
        gitignore_lines = []
        with open(gitignore_path) as gitignore:
            for line in gitignore.readlines():
                line = line.strip()
                # Skip blanks and comment lines.
                if line and not line.startswith('#'):
                    gitignore_lines.append(_gitignore_entry_to_regex(line))
        exclude_regex_list += gitignore_lines
    # Keep dirsync quiet unless debugging is on.
    dirsync_logger = logging.getLogger('dirsync')
    dirsync_logger.setLevel(logging.INFO if options.debug else logging.FATAL)
    logger.info('syncing {} to {}'.format(sourcedir, options.path))
    logger.debug('excluding {} from sync'.format(exclude_regex_list))
    dirsync.sync(
        sourcedir=sourcedir,
        targetdir=options.path,
        action='sync',
        create=True,
        exclude=exclude_regex_list,
        logger=dirsync_logger
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create():
    """ Create workdir.options.path """
    if os.path.isdir(options.path):
        # Already present; nothing to do.
        return
    logger.info('creating working directory: ' + options.path)
    os.makedirs(options.path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean():
    """ Remove all of the files contained in workdir.options.path """
    if not os.path.isdir(options.path):
        return
    logger.info('cleaning working directory: ' + options.path)
    for entry in os.listdir(options.path):
        full_path = os.path.join(options.path, entry)
        # Directories need rmtree; plain files just unlink.
        if os.path.isdir(full_path):
            shutil.rmtree(full_path)
        else:
            os.remove(full_path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove():
    """ Remove workdir.options.path """
    if not os.path.isdir(options.path):
        return
    logger.info('removing working directory: ' + options.path)
    shutil.rmtree(options.path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query(cls, *args, **kwargs):
    """ Same as collection.find, but yields Document rather than dict """
    cursor = cls._coll.find(*args, **kwargs)
    for raw_doc in cursor:
        yield cls.from_storage(raw_doc)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query_one(cls, *args, **kwargs):
    """ Same as collection.find_one, but returns Document rather than dict """
    raw_doc = cls._coll.find_one(*args, **kwargs)
    # No match (or falsy result) resolves to None.
    return cls.from_storage(raw_doc) if raw_doc else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upsert(self, null=False):
    """Insert or update this document in the collection.

    A filter is derived from the document's unique field values; when one
    can be built the document is upserted, otherwise it is inserted as a
    brand new document.  The resulting ``_id`` is stored back on the
    document either way.

    :param null: when True, the update also pushes null values.
    """
    self._pre_save()
    self.validate()
    filter_ = self._upsert_filter()
    if filter_:
        update = self._upsert_update(filter_, null)
        if update['$set']:
            # NOTE(review): ``new=True`` looks like an older driver API;
            # modern pymongo uses ``return_document`` -- confirm version.
            r = self._coll.find_one_and_update(filter_, update,
                                               upsert=True, new=True)
            self._data['_id'] = r['_id']
    else:
        r = self._coll.insert_one(self._data)
        self._data['_id'] = r.inserted_id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_info(self, client, info):
    """Populate ``info`` in place with details about an API Gateway.

    When ``info`` carries an ``identity`` (a rest API id), fetch its
    stages, resources (with per-method details and per-status-code
    response options merged in) and deployments; otherwise set those
    keys to empty lists.  Account-wide API keys and custom domains
    (with their base path mappings) are always fetched.

    :param client: API Gateway client (boto3-style interface).
    :param info: dict describing the gateway; mutated in place.
    """
    if 'identity' in info:
        info['stages'] = client.get_stages(restApiId=info['identity'])['item']
        info['resources'] = client.get_resources(restApiId=info['identity'])['items']
        for resource in info['resources']:
            for method in resource.get('resourceMethods', {}):
                resource['resourceMethods'][method] = client.get_method(restApiId=info['identity'], resourceId=resource['id'], httpMethod=method)
                # merge the full response description into each status-code entry
                for status_code, options in resource['resourceMethods'][method]['methodResponses'].items():
                    options.update(client.get_method_response(restApiId=info['identity'], resourceId=resource['id'], httpMethod=method, statusCode=status_code))
        info['deployment'] = client.get_deployments(restApiId=info['identity'])['items']
    else:
        for key in ('stages', 'resources', 'deployment'):
            info[key] = []
    info['api_keys'] = client.get_api_keys()['items']
    info['domains'] = client.get_domain_names()['items']
    for domain in info['domains']:
        domain['mappings'] = client.get_base_path_mappings(domainName=domain['domainName']).get('items', [])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_user_ip(self, request):
    """Return the client IP address for an ``HttpRequest``.

    META keys are scanned in the configured precedence order; each
    candidate header may hold a comma-separated list of addresses.  The
    first valid public IP wins outright.  When only private/loopback
    addresses are found (and ``real_ip_only`` is off), the best private
    candidate is returned, preferring non-loopback over loopback.

    :raises DobermanImproperlyConfigured: nothing was found while
        ``settings.DEBUG`` is on (typical localhost setup).
    """
    client_ip_address = None
    # searching the IP address
    for key in self.configuration.network.ip_meta_precedence_order:
        ip_meta_value = request.META.get(key, '').strip()
        if ip_meta_value != '':
            # headers such as X-Forwarded-For may carry several addresses
            ips = [ip.strip().lower() for ip in ip_meta_value.split(',')]
            for ip_str in ips:
                if ip_str and is_valid_ip(ip_str):
                    if not ip_str.startswith(self.configuration.network.non_public_ip_prefixes):
                        # first public address wins
                        return ip_str
                    elif not self.configuration.network.real_ip_only:
                        loopback = ('127.0.0.1', '::1')
                        if client_ip_address is None:
                            client_ip_address = ip_str
                        elif client_ip_address in loopback and ip_str not in loopback:
                            # prefer a private address over loopback
                            client_ip_address = ip_str
    if client_ip_address is None and settings.DEBUG:
        raise DobermanImproperlyConfigured(
            "Unknown IP, maybe you are working on localhost/development, "
            "so please set in your setting: DOBERMAN_REAL_IP_ONLY=False"
        )
    return client_ip_address
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def raw(self):
    """Return this string with escape characters re-escaped.

    Each control character (newline, tab, ...) and quote is replaced by
    its two-character backslash form, approximating a raw-string literal
    of the original value.  Characters without a mapping pass through.
    """
    # NOTE(review): '\c' is not a recognised Python escape (it is
    # literally backslash + 'c'); kept as-is -- confirm intent.
    escape_dict = {'\a': r'\a',
                   '\b': r'\b',
                   '\c': r'\c',
                   '\f': r'\f',
                   '\n': r'\n',
                   '\r': r'\r',
                   '\t': r'\t',
                   '\v': r'\v',
                   #'\x':r'\x',#cannot do \x - otherwise exception
                   '\'': r'\'',
                   '\"': r'\"',
                   #'\0':r'\0', #doesnt work
                   '\1': r'\1',
                   '\2': r'\2',
                   '\3': r'\3',
                   '\4': r'\4',
                   '\5': r'\5',
                   '\6': r'\6',
                   #'\7':r'\7',#same as \a is ASCI
                   }
    new_string = ''
    for char in self:
        try:
            new_string += escape_dict[char]
        except KeyError:
            # character needs no escaping
            new_string += char
    return new_string
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, size):
    """Read up to ``size`` characters from the file at this path and
    evaluate the contents as a Python expression.

    Returns None when the path does not exist or is not a regular file.

    .. warning:: uses ``eval`` on the file contents -- only load trusted
       files; ``ast.literal_eval`` would be safer for plain literals.
    """
    if self.exists() and self.isfile():
        # close the handle deterministically instead of leaking it
        with open(self) as handle:
            return eval(handle.read(size))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def files(self, ftype=None):
    """Return paths of the files directly inside this folder.

    :param ftype: when given, only files whose ``filetype()`` equals it.
    """
    entries = [self.join(name) for name in self]
    files_only = [entry for entry in entries if entry.isfile()]
    if ftype is None:
        return files_only
    return [entry for entry in files_only if entry.filetype() == ftype]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mergeDictionaries(sourceDictionary, destinationDictionary):
    """Deep-merge ``sourceDictionary`` into ``destinationDictionary``.

    Nested dicts are merged recursively.  String values of the form
    ``__ENV__<NAME>``, ``__ENV__<TYPE>__<NAME>`` or ``__FILE__<NAME>``
    are resolved from the environment: ``__ENV__`` values are taken
    directly (optionally cast to STR/BOOL/INT/FLOAT/JSON), while
    ``__FILE__`` values name an environment variable holding a file path
    whose stripped contents are used.  Keys equal to ``//`` are comments
    and skipped.

    :param sourceDictionary: dict with the data to merge in.
    :param destinationDictionary: dict merged into (mutated in place).
    :return: the destination dictionary.
    """
    log = Logger()
    varNamePattern = re.compile(r"^((__((ENV)|(FILE))__[A-Z]{3,})|(__((ENV)|(FILE))))__(?P<name>.*)$")
    varTypePattern = re.compile(r"^__((ENV)|(FILE))__(?P<type>[A-Z]{3,})__(.*)$")
    for key, value in sourceDictionary.items():
        # ignoring comments
        if key == "//":
            continue
        if isinstance(value, dict):
            # get node or create one, then merge recursively
            node = destinationDictionary.setdefault(key, {})
            Config.mergeDictionaries(value, node)
        elif isinstance(value, str) and (value.startswith("__ENV__") or value.startswith("__FILE__")):
            # extracting environment variable name
            nameMatch = varNamePattern.match(value)
            if nameMatch is None:
                log.warn("Invalid environmental variable specified: {name}", name=value)
                continue
            envVariableName = nameMatch.group("name")
            # checking if environment variable is set
            if envVariableName not in os.environ:
                log.warn("No environment variable {name} is set.", name=envVariableName)
                continue
            if value.startswith("__ENV__"):
                # checking if variable has a defined cast type
                typeMatch = varTypePattern.match(value)
                if typeMatch is not None:
                    envVariableType = typeMatch.group("type")
                    # casting value to the specified type
                    if envVariableType == "STR":
                        destinationDictionary[key] = str(os.environ[envVariableName])
                    elif envVariableType == "BOOL":
                        if os.environ[envVariableName] == "1":
                            destinationDictionary[key] = True
                        elif os.environ[envVariableName] == "0":
                            destinationDictionary[key] = False
                    elif envVariableType == "INT":
                        destinationDictionary[key] = int(os.environ[envVariableName])
                    elif envVariableType == "FLOAT":
                        destinationDictionary[key] = float(os.environ[envVariableName])
                    elif envVariableType == "JSON":
                        try:
                            destinationDictionary[key] = json.loads(os.environ[envVariableName])
                        except Exception:
                            log.warn(
                                "Environment variable {name} contains an invalid JSON value.",
                                name=envVariableName
                            )
                    else:
                        log.warn(
                            "Unsupported type {type} specified for variable {name}.",
                            name=envVariableName,
                            type=envVariableType
                        )
                        continue
                else:
                    destinationDictionary[key] = os.environ[envVariableName]
            else:
                # __FILE__: the environment variable holds a path to read.
                # BUG FIX: the original had a second, unreachable
                # ``elif value.startswith("__FILE__")`` branch below --
                # removed as dead code.
                filePath = os.environ[envVariableName]
                # checking if file exists
                if not os.path.isfile(filePath):
                    log.warn(
                        "File {filePath} does not exist.",
                        filePath=filePath,
                    )
                    continue
                # checking if file can be read
                if not os.access(filePath, os.R_OK):
                    log.warn(
                        "File {filePath} cannot be read.",
                        filePath=filePath,
                    )
                    continue
                # load file contents, closing the handle deterministically
                with open(filePath, "r") as filePointer:
                    destinationDictionary[key] = filePointer.read().strip()
        else:
            destinationDictionary[key] = value
    return destinationDictionary
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_to_path(p):
    """Temporarily prepend ``p`` to ``sys.path``.

    Intended for use as a context manager: the previous ``sys.path`` is
    restored once the ``with`` block ends.  When ``p`` is already on the
    path, nothing is changed.
    """
    saved = sys.path
    if p not in sys.path:
        # work on a fresh list so ``saved`` stays untouched
        sys.path = [p] + sys.path
    try:
        yield
    finally:
        sys.path = saved
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setUpImports(self):
    """Write the import statements of the generated module.

    Emits ZSI schema/typecode imports, a star-import of the generated
    types module (qualified by its package path when set) and the
    configured base class.  (Python 2 ``print >>`` syntax.)
    """
    i = self.imports
    print >>i, 'from pyremotevbox.ZSI.schema import GED, GTD'
    print >>i, 'from pyremotevbox.ZSI.TCcompound import ComplexType, Struct'
    module = self.getTypesModuleName()
    package = self.getTypesModulePath()
    if package:
        # qualify the module name with its package path
        module = '%s.%s' %(package, module)
    print >>i, 'from %s import *' %(module)
    print >>i, 'from %s import %s' %(self.base_module_name, self.base_class_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, fd=sys.stdout):
    """Write the generated module to file descriptor ``fd``.

    Emits, in order: header, imports, the message typecodes, and one
    skeleton (classdef + initdef + methods) per service.  Should not
    need to be overridden.  (Python 2 ``print >>`` syntax.)
    """
    print >>fd, self.header.getvalue()
    print >>fd, self.imports.getvalue()
    print >>fd, '# Messages ',
    for m in self.messages:
        print >>fd, m
    print >>fd, ''
    print >>fd, ''
    print >>fd, '# Service Skeletons'
    for k,v in self._services.items():
        print >>fd, v.classdef.getvalue()
        print >>fd, v.initdef.getvalue()
        for s in v.methods:
            print >>fd, s.getvalue()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromWSDL(self, wsdl):
    """Set up the service description from a ``WSDLTools.WSDL`` instance.

    Resets state, writes header/imports, then for every service and port
    collects a MessageWriter per operation input/output, records the
    SOAP address location, and generates methods and class/init
    definitions.  Ports whose binding raises ``Wsdl2PythonError`` are
    skipped.  Should not need to be overridden.  (Python 2 syntax.)
    """
    assert isinstance(wsdl, WSDLTools.WSDL), 'expecting WSDL instance'
    if len(wsdl.services) == 0:
        raise WsdlGeneratorError, 'No service defined'
    self.reset()
    self.wsdl = wsdl
    self.setUpHeader()
    self.setUpImports()
    for service in wsdl.services:
        sd = self._service_class(service.name)
        self._services[service.name] = sd
        for port in service.ports:
            desc = BindingDescription(wsdl=wsdl)
            try:
                desc.setUp(port.getBinding())
            except Wsdl2PythonError, ex:
                # unsupported binding: skip this port entirely
                continue
            for soc in desc.operations:
                if not soc.hasInput(): continue
                self.messages.append(MessageWriter())
                self.messages[-1].setUp(soc, port, input=True)
                if soc.hasOutput():
                    self.messages.append(MessageWriter())
                    self.messages[-1].setUp(soc, port, input=False)
            for e in port.extensions:
                if isinstance(e, WSDLTools.SoapAddressBinding):
                    sd.location = e.location
            self.setUpMethods(port)
        self.setUpClassDef(service)
        self.setUpInitDef(service)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setUpClassDef(self, service):
    """Write the skeleton class definition for ``service``.

    ``soapAction`` holds WS-Action input values; ``wsAction`` will be
    used to grab WS-Action output values; ``root`` maps root elements.
    (Python 2 ``print >>`` syntax.)
    """
    assert isinstance(service, WSDLTools.Service), \
        'expecting WSDLTools.Service instance'
    s = self._services[service.name].classdef
    print >>s, 'class %s(%s):' %(self.getClassName(service.name), self.base_class_name)
    print >>s, '%ssoapAction = {}' % self.getIndent(level=1)
    print >>s, '%swsAction = {}' % self.getIndent(level=1)
    print >>s, '%sroot = {}' % self.getIndent(level=1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def createRepoObjects():
    """Import each plugin module in this package and collect its repository.

    :return: dict mapping repository name to the object returned by the
        plugin's ``getRepository()``; plugins whose ``enabled`` attribute
        is falsy are skipped.
    """
    import importlib.util
    repositories = {}
    repodir = os.path.join(getScriptLocation())
    for importer, name, ispkg in pkgutil.iter_modules([repodir]):
        # BUG FIX: importer.find_module/load_module were deprecated and
        # removed in Python 3.12; use the find_spec/exec_module protocol.
        spec = importer.find_spec(name)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        repo_name = module.name
        if module.enabled:
            repositories[repo_name] = module.getRepository()
    return repositories
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def credibleregions(self, probs):
    """Return the pdf level for each credible-region probability.

    For each ``p`` in ``probs`` a level ``l`` in [0, 1] is found with
    Brent's method such that the pdf mass above ``l`` equals ``p``.
    Assumes ``self.pdf`` is a normalised array -- TODO confirm.
    """
    return [brentq(lambda l: self.pdf[self.pdf > l].sum() - p, 0.0, 1.0) for p in probs]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode(data):
    """Decode YAML ``data`` into a validated structure.

    :param data: YAML text to parse.
    :return: structure returned by ``validator.check_structure``.
    :raises MetaParsingException: when the YAML cannot be parsed.

    (Python 2 ``except ..., e`` syntax.  NOTE(review): ``yaml.load``
    without an explicit Loader is unsafe on untrusted input -- consider
    ``yaml.safe_load``.)
    """
    decoded = None
    try:
        decoded = yaml.load(data)
    except Exception, e:
        e = e.message if e.message else str(e)
        raise MetaParsingException("Can't parse your YAML data: %s" % e)
    decoded = validator.check_structure(decoded)
    return decoded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _override_sugar(func):
'''Use this decorator to override an attribute that is specified in
blessings' sugar dict with your own function that adds some additional
functionality.
'''
attr_name = func.__name__
@property
@wraps(func)
def func_that_uses_terminal_sugar(self):
func(self)
return self.__getattr__(attr_name)
return func_that_uses_terminal_sugar |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unbuffered_input(self):
    """Context manager that puts the terminal into 'cbreak' mode.

    In cbreak mode keystrokes are delivered immediately instead of being
    line-buffered, while signal keys such as Ctrl-C keep working (unlike
    raw mode).  The original tty attributes are restored on exit.  When
    the stream is not a tty this is a no-op.
    """
    if self.is_a_tty:
        orig_tty_attrs = termios.tcgetattr(self.stream)
        tty.setcbreak(self.stream)
        try:
            yield
        finally:
            # restore the terminal exactly as we found it
            termios.tcsetattr(
                self.stream, termios.TCSADRAIN, orig_tty_attrs)
    else:
        yield
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def raise_for_error(self):
    """Return self when the command succeeded; otherwise log the failure
    (command, cwd, return code, captured output) and raise ``ShCmdError``."""
    if not self.ok:
        message = "running {0} @<{1}> error, return code {2}".format(
            " ".join(self.cmd), self.cwd, self.return_code
        )
        logger.error("{0}\nstdout:{1}\nstderr:{2}\n".format(
            message, self._stdout.decode("utf8"), self._stderr.decode("utf8")
        ))
        raise ShCmdError(self)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _stream(self):
    """Context manager yielding a running subprocess guarded by a kill timer.

    The process is started with piped stdout/stderr; a ``threading.Timer``
    fires ``kill_proc`` after ``self.timeout`` seconds.  The timer is
    always cancelled when the block exits, whether or not it fired.
    """
    timer = None
    try:
        proc = subprocess.Popen(
            self.cmd, cwd=self.cwd, env=self.env,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        timer = threading.Timer(
            self.timeout,
            kill_proc, [proc, self.cmd, time.time()]
        )
        timer.start()
        yield proc
    finally:
        # make sure the watchdog never outlives the context
        if timer is not None:
            timer.cancel()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_lines(self, warn_only=False):
    """Yield stdout text, line by line.

    Chunks from ``iter_content`` are decoded and split; a trailing
    partial line is carried over to the next chunk and flushed at the
    end of the stream.

    :param warn_only: when False, raise via ``raise_for_error`` on a
        non-zero exit code once the stream is exhausted.
    """
    remain = ""
    for data in self.iter_content(LINE_CHUNK_SIZE, warn_only=True):
        # BUG FIX: on Python 3 ``data[-1]`` is an int, so comparing it to
        # the bytes (b"\n", b"\r") was always False and complete trailing
        # lines were glued onto the next chunk.  Slicing keeps bytes.
        line_break_found = data[-1:] in (b"\n", b"\r")
        lines = data.decode(self.codec).splitlines()
        lines[0] = remain + lines[0]
        if not line_break_found:
            remain = lines.pop()
        else:
            # BUG FIX: the carried-over text was consumed into lines[0]
            # above but never cleared, duplicating it on later chunks.
            remain = ""
        for line in lines:
            yield line
    if remain:
        yield remain
    self._state = FINISHED
    if not warn_only:
        self.raise_for_error()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_content(self, chunk_size=1, warn_only=False):
    """Yield stdout data, chunk by chunk.

    When the command has already run, replays the captured stdout;
    otherwise streams from a live subprocess (via ``_stream``), raising
    ``subprocess.TimeoutExpired`` when the watchdog killed it, and
    records stdout/stderr/return code afterwards.

    :param chunk_size: size of each chunk (in bytes).
    :param warn_only: when False, raise on a non-zero exit code.
    """
    self._state = "not finished"
    if self.return_code is not None:
        # already executed: replay the buffered output
        stdout = io.BytesIO(self._stdout)
        data = stdout.read(chunk_size)
        while data:
            yield data
            data = stdout.read(chunk_size)
    else:
        data = b''
        started_at = time.time()
        with self._stream() as proc:
            while proc.poll() is None:
                chunk = proc.stdout.read(chunk_size)
                if not chunk:
                    continue
                yield chunk
                data += chunk
            # -9 means the watchdog delivered SIGKILL: report a timeout
            if proc.returncode == -9:
                elapsed = time.time() - started_at
                self._state = "timeouted"
                raise subprocess.TimeoutExpired(proc.args, elapsed)
            # drain whatever is left after the process exited
            chunk = proc.stdout.read(chunk_size)
            while chunk:
                yield chunk
                data += chunk
                chunk = proc.stdout.read(chunk_size)
            self._return_code = proc.returncode
            self._stderr = proc.stderr.read()
            self._stdout = data
    self._state = FINISHED
    if not warn_only:
        self.raise_for_error()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def block(self, warn_only=False):
    """Run the command synchronously, capturing stdout/stderr and the
    return code; raise on failure unless ``warn_only`` is set."""
    self._state = "not finished"
    if self._return_code is None:
        child = subprocess.Popen(
            self.cmd, cwd=self.cwd, env=self.env,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        out, err = child.communicate(timeout=self.timeout)
        self._stdout, self._stderr = out, err
        self._return_code = child.returncode
    self._state = FINISHED
    if not warn_only:
        self.raise_for_error()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def metar(trans: MetarTrans) -> str:
    """Condense the METAR translation strings into one summary line."""
    def before_paren(text: str) -> str:
        # drop the parenthesised alternate-units suffix, e.g. "10C (50F)"
        return text[:text.find(' (')]
    summary = []
    if trans.wind:
        summary.append('Winds ' + trans.wind)
    if trans.visibility:
        summary.append('Vis ' + before_paren(trans.visibility).lower())
    if trans.temperature:
        summary.append('Temp ' + before_paren(trans.temperature))
    if trans.dewpoint:
        summary.append('Dew ' + before_paren(trans.dewpoint))
    if trans.altimeter:
        summary.append('Alt ' + before_paren(trans.altimeter))
    if trans.other:
        summary.append(trans.other)
    if trans.clouds:
        summary.append(trans.clouds.replace(' - Reported AGL', ''))
    return ', '.join(summary)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def taf(trans: TafLineTrans) -> str:
    """Condense the TAF line translation strings into one summary line."""
    def before_paren(text: str) -> str:
        # drop the parenthesised alternate-units suffix
        return text[:text.find(' (')]
    summary = []
    if trans.wind:
        summary.append('Winds ' + trans.wind)
    if trans.visibility:
        summary.append('Vis ' + before_paren(trans.visibility).lower())
    if trans.altimeter:
        summary.append('Alt ' + before_paren(trans.altimeter))
    if trans.other:
        summary.append(trans.other)
    if trans.clouds:
        summary.append(trans.clouds.replace(' - Reported AGL', ''))
    if trans.wind_shear:
        summary.append(trans.wind_shear)
    if trans.turbulance:
        summary.append(trans.turbulance)
    if trans.icing:
        summary.append(trans.icing)
    return ', '.join(summary)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform(request, syntax_processor_name=None, var_name="text"):
    """Return rendered HTML for the POSTed source text.

    The text is read from ``POST[var_name]``, converted by the named
    TextProcessor (falling back to the DEFAULT_MARKUP setting, then
    "markdown"), and rendered through the template engine so template
    tags in the output resolve.  Template syntax errors are reported
    inline instead of raised.  (Python 2 ``except ..., e`` syntax.)
    """
    if request.method != 'POST':
        return HttpResponseNotAllowed("Only POST allowed")
    source = request.POST.get(var_name)
    if not source:
        return HttpResponse('')
    processor = TextProcessor.objects.get(name=syntax_processor_name or getattr(settings, "DEFAULT_MARKUP", "markdown"))
    output = processor.convert(source)
    try:
        # render so MEDIA_URL and template tags inside the markup resolve
        t = template.Template(output, name='markup_preview')
        output = t.render(template.Context({'MEDIA_URL' : settings.MEDIA_URL}))
    except template.TemplateSyntaxError, e:
        log.warning('Error in preview rendering: %s' % e)
        output = '<h3 style="color:red">%s</h3><p>%s</p>' % (ugettext('You have an errors in source text!'), e)
    return HttpResponse(output)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rtype_to_model(rtype):
    """Return the registered model class for a string resource type.

    :param rtype: string resource type (case-insensitive).
    :raises ValueError: when no registered model matches.
    """
    wanted = rtype.lower()
    for model in goldman.config.MODELS:
        if model.RTYPE.lower() == wanted:
            return model
    raise ValueError('%s resource type not registered' % rtype)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spit(path, txt, encoding='UTF-8', append=False):
    """Write the unicode string ``txt`` to the file at ``path``.

    The file is truncated before writing unless ``append`` is True.

    :param path: file path on disk.
    :param txt: text content to write.
    :param encoding: file encoding (default UTF-8).
    :param append: append instead of truncating.
    :return: the text that was written.
    """
    with io.open(path, 'a' if append else 'w', encoding=encoding) as handle:
        handle.write(txt)
    return txt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def slurp(path, encoding='UTF-8'):
    """Read the file at ``path`` and return its whole contents as unicode.

    :param path: file path on disk.
    :param encoding: file encoding (default UTF-8).
    :return: the file's text.
    """
    handle = io.open(path, 'r', encoding=encoding)
    try:
        return handle.read()
    finally:
        handle.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_url(url, destination):
    """Download an external URL to ``destination`` on disk.

    :raises Exception: when the URL's extension is not in
        VALID_IMAGE_EXTENSIONS.
    """
    from settings import VALID_IMAGE_EXTENSIONS
    base_name, ext = os.path.splitext(url)
    ext = ext.lstrip('.')
    if ext not in VALID_IMAGE_EXTENSIONS:
        raise Exception("Invalid image extension")
    base_path, filename = os.path.split(destination)
    # BUG FIX: os.makedirs raises OSError when the directory already
    # exists; only create it when missing.
    if not os.path.isdir(base_path):
        os.makedirs(base_path)
    # NOTE(review): urllib.urlretrieve is Python 2; on Python 3 this
    # would be urllib.request.urlretrieve -- confirm target version.
    urllib.urlretrieve(url, destination)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_securityhash(action_tuples):
    """Create a SHA1 hex digest over the action tuples and SECRET_KEY.

    Each action tuple is folded into a ``_<a><b>`` string before hashing.
    (Python 2 style: ``sha1`` is fed a str; on Python 3 the argument
    would need encoding to bytes first.)
    """
    from settings import SECRET_KEY
    action_string = "".join(["_%s%s" % a for a in action_tuples])
    security_hash = sha1(action_string + SECRET_KEY).hexdigest()
    return security_hash
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_request_path(requested_uri):
    """Apply the first matching PATH_ALIASES rewrite to ``requested_uri``.

    :return: the rewritten URI, or the original when no alias matches.
    """
    from settings import PATH_ALIASES
    for pattern, replacement in PATH_ALIASES.items():
        if re.match(pattern, requested_uri):
            return re.sub(pattern, replacement, requested_uri)
    return requested_uri
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cached_files(url, server_name="", document_root=None):
    """Return paths of every cached variation of ``url``'s file.

    The original file itself is not included.
    """
    import glob
    url_info = process_url(url, server_name, document_root, check_security=False)
    # cache directory plus a glob on the base filename (no extension)
    cache_dir = os.path.dirname(url_info['requested_file'])
    pattern = '{0}*{1}'.format(url_info['base_filename'], url_info['ext'])
    return glob.glob(os.path.join(cache_dir, pattern))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """Set up the live server, then loop handling HTTP requests."""
    server_address = (self.host, self.port)
    # The original guarded this behind a hard-coded ``threading = True``
    # local (which also shadowed the ``threading`` module); the plain
    # WSGIServer branch was unreachable and has been removed.
    httpd_cls = type('WSGIServer', (ThreadingMixIn, WSGIServer), {})
    self.httpd = httpd_cls(server_address, WSGIRequestHandler, ipv6=False)
    wsgi_handler = get_internal_wsgi_application()
    self.httpd.set_app(wsgi_handler)
    # signal waiting threads that the server is about to accept requests
    self.is_ready.set()
    self.httpd.serve_forever()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def visit(self, node):
    """Dispatch ``node`` to its ``visit_<rule>`` method.

    Rewrite of the original parsimonious method: the rule name is
    lower-cased before lookup and there is no generic fallback -- nodes
    without a matching visitor are skipped silently.  Visitor exceptions
    are wrapped in ``VisitationError`` with the parse tree attached,
    unless already wrapped or listed in ``self.unwrapped_exceptions``.
    """
    try:
        # Get the "visit_%s" method, using the lower case version of the rule.
        method = getattr(self, 'visit_%s' % node.expr_name.lower())
    except AttributeError:
        # If the method is not defined, we do nothing for this node.
        return
    # Below is untouched code from the original ``visit`` method.
    # Call that method, and show where in the tree it failed if it blows up.
    try:
        return method(node, [self.visit(n) for n in node])
    except (VisitationError, UndefinedLabel):
        # Don't catch and re-wrap already-wrapped exceptions.
        raise
    except self.unwrapped_exceptions:
        raise
    except Exception:
        # Catch any exception, and tack on a parse tree so it's easier to
        # see where it went wrong.
        exc_class, exc, trace = sys.exc_info()
        raise VisitationError(exc, exc_class, node).with_traceback(trace)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def visit_str(self, node, _):
    """Return the quoted string's contents with escaped characters unquoted.

    Surrounding quotes are stripped and single backslashes removed via
    the ``re_single_backslash`` pattern attached to this method
    elsewhere.

    :param node: parsimonious ``Node`` matching the quoted-string rule.
    :param _: children list (unused).
    """
    # remove surrounding quotes and remove single backslashes
    return self.visit_str.re_single_backslash.sub('', node.text[1:-1])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_chunk_path_from_string(string, chunk=3):
    """Return a filesystem path built from fixed-size chunks of ``string``.

    :param string: source string to split.
    :param chunk: chunk length (default 3).
    """
    pieces = list(generate_chunks(string, chunk))
    return os.path.join(*pieces)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def next(self):
    """Generator yielding folders and/or files under the root directory.

    Walks the tree, skipping folders rejected by ``filter_folder`` or
    deeper than the configured depth limit; yields folder paths when
    configured, then the paths of the files they contain.
    """
    # get depth of starting root directory
    base_depth = self.__root.count(os.path.sep)
    # walk files and folders
    for root, subFolders, files in os.walk(self.__root):
        # apply folder filter
        if not self.filter_folder(root):
            continue
        # make sure we don't pass depth limit
        if self.__depth_limit is not None:
            curr_depth = root.count(os.path.sep)
            if curr_depth - base_depth > self.__depth_limit:
                continue
        # if need to return folders return it
        if self.__ret_folders:
            yield root
        # return files
        if self.__ret_files:
            for f in files:
                yield os.path.join(root, f)
    # BUG FIX: ``raise StopIteration`` inside a generator becomes a
    # RuntimeError under PEP 479 (Python 3.7+); simply returning ends
    # the iteration.
    return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ticks(
        cls, request,
        length: (Ptypes.path, Integer('Duration of the stream, in seconds.')),
        style: (Ptypes.path, String('Tick style.', enum=['compact', 'extended']))
) -> [
    (200, 'Ok', TickStream),
    (400, 'Invalid parameters')
]:
    """A streaming Lord Vetinari clock.

    Streams irregularly spaced timestamp ticks for ``length`` seconds in
    the requested ``style``; responds 400 when the parameters cannot be
    interpreted.
    """
    try:
        length = int(length)
        style = cls._styles[style]
    except (ValueError, KeyError):
        Respond(400)
    def vetinari_clock():
        # tick at uneven 0.25s-4s intervals, Vetinari style
        start = time()
        while time() - start <= length:
            sleep(randint(25, 400) / 100)
            yield strftime(style, localtime())
    Respond(200, vetinari_clock())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_blocks_to_string(self):
    """Flatten the data blocks into a single string (MegaDatasetBlock only).

    Taxa labels are built as ``<voucher>_<genus>_<species>`` and the
    per-block sequences are concatenated per taxon index; sequence
    warnings are accumulated on ``self.warnings``.

    :return: flattened data blocks as a string of ``#label\\nsequence``
        records.
    """
    # NOTE(review): ``[[]] * n`` shares one list object, but every slot
    # is re-assigned below, so the sharing is harmless here.
    taxa_ids = [[]] * int(self.data.number_taxa)
    sequences = [''] * int(self.data.number_taxa)
    for block in self._blocks:
        for index, seq_record in enumerate(block):
            taxa_ids[index] = '{0}_{1}_{2}'.format(seq_record.voucher_code,
                                                   seq_record.taxonomy['genus'],
                                                   seq_record.taxonomy['species'],
                                                   )
            sequence = get_seq(seq_record, self.codon_positions,
                               aminoacids=self.aminoacids,
                               degenerate=self.degenerate)
            sequences[index] += sequence.seq
            if sequence.warning:
                self.warnings.append(sequence.warning)
    out = ''
    for index, value in enumerate(taxa_ids):
        out += '#{0}\n{1}\n'.format(taxa_ids[index], sequences[index])
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(station: str, txt: str) -> (MetarData, Units):
    # type: ignore
    """Return MetarData and Units dataclasses with parsed data and their
    associated units, dispatching on the station's reporting format."""
    core.valid_station(station)
    if core.uses_na_format(station[:2]):
        return parse_na(txt)
    return parse_in(txt)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_na(txt: str) -> (MetarData, Units):
    # type: ignore
    """Parse the North American METAR variant.

    Sanitizes the raw report, then peels off remarks, runway visibility,
    station/time, clouds, wind, altimeter, visibility and temperature /
    dewpoint in sequence, finally deriving flight rules, parsed remarks
    and a timestamp.

    :return: (MetarData, Units) for the report.
    """
    units = Units(**NA_UNITS)  # type: ignore
    clean = core.sanitize_report_string(txt)
    wxresp = {'raw': txt, 'sanitized': clean}
    wxdata, wxresp['remarks'] = core.get_remarks(clean)
    wxdata, wxresp['runway_visibility'], _ = core.sanitize_report_list(wxdata)
    wxdata, wxresp['station'], wxresp['time'] = core.get_station_and_time(wxdata)
    wxdata, wxresp['clouds'] = core.get_clouds(wxdata)
    wxdata, wxresp['wind_direction'], wxresp['wind_speed'], \
        wxresp['wind_gust'], wxresp['wind_variable_direction'] = core.get_wind(wxdata, units)
    wxdata, wxresp['altimeter'] = core.get_altimeter(wxdata, units, 'NA')
    wxdata, wxresp['visibility'] = core.get_visibility(wxdata, units)
    wxresp['other'], wxresp['temperature'], wxresp['dewpoint'] = core.get_temp_and_dew(wxdata)
    condition = core.get_flight_rules(wxresp['visibility'], core.get_ceiling(wxresp['clouds']))  # type: ignore
    wxresp['flight_rules'] = FLIGHT_RULES[condition]
    wxresp['remarks_info'] = remarks.parse(wxresp['remarks'])  # type: ignore
    wxresp['time'] = core.make_timestamp(wxresp['time'])  # type: ignore
    return MetarData(**wxresp), units
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def with_mfa(self, mfa_token):
    """Set the MFA token for the next request (tokens are single-use).

    Chain into the protected action you want to perform.  Only useful
    for Application authentication.

    :param mfa_token: TOTP token for the Application, OR a callable that
        produces one when invoked.
    :return: self
    """
    # hasattr('__call__') kept instead of callable() for 3.1/3.2 compat
    if hasattr(mfa_token, '__call__'):
        token = mfa_token.__call__()
    else:
        token = mfa_token
    self.context.mfa_token = token
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolved_row(objs, geomatcher):
    """Temporarily define ``RoW`` in ``geomatcher`` as the topo faces not
    used by ``objs``.

    Overwrites any existing ``RoW``; intended as a context manager --
    ``RoW`` is deleted again when the block exits.  Each element of
    ``objs`` is either a location string or a dict with a ``location``
    key.
    """
    def iter_locations(items):
        for item in items:
            try:
                yield item['location']
            except TypeError:
                yield item
    used_faces = reduce(
        set.union,
        [geomatcher[location] for location in iter_locations(objs)]
    )
    geomatcher['RoW'] = geomatcher.faces.difference(used_faces)
    yield geomatcher
    del geomatcher['RoW']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _actual_key(self, key):
"""Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``.""" |
if key in self or key in ("RoW", "GLO"):
return key
elif (self.default_namespace, key) in self:
return (self.default_namespace, key)
if isinstance(key, str) and self.coco:
new = coco.convert(names=[key], to='ISO2', not_found=None)
if new in self:
if new not in self.__seen:
self.__seen.add(key)
print("Geomatcher: Used '{}' for '{}'".format(new, key))
return new
raise KeyError("Can't find this location") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first):
"""Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly.""" |
key = self._actual_key(key)
locations = [x[0] for x in lst]
if not include_self and key in locations:
lst.pop(locations.index(key))
lst.sort(key=lambda x: x[1], reverse=biggest_first)
lst = [x for x, y in lst]
# RoW in both key and lst, but not defined; only RoW remains if exclusive
if key == 'RoW' and 'RoW' not in self and exclusive:
return ['RoW'] if 'RoW' in lst else []
elif exclusive:
removed, remaining = set(), []
while lst:
current = lst.pop(0)
faces = self[current]
if not faces.intersection(removed):
removed.update(faces)
remaining.append(current)
lst = remaining
# If RoW not resolved, make it the smallest
if 'RoW' not in self and 'RoW' in lst:
lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW'))
return lst |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
    """Get all locations that intersect this location.

    Sorting is primarily by the number of faces intersecting ``key``;
    the total number of faces in the intersected region only breaks
    ties.  If the ``resolved_row`` context manager is not used, ``RoW``
    has no spatial definition and nothing intersects it, so
    ``.intersects("RoW")`` returns a list with ``RoW`` or nothing.
    """
    if only is None:
        candidates = self.topology
    else:
        candidates = {name: self[name] for name in only}
    if key == 'RoW' and 'RoW' not in self:
        return ['RoW'] if 'RoW' in candidates else []
    target = self[key]
    scored = []
    for name, faces in candidates.items():
        overlap = target.intersection(faces)
        if overlap:
            scored.append((name, (len(overlap), len(faces))))
    return self._finish_filter(scored, key, include_self, exclusive, biggest_first)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
    """Get all locations that are completely within this location.

    If the ``resolved_row`` context manager is not used, ``RoW`` has no
    spatial definition, so ``.contained("RoW")`` returns a list with
    either ``RoW`` or nothing.  Note: an unresolved ``RoW`` entry is
    removed (in place) from ``only``.
    """
    if 'RoW' not in self:
        if key == 'RoW':
            return ['RoW'] if 'RoW' in (only or []) else []
        if only and 'RoW' in only:
            only.remove('RoW')
    candidates = self.topology if only is None else {name: self[name] for name in only}
    target = self[key]
    matches = [
        (name, len(faces))
        for name, faces in candidates.items()
        if faces and target.issuperset(faces)
    ]
    return self._finish_filter(matches, key, include_self, exclusive, biggest_first)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
    """Get all locations that completely contain this location.

    If the ``resolved_row`` context manager is not used, ``RoW`` has no
    spatial definition; it can then only be contained by ``GLO`` and
    ``RoW`` itself.
    """
    candidates = self.topology if only is None else {name: self[name] for name in only}
    if key == 'RoW' and 'RoW' not in self:
        # Fixed small-to-big order; flip when the caller wants big first.
        answer = [label for label in ('RoW', 'GLO') if label in candidates]
        if biggest_first:
            answer.reverse()
        return answer
    target = self[key]
    matches = [
        (name, len(faces))
        for name, faces in candidates.items()
        if target.issubset(faces)
    ]
    return self._finish_filter(matches, key, include_self, exclusive, biggest_first)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split_face(self, face, number=None, ids=None):
    """Split a topological face into a number of smaller faces.

    * ``face``: the face to split; must be in the topology.
    * ``number``: how many new faces to create (default 2).  Ignored
      when ``ids`` is given.
    * ``ids``: iterable of new face ids; need not be integers.  By
      default, consecutive integers above the current maximum are used.

    Every location that referenced ``face`` now references all the new
    faces instead.  Returns the set of new face ids.
    """
    assert face in self.faces
    if ids:
        new_ids = set(ids)
    else:
        biggest = max(f for f in self.faces if isinstance(f, int))
        new_ids = set(range(biggest + 1, biggest + 1 + (number or 2)))
    for members in self.topology.values():
        if face in members:
            members.discard(face)
            members.update(new_ids)
    self.faces.discard(face)
    self.faces.update(new_ids)
    return new_ids
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_definitions(self, data, namespace, relative=True):
    """Add new topological definitions to ``self.topology``.

    When ``relative`` is true, ``data`` maps names to lists of existing
    locations, e.g. ``{"Russia Region": ["AM", "AZ", "GE", "RU"]}``;
    each new entry is the union of those locations' faces.

    Otherwise ``data`` maps names directly to sets of face ids, e.g.
    ``{'A': {1, 2, 3}}``; those faces are also added to ``self.faces``.

    All new keys are stored namespaced as ``(namespace, name)``.
    """
    if relative:
        self.topology.update({
            (namespace, name): set.union(*[self[part] for part in parts])
            for name, parts in data.items()
        })
    else:
        self.topology.update({
            (namespace, name): faces
            for name, faces in data.items()
        })
        self.faces.update(set.union(*data.values()))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_module(self, name):
    """Return True if `name` is a module name.

    When the module prefix itself starts with the mount prefix, the
    mount-prefix exclusion would reject every module name, so it is
    skipped in that case.
    """
    if not self.module_prefix.startswith(self.mount_prefix):
        return name.startswith(self.module_prefix) and not name.startswith(self.mount_prefix)
    return name.startswith(self.module_prefix)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_mount(self, name):
    """Return True if `name` is a mount name.

    When the mount prefix itself starts with the module prefix, the
    module-prefix exclusion would reject every mount name, so it is
    skipped in that case.
    """
    if not self.mount_prefix.startswith(self.module_prefix):
        return name.startswith(self.mount_prefix) and not name.startswith(self.module_prefix)
    return name.startswith(self.mount_prefix)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def name2mount(self, name):
    """Convert a module name to a mount name.

    Raises:
        ValueError: if ``name`` is not a supported module name.
    """
    if not self.is_module(name):
        raise ValueError('%r is not a supported module name' % (name, ))
    # Swap only the leading prefix; str.replace() would also rewrite any
    # later occurrence of the module prefix inside the name.
    return self.mount_prefix + name[len(self.module_prefix):]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mount2name(self, mount):
    """Convert a mount name to a module name.

    Raises:
        ValueError: if ``mount`` is not a supported mount name.
    """
    if not self.is_mount(mount):
        raise ValueError('%r is not a supported mount name' % (mount,))
    # Swap only the leading prefix; str.replace() would also rewrite any
    # later occurrence of the mount prefix inside the name.
    return self.module_prefix + mount[len(self.mount_prefix):]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def static_absolute_tag(context, path):
    """Return the absolute URL of a static file.

    Template-tag usage: presumably registered with a decorator above
    this function (not visible here) — confirm the tag name at the
    registration site.  Assumes the template context carries a
    ``request`` with an ``ABSOLUTE_ROOT`` attribute.
    """
    relative = static_url(path)
    request = context.get("request")
    return urljoin(request.ABSOLUTE_ROOT, relative)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.