text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_command_line(self, argv=None):
    """Parse the jhubctl command line arguments.

    This overwrites traitlets' default `parse_command_line` method and
    tailors it to jhubctl's ``<action> <resource> <name>`` convention.

    :param argv: argument list to parse; defaults to ``sys.argv[1:]``.
    :raises JhubctlError: if too few arguments are given, or the action
        or resource is not recognized.
    """
    argv = sys.argv[1:] if argv is None else argv
    self.argv = [py3compat.cast_unicode(arg) for arg in argv]
    # Append Provider Class to the list of configurable items.
    ProviderClass = getattr(providers, self.provider_type)
    self.classes.append(ProviderClass)
    # Standard help/version flags short-circuit all other parsing.
    if any(x in self.argv for x in ('-h', '--help-all', '--help')):
        self.print_help('--help-all' in self.argv)
        self.exit(0)
    if '--version' in self.argv or '-V' in self.argv:
        self.print_version()
        self.exit(0)
    # Generate a configuration file if flag is given.
    if '--generate-config' in self.argv:
        conf = self.generate_config_file()
        with open(self.config_file, 'w') as f:
            f.write(conf)
        self.exit(0)
    # If not config, parse commands.
    ## Run sanity checks.
    # Check that the minimum number of arguments have been called.
    if len(self.argv) < 2:
        raise JhubctlError(
            "Not enough arguments. \n\n"
            "Expected: jhubctl <action> <resource> <name>")
    # Check action
    self.resource_action = self.argv[0]
    if self.resource_action not in self.subcommands:
        raise JhubctlError(
            f"Subcommand is not recognized; must be one of these: {self.subcommands}")
    # Check resource
    self.resource_type = self.argv[1]
    if self.resource_type not in self.resources:
        # BUG FIX: the two f-string fragments previously concatenated
        # into "...must one of theseresources:" — added the missing
        # verb and separating space.
        raise JhubctlError(
            f"First argument after a subcommand must be one of these "
            f"resources: {self.resources}"
        )
    # Get name of resource; only the `get` action may omit it.
    try:
        self.resource_name = self.argv[2]
    except IndexError:
        if self.resource_action != "get":
            raise JhubctlError(
                "Not enough arguments. \n\n"
                "Expected: jhubctl <action> <resource> <name>")
        else:
            self.resource_name = None
    # flatten flags&aliases, so cl-args get appropriate priority:
    flags, aliases = self.flatten_flags()
    loader = KVArgParseConfigLoader(argv=argv, aliases=aliases,
                                    flags=flags, log=self.log)
    config = loader.load_config()
    self.update_config(config)
    # store unparsed args in extra_args
    self.extra_args = loader.extra_args
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize(self, argv=None):
    """Handle specific configurations.

    Parses the command line, merges an optional config file, and builds
    the helper objects used later by `start`.

    :param argv: argument list forwarded to `parse_command_line`.
    """
    # Parse configuration items on command line.
    self.parse_command_line(argv)
    # File-based configuration, when a config file path is set.
    if self.config_file:
        self.load_config_file(self.config_file)
    # Initialize objects to interact with.
    self.kubeconf = KubeConf()
    self.cluster_list = ClusterList(kubeconf=self.kubeconf)
    self.hub_list = HubList(kubeconf=self.kubeconf)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
    """Execution happening on jhubctl.

    Dispatches to ``<resource_type>_list.<resource_action>(name)`` as
    determined by earlier command-line parsing.
    """
    listing = getattr(self, '{}_list'.format(self.resource_type))
    action = getattr(listing, self.resource_action)
    action(self.resource_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_children(self, children):
    """Adds new children nodes after filtering for duplicates.

    Args:
        children (list): list of OmniTree nodes to add as children
    """
    # Snapshot the filter result first so duplicates *within* `children`
    # are preserved, exactly like the original `+=` comprehension.
    fresh = [node for node in children if node not in self._children]
    self._children.extend(fresh)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_parents(self, parents):
    """Adds new parent nodes after filtering for duplicates.

    Args:
        parents (list): list of OmniTree nodes to add as parents
    """
    # Filter against the current parent list first, then extend in one
    # shot (mirrors the original in-place `+=`).
    fresh = [node for node in parents if node not in self._parents]
    self._parents.extend(fresh)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_loops(self, _path=None):
    """Find a single loop (cycle) reachable from this node.

    Depth-first walk; returns the path (ending with the repeated node)
    as soon as any branch revisits a node already on the current path.

    :param _path: internal accumulator of nodes on the current path.
    :return: list of nodes forming the loop, or None if no loop found.
    """
    if _path is None:
        _path = []
    if self in _path:
        return _path + [self]
    # BUG FIX: the original returned the first child's result outright,
    # so loops through any child but the first were never detected.
    for child in self._children:
        loop = child.find_loops(_path + [self])
        if loop is not None:
            return loop
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_branches(self, labels=False, unique=False):
    """Recursively constructs a list of pointers of the tree's structure.

    Args:
        labels (bool): If True, returned lists consist of node labels.
            If False (default), lists consist of node pointers.  This
            option is mostly intended for debugging purposes.
        unique (bool): If True, return lists of all unique, linear
            branches of the tree — a list of lists where each inner
            list is a single, unique, linear path from the calling node
            to a leaf.  If False (default), a highly-nested list is
            returned where each nested list represents a branch point.

    Examples:
        nested form:  ['a', ['b', ['c', ['e']], ['d', ['e']]]]
        unique form:  [['a', 'b', 'c', 'e'], ['a', 'b', 'd', 'e']]
    """
    branches = []
    # Assign proper item, pointer or label, to return
    if labels is True:
        identifier = [self.label]
    else:
        identifier = [self]
    if self._children == []:  # Base Case: current node is a leaf/end node
        return identifier
    else:  # Recursive Case: all other nodes
        for child in self._children:
            if unique is True:
                for branch in child.find_branches(labels=labels,
                                                  unique=True):
                    # NOTE(review): leaf children yield bare items here,
                    # which this coerces with list(); that assumes the
                    # item is iterable — confirm against node/label types.
                    if type(branch) is not list:
                        branch = list(branch)
                    branches.append(identifier + branch)
            else:
                branches.append(child.find_branches(labels=labels))
    # Proper construction of list depends on 'unique'
    if unique is True:
        return branches
    else:
        return identifier + branches
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def install(self):
    """Installation procedure.

    Writes a basic smb.conf and uses samba-tool to provision the domain
    (AD mode).  Member-server mode is not implemented yet.  All work
    runs under the ``root()`` elevated-privileges context manager.
    """
    domain_settings = DomainSettings.get()
    with root():
        # Remove any stale smb.conf before provisioning (presumably
        # required by samba-tool — confirm).
        if os.path.exists(self.SMBCONF_FILE):
            os.remove(self.SMBCONF_FILE)
        if domain_settings.mode == 'ad':
            # Generate and persist a fresh random administrator password.
            domain_settings.adminpass = make_password(15)
            domain_settings.save()
            run("samba-tool domain provision "
                "--domain='%s' "
                "--workgroup='%s' "
                "--realm='%s' "
                "--use-xattrs=yes "
                "--use-rfc2307 "
                "--server-role='domain controller' "
                "--use-ntvfs "
                "--adminpass='%s'" %
                (domain_settings.domain,
                 domain_settings.workgroup,
                 domain_settings.realm,
                 domain_settings.adminpass))
            self.smbconf.write()
            # Use the Kerberos config generated during provisioning.
            shutil.copy2(self.SMB_KRB5CONF_FILE, self.KRB5CONF_FILE)
            # XXX FIXME move this to network
            run("echo 'nameserver 127.0.0.1' > /etc/resolv.conf")
            # TODO manage shares
            run("touch /etc/samba/shares.conf")
        elif domain_settings.mode == 'member':
            # TODO
            pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop_other_daemons(self):
    """Stop services already provided by the main samba daemon."""
    # Same order as before: smbd first, then nmbd.
    for daemon in (self.smbd, self.nmbd):
        if daemon.running:
            daemon.stop()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check(cls):
    """Verifies that all necessary values for the package to be used
    have been provided.

    :raises: `elib_config._exc.IncompleteSetupError`
    """
    required = ('app_version', 'app_name', 'config_file_path', 'config_sep_str')
    for name in required:
        # Attributes default to the sentinel 'not_set' until setup() runs.
        if getattr(cls, name) == 'not_set':
            raise IncompleteSetupError(f'elib_config setup is incomplete; missing: {name}')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup( cls, app_version: str, app_name: str, config_file_path: str, config_sep_str: str, root_path: typing.Optional[typing.List[str]] = None, ):
""" Configures elib_config in one fell swoop :param app_version: version of the application :param app_name:name of the application :param config_file_path: path to the config file to use :param config_sep_str: separator for config values paths :param root_path: list of strings that will be pre-pended to *all* config values paths (useful to setup a prefix for the whole app) """ |
cls.app_version = app_version
cls.app_name = app_name
cls.config_file_path = config_file_path
cls.config_sep_str = config_sep_str
cls.root_path = root_path |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sortBy(self, *args):
    '''sort results by given criteria'''
    valid = ('seeds', 'size')
    for criterion in args:
        # Unknown criteria are silently skipped, as before.
        if criterion not in valid:
            continue
        self.results = sorted(self.results,
                              key=attrgetter(criterion),
                              reverse=True)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filterBy(self, **kwargs):
    '''filter results by given criteria

    :param kwargs: criterion=threshold pairs; only 'seeds' and 'size'
        are honoured, results with attribute >= threshold are kept.
    :return: self, for chaining.
    '''
    criteria = ['seeds', 'size']
    # PY3 FIX: dict.iteritems() no longer exists; also build a list
    # instead of a lazy `filter` object so self.results stays reusable
    # across chained calls (Py3 filter() is a one-shot iterator).
    for k, v in kwargs.items():
        if k in criteria:
            self.results = [r for r in self.results if getattr(r, k) >= v]
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def perms_check(perms, prefix, ambiguous=False):
    """Return the user's perms for the specified prefix.

    perms <dict> permissions dict
    prefix <string> namespace to check for perms
    ambiguous <bool=False> if True reverse wildcard matching is active and a
    perm check for a.b.* will be matched by the user having perms to a.b.c or
    a.b.d - only use this if you know what you are doing.
    """
    # PY3/CLEANUP: removed dict.has_key() (gone in Py3), the pointless
    # try/except that only re-raised, and switched to raw strings so the
    # regex escapes are not invalid string-literal escapes.
    token = prefix.split(".")
    l = len(token)
    r = 0
    # Collect permission rules with a wildcard in them, so we don't do
    # unnecessary regex searches later on.
    perms_wc = {}
    for ns, p in perms.items():
        if "*" in ns:
            perms_wc[re.escape(ns).replace(r"\*", r"[^\.]+")] = p
    i = 1
    while i <= l:
        k = ".".join(token[:i])
        matched = False
        # Exact namespace match wins at this depth.
        if k in perms:
            r = perms.get(k)
        # Check for wildcard matches (if any wildcard rules exist);
        # longer (more specific) patterns take precedence.
        elif perms_wc:
            for ns, p in perms_wc.items():
                a = r"^%s$" % ns
                b = r"^%s\." % ns
                j = len(a)
                u = len(b)
                if j > matched and re.match(a, k):
                    r = p
                    matched = j
                elif u > matched and re.match(b, k):
                    r = p
                    matched = u
        # If not matched at all and ambiguous flag is true, do reverse
        # wildcard matching: a rule for a.b.c satisfies a check for a.b.*.
        if not matched and ambiguous:
            m = "^%s" % re.escape(k).replace(r"\*", r"[^\.]+")
            for ns, p in perms.items():
                if re.match(m, ns) and p > r:
                    r = p
                    break
        i += 1
    return r
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def discover_config_path(self, config_filename: str) -> str:
    """Search for a config file in a number of places.

    If no config file is found, returns None.

    :param config_filename: Config file name or custom path to filename
        with config.
    :return: Path to the discovered config file, or None.
    """
    # A direct path (or a file relative to the cwd) wins outright.
    if config_filename and os.path.isfile(config_filename):
        return config_filename
    candidates = (os.path.join(place, config_filename)
                  for place in _common_places)
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gen_from_yaml_config(self, config_path: str) -> Iterator:
    """Load config from a yml file; convention is to uppercase first
    level keys.

    :param config_path: Valid path to the yml config file.
    :return: Iterator of (UPPERCASED_KEY, value) pairs loaded from the
        yml file, or an empty dict when no path is given.
    """
    if not config_path:
        return {}
    # SECURITY FIX: yaml.load without an explicit Loader can construct
    # arbitrary Python objects from untrusted input (and is deprecated);
    # safe_load only builds basic types.
    with open(config_path, 'r') as f:
        yaml_config = yaml.safe_load(f)
    return ((key.upper(), value) for key, value in yaml_config.items())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_handler_if_not_configured(self, dict_config, requested_handlers, handler_name, check_key) -> None:
    """Remove ``handler_name`` from ``dict_config`` and
    ``requested_handlers`` if ``check_key`` is empty.
    """
    handlers = dict_config.get("handlers", {})
    try:
        configured = handlers[handler_name][check_key]
    except KeyError:
        # Handler or key absent entirely: nothing to prune.
        return
    if not configured:
        handlers.pop(handler_name)
        if handler_name in requested_handlers:
            requested_handlers.remove(handler_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open(self, name, attrs=None, *, close=False):
    """Writes an opening element.

    :name: the name of the element
    :attrs: a dict of attributes
    :close: if True, close will be called immediately after writing the
        element
    """
    self._pad()
    self._writer.startElement(_normalize_name(name), _normalize_attrs(attrs))
    self._newline()
    # Track the raw (un-normalized) name so close() can verify matching.
    self._open_elements.append(name)
    if close:
        self.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self, name=None):
    """Closes the most recently opened element.

    :name: if given, this value must match the name given for the most
        recently opened element.  This is primarily here for providing
        quick error checking for applications.
    :raises Exception: on a tag-name mismatch (stack is left intact).
    """
    # BUG FIX: validate the name *before* popping so a mismatch does not
    # corrupt the open-element stack.
    tag = self._open_elements[-1]
    if name is not None and name != tag:
        raise Exception("Tag closing mismatch")
    self._open_elements.pop()
    self._pad()
    self._writer.endElement(_normalize_name(tag))
    self._newline()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def characters(self, characters):
    """Writes content for a tag.

    :characters: the characters to write (coerced with str())
    """
    self._pad()
    text = str(characters)
    self._writer.characters(text)
    self._newline()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def element(self, name, attrs=None):
    """This method is a context manager for writing and closing an
    element.

    :name: the name of the element
    :attrs: a dict of attributes
    """
    self.open(name, attrs)
    # ROBUSTNESS FIX: close in a finally block so the element is closed
    # even when the managed block raises.
    try:
        yield
    finally:
        self.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def no_inner_space(self, *, outer=True):
    """Default spacing for all things written is ignored in this
    context.

    :outer: boolean, if True the typical padding and newline are added
        before the first and after the last things written
    """
    if outer:
        self._pad()
    # _pad() is a no-op while _indent is falsy, so clearing it disables
    # all padding written inside the context.
    indent_was = self._indent
    self._indent = None
    try:
        yield
    finally:
        # Always restore the previous indent, even if the body raised.
        self._indent = indent_was
    if outer:
        self._newline()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def content(self, name, attrs=None, characters=None):
    """Writes an element, some content for the element, and then closes
    the element, all without indentation.

    :name: the name of the element
    :attrs: a dict of attributes
    :characters: the characters to write (skipped when falsy)
    """
    with self.no_inner_space(outer=True):
        with self.element(name, attrs):
            if characters:
                self.characters(characters)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _pad(self):
"""Pads the output with an amount of indentation appropriate for the number of open element. This method does nothing if the indent value passed to the constructor is falsy. """ |
if self._indent:
self.whitespace(self._indent * len(self._open_elements)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dump_order(self, names):
    """Return ordered dump names required for a given dump names list.

    Order follows iteration over ``self``, not the input list.
    """
    required = self.get_dump_names(names)
    ordered = []
    for item in self:
        if item in required:
            ordered.append(item)
    return ordered
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_web_failure(err, err_lid=''):
    'Try to print meaningful info about wrapped twisted.web exceptions.'
    # Keep the log-line-id prefix visually separated from the message.
    if err_lid and not err_lid.endswith(' '): err_lid += ' '
    try: err.value.reasons # multiple levels of fail
    except AttributeError: pass
    else: err = err.value
    # Aggregated failures get one log line per reason plus traceback.
    # NOTE(review): errors without a .reasons attribute are silently
    # dropped here — confirm that is intended.
    if hasattr(err, 'reasons'):
        for err in err.reasons:
            lid = ' '
            if isinstance(err, Failure):
                log.error('{}{} {}: {}'.format(err_lid, lid, err.type, err.getErrorMessage()))
                for line in err.getTraceback().splitlines():
                    log.error('{}{} {}'.format(err_lid, lid, line))
            else: log.error('{}{} {}: {}'.format(err_lid, lid, type(err), err))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def force_bytes(bytes_or_unicode, encoding='utf-8', errors='backslashreplace'):
    'Convert passed string type to bytes, if necessary.'
    if not isinstance(bytes_or_unicode, bytes):
        return bytes_or_unicode.encode(encoding, errors)
    return bytes_or_unicode
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def volume_info(self, vol=None, type_filter=None):
    'Get list of all volumes or info for the one specified.'
    # Twisted deferred-generator style (presumably decorated with
    # @inlineCallbacks outside this view — confirm); `self` is callable
    # and performs the API request.
    vols = yield self(join('volumes', vol))
    # Single-volume responses come back as a bare object; normalize.
    if not isinstance(vols, list): vols = [vols]
    if type_filter is not None:
        vols = list(vol for vol in vols if vol['type'] == type_filter)
    # When a specific volume was requested, return it (or None) rather
    # than a list.
    if vol is not None: defer.returnValue(vols[0] if vols else None)
    defer.returnValue(vols)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def node_mkdir(self, path=''):
    'Does not raise any errors if dir already exists.'
    payload = dict(kind='directory')
    return self(path, data=payload, encode='json', method='put')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_event(data, attendees=None, photos=None):
    """Parse a ``MeetupEvent`` from the given response data.

    :param data: dict taken from a Meetup API event response.
    :param attendees: optional pre-parsed attendee list.
    :param photos: optional pre-parsed photo list.

    Returns
    -------
    A ``pythonkc_meetups.types.MeetupEvent``.
    """
    return MeetupEvent(
        id=data.get('id', None),
        name=data.get('name', None),
        description=data.get('description', None),
        # Event time plus its UTC offset are combined into one value.
        time=parse_datetime(data.get('time', None),
                            data.get('utc_offset', None)),
        status=data.get('status', None),
        yes_rsvp_count=data.get('yes_rsvp_count', None),
        maybe_rsvp_count=data.get('maybe_rsvp_count', None),
        event_url=data.get('event_url', None),
        photo_url=data.get('photo_url', None),
        # Venue data is optional in API payloads.
        venue=parse_venue(data['venue']) if 'venue' in data else None,
        attendees=attendees,
        photos=photos
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_venue(data):
    """Parse a ``MeetupVenue`` from the given response data.

    Returns
    -------
    A ``pythonkc_meetups.types.MeetupVenue``.
    """
    # Every field maps 1:1 from the payload, defaulting to None.
    fields = ('id', 'name', 'address_1', 'address_2', 'address_3',
              'city', 'state', 'zip', 'country', 'lat', 'lon')
    return MeetupVenue(**{field: data.get(field) for field in fields})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_member_from_rsvp(data):
    """Parse a ``MeetupMember`` from the given RSVP response data.

    Returns
    -------
    A ``pythonkc_meetups.types.MeetupMember``.
    """
    return MeetupMember(
        id=data['member'].get('member_id', None),
        name=data['member'].get('name', None),
        # Photo data is optional in RSVP payloads.
        photo=(parse_photo(data['member_photo'])
               if 'member_photo' in data else None)
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_photo(data):
    """Parse a ``MeetupPhoto`` from the given response data.

    Returns
    -------
    A ``pythonkc_meetups.types.MeetupPhoto``.
    """
    return MeetupPhoto(
        # Some payloads use 'photo_id', others a plain 'id'.
        id=data.get('photo_id', data.get('id', None)),
        url=data.get('photo_link', None),
        highres_url=data.get('highres_link', None),
        thumb_url=data.get('thumb_link', None)
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _mysql_isval(val):
    """Return True when ``val`` is a plain value for sqlalchemy.

    These types should either be ignored or have already been inserted
    into the SQL directly and dont need sqlalchemy to do it for us.
    """
    if _mysql_is_list(val):
        return False
    elif isinstance(val, mysql_col):
        return False
    # NOTE(review): membership uses ==, so a value with a custom __eq__
    # could compare equal to a sentinel — confirm that is acceptable.
    elif val in [None, mysql_now, mysql_ignore]:
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_registry_from_json(mongo, filename, clear_collection=False):
    """Initialize a model registry with a list of model definitions
    that are stored in a given file in Json format.

    Parameters
    ----------
    mongo : scodata.MongoDBFactory
        Connector for MongoDB
    filename : string
        Path to file containing model definitions
    clear_collection : boolean
        If true, collection will be dropped before models are created
    """
    # Read model definition file (JSON), then delegate to init_registry.
    with open(filename, 'r') as fp:
        definitions = json.load(fp)
    init_registry(mongo, definitions, clear_collection)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_json(self, document):
    """Create a model database object from a given Json document.

    Parameters
    ----------
    document : JSON
        Json representation of the object

    Returns
    -------
    ModelHandle
    """
    # The timestamp is optional (e.g., in cases where model definitions
    # are loaded from file).
    if 'timestamp' in document:
        timestamp = datetime.datetime.strptime(
            document['timestamp'],
            '%Y-%m-%dT%H:%M:%S.%f'
        )
    else:
        timestamp = None
    # Create handle for database object
    return ModelHandle(
        document['_id'],
        document['properties'],
        [AttributeDefinition.from_json(el) for el in document['parameters']],
        ModelOutputs.from_json(document['outputs']),
        timestamp=timestamp
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_models(self, limit=-1, offset=-1):
    """List models in the database, with optional pagination.

    Parameters
    ----------
    limit : int
        Limit number of models in the result set
    offset : int
        Set offset in list (order as defined by object store)

    Returns
    -------
    ObjectListing
    """
    # Thin delegation to the generic object-store listing.
    return self.list_objects(limit=limit, offset=offset)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_json(self, model):
    """Create a Json-like object for a model.

    Parameters
    ----------
    model : ModelHandle

    Returns
    -------
    dict
        Json-like object representation
    """
    # Get the basic Json object from the super class
    obj = super(DefaultModelRegistry, self).to_json(model)
    # Add model parameter definitions and output description.
    obj['parameters'] = [
        para.to_json() for para in model.parameters
    ]
    obj['outputs'] = model.outputs.to_json()
    return obj
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def end_anonymous_session_view(request):
    ''' End the anonymous session if the user is a superuser. '''
    # NOTE(review): no superuser check is visible here — presumably
    # enforced by a decorator outside this view; confirm.
    request.session['ANONYMOUS_SESSION'] = False
    messages.add_message(request, messages.INFO, MESSAGES['ANONYMOUS_SESSION_ENDED'])
    return HttpResponseRedirect(reverse('utilities'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_managers_view(request):
    ''' Show a list of manager positions with links to view in detail. '''
    # Only currently-active manager positions are listed.
    managerset = Manager.objects.filter(active=True)
    return render_to_response('list_managers.html', {
        'page_name': "Managers",
        'managerset': managerset,
    }, context_instance=RequestContext(request))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def meta_manager_view(request):
    '''
    A manager of managers. Display a list of current managers, with
    links to modify them.  Also display a link to add a new manager.
    Restricted to presidents and superadmins.
    '''
    # Unlike the public listing, all managers (including inactive ones)
    # are shown here.
    managers = Manager.objects.all()
    return render_to_response('meta_manager.html', {
        'page_name': "Admin - Meta-Manager",
        'managerset': managers,
    }, context_instance=RequestContext(request))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_manager_view(request):
    ''' View to add a new manager position. Restricted to superadmins and presidents. '''
    # Bound form on POST, unbound on GET.
    form = ManagerForm(request.POST or None)
    if form.is_valid():
        manager = form.save()
        messages.add_message(request, messages.SUCCESS,
                             MESSAGES['MANAGER_ADDED'].format(managerTitle=manager.title))
        # Redirect back to an empty form (post/redirect/get pattern).
        return HttpResponseRedirect(reverse('managers:add_manager'))
    return render_to_response('edit_manager.html', {
        'page_name': "Admin - Add Manager",
        'managerset': Manager.objects.all(),
        'form': form,
    }, context_instance=RequestContext(request))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def manage_request_types_view(request):
    ''' Manage requests. Display a list of request types with links to
    edit them.  Also display a link to add a new request type.
    Restricted to presidents and superadmins.
    '''
    request_types = RequestType.objects.all()
    return render_to_response('manage_request_types.html', {
        'page_name': "Admin - Manage Request Types",
        'request_types': request_types
    }, context_instance=RequestContext(request))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_request_type_view(request):
    ''' View to add a new request type. Restricted to presidents and superadmins. '''
    # Bound form on POST, unbound on GET.
    form = RequestTypeForm(request.POST or None)
    if form.is_valid():
        rtype = form.save()
        messages.add_message(request, messages.SUCCESS,
                             MESSAGES['REQUEST_TYPE_ADDED'].format(typeName=rtype.name))
        # Redirect to the overview (post/redirect/get pattern).
        return HttpResponseRedirect(reverse('managers:manage_request_types'))
    return render_to_response('edit_request_type.html', {
        'page_name': "Admin - Add Request Type",
        'request_types': RequestType.objects.all(),
        'form': form,
    }, context_instance=RequestContext(request))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def all_requests_view(request):
    '''
    Show user a list of enabled request types, the number of requests of each
    type and a link to see them all.
    '''
    # Pseudo-dictionary, actually a list with items of form
    # (request_type.name.title(), number_of_type_requests, name, enabled,
    # glyphicon)
    types_dict = list()
    for request_type in RequestType.objects.all():
        requests = Request.objects.filter(request_type=request_type)
        # Hide the count for private requests: non-managers of this type
        # only have their own private requests counted.
        if not request_type.managers.filter(incumbent__user=request.user):
            requests = requests.exclude(
                ~Q(owner__user=request.user), private=True,
            )
        number_of_requests = requests.count()
        types_dict.append((
            request_type.name.title(), number_of_requests,
            request_type.url_name, request_type.enabled,
            request_type.glyphicon,
        ))
    return render_to_response('all_requests.html', {
        'page_name': "Archives - All Requests",
        'types_dict': types_dict,
    }, context_instance=RequestContext(request))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_all_requests_view(request, requestType):
    '''
    Show all the requests for a given type in list form.

    :param requestType: url_name slug of the RequestType to display.
    '''
    request_type = get_object_or_404(RequestType, url_name=requestType)
    requests = Request.objects.filter(request_type=request_type)
    # Hide private requests from non-managers of this type; users always
    # see their own.
    if not request_type.managers.filter(incumbent__user=request.user):
        requests = requests.exclude(
            ~Q(owner__user=request.user), private=True,
        )
    page_name = "Archives - All {0} Requests".format(request_type.name.title())
    return render_to_response('list_requests.html', {
        'page_name': page_name,
        'requests': requests,
        'request_type': request_type,
    }, context_instance=RequestContext(request))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def installed():
    """Returns whether or not Google Chrome is installed.

    Determines the application data path for Google Chrome and checks
    whether its cookie database can be opened.

    Returns bool - True if Chrome is installed
    """
    try:
        cookie_path = ChromeCookies._getPath()
        # Opening (and immediately closing) verifies existence and
        # readability, exactly like the original probe.
        with open(cookie_path):
            pass
    except Exception:
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getPath():
""" Returns Chrome's cookie database path Returns str - Google Chrome's cookie database path """ |
if os.name == "posix":
path = os.getenv("HOME") + "/.config/google-chrome/Default/Cookies"
return path
import _winreg
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders')
path = _winreg.QueryValueEx(key, 'Local AppData')[0]
path = os.path.join(path, 'Google\\Chrome\\User Data\\Default\\Cookies')
return path |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deprecated_function(func_, replacement="(see docs)", *args, **kwargs):
    """Invoke a deprecated callable, warning on stderr first.

    usage @decorator(replacement="brain.whatever.new_function")
    :param func_: <callable>
    :param replacement: <str>
    :param args: positional arguments
    :param kwargs: keyword arguments
    :return: <func_'s return value>
    """
    warning = "{} is deprecated, use {}\n".format(func_.__name__, replacement)
    stderr.write(warning)
    return func_(*args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(cls, html_file):
    """Write the HTML timing report to the given file.

    Emits two sections ("Test times" and "Setup times"), each with a
    slowest-tests table and a slowest-files table.

    :param html_file: path of the HTML file to (over)write
    """
    # Two-row entries: elapsed time + file, then module.class.func.
    fmt_test = ('<tr><td>{:.05f}</td><td>{}</td></tr>'
                '<tr><td> </td><td>{}</td></tr>')
    # Single-row entries: summed elapsed time per file.
    fmt_file = '<tr><td>{:.05f}</td><td>{}</td></tr>'

    def write_test_table(f, rows):
        # One table of per-test timings.
        f.write('<table>')
        f.write('<tr><th>Time</th><th>Test info</th></tr>')
        for row in rows:
            f.write(fmt_test.format(
                row['elapsed'], row['file'],
                '{}.{}.{}'.format(row['module'], row['class'], row['func'])))
        f.write('</table>')

    def write_file_table(f, rows):
        # One table of per-file aggregated timings.
        f.write('<table>')
        f.write('<tr><th>Time</th><th>Test info</th></tr>')
        for row in rows:
            f.write(fmt_file.format(row['sum_elapsed'], row['file']))
        f.write('</table>')

    # ``with`` guarantees the handle is closed even if a write fails;
    # the original leaked the handle on error.
    with open(html_file, 'w') as f:
        f.write('<html>')
        f.write('<head>')
        f.write('</head>')
        f.write('<body>')
        f.write('<h1>Test times</h1>')
        write_test_table(f, TestTime.get_slowest_tests(10))
        write_file_table(f, TestTime.get_slowest_files(10))
        f.write('<h1>Setup times</h1>')
        write_test_table(f, SetupTime.get_slowest_tests(10))
        write_file_table(f, SetupTime.get_slowest_files(10))
        f.write('</body>')
        f.write('</html>')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_includes(include_packages, freezer=None, optional=None):
    """Build the complete set of includes for the listed packages.

    Imports every listed package (to verify it exists) and asks the
    resolved freezer for the full include set, subpackages included.

    :param include_packages: list of package names
    :param freezer: the freezer to use (see FREEZER constants)
    :param optional: optional package names (missing ones only warn)
    :return: complete set of package includes
    """
    chosen_freezer = resolve_freezer(freezer)
    # Import (or obtain references to) all packages so missing ones fail early.
    references = _import_packages(include_packages, optional=optional)
    return chosen_freezer.build_includes(references)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def locale_title(locale_name):
    """Return the title for ``locale_name`` from settings.EXTRA_COUNTRY_LOCALES.

    Falls back to ``locale_name`` unchanged when it is not configured.

    :param locale_name: locale identifier to look up
    :return: configured title, or ``locale_name`` itself
    """
    # dict.get with a default replaces the membership test + second lookup.
    return dict(settings.EXTRA_COUNTRY_LOCALES).get(locale_name, locale_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _make_opt_list(opts, group):
"""Generate a list of tuple containing group, options :param opts: option lists associated with a group :type opts: list :param group: name of an option group :type group: str :return: a list of (group_name, opts) tuples :rtype: list """ |
import copy
import itertools
_opts = [(group, list(itertools.chain(*opts)))]
return [(g, copy.deepcopy(o)) for g, o in _opts] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _request(self, lat_min, lon_min, lat_max, lon_max, start, end, picture_size=None, set_=None, map_filter=None):
    """Send a request to the Panoramio data API and decode the response.

    :param lat_min: minimum latitude of the bounding box (float)
    :param lon_min: minimum longitude of the bounding box (float)
    :param lat_max: maximum latitude of the bounding box (float)
    :param lon_max: maximum longitude of the bounding box (float)
    :param start: first photo index to retrieve (0 is the most popular)
    :param end: last photo index to retrieve
    :param picture_size: one of original, medium (default), small,
        thumbnail, square, mini_square
    :param set_: 'public', 'full', or a user id (int) -- NOTE(review):
        the docs elsewhere say 'popular', but the code checks 'full'
    :param map_filter: when False, explicitly disable the API's
        look-better-together filtering
    :return: JSON response of the request as a dictionary
    :raises PynoramioException: on bad argument types or an unparseable
        response
    """
    # Up-front type validation so the API is never called with malformed
    # parameters.
    if not isinstance(lat_min, float):
        raise PynoramioException(
            '{0}._request requires the lat_min parameter to be a float.'.format(self.__class__.__name__))
    if not isinstance(lon_min, float):
        raise PynoramioException(
            '{0}._request requires the lon_min parameter to be a float.'.format(self.__class__.__name__))
    if not isinstance(lat_max, float):
        raise PynoramioException(
            '{0}._request requires the lat_max parameter to be a float.'.format(self.__class__.__name__))
    if not isinstance(lon_max, float):
        raise PynoramioException(
            '{0}._request requires the lon_max parameter to be a float.'.format(self.__class__.__name__))
    if not isinstance(start, int):
        raise PynoramioException(
            '{0}._request requires the start parameter to be an int.'.format(self.__class__.__name__))
    if not isinstance(end, int):
        raise PynoramioException(
            '{0}._request requires the end parameter to be an int.'.format(self.__class__.__name__))

    url = self.base_url + '&minx={0}&miny={1}&maxx={2}&maxy={3}&from={4}&to={5}'.format(lon_min, lat_min,
                                                                                        lon_max, lat_max,
                                                                                        start, end)

    # Optional size parameter; invalid values are silently ignored.
    if picture_size is not None and isinstance(picture_size, basestring) \
            and picture_size in ['original', 'medium', 'small', 'thumbnail', 'square', 'mini_square']:
        url += '&size={0}'.format(picture_size)

    # NOTE(review): operator precedence makes this
    # ``(set_ is not None and <str check>) or <int check>``; it behaves
    # as intended only because ``isinstance(None, int)`` is False.
    if set_ is not None and (isinstance(set_, basestring) and set_ in ['public', 'full']) \
            or (isinstance(set_, int)):
        url += '&set={0}'.format(set_)
    else:
        url += '&set=public'

    # Only an explicit False adds the flag; the API default is on.
    if map_filter is not None and isinstance(map_filter, bool) and not map_filter:
        url += '&map_filter=false'

    r = requests.get(url)
    try:
        return r.json()
    except ValueError:
        # add your debugging lines here, for example, print(r.url)
        raise PynoramioException(
            'An invalid or malformed url was passed to {0}._request'.format(self.__class__.__name__))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_from_area(self, lat_min, lon_min, lat_max, lon_max, picture_size=None, set_=None, map_filter=None):
    """Get all available photos for a bounding box.

    Pages through the API in chunks of 100 and merges the ``photos``
    lists into a single result dictionary.

    :param lat_min: minimum latitude of the bounding box (float)
    :param lon_min: minimum longitude of the bounding box (float)
    :param lat_max: maximum latitude of the bounding box (float)
    :param lon_max: maximum longitude of the bounding box (float)
    :param picture_size: original, medium (default), small, thumbnail,
        square or mini_square
    :param set_: 'public', 'full', or a user id (int)
    :param map_filter: when False, disable the same-location filtering
    :return: the full dataset of all available photos (dict)
    """
    page_size = 100
    page = 0
    result = self._request(lat_min, lon_min, lat_max, lon_max,
                           page * page_size, (page + 1) * page_size,
                           picture_size, set_, map_filter)
    total_photos = result['count']
    # A single page holds everything: no further requests needed.
    if total_photos < page_size:
        return result

    page += 1
    # Floor division keeps ``pages`` an int on Python 3 as well; the
    # original ``/`` became float division there.
    pages = (total_photos // page_size) + 1
    while page < pages:
        new_result = self._request(lat_min, lon_min, lat_max, lon_max,
                                   page * page_size, (page + 1) * page_size,
                                   picture_size, set_, map_filter)
        result['photos'].extend(new_result['photos'])
        page += 1
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all_pictures_cursor(self, lat_min, lon_min, lat_max, lon_max, picture_size=None, set_=None, map_filter=None):
    """Generator yielding every available photo for a bounding box.

    Pages through the API in chunks of 100, yielding photo dicts one at
    a time.

    :param lat_min: minimum latitude of the bounding box (float)
    :param lon_min: minimum longitude of the bounding box (float)
    :param lat_max: maximum latitude of the bounding box (float)
    :param lon_max: maximum longitude of the bounding box (float)
    :param picture_size: original, medium (default), small, thumbnail,
        square or mini_square
    :param set_: 'public', 'full', or a user id (int)
    :param map_filter: when False, disable the same-location filtering
    :return: yields individual photo dicts
    """
    page_size = 100
    page = 0
    result = self._request(lat_min, lon_min, lat_max, lon_max,
                           page * page_size, (page + 1) * page_size,
                           picture_size, set_, map_filter)
    total_photos = result['count']
    for photo in result['photos']:
        yield photo
    if total_photos < page_size:
        # ``return`` ends a generator cleanly; ``raise StopIteration()``
        # inside a generator is a RuntimeError since Python 3.7 (PEP 479).
        return

    page += 1
    # Floor division keeps ``pages`` an int on Python 3 as well.
    pages = (total_photos // page_size) + 1
    while page < pages:
        result = self._request(lat_min, lon_min, lat_max, lon_max,
                               page * page_size, (page + 1) * page_size,
                               picture_size, set_, map_filter)
        for photo in result['photos']:
            yield photo
        page += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_column(self, position, missing_seqs=MissingSequenceHandler.SKIP):
    """Return one alignment column as a dict indexed by sequence name.

    :param position: column index in alignment co-ordinates (one-based:
        the first column is 1, the last is ``size(self)``)
    :param missing_seqs: how to treat sequences with no actual sequence
        data for the column (skip them, or report them as gaps)
    :return: dict mapping sequence names to nucleotides (raw strings)
    """
    column = {}
    for name in self.sequences:
        seq = self.sequences[name]
        if isinstance(seq, UnknownSequence):
            # No real data here; either report a gap or omit the entry.
            if missing_seqs is MissingSequenceHandler.TREAT_AS_ALL_GAPS:
                column[name] = "-"
        else:
            # Convert one-based alignment index to zero-based sequence index.
            column[name] = seq[position - 1]
    return column
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def liftover(self, origin, dest, o_start, o_end, trim=False):
    """Lift an interval in one sequence of this pairwise alignment to the other.

    :param origin: name of the origin sequence (the one the input
        co-ordinates refer to)
    :param dest: name of the destination sequence
    :param o_start: interval start (in sequence co-ordinates)
    :param o_end: interval end (in sequence co-ordinates)
    :param trim: passed through to the alignment-coordinate conversion
    :return: list of (start, end) intervals in the destination sequence
    """
    columns = self.sequence_to_alignment_coords(origin, o_start,
                                                o_end, trim=trim)
    # Map each alignment-column block back to the destination sequence,
    # dropping blocks that have no counterpart there.
    mapped = (self.alignment_to_sequence_coords(dest, s, e)
              for s, e in columns)
    return [coords for coords in mapped if coords is not None]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _after_event(self, e):
    '''
    Persist state and run any registered after-event callback.

    The current state is serialized to redis, then the first of
    ``onafter<event>`` / ``on<event>`` found on the instance is invoked.
    '''
    # Serialize the machine's current state to redis after each event.
    self.r.set(self.rhname, self.current)
    for name in ('onafter' + e.event, 'on' + e.event):
        callback = getattr(self, name, None)
        if callback is not None:
            return callback(e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def coerce(value):
    """Turn ``value`` into a StringCell.

    Existing StringCells pass through unchanged; strings are wrapped.

    :raises CoercionFailure: for any other type
    """
    if isinstance(value, StringCell):
        return value
    if isinstance(value, (str, unicode)):
        return StringCell(value)
    raise CoercionFailure("Cannot coerce %s to StringCell" % (value))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_contradictory(self, other):
    """Return True when these two strings cannot coexist.

    Two values are compatible when either one appears, in character
    order (not necessarily contiguously), inside the other.
    """
    other = StringCell.coerce(other)
    # None means "empty" and never contradicts anything.
    if self.value is None or other.value is None:
        return False

    def in_order(needle, haystack):
        """Do the characters of ``needle`` appear in order in ``haystack``?"""
        return bool(re.search(".*".join(needle), haystack))

    return not (in_order(self.value, other.value) or
                in_order(other.value, self.value))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_equal(self, other):
    """Return whether the two string cells hold equal values.

    ``None`` and the empty string are treated as interchangeable
    "empty" values.
    """
    other = StringCell.coerce(other)
    blank = (None, '')
    if self.value in blank and other.value in blank:
        return True
    return self.value == other.value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(self, other):
    """Merge another string cell into this one and return self.

    Checks run in order: equality, entailment (either direction),
    contradiction, and finally a content merge.

    :raises Contradiction: when the two values cannot coexist
    """
    other = StringCell.coerce(other)
    if self.is_equal(other):
        # pick among dependencies
        return self
    elif other.is_entailed_by(self):
        # our value already carries all of other's information
        return self
    elif self.is_entailed_by(other):
        # other is strictly more informative: adopt its value
        self.value = other.value
    elif self.is_contradictory(other):
        raise Contradiction("Cannot merge string '%s' with '%s'" % \
                (self, other))
    else:
        # partial overlap: fall back to the length-based merge
        self._perform_merge(other)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _perform_merge(self, other):
    """Adopt ``other``'s value when it is the longer string.

    :return: True when this cell's value was replaced (None otherwise)
    """
    if len(other.value) > len(self.value):
        # Copy with [:] so later mutation of ``other`` cannot alias us.
        self.value = other.value[:]
        return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_locations_from_coords(self, longitude, latitude, levels=None):
    """Return a list of geographies containing this point.

    Queries the Mapit point endpoint and resolves each returned feature
    into a geography, optionally filtered by ``levels``.

    :param longitude: point longitude
    :param latitude: point latitude
    :param levels: optional collection of geo levels to keep
    :return: list of geography objects
    """
    resp = requests.get(SETTINGS['url'] + '/point/4326/%s,%s?generation=%s' % (longitude, latitude, SETTINGS['generation']))
    resp.raise_for_status()

    geos = []
    # ``values()`` works on both Python 2 and 3; the original used the
    # Python-2-only ``itervalues()``.
    for feature in resp.json().values():
        try:
            geo = self.get_geography(feature['codes']['MDB'],
                                     feature['type_name'].lower())
            if not levels or geo.geo_level in levels:
                geos.append(geo)
        except LocationNotFound as e:
            # Mapit returned a feature we have no geography for; log and
            # keep going (``warning`` replaces the deprecated ``warn``).
            log.warning("Couldn't find geo that Mapit gave us: %s" % feature, exc_info=e)
    return geos
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_folder(self, folder):
    """Register a folder and index every image found below it.

    Folders already registered are skipped; otherwise the tree is walked
    and files whose extension is in ``self.exts`` are appended to
    ``self.images``.
    """
    if folder in self.folders:
        return  # already indexed
    self.folders.add(folder)
    for dirpath, _dirnames, files in os.walk(folder):
        for fname in files:
            _stem, extension = os.path.splitext(fname)
            if extension in self.exts:
                self.images.append(os.path.join(dirpath, fname))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def interpret(self, msg):
    """Try to find the image file a message refers to.

    Any folders listed under ``galleries`` are indexed first; then the
    ``image`` entry, if present, is looked up.

    FIXME move elsewhere and make so everyone can use -- an interpreter
    that finds things?
    """
    for gallery in msg.get('galleries', []):
        self.add_folder(gallery)

    image_file = msg.get('image')
    if image_file:
        return self.find_image(image_file)
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subscriberSocket(self, host, port, filt=b'', conflate=False):
    '''
    Create a SUB-style socket for data receivers.

    :param host: publisher host to connect to
    :param port: publisher port
    :param filt: subscription topic filter (bytes); b'' subscribes to all
    :param conflate: when True, keep only the most recent message
    :return: the connected zmq SUB socket
    '''
    socket = self._context.socket(zmq.SUB)
    if conflate:
        # Option is applied before connecting (presumably required for it
        # to take effect -- confirm against pyzmq docs).
        socket.setsockopt(zmq.CONFLATE, 1)
    socket.connect(self.tcpAddress(host, port))
    socket.setsockopt(zmq.SUBSCRIBE, filt)
    return socket
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replySocket(self, port, host='*'):
    '''
    Create a REP-style socket for servers, bound to host:port.

    :param port: port to bind
    :param host: interface to bind ('*' binds all interfaces)
    :return: the bound zmq REP socket
    '''
    try:
        socket = self._context.socket(zmq.REP)
        socket.bind(self.tcpAddress(host, port))
    except Exception as e:
        # Re-raise the same exception type with the address appended for
        # easier debugging.  NOTE(review): this assumes the exception
        # type accepts a single string argument and it drops the original
        # traceback -- consider ``raise ... from e`` on Python 3.
        newMsg = str("%s %s:%d" % (str(e), host, port))
        raise (type(e))(newMsg)
    return socket
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def requestSocket(self, host, port):
    '''
    Create a REQ-style socket for clients, connected to host:port.

    :param host: server host to connect to
    :param port: server port
    :return: the connected zmq REQ socket
    '''
    sock = self._context.socket(zmq.REQ)
    sock.connect(self.tcpAddress(host, port))
    return sock
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_accept_header(header):
    """Parse an HTTP Accept-style header into (value, quality) pairs.

    A missing quality parameter defaults to 1; explicit values are
    clamped into [0, 1].
    """
    parsed = []
    for match in accept_re.finditer(header):
        q = match.group(2)
        quality = 1 if not q else max(min(float(q), 1), 0)
        parsed.append((match.group(1), quality))
    return parsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def startup(self, app):
    """Initialize a local namespace and set up Jinja2 i18n.

    :param app: the application being started
    """
    # Task-local storage for the active locale/translations.
    self.local = slocal(app.loop)
    if self.cfg.configure_jinja2 and 'jinja2' in app.ps:
        # Expose gettext/ngettext to templates; translations are resolved
        # lazily on every call so they follow the current locale.
        app.ps.jinja2.env.add_extension('jinja2.ext.i18n')
        app.ps.jinja2.env.install_gettext_callables(
            lambda x: self.get_translations().ugettext(x),
            lambda s, p, n: self.get_translations().ungettext(s, p, n),
            newstyle=True
        )
    if self.locale_selector_func:
        # Only install the locale-detection middleware when a selector
        # has been configured.
        app.middlewares.append(self._middleware)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_translations(self, domain=None, locale=None):
    """Load translations for the given (or configured) domain.

    Results are cached per ``(domain, language)``.  Catalogs from all
    configured locales dirs are merged; dirs listed earlier in
    ``cfg.locales_dirs`` take precedence (they are merged last).

    :param domain: messages domain (str); defaults to ``self.cfg.domain``
    :param locale: Locale object; defaults to the currently active locale
    :return: a Translations object (NullTranslations when no locale is set)
    """
    if locale is None:
        if self.locale is None:
            # No active locale: translate nothing.
            return support.NullTranslations()
        locale = self.locale
    if domain is None:
        domain = self.cfg.domain
    if (domain, locale.language) not in self.translations:
        translations = None
        # Iterate dirs in reverse so earlier-listed dirs override.
        for locales_dir in reversed(self.cfg.locales_dirs):
            trans = support.Translations.load(
                locales_dir, locales=locale, domain=domain)
            if translations:
                # Merge this catalog over the accumulated one.
                translations._catalog.update(trans._catalog)
            else:
                translations = trans
        self.translations[(domain, locale.language)] = translations
    return self.translations[(domain, locale.language)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def locale(self, value):
    """Set the current locale; accepts a Locale or a parseable string."""
    parsed = value if isinstance(value, Locale) else Locale.parse(value)
    self.local.babel_locale = parsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select_locale_by_request(self, request, locales=()):
    """Choose a user's locale from the request's Accept-Language header.

    :param request: the incoming request
    :param locales: supported locales, most-preferred first
    :return: the matched locale string, or the default locale
    """
    default_locale = locales and locales[0] or self.cfg.default_locale
    # Nothing to negotiate: single supported locale or no header.
    if len(locales) == 1 or 'ACCEPT-LANGUAGE' not in request.headers:
        return default_locale

    # Build (quality, language) pairs, best quality first.
    ulocales = [
        (q, locale_delim_re.split(v)[0])
        for v, q in parse_accept_header(request.headers['ACCEPT-LANGUAGE'])
    ]
    ulocales.sort()
    ulocales.reverse()

    for locale in locales:
        for _, ulocale in ulocales:
            # Already split on build; no need to re-split here.
            if ulocale.lower() == locale.lower():
                return ulocale

    # Nothing the client asked for is supported: fall back to the default
    # instead of returning an unsupported locale (the original also raised
    # IndexError here when the header parsed to nothing).
    return default_locale
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gettext(self, string, domain=None, **variables):
    """Translate ``string`` with the current locale, interpolating
    ``variables`` via %-formatting."""
    translations = self.get_translations(domain)
    return translations.ugettext(string) % variables
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ngettext(self, singular, plural, num, domain=None, **variables):
    """Translate a string with the current locale, choosing the singular
    or plural form based on ``num``.

    ``num`` is also made available for interpolation as ``%(num)s``
    unless the caller provided its own.
    """
    variables.setdefault('num', num)
    translations = self.get_translations(domain)
    return translations.ungettext(singular, plural, num) % variables
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_vocab(self, vocab_name, **kwargs):
    """Load a vocabulary into the definition triplestore.

    Already-loaded vocabularies are reloaded only when the file on disk
    is newer than the recorded load time.

    :param vocab_name: the prefix, uri or filename of a vocabulary
    :param kwargs: may override ``log_level`` and ``conn``
    """
    log.setLevel(kwargs.get("log_level", self.log_level))
    vocab = self.get_vocab(vocab_name, **kwargs)
    if vocab['filename'] in self.loaded:
        # Unknown load time defaults to a date old enough to force reload.
        if self.loaded_times.get(vocab['filename'],
                                 datetime.datetime(2001,1,1)).timestamp() \
                < vocab['modified']:
            self.drop_file(vocab['filename'], **kwargs)
        else:
            return
    conn = kwargs.get("conn", self.conn)
    # The datatype is inferred from the file extension.
    conn.load_data(graph=getattr(__NSM__.kdr, vocab['filename']).clean_uri,
                   data=vocab['data'],
                   datatype=vocab['filename'].split(".")[-1],
                   log_level=logging.WARNING)
    self.__update_time__(vocab['filename'], **kwargs)
    log.warning("\n\tvocab: '%s' loaded \n\tconn: '%s'",
                vocab['filename'],
                conn)
    self.loaded.append(vocab['filename'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_vocab(self, vocab_name, **kwargs):
    """Return the data stream of an RDF vocabulary.

    Looks for a cached or bundled copy first; otherwise downloads the
    vocabulary into the cache directory.

    :param vocab_name: the name or uri of the vocab to return
    :return: the vocab dict updated with ``name``, ``data`` (bytes) and
        ``modified`` (file mtime)
    """
    vocab_dict = self.__get_vocab_dict__(vocab_name, **kwargs)

    # Candidate locations: cache dir first, then the bundled vocab dir
    # (set() removes the duplicate when both dirs are the same).
    filepaths = list(set([os.path.join(self.cache_dir,
                                       vocab_dict['filename']),
                          os.path.join(self.vocab_dir,
                                       vocab_dict['filename'])]))
    for path in filepaths:
        if os.path.exists(path):
            with open(path, 'rb') as f_obj:
                vocab_dict.update({"name": vocab_name,
                                   "data": f_obj.read(),
                                   "modified": os.path.getmtime(path)})
            return vocab_dict

    # Not on disk: download from the first working location.
    download_locs = make_list(vocab_dict.get('download',[]))
    for loc in download_locs:
        # NOTE(review): ``loc_web`` is opened but never used -- possibly
        # intended as a reachability check; confirm before removing.
        loc_web = urllib.request.urlopen(loc)
        urllib.request.urlretrieve(loc, filepaths[0])
        with open(filepaths[0], 'rb') as f_obj:
            vocab_dict.update({"name": vocab_name,
                               "data": f_obj.read(),
                               "modified": os.path.getmtime(filepaths[0])})
        return vocab_dict
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def drop_vocab(self, vocab_name, **kwargs):
    """Remove a vocabulary's file from the definition triplestore.

    :param vocab_name: the name or uri of the vocab to drop
    :return: result of dropping the underlying file
    """
    filename = self.__get_vocab_dict__(vocab_name, **kwargs)['filename']
    return self.drop_file(filename, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def quick(config, mysterious=None, only_authenticated=None):
    """View decorator deciding whether a user may access a view.

    :param config: global switch -- when falsy every request gets a
        blank response (ajax) or a 404.
    :param mysterious: overloaded convenience argument: a bool is
        treated as ``only_authenticated``; anything else is treated as
        the dotted path of a callable returning the ids of users allowed
        to proceed.
    :param only_authenticated: when True, require an authenticated user.
    """
    def decorator(func):
        @wraps(func)
        def _quick(request, *args, **kwargs):
            # Ajax-ness controls blank-response vs. 404 throughout.
            is_ajax = request.is_ajax()
            # Feature globally disabled.
            if not config:
                return _return_blank_or_raise_404(is_ajax)

            # Untangle the overloaded ``mysterious`` argument.
            callable_name = None
            _only_authenticated = None
            if mysterious is not None:
                if type(mysterious) == bool:
                    _only_authenticated = mysterious
                else:
                    callable_name = mysterious
            elif only_authenticated is not None:
                _only_authenticated = only_authenticated

            user = request.user
            if callable_name is None:
                # Only the authentication requirement (if any) applies.
                if (_only_authenticated is not None and
                        _only_authenticated and
                        not user.is_authenticated()):
                    return _return_blank_or_raise_404(is_ajax)
                else:
                    return func(request, *args, **kwargs)
            else:
                # Callable gate: user must be authenticated AND listed.
                if not user.is_authenticated():
                    return _return_blank_or_raise_404(is_ajax)
                else:
                    _callable = function_from_string(callable_name)
                    if user.id in _callable():
                        return func(request, *args, **kwargs)
                    return _return_blank_or_raise_404(is_ajax)
            # NOTE(review): unreachable -- every branch above returns.
            return _return_blank_or_raise_404(is_ajax)
        return _quick
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def catch(cls, catch_exception, config='default'):
    """Decorator class method retrying the wrapped member function.

    When ``catch_exception`` is raised, the decorator waits according to
    the delay policy of the named HA config and retries; after
    ``max_retries`` attempts the exception is re-raised.

    :param catch_exception: exception class or tuple of exception classes
    :param config: name of the HA configuration to use
    """
    def wrap(method):
        @functools.wraps(method)
        def wrapped_method(self, *args, **kwargs):
            assert isinstance(self, HA)
            delay_policy = self.ha_get_delay_policy(config)
            max_retries = self.ha_get_config(config).max_retries
            for retries in itertools.count():
                try:
                    return method(self, *args, **kwargs)
                except catch_exception as e:
                    # Let the instance rewrite the call arguments before
                    # the next attempt (e.g. fail over to another host).
                    res = self.ha_on_error(method, e, args, kwargs)
                    if res is not None:
                        args, kwargs = res
                    # A falsy max_retries means retry forever.
                    if max_retries and retries >= max_retries:
                        raise
                    tts = next(delay_policy)
                    time.sleep(tts)
        return wrapped_method
    return wrap
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ha_get_delay_policy(self, config_name):
    """Build the generator of delays to wait between retried calls.

    :param config_name: configuration name (str)
    :return: a delay generator from ``self._retry_delays_class``
    """
    config = self.ha_get_config(config_name)
    # Deep-copy so updating ``delay`` never mutates the shared config.
    policy_kwargs = copy.deepcopy(config.delay_config)
    policy_kwargs.update(delay=config.delay)
    return self._retry_delays_class.get(config.delay_policy, **policy_kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self, value, model_instance):
    """Check that ``value`` is a valid JSON string.

    :raises ValidationError: when the string does not parse as JSON
    """
    if isinstance(value, six.string_types):
        super(JSONField, self).validate(value, model_instance)
        try:
            json.loads(value)
        except ValueError as err:
            # json.JSONDecodeError subclasses ValueError; catching the
            # narrow type no longer masks unrelated failures the way the
            # original ``except Exception`` did.
            raise ValidationError(str(err))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def value_to_string(self, obj):
    """Return this field's value on ``obj``, serialized to a string."""
    raw = getattr(obj, self.attname)
    return self.get_prep_value(raw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_down(self, arg):
    """Run the down migration whose name or numeric id matches ``arg``."""
    # print() is valid on both Python 2 and 3; the statement form was not.
    print("running down migration")
    self.manager.run_one(arg, Direction.DOWN)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_up(self, arg):
    """Run the up migration whose name or numeric id matches ``arg``."""
    # print() is valid on both Python 2 and 3; the statement form was not.
    print("running up migration")
    # NOTE(review): this calls ``run`` while do_down calls ``run_one`` --
    # confirm the asymmetry is intentional.
    self.manager.run(arg, Direction.UP)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subclass_exception(name, parents, module, attached_to=None):
    """Create an exception subclass.

    If ``attached_to`` is supplied, the class is built so it can be
    pickled, assuming it will be added as an attribute of the
    ``attached_to`` class.

    :param name: name of the new exception class
    :param parents: tuple of base classes
    :param module: value for the class's ``__module__``
    :param attached_to: optional owner class for pickle support
    :return: the new exception class
    """
    namespace = {'__module__': module}
    if attached_to is not None:
        # Exceptions are special: their state lives in ``args`` rather
        # than ``__dict__``, so pickling only round-trips ``args``.
        def __reduce__(self):
            return (unpickle_inner_exception, (attached_to, name), self.args)

        def __setstate__(self, args):
            self.args = args

        namespace['__reduce__'] = __reduce__
        namespace['__setstate__'] = __setstate__
    return type(name, parents, namespace)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def all_files(self):
    """Return the set of all file paths under git control (decoded)."""
    index = self.git.open_index()
    return {entry.decode() for entry, _ in index.items()}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def file_commit_times(self, use_files_paths, debug=False):
    """Walk commits from HEAD and find when each watched file changed.

    Yields ``(commit_sha_hex, commit_time, changed_paths)`` for each
    commit that touched at least one watched file; every file is
    reported at most once (for the first commit, walking backwards,
    that changed it).  Stops as soon as all files are accounted for.

    :param use_files_paths: iterable of path tuples to watch
    :param debug: documented as enabling commits/second log output, but
        unused in this body -- kept for interface compatibility
    """
    prefixes = PrefixTree()
    prefixes.fill(use_files_paths)
    for entry in self.git.get_walker():
        # Commit time taking into account the timezone
        commit_time = entry.commit.commit_time - entry.commit.commit_timezone

        # Get us the two different tree structures between parents and current
        cf_and_pf, changes = self.tree_structures_for(()
            , entry.commit.tree
            , [self.git.get_object(oid).tree for oid in entry.commit.parents]
            , prefixes
            )

        # Deep dive into any differences (breadth-first over subtrees).
        difference = []
        if changes:
            cfs_and_pfs = [(cf_and_pf, changes)]
            while cfs_and_pfs:
                # NOTE(review): ``changes`` is rebound both here and by
                # the inner loop target below -- intentional but fragile.
                nxt, changes = cfs_and_pfs.pop(0)
                for thing, changes, is_path in self.differences_between(nxt[0], nxt[1], changes, prefixes):
                    if is_path:
                        # A changed file we still watch: record it and
                        # stop watching it.
                        found = prefixes.remove(thing[:-1], thing[-1])
                        if found:
                            difference.append('/'.join(thing))
                    else:
                        # A changed subtree: queue it for a deeper look.
                        cfs_and_pfs.append((thing, changes))

        # Only yield if there was a difference
        if difference:
            yield entry.commit.sha().hexdigest(), commit_time, difference

        # If nothing remains, then break!
        if not prefixes:
            break
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def entries_in_tree_oid(self, prefix, tree_oid):
    """Return the entries of the tree at ``tree_oid`` prefixed with ``prefix``.

    Missing objects (e.g. shallow clones) produce a warning and the
    shared ``empty`` set instead of raising.
    """
    try:
        tree = self.git.get_object(tree_oid)
    except KeyError:
        log.warning("Couldn't find object {0}".format(tree_oid))
        return empty
    return frozenset(self.entries_in_tree(prefix, tree))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(args):
    """Main entry point for the GenomicIntJaccard script.

    :param args: the arguments for this script, as a list of strings,
        with things like the script name already stripped (an empty list
        when no args were provided).
    """
    # get options and arguments
    ui = getUI(args)

    if ui.optionIsSet("test"):
        # just run unit tests
        unittest.main(argv=[sys.argv[0]])
    elif ui.optionIsSet("help"):
        # just show help
        ui.usage()
    else:
        verbose = ui.optionIsSet("verbose")
        stranded = ui.optionIsSet("stranded")

        if stranded:
            sys.stderr.write("Sorry, stranded mode hasn't been implemented yet.")
            sys.exit()

        # we required two input files, so we know these will be present...
        regions_1 = [e for e in BEDIterator(ui.getArgument(0), verbose=verbose)]
        regions_2 = [e for e in BEDIterator(ui.getArgument(1), verbose=verbose)]
        # print() works on both Python 2 and 3; the statement form was py2-only.
        print(jaccardIndex(regions_1, regions_2))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_doc(doc=''):
"""Parse a docstring into title and description. Args ---- doc : str A docstring, optionally with a title line, separated from a description line by at least one blank line. Returns ------- title : str The first line of the docstring. description : str The rest of a docstring. """ |
title, description = '', ''
if doc:
sp = doc.split('\n', 1)
title = sp[0].strip()
if len(sp) > 1:
description = textwrap.dedent(sp[1]).strip()
return (title, description) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def command(name):
    """Create a command, using the wrapped function as the handler.

    Args
    ----
    name : str
        Name given to the created Command instance.

    Returns
    -------
    Command
        A new instance of Command, with handler set to the wrapped function.
    """
    # TODO(nick): the decorator replaces the handler with a Command, so the
    # plain function can no longer be used or tested independently.
    def wrapper(func):
        title, description = _parse_doc(func.__doc__)
        cmd = Command(name=name, title=title, description=description)
        cmd.add_handler(func)
        for spec_args, spec_kwargs in getattr(func, 'ARGPARSE_ARGS_LIST', []):
            cmd.add_argument_tuple((spec_args, spec_kwargs))
        return cmd
    return wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_command(parent_command, name):
    """Create and register a command with a parent command.

    Args
    ----
    parent_command : Command
        The parent command.
    name : str
        Name given to the created Command instance.

    Returns
    -------
    function
        A decorator that builds a Command from the handler, registers it on
        ``parent_command``, and returns the new Command.

    Example
    -------
    .. testcode::

        mygit = Command(name='mygit')

        @register_command(mygit, 'status')
        def status():
            print 'Nothing to commit.'
    """
    def wrapper(func):
        c = command(name)(func)
        parent_command.add_subcommand(c)
        # Return the Command so the decorated name stays usable; previously
        # the wrapper fell off the end and rebound the decorated name to None.
        return c
    return wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _attach_arguments(self):
"""Add the registered arguments to the parser.""" |
for arg in self.arguments:
self.parser.add_argument(*arg[0], **arg[1]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _attach_subcommands(self):
"""Create a subparser and add the registered commands to it. This will also call ``_init`` on each subcommand (in turn invoking its ``_attach_subcommands`` method). """ |
if self.subcommands:
self.subparsers = self.parser.add_subparsers()
for subcommand in self.subcommands:
subparser = self.subparsers.add_parser(subcommand.name,
help=subcommand.title)
if subcommand.handler:
self._register_handler(subparser, subcommand.handler)
subcommand._init(subparser) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_args(self, args=None, namespace=None):
    """Parse the command-line arguments and call the associated handler.

    The signature is the same as
    `argparse.ArgumentParser.parse_args
    <https://docs.python.org/2/library/argparse.html#argparse.ArgumentParser.parse_args>`_.

    Args
    ----
    args : list
        A list of argument strings. If ``None`` the list is taken from
        ``sys.argv``.
    namespace : argparse.Namespace
        A Namespace instance. Defaults to a new empty Namespace.

    Returns
    -------
    The return value of the handler called with the populated Namespace as
    kwargs, or ``None`` when no handler is associated.
    """
    assert self.initialized, '`init` must be called before `parse_args`.'
    populated = self.parser.parse_args(args, namespace)
    handler = self._get_handler(populated, remove_handler=True)
    if not handler:
        return None
    return handler(**vars(populated))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _print_jobs(jobs, token_manager, app_url, options):
    """ internal method to print the provided jobs array in a nice tabular
        format

        jobs: list of job dicts (mutated in place to add display fields)
        token_manager: auth token manager used for the accounts lookup
        app_url: base application URL for the accounts lookup
        options: CLI options; ``options.format`` selects 'text' or 'table'

        Raises JutException for an unsupported ``options.format``.
    """
    # Collect the distinct non-internal account ids so usernames can be
    # resolved with a single batched lookup.
    accountids = set()
    for job in jobs:
        if job['user'] != 'jut.internal.user':
            accountids.add(job['user'])
    account_lookup = {
        'jut.internal.user': {
            'username': 'Jut Internal'
        }
    }
    if accountids:
        accounts_details = accounts.get_accounts(accountids,
                                                 token_manager=token_manager,
                                                 app_url=app_url)
        for account in accounts_details['accounts']:
            account_lookup[account['id']] = account
    if options.format == 'text':
        labels = OrderedDict()
        labels['id'] = 'Job ID'
        labels['alias'] = 'Juttle Name'
        labels['username'] = 'Owner'
        labels['_start_time'] = 'Start Date'
        labels['persistent'] = 'Persistent'
        # Each column starts at the label width plus one space of padding.
        max_lengths = {}
        for key in labels.keys():
            max_lengths[key] = len(labels[key]) + 1
        # Retrieve username, fix up the persistent marker, and default a
        # missing alias to '' (jobs without a name previously raised
        # KeyError below; the 'table' branch already guarded against this).
        for job in jobs:
            job['username'] = account_lookup[job['user']]['username']
            job['persistent'] = 'YES' if job['timeout'] == 0 else 'NO'
            if 'alias' not in job:
                job['alias'] = ''
        # calculate max length of each column
        for job in jobs:
            for key in labels.keys():
                if max_lengths[key] < len(job[key]):
                    max_lengths[key] = len(job[key]) + 1
        # print labels
        header = ''
        for key in labels.keys():
            header += (labels[key] + ' ' * (max_lengths[key] - len(labels[key])))
        info(header)
        for job in jobs:
            line = ''
            for key in labels.keys():
                line += (job[key] + ' ' * (max_lengths[key] - len(job[key])))
            info(line)
    elif options.format == 'table':
        headers = ['Job ID', 'Juttle Name', 'Owner', 'Start Date', 'Persistent']
        table = []
        for job in jobs:
            owner = account_lookup[job['user']]['username']
            persistent = 'YES' if job['timeout'] == 0 else 'NO'
            name = ''
            if 'alias' in job:
                name = job['alias']
            table.append([job['id'],
                          name,
                          owner,
                          job['_start_time'],
                          persistent])
        info(tabulate.tabulate(table, headers, tablefmt="orgtbl"))
    else:
        raise JutException('Unsupported output format "%s"' %
                           options.format)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(options):
    """ show all currently running jobs

        options: CLI options; ``options.deployment`` optionally overrides the
        configured default deployment.
    """
    configuration = config.get_default()
    app_url = configuration['app_url']
    # Prefer an explicitly requested deployment over the configured default
    # (identity check, not `!= None`, per PEP 8).
    if options.deployment is not None:
        deployment_name = options.deployment
    else:
        deployment_name = configuration['deployment_name']
    client_id = configuration['client_id']
    client_secret = configuration['client_secret']
    token_manager = auth.TokenManager(client_id=client_id,
                                      client_secret=client_secret,
                                      app_url=app_url)
    jobs = data_engine.get_jobs(deployment_name,
                                token_manager=token_manager,
                                app_url=app_url)
    if not jobs:
        error('No running jobs')
    else:
        _print_jobs(jobs, token_manager, app_url, options)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.