text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def activate(self, prefix=None, backend=None):
    """A decorator used to activate the mocker.

    :param prefix: Optional string prefix; stored on the mocker when it
        is one of ``compat.string_types``.
    :param backend: An instance of a storage backend; stored on the
        mocker when it is an ``RmoqStorageBackend``.
    :returns: A decorator that either decorates a whole class (delegated
        to ``self._decorate_class``) or wraps a function so it runs
        inside this mocker's context manager.
    """
    import functools

    if isinstance(prefix, compat.string_types):
        self.prefix = prefix
    if isinstance(backend, RmoqStorageBackend):
        self.backend = backend

    def activate(func):
        if isinstance(func, type):
            # Decorating a class is delegated wholesale.
            return self._decorate_class(func)

        # Bug fix: without functools.wraps the decorated function lost its
        # __name__/__doc__, which breaks introspection and test discovery.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return wrapper
    return activate
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_call(self, req, resource):
    """This is where all callbacks are made and the req is processed.""" |
    # Handles "ports" requests: forwards them through the rest of the WSGI
    # stack (Neutron), mirrors the change to Nova via
    # admin_virtual_interfaces, and annotates the JSON response body with
    # "neutron_callback" / "nova_callback" status blocks.
    if resource == "ports":
        if req.method.upper() in ('PUT', 'POST'):
            # Pass the request back to be processed by other filters
            # and Neutron first
            resp = req.get_response(self.app)
            if resp.status_code not in (200, 204):
                return resp
            resp_body = resp.json
            # Variables for Nova Call, obtained from Neutron response
            action = "create"
            address = resp_body['port']['mac_address']
            fixed_ips = resp_body['port']['fixed_ips']
            instance_id = resp_body['port']['instance_id']
            network_id = resp_body['port']['network_id']
            port_id = resp_body['port']['id']
            tenant_id = resp_body['port']['tenant_id']
        elif req.method.upper() == "DELETE":
            action = "delete"
            # Extract the port id from the request path (".../ports/<id>").
            port_id = req.path.split("/")
            port_id = port_id[port_id.index("ports") + 1]
            # DELETEs do not have all the port info that we need, so a
            # call to Neutron must be made first.
            neutron_conn = NeutronConn(log=self.log,
                                       port=self.neutron_port,
                                       url=self.neutron_url,
                                       verify_ssl=self.neutron_verify_ssl)
            status, neutron_resp = neutron_conn.ports(port_id=port_id)
            if isinstance(neutron_resp, Exception):
                return neutron_resp
            elif status not in (200, 204):
                # Surface the Neutron lookup failure to the caller as a 500
                # with a structured "neutron_callback" error body.
                resp = Response()
                resp.status = 500
                new_body = {"neutron_callback":
                            {"port_id": port_id,
                             "status": "error",
                             "error": neutron_resp}}
                resp.body = json.dumps(new_body)
                return resp
            # Now that we have the port info, we can make the variables
            # for the Nova Call
            address = neutron_resp['port']['mac_address']
            fixed_ips = neutron_resp['port']['fixed_ips']
            instance_id = neutron_resp['port']['instance_id']
            network_id = neutron_resp['port']['network_id']
            tenant_id = neutron_resp['port']['tenant_id']
            # Port info saved, now send the request back to processed by
            # other filters and Neutron
            resp = req.get_response(self.app)
            if resp.status_code not in (200, 204):
                return resp
            else:
                new_body = resp.json
                new_body['neutron_callback'] = {"port_id": port_id,
                                                "status": "success"}
                resp.body = json.dumps(new_body)
        # NOTE(review): for "ports" requests with any other method (e.g.
        # GET), action/address/... were never assigned and the call below
        # raises NameError -- confirm upstream routing only sends
        # PUT/POST/DELETE here.
        nova_conn = NovaConn(log=self.log, url=self.nova_url,
                             verify_ssl=self.nova_verify_ssl)
        status, nova_resp = nova_conn.admin_virtual_interfaces(
            action=action, address=address, fixed_ips=fixed_ips,
            network_id=network_id, port_id=port_id, tenant_id=tenant_id,
            instance_id=instance_id)
        if isinstance(nova_resp, Exception):
            return nova_resp
        elif status not in (200, 204):
            # We'll likely want to provide the customer with a call here
            # such as virtual-interface-delete/virtual-interface-update
            resp.status = 500
            new_body = resp.json
            new_body['nova_callback'] = {"instance_id": instance_id,
                                         "status": "error",
                                         "error": nova_resp}
            resp.body = json.dumps(new_body)
        else:
            new_body = resp.json
            new_body['nova_callback'] = {"instance_id": instance_id,
                                         "status": "success"}
            resp.body = json.dumps(new_body)
        return resp
    elif resource == "ip_addresses":
        pass # Insert logic to call Nova for ip_addresses changes here
    # NOTE(review): ``resp`` is unbound when resource is "ip_addresses" or
    # anything else, so this final return raises NameError in those cases.
    return resp |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keep_on_one_line():
    """Keep all the output generated within a with-block on one line.

    Any newline that would be printed is converted to a space and the
    (terminal-width-truncated) text replaces the current line instead of
    starting a new one.
    """
    class _OneLineStream:
        def __init__(self):
            # Capture the real stdout so redirected writes land there.
            self.real_stdout = sys.stdout

        def write(self, text):
            with swap_streams(self.real_stdout):
                flattened = truncate_to_fit_terminal(text.replace('\n', ' '))
                if flattened.strip():
                    update(flattened)

        def flush(self):
            with swap_streams(self.real_stdout):
                flush()

    with swap_streams(_OneLineStream()):
        yield
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_color(string, name, style='normal', when='auto'):
    """Write the given colored string to standard out."""
    colored = color(string, name, style, when)
    write(colored)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_color(string, name, style='normal', when='auto'):
    """Replace the existing line with the given colored string."""
    # Wipe the current line, then rewrite it in color.
    clear()
    write_color(string, name, style, when)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def progress_color(current, total, name, style='normal', when='auto'):
    """Display a simple, colored progress report."""
    report = '[%d/%d] ' % (current, total)
    update_color(report, name, style, when)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def color(string, name, style='normal', when='auto'):
    """Change the color of the given string.

    ``when`` may be 'always', 'never', or 'auto' (colorize only when
    stdout is a terminal).
    """
    def unknown(kind, value, known):
        # Build the same "unknown color/style" complaint for either table.
        from .text import oxford_comma
        choices = oxford_comma(["'{}'".format(x) for x in sorted(known)])
        raise ValueError("unknown {0} '{1}'.\nknown {0}s are: {2}".format(
            kind, value, choices))

    if name not in colors:
        unknown('color', name, colors)
    if style not in styles:
        unknown('style', style, styles)

    escape = '\033[%d;%dm'
    prefix = escape % (styles[style], colors[name])
    suffix = escape % (styles['normal'], colors['normal'])
    painted = prefix + string + suffix

    if when == 'always':
        return painted
    if when == 'auto':
        return painted if sys.stdout.isatty() else string
    if when == 'never':
        return string
    raise ValueError("when must be one of: 'always', 'auto', 'never'")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download(objects):
    """Retrieve remote file objects into TMPDIR.

    Args:
        objects: iterable of URLs; each is fetched with urllib and saved
            under TMPDIR using the last path component as its filename.

    Returns:
        True when every object downloaded successfully, False otherwise.

    Raises:
        urllib.error.HTTPError: re-raised (after logging) when a fetch
            fails at the HTTP level.
    """
    def exists(fname):
        # Bug fix: this parameter was named ``object`` (shadowing the
        # builtin) and was silently ignored in favor of the enclosing
        # ``filename`` variable; it is now actually used.
        if os.path.exists(TMPDIR + '/' + fname):
            return True
        msg = 'File object %s failed to download to %s. Exit' % (fname, TMPDIR)
        logger.warning(msg)
        stdout_message('%s: %s' % (inspect.stack()[0][3], msg))
        return False
    try:
        for file_path in objects:
            filename = file_path.split('/')[-1]
            urllib.request.urlretrieve(file_path, TMPDIR + '/' + filename)
            if not exists(filename):
                return False
    except urllib.error.HTTPError as e:
        logger.exception(
            '%s: Failed to retrive file object: %s. Exception: %s, data: %s' %
            (inspect.stack()[0][3], file_path, str(e), e.read()))
        raise e
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def precheck():
    """Pre-run dependency check.

    Verifies that every required binary is locatable and that the process
    is running as root.

    Returns:
        True when all dependencies are satisfied, False otherwise.
    """
    binaries = ['make']
    for binary in binaries:
        if not which(binary):
            # Typo fix: message previously read "rquired".
            msg = 'Dependency fail -- Unable to locate required binary: '
            stdout_message('%s: %s' % (msg, ACCENT + binary + RESET))
            return False
    # Bug fix: the root() check was an ``elif`` inside the loop, so it only
    # ran when a binary was found and re-ran per binary; check it once here.
    if not root():
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_semester_view(request):
    """ Initiates a semester's worth of workshift, with the option to copy workshift types from the previous semester. """ |
    page_name = "Start Semester"
    # Prefill the form with the computed current year/season and that
    # semester's default start/end dates.
    year, season = utils.get_year_season()
    start_date, end_date = utils.get_semester_start_end(year, season)
    semester_form = SemesterForm(
        data=request.POST or None,
        initial={
            "year": year,
            "season": season,
            "start_date": start_date.strftime(date_formats[0]),
            "end_date": end_date.strftime(date_formats[0]),
        },
        prefix="semester",
    )
    # Offer to carry over each non-primary pool from the latest semester,
    # one StartPoolForm per pool.
    pool_forms = []
    try:
        prev_semester = Semester.objects.latest("end_date")
    except Semester.DoesNotExist:
        pass
    else:
        pools = WorkshiftPool.objects.filter(
            semester=prev_semester,
            is_primary=False,
        )
        for pool in pools:
            form = StartPoolForm(
                data=request.POST or None,
                initial={
                    "title": pool.title,
                    "hours": pool.hours,
                },
                prefix="pool-{}".format(pool.pk),
            )
            pool_forms.append(form)
    # On a valid POST, create the semester plus any carried-over pools.
    if semester_form.is_valid() and all(i.is_valid() for i in pool_forms):
        # And save this semester
        semester = semester_form.save()
        for pool_form in pool_forms:
            pool_form.save(semester=semester)
        return HttpResponseRedirect(wurl("workshift:manage",
                                         sem_url=semester.sem_url))
    return render_to_response("start_semester.html", {
        "page_name": page_name,
        "semester_form": semester_form,
        "pool_forms": pool_forms,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_preferred(instance, profile):
""" Check if a user has marked an instance's workshift type as preferred. """ |
if not instance.weekly_workshift:
return False
if profile and profile.ratings.filter(
workshift_type=instance.weekly_workshift.workshift_type,
rating=WorkshiftRating.LIKE,
).count() == 0:
return False
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def profile_view(request, semester, targetUsername, profile=None):
    """ Show the user their workshift history for the current semester as well as upcoming shifts. """ |
    wprofile = get_object_or_404(
        WorkshiftProfile,
        user__username=targetUsername,
        semester=semester
    )
    # Personalize the page title when members view their own profile.
    if wprofile == profile:
        page_name = "My Workshift Profile"
    else:
        page_name = "{}'s Workshift Profile".format(wprofile.user.get_full_name())
    # Closed instances the member worked or was liable for.
    past_shifts = WorkshiftInstance.objects.filter(
        Q(workshifter=wprofile) | Q(liable=wprofile),
        closed=True,
    )
    regular_shifts = RegularWorkshift.objects.filter(
        active=True, current_assignees=wprofile,
    )
    # Open instances, excluding those covered by the member's own
    # regularly-assigned shifts.
    assigned_instances = WorkshiftInstance.objects.filter(
        Q(workshifter=wprofile) | Q(liable=wprofile),
        closed=False,
    ).exclude(
        weekly_workshift__current_assignees=wprofile,
    )
    pool_hours = wprofile.pool_hours.order_by(
        "-pool__is_primary", "pool__title",
    )
    # True when any pool has a first/second/third standing date set.
    # (The generator variable shadows the ``pool_hours`` queryset name
    # only inside the any() calls.)
    first_standing, second_standing, third_standing = \
        any(pool_hours.first_date_standing for pool_hours in wprofile.pool_hours.all()), \
        any(pool_hours.second_date_standing for pool_hours in wprofile.pool_hours.all()), \
        any(pool_hours.third_date_standing for pool_hours in wprofile.pool_hours.all())
    full_management = utils.can_manage(request.user, semester=semester)
    any_management = utils.can_manage(request.user, semester, any_pool=True)
    # Notes are visible only to the member themselves or full managers.
    view_note = wprofile == profile or full_management
    return render_to_response("profile.html", {
        "page_name": page_name,
        "profile": wprofile,
        "view_note": view_note,
        "past_shifts": past_shifts,
        "regular_shifts": regular_shifts,
        "assigned_instances": assigned_instances,
        "pool_hours": pool_hours,
        "first_standing": first_standing,
        "second_standing": second_standing,
        "third_standing": third_standing,
        "can_edit": any_management,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def preferences_view(request, semester, targetUsername, profile=None):
    """ Show the user their preferences for the given semester. """ |
    # TODO: Change template to show descriptions in tooltip / ajax show box?
    wprofile = get_object_or_404(
        WorkshiftProfile,
        user__username=targetUsername,
    )
    # Only the profile's owner or a full workshift manager may view/edit.
    full_management = utils.can_manage(request.user, semester=semester)
    if wprofile.user != request.user and \
            not full_management:
        messages.add_message(
            request,
            messages.ERROR,
            MESSAGES["ADMINS_ONLY"],
        )
        return HttpResponseRedirect(semester.get_view_url())
    # One rating form per rateable workshift type; bound to the member's
    # existing rating when present, otherwise a fresh unsaved rating.
    rating_forms = []
    for wtype in WorkshiftType.objects.filter(rateable=True):
        try:
            rating = wprofile.ratings.get(workshift_type=wtype)
        except WorkshiftRating.DoesNotExist:
            rating = WorkshiftRating(workshift_type=wtype)
        form = WorkshiftRatingForm(
            data=request.POST or None,
            prefix="rating-{}".format(wtype.pk),
            instance=rating,
            profile=wprofile,
        )
        rating_forms.append(form)
    time_formset = TimeBlockFormSet(
        data=request.POST or None,
        prefix="time",
        profile=wprofile,
    )
    note_form = ProfileNoteForm(
        data=request.POST or None,
        instance=wprofile,
        prefix="note",
    )
    # Save everything only when every rating, the time blocks, and the
    # note all validate together.
    if all(i.is_valid() for i in rating_forms) and time_formset.is_valid() and \
            note_form.is_valid():
        for form in rating_forms:
            form.save()
        time_formset.save()
        note_form.save()
        # Stamp only the first save; later saves keep the original time.
        if wprofile.preference_save_time is None:
            wprofile.preference_save_time = now()
            wprofile.save()
        messages.add_message(
            request,
            messages.INFO,
            "Preferences saved.",
        )
        return HttpResponseRedirect(wurl(
            "workshift:preferences",
            sem_url=semester.sem_url,
            targetUsername=request.user.username,
        ))
    if wprofile == profile:
        page_name = "My Workshift Preferences"
    else:
        page_name = "{}'s Workshift Preferences".format(
            wprofile.user.get_full_name(),
        )
    return render_to_response("preferences.html", {
        "page_name": page_name,
        "profile": wprofile,
        "rating_forms": rating_forms,
        "time_formset": time_formset,
        "note_form": note_form,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def adjust_hours_view(request, semester):
    """ Adjust members' workshift hours requirements. """ |
    page_name = "Adjust Hours"
    pools = WorkshiftPool.objects.filter(semester=semester).order_by(
        "-is_primary", "title",
    )
    workshifters = WorkshiftProfile.objects.filter(semester=semester)
    # One row per workshifter; each row is a list of (form, pool_hours)
    # pairs, one per pool.
    pool_hour_forms = []
    for workshifter in workshifters:
        forms_list = []
        for pool in pools:
            hours = workshifter.pool_hours.get(pool=pool)
            forms_list.append((
                AdjustHoursForm(
                    data=request.POST or None,
                    prefix="pool_hours-{}".format(hours.pk),
                    instance=hours,
                ),
                hours,
            ))
        pool_hour_forms.append(forms_list)
    # Save only when every form for every workshifter validates.
    if all(
        form.is_valid()
        for workshifter_forms in pool_hour_forms
        for form, pool_hours in workshifter_forms
    ):
        for workshifter_forms in pool_hour_forms:
            for form, pool_hours in workshifter_forms:
                form.save()
        messages.add_message(request, messages.INFO, "Updated hours.")
        return HttpResponseRedirect(wurl(
            "workshift:adjust_hours",
            sem_url=semester.sem_url,
        ))
    # NOTE(review): on Python 3, zip() is a one-shot iterator; the template
    # must traverse workshifters_tuples only once -- confirm.
    return render_to_response("adjust_hours.html", {
        "page_name": page_name,
        "pools": pools,
        "workshifters_tuples": zip(workshifters, pool_hour_forms),
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_workshifter_view(request, semester):
    """ Add a new member workshift profile, for people who join mid-semester. """ |
    page_name = "Add Workshifter"
    # Users who already have a profile for this semester are excluded,
    # along with inactive users and alumni.
    existing = [
        i.user.pk for i in WorkshiftProfile.objects.filter(semester=semester)
    ]
    users = User.objects.exclude(
        Q(pk__in=existing) |
        Q(is_active=False) |
        Q(userprofile__status=UserProfile.ALUMNUS)
    )
    # One AddWorkshifterForm per remaining candidate user.
    add_workshifter_forms = []
    for user in users:
        form = AddWorkshifterForm(
            data=request.POST or None,
            prefix="user-{}".format(user.pk),
            user=user,
            semester=semester,
        )
        add_workshifter_forms.append(form)
    # Require at least one candidate and all forms valid before saving.
    if add_workshifter_forms and \
            all(form.is_valid() for form in add_workshifter_forms):
        for form in add_workshifter_forms:
            form.save()
        messages.add_message(
            request,
            messages.INFO,
            "Workshifters added.",
        )
        return HttpResponseRedirect(wurl(
            "workshift:manage",
            sem_url=semester.sem_url,
        ))
    return render_to_response("add_workshifter.html", {
        "page_name": page_name,
        "add_workshifter_forms": add_workshifter_forms,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fill_shifts_view(request, semester):
    """ Allows managers to quickly fill in the default workshifts for a few given workshift pools. """ |
    page_name = "Fill Shifts"
    fill_regular_shifts_form = None
    fill_social_shifts_form = None
    fill_humor_shifts_form = None
    fill_bathroom_shifts_form = None
    fill_hi_shifts_form = None
    reset_all_shifts_form = None
    managers = Manager.objects.filter(incumbent__user=request.user)
    # Full admins get the regular/humor/bathroom fill forms plus reset;
    # specific managers only get the forms for areas they oversee.
    admin = utils.can_manage(request.user, semester=semester)
    # NOTE(review): these forms are bound with ``data=request.POST`` (an
    # empty QueryDict on GET) rather than ``request.POST or None``, so
    # validation may run on plain GETs -- confirm intended.
    if admin:
        fill_regular_shifts_form = FillRegularShiftsForm(
            data=request.POST,
            semester=semester,
        )
        fill_humor_shifts_form = FillHumorShiftsForm(
            data=request.POST,
            semester=semester,
        )
        fill_bathroom_shifts_form = FillBathroomShiftsForm(
            data=request.POST,
            semester=semester,
        )
        reset_all_shifts_form = ResetAllShiftsForm(
            data=request.POST,
            semester=semester,
        )
    # XXX: BAD! We should filter by pool owners? By Manager bool flags? By
    # arbitrary django permissions?
    if admin or managers.filter(title="Social Manager"):
        fill_social_shifts_form = FillSocialShiftsForm(
            data=request.POST,
            semester=semester,
        )
    # XXX: See above
    if admin or managers.filter(title="Maintenance Manager"):
        fill_hi_shifts_form = FillHIShiftsForm(
            data=request.POST,
            semester=semester,
        )
    fill_forms = [
        fill_regular_shifts_form, fill_social_shifts_form,
        fill_humor_shifts_form, fill_bathroom_shifts_form,
        fill_hi_shifts_form, reset_all_shifts_form,
    ]
    # Drop the forms this user is not permitted to see.
    fill_forms = [
        form
        for form in fill_forms
        if form is not None
    ]
    # Save the first valid form, report how many workshifts it touched,
    # and redirect back to this page.
    for form in fill_forms:
        if form and form.is_valid():
            count = form.save()
            messages.add_message(
                request,
                messages.INFO,
                "{} {} {}".format(
                    form.message, count, p.plural("workshift", count),
                ),
            )
            return HttpResponseRedirect(wurl(
                "workshift:fill_shifts",
                sem_url=semester.sem_url,
            ))
    return render_to_response("fill_shifts.html", {
        "page_name": page_name,
        "forms": fill_forms,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_shift_view(request, semester):
    """ View for the workshift manager to create new types of workshifts. """ |
    page_name = "Add Workshift"
    any_management = utils.can_manage(request.user, semester, any_pool=True)
    if not any_management:
        messages.add_message(
            request,
            messages.ERROR,
            MESSAGES["ADMINS_ONLY"],
        )
        return HttpResponseRedirect(semester.get_view_url())
    # Check what pools this person can manage
    pools = WorkshiftPool.objects.filter(semester=semester)
    full_management = utils.can_manage(request.user, semester=semester)
    if not full_management:
        pools = pools.filter(managers__incumbent__user=request.user)
    # Forms
    # Two independent flows share this page, keyed by which submit button
    # name appears in request.POST: "add_type" (a type plus its regular
    # shifts) and "add_instance" (a single one-off instance).
    add_type_form = WorkshiftTypeForm(
        data=request.POST if "add_type" in request.POST else None,
        prefix="type",
    )
    shifts_formset = RegularWorkshiftFormSet(
        data=request.POST if "add_type" in request.POST else None,
        prefix="shifts",
        queryset=RegularWorkshift.objects.none(),
        pools=pools,
    )
    if add_type_form.is_valid() and shifts_formset.is_valid():
        wtype = add_type_form.save()
        shifts_formset.save(workshift_type=wtype)
        return HttpResponseRedirect(wurl(
            "workshift:manage",
            sem_url=semester.sem_url,
        ))
    add_instance_form = WorkshiftInstanceForm(
        data=request.POST if "add_instance" in request.POST else None,
        pools=pools,
        semester=semester,
    )
    if add_instance_form.is_valid():
        add_instance_form.save()
        return HttpResponseRedirect(wurl(
            "workshift:manage",
            sem_url=semester.sem_url,
        ))
    return render_to_response("add_shift.html", {
        "page_name": page_name,
        "add_type_form": add_type_form,
        "shifts_formset": shifts_formset,
        "add_instance_form": add_instance_form,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shift_view(request, semester, pk, profile=None):
    """ View the details of a particular RegularWorkshift. """ |
    shift = get_object_or_404(RegularWorkshift, pk=pk)
    page_name = shift.workshift_type.title
    # Manager shifts are only editable by the president or a superuser;
    # other shifts follow the pool's management permissions.
    if shift.is_manager_shift:
        president = Manager.objects.filter(
            incumbent__user=request.user,
            president=True,
        ).count() > 0
        can_edit = request.user.is_superuser or president
    else:
        can_edit = utils.can_manage(
            request.user,
            semester=semester,
            pool=shift.pool,
        )
    # Upcoming (today or later) instances of this shift, each paired with
    # its interaction forms from _get_forms.
    instances = WorkshiftInstance.objects.filter(
        weekly_workshift=shift,
        date__gte=localtime(now()).date(),
    )
    instance_tuples = [
        (
            instance,
            _get_forms(profile, instance, request, undo=can_edit),
        )
        for instance in instances
    ]
    # Save any forms that were submitted
    # (the first valid form wins and triggers a redirect; invalid forms
    # surface their errors as messages)
    all_forms = [form for instance, forms in instance_tuples for form in forms]
    for form in all_forms:
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(shift.get_view_url())
        else:
            for error in form.errors.values():
                messages.add_message(request, messages.ERROR, error)
    return render_to_response("view_shift.html", {
        "page_name": page_name,
        "shift": shift,
        "instance_tuples": instance_tuples,
        "can_edit": can_edit,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edit_shift_view(request, semester, pk, profile=None):
    """ View for a manager to edit the details of a particular RegularWorkshift. """ |
    shift = get_object_or_404(RegularWorkshift, pk=pk)
    if shift.is_manager_shift:
        # XXX: Bad way of doing this, we should make manager_shift point to
        # the related Manager object directly
        try:
            manager = Manager.objects.get(title=shift.workshift_type.title)
        except Manager.DoesNotExist:
            pass
        else:
            # Manager shifts are edited through the Manager page instead.
            return HttpResponseRedirect(manager.get_edit_url())
    if not utils.can_manage(request.user, semester=semester, pool=shift.pool):
        messages.add_message(
            request,
            messages.ERROR,
            MESSAGES["ADMINS_ONLY"],
        )
        return HttpResponseRedirect(semester.get_view_url())
    # Bound only when the "edit" submit button was used.
    edit_form = RegularWorkshiftForm(
        data=request.POST if "edit" in request.POST else None,
        instance=shift,
        semester=semester,
    )
    # Deletion takes precedence over editing.
    if "delete" in request.POST:
        # Open instances are deleted automatically
        shift.delete()
        return HttpResponseRedirect(wurl(
            "workshift:manage",
            sem_url=semester.sem_url,
        ))
    elif edit_form.is_valid():
        shift = edit_form.save()
        return HttpResponseRedirect(shift.get_view_url())
    page_name = "Edit {}".format(shift)
    return render_to_response("edit_shift.html", {
        "page_name": page_name,
        "shift": shift,
        "edit_form": edit_form,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def instance_view(request, semester, pk, profile=None):
    """ View the details of a particular WorkshiftInstance. """ |
    instance = get_object_or_404(WorkshiftInstance, pk=pk)
    page_name = instance.title
    management = utils.can_manage(
        request.user,
        semester=semester,
        pool=instance.pool,
    )
    interact_forms = _get_forms(
        profile, instance, request,
        undo=management,
        prefix="interact",
    )
    note_form = NoteForm(
        data=request.POST or None,
        prefix="note",
    )
    # Save any forms that were submitted
    # (note_form is unbound on GET, so this branch only runs on POST)
    if note_form.is_valid():
        for form in interact_forms:
            if form.is_valid():
                note = note_form.save()
                form.save(note=note)
                return HttpResponseRedirect(instance.get_view_url())
            else:
                for error in form.errors.values():
                    messages.add_message(request, messages.ERROR, error)
    edit_hours_form = None
    # Manager-shift hours require president/superuser to edit; others
    # follow the pool's management permissions.
    if instance.weekly_workshift and instance.weekly_workshift.is_manager_shift:
        president = Manager.objects.filter(
            incumbent__user=request.user,
            president=True
        ).count() > 0
        can_edit = request.user.is_superuser or president
    else:
        can_edit = utils.can_manage(
            request.user, semester=instance.pool.semester, pool=instance.pool,
        )
    if can_edit:
        edit_hours_form = EditHoursForm(
            data=request.POST if "edit_hours" in request.POST else None,
            instance=instance,
            profile=profile,
        )
        if edit_hours_form.is_valid():
            edit_hours_form.save()
            messages.add_message(
                request,
                messages.INFO,
                "Updated instance's hours.",
            )
            return HttpResponseRedirect(instance.get_view_url())
    return render_to_response("view_instance.html", {
        "page_name": page_name,
        "can_edit": can_edit,
        "instance": instance,
        "interact_forms": interact_forms,
        "note_form": note_form,
        "edit_hours_form": edit_hours_form,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edit_instance_view(request, semester, pk, profile=None):
    """ View for a manager to edit the details of a particular WorkshiftInstance. """ |
    instance = get_object_or_404(WorkshiftInstance, pk=pk)
    # Editing a manager shift's instance requires president/superuser;
    # anything else requires pool management rights.
    if instance.weekly_workshift and instance.weekly_workshift.is_manager_shift:
        president = Manager.objects.filter(
            incumbent__user=request.user,
            president=True
        ).count() > 0
        can_edit = request.user.is_superuser or president
        message = MESSAGES["PRESIDENTS_ONLY"]
    else:
        can_edit = utils.can_manage(
            request.user, semester=semester, pool=instance.pool,
        )
        message = MESSAGES["ADMINS_ONLY"]
    if not can_edit:
        messages.add_message(request, messages.ERROR, message)
        return HttpResponseRedirect(semester.get_view_url())
    page_name = "Edit " + instance.title
    # Bound only when the "edit" submit button was used.
    edit_form = WorkshiftInstanceForm(
        data=request.POST if "edit" in request.POST else None,
        instance=instance,
        semester=semester,
        edit_hours=False,
    )
    # Deletion takes precedence over editing.
    if "delete" in request.POST:
        instance.delete()
        return HttpResponseRedirect(wurl(
            "workshift:manage",
            sem_url=semester.sem_url,
        ))
    elif edit_form.is_valid():
        instance = edit_form.save()
        return HttpResponseRedirect(instance.get_view_url())
    return render_to_response("edit_instance.html", {
        "page_name": page_name,
        "instance": instance,
        "edit_form": edit_form,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edit_type_view(request, semester, pk, profile=None):
    """ View for a manager to edit the details of a particular WorkshiftType. """ |
    wtype = get_object_or_404(WorkshiftType, pk=pk)
    full_management = utils.can_manage(request.user, semester)
    any_management = utils.can_manage(request.user, semester, any_pool=True)
    if not any_management:
        messages.add_message(
            request,
            messages.ERROR,
            MESSAGES["ADMINS_ONLY"],
        )
        return HttpResponseRedirect(semester.get_view_url())
    # Only full managers may delete the type itself.
    if full_management:
        if "delete" in request.POST:
            messages.add_message(
                request,
                messages.INFO,
                "Workshift type deleted.",
            )
            wtype.delete()
            return HttpResponseRedirect(wurl(
                "workshift:list_types",
                sem_url=semester.sem_url,
            ))
    # Pool-level managers see the type read-only, but may still edit the
    # regular shifts for pools they manage (queryset filtered below).
    edit_form = WorkshiftTypeForm(
        data=request.POST if "edit" in request.POST else None,
        instance=wtype,
        prefix="edit",
        read_only=not full_management,
    )
    queryset = RegularWorkshift.objects.filter(
        workshift_type=wtype,
    )
    if not full_management:
        queryset = queryset.filter(
            pool__managers__incumbent__user=request.user,
        )
    shifts_formset = RegularWorkshiftFormSet(
        data=request.POST if "edit" in request.POST else None,
        prefix="shifts",
        queryset=queryset,
    )
    if edit_form.is_valid() and shifts_formset.is_valid():
        # The type itself is saved only by full managers; the shifts
        # formset is saved for anyone who passed validation.
        if full_management:
            wtype = edit_form.save()
        shifts_formset.save(wtype)
        return HttpResponseRedirect(wtype.get_view_url())
    page_name = "Edit {}".format(wtype.title)
    return render_to_response("edit_type.html", {
        "page_name": page_name,
        "shift": wtype,
        "edit_form": edit_form,
        "shifts_formset": shifts_formset,
    }, context_instance=RequestContext(request)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _import_modules(dir_path):
""" Attempts to import modules in the specified directory path. `dir_path` Base directory path to attempt to import modules. """ |
def _import_module(module):
""" Imports the specified module.
"""
# already loaded, skip
if module in mods_loaded:
return False
__import__(module)
mods_loaded.append(module)
mods_loaded = []
# check if provided path exists
if not os.path.isdir(dir_path):
return
try:
# update import search path
sys.path.insert(0, dir_path)
# check for modules in the dir path
for entry in os.listdir(dir_path):
path = os.path.join(dir_path, entry)
if os.path.isdir(path): # directory
_import_module(entry)
elif _RE_PY_EXT.search(entry): # python file
if not _RE_INIT_PY.match(entry): # exclude init
name = _RE_PY_EXT.sub('', entry)
_import_module(name)
finally:
# remove inserted path
sys.path.pop(0) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup_directories(self):
""" Creates data directory structure. * Raises a ``DirectorySetupFail`` exception if error occurs while creating directories. """ |
dirs = [self._data_dir]
dirs += [os.path.join(self._data_dir, name) for name
in self.DATA_SUBDIRS]
for path in dirs:
if not os.path.isdir(path):
try:
os.makedirs(path) # recursive mkdir
os.chmod(path, 0755) # rwxr-xr-x
except OSError:
raise errors.DirectorySetupFail()
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup_task(self, load):
""" Sets up the ``Task`` object and loads active file for task. `load` Set to ``True`` to load task after setup. """ |
if not self._task:
self._task = Task(self._data_dir)
if load:
self._task.load() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_plugins(self):
    """Load plugin modules: the built-in set first, then user-defined.

    Raises ``errors.PluginImport`` / ``errors.UserPluginImport`` when
    either stage fails.
    """
    # import base plugin modules
    try:
        __import__('focus.plugin.modules')
    except ImportError as exc:
        raise errors.PluginImport(unicode(exc))
    # load user defined plugin modules from <data_dir>/plugins
    try:
        _import_modules(os.path.join(self._data_dir, 'plugins'))
    except Exception as exc:
        raise errors.UserPluginImport(unicode(exc))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self):
    """Load the resources needed for this environment.

    Establishes the directory structure, imports plugin modules, loads
    a new or existing task, then marks the environment as loaded.
    """
    # Order matters: directories first, then plugins, then the task.
    self._setup_directories()
    self._load_plugins()
    self._setup_task(load=True)
    self._loaded = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resize(image, width=None, height=None, crop=False, namespace="resized"):
    """Return the url of the resized image."""
    return resize_lazy(
        image=image,
        width=width,
        height=height,
        crop=crop,
        namespace=namespace,
        as_url=True,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def conditional_resize(image, ratio, width=None, height=None, upcrop=True, namespace="resized"):
    """Crop the image based on a ratio.

    If ``upcrop`` is true, crops the images that have a higher aspect
    ratio than the given ratio; if false, crops the images that have a
    lower (or equal) ratio.
    """
    aspect = float(image.width) / float(image.height)
    if upcrop:
        crop = aspect > ratio
    else:
        crop = aspect <= ratio
    return resize_lazy(image=image, width=width, height=height, crop=crop,
                       namespace=namespace, as_url=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def choose(s, possibilities, threshold=.6):
    """Return the element of *possibilities* that best matches *s*.

    Resolution order: exact match, unique prefix match, unique substring
    match, then closest Jaro-Winkler score (None if below *threshold*).
    """
    if not possibilities:
        return None
    if s in possibilities:
        return s
    if s == '':
        return None
    lowered = s.lower()
    prefix_hits = [p for p in possibilities if p.lower().startswith(lowered)]
    if len(prefix_hits) == 1:
        return prefix_hits[0]
    substring_hits = [p for p in possibilities if lowered in p.lower()]
    if len(substring_hits) == 1:
        return substring_hits[0]
    scored = [(p, Levenshtein.jaro_winkler(s, p, .05)) for p in possibilities]
    best_match, best_score = max(scored, key=itemgetter(1))
    if best_score < threshold:
        return None
    return best_match
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalized(a, axis=-1, order=2):
    '''Return *a* normalized along an arbitrary axis.

    Args
    ----
    a: ndarray (n,3)
        Tri-axial vector data
    axis: int
        Axis index over which to normalize
    order: int
        Order of the norm to calculate

    Notes
    -----
    Adapted from http://stackoverflow.com/a/21032099/943773
    '''
    import numpy
    norms = numpy.atleast_1d(numpy.linalg.norm(a, order, axis))
    # Avoid division by zero for zero-length vectors
    norms[norms == 0] = 1
    return a / numpy.expand_dims(norms, axis)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def findzc(x, thresh, t_max=None):
    '''
    Find cues to each zero-crossing in vector x.

    To be accepted as a zero-crossing, the signal must pass from below
    -thresh to above thresh, or vice versa, in no more than t_max samples.

    Args
    ----
    x: ndarray
        Signal vector in which to find zero-crossings.
    thresh: (float)
        magnitude threshold for detecting a zero-crossing.
    t_max: (int)
        maximum duration in samples between threshold crossings.

    Returns
    -------
    zc: ndarray
        Array containing the start **zc_s**, finish **zc_f** and direction
        **S** of zero crossings, where:

        * zc_s: the cue of the first threshold-crossing in samples
        * zc_f: the cue of the second threshold-crossing in samples
        * S: the sign of each zero-crossing (1 = positive-going,
          -1 = negative-going).

    Notes
    -----
    This routine is a reimplementation of Mark Johnson's Dtag toolbox method
    and tested against the Matlab version to be sure it has the same result.
    '''
    import numpy

    # positive threshold: p (over) n (under)
    pt_p = x > thresh
    pt_n = ~pt_p

    # negative threshold: p (over) n (under)
    nt_n = x < -thresh
    nt_p = ~nt_n

    # Over positive threshold +thresh
    # neg to pos
    pt_np = (pt_p[:-1] & pt_n[1:]).nonzero()[0]
    # pos to neg
    pt_pn = (pt_n[:-1] & pt_p[1:]).nonzero()[0] + 1

    # Under negative threshold -thresh (original comment said "positive")
    # neg to pos
    nt_np = (nt_p[:-1] & nt_n[1:]).nonzero()[0] + 1
    # pos to neg
    nt_pn = (nt_n[:-1] & nt_p[1:]).nonzero()[0]

    # Concat indices, order sequentially
    ind_all = numpy.hstack((pt_np, nt_np, pt_pn, nt_pn))
    ind_all.sort()

    # Omit rows where just touching but not crossing
    crossing_mask = ~(numpy.diff(numpy.sign(x[ind_all])) == 0)
    # Append a False to make the same length as ind_all
    crossing_mask = numpy.hstack((crossing_mask, False))

    # Get 1st and 2nd crossings
    ind_1stx = ind_all[crossing_mask]
    ind_2ndx = ind_all[numpy.where(crossing_mask)[0]+1]

    # BUG FIX: drop crossings that have no valid second threshold-crossing.
    # The original looped over indices with `numpy.delete`, which shifted the
    # remaining positions after each delete, and rebuilt `ind_2ndx` from
    # `ind_1stx` (a copy/paste error) rather than filtering `ind_2ndx` itself.
    keep = ind_2ndx <= len(x) - 1
    ind_1stx = ind_1stx[keep]
    ind_2ndx = ind_2ndx[keep]

    # Get direction/sign of crossing
    signs = numpy.sign(x[ind_1stx])*-1

    # Add column of direction and transpose
    zc = numpy.vstack((ind_1stx, ind_2ndx, signs)).T

    # Keep only crossings that complete within `t_max` samples
    if t_max:
        zc = zc[zc[:, 1] - zc[:, 0] <= t_max, :]

    return zc.astype(int)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def butter_filter(cutoff, fs, order=5, btype='low'):
    '''Create a digital Butterworth filter for a cutoff frequency in Hz.

    Args
    ----
    cutoff: float
        Cutoff frequency where the filter should separate signals
    fs: float
        sampling frequency
    order: int
        Order of the filter (Default 5)
    btype: str
        Filter type: 'low' for a low-pass filter, 'high' for a
        high-pass filter (Default 'low').

    Returns
    -------
    b: ndarray
        Numerator polynomials of the IIR Butterworth filter
    a: ndarray
        Denominator polynomials of the IIR Butterworth filter

    Notes
    -----
    Adapted from http://stackoverflow.com/a/25192640/943773
    '''
    import scipy.signal
    # Normalise the cutoff against the Nyquist frequency
    nyquist = 0.5 * fs
    return scipy.signal.butter(order, cutoff / nyquist, btype=btype,
                               analog=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def butter_apply(b, a, data):
    '''Apply a filter forwards and backwards so output aligns with input.

    Using ``filtfilt`` gives the filter linear phase; Gustafsson's method
    keeps the result the same length as the original signal.

    Args
    ----
    b: ndarray
        Numerator polynomials of the IIR butter filter
    a: ndarray
        Denominator polynomials of the IIR butter filter
    data: ndarray
        Signal to filter

    Returns
    -------
    x: ndarray
        Filtered data with linear phase

    Notes
    -----
    Adapted from http://stackoverflow.com/a/25192640/943773
    '''
    import scipy.signal
    filtered = scipy.signal.filtfilt(b, a, data, method='gust')
    return filtered
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_PSD_welch(x, fs, nperseg):
    '''Calculate power spectral density with Welch's method.

    Args
    ----
    x: ndarray
        sample array
    fs: float
        sampling frequency (1/dt)
    nperseg: int
        Length of each segment passed to `scipy.signal.welch`

    Returns
    -------
    f_welch: ndarray
        Discrete frequencies
    S_xx_welch: ndarray
        Estimated PSD at discrete frequencies `f_welch`
    P_welch: ndarray
        Signal power (integrated PSD)
    df_welch: ndarray
        Delta between discrete frequencies `f_welch`
    '''
    import numpy
    import scipy.signal

    # Code source and description of FFT, DFT, etc.
    # http://stackoverflow.com/a/33251324/943773

    # Estimate PSD `S_xx_welch` at discrete frequencies `f_welch`.
    # (The unused `dt`, `N` and `times` locals of the original were removed.)
    f_welch, S_xx_welch = scipy.signal.welch(x, fs=fs, nperseg=nperseg)

    # Integrate PSD over spectral bandwidth to obtain signal power `P_welch`
    df_welch = f_welch[1] - f_welch[0]
    P_welch = numpy.sum(S_xx_welch) * df_welch

    return f_welch, S_xx_welch, P_welch, df_welch
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simple_peakfinder(x, y, delta):
    '''Detect local maxima and minima in a vector.

    A point is considered a maximum peak if it has the maximal value and was
    preceded (to the left) by a value lower by `delta`.

    Args
    ----
    x: ndarray
        corresponding x-axis positions to the y array
    y: ndarray
        array of values in which to find local maxima and minima
    delta: float
        minimum change in `y` since the previous peak for a new peak to be
        registered; its absolute value is used.

    Returns
    -------
    max_ind: ndarray
        x-positions of local maxima
    min_ind: ndarray
        x-positions of local minima

    Example
    -------
    max_ind, min_ind = simple_peakfinder(x, y, delta)

    Notes
    -----
    Matlab Author: Eli Billauer http://billauer.co.il/peakdet.html
    Python translation: Chris Muktar https://gist.github.com/endolith/250860
    Python cleanup: Ryan J. Dillon
    '''
    import numpy

    y = numpy.asarray(y)

    maxima = []
    minima = []
    best_min, best_max = numpy.inf, -numpy.inf
    best_min_pos, best_max_pos = numpy.nan, numpy.nan
    seeking_max = True
    threshold = abs(delta)

    for idx, value in enumerate(y):
        # Track the running extrema and their positions
        if value > best_max:
            best_max, best_max_pos = value, x[idx]
        if value < best_min:
            best_min, best_min_pos = value, x[idx]

        # A drop of more than `threshold` below the running maximum confirms
        # a maximum; a rise above the running minimum confirms a minimum.
        if seeking_max and value < best_max - threshold:
            maxima.append(best_max_pos)
            best_min, best_min_pos = value, x[idx]
            seeking_max = False
        elif not seeking_max and value > best_min + threshold:
            minima.append(best_min_pos)
            best_max, best_max_pos = value, x[idx]
            seeking_max = True

    return numpy.array(maxima), numpy.array(minima)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_near(lat, lon, *, n=10, session=None):
    """Return the *n* marine-database records nearest to (lat, lon)."""
    columns = ['Subregion', 'Notes', 'CollectionYear', 'ReservoirAge',
               'ReservoirErr', 'C14age', 'C14err', 'LabID', 'Delta13C',
               'nextime', 'Genus', 'Species', 'Feeding', 'Name']
    query = {'npoints': n, 'clat': lat, 'clon': lon, 'Columns[]': columns}
    response = _query_near(session=session, **query)
    frame = _response_to_dataframe(response)
    return _clean_dataframe(frame)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _query_near(*, session=None, **kwargs):
    """Query the marine database, using *session* if given, else a fresh one."""
    url_endpoint = 'http://calib.org/marine/index.html'
    if session is not None:
        return session.get(url_endpoint, params=kwargs)
    with requests.Session() as s:
        # Need to get the index page before query. Otherwise get bad query
        # response that seems legit.
        s.get('http://calib.org/marine/index.html')
        return s.get(url_endpoint, params=kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_content(self):
    """Render this panel's template to an HTML string ('' when empty)."""
    if not self.has_content:
        return ""
    template = self.template
    # A string template name is resolved through the app's jinja2 environment
    if isinstance(template, str):
        template = self.app.ps.jinja2.env.get_template(template)
    context = self.render_vars()
    return template.render(app=self.app, request=self.request, **context)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_response(self, response):
    """Store the response headers as a name-sorted list of pairs."""
    header_items = response.headers.items()
    self.response_headers = sorted(header_items)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retry(exception_to_check, tries=5, delay=5, multiplier=2):
    '''Tries to call the wrapped function again, after an incremental delay.

    :param exception_to_check: Exception(s) to check for, before retrying.
    :type exception_to_check: Exception
    :param tries: Number of times to retry before failing.
    :type tries: int
    :param delay: time in seconds to sleep before retrying.
    :type delay: int
    :param multiplier: multiply the delay each time exception_to_check
        occurs; must be >= 1.
    :type multiplier: int
    '''
    def deco_retry(func):
        '''Creates the retry decorator'''
        @wraps(func)
        def func_retry(*args, **kwargs):
            '''Actual wrapped function'''
            # BUG FIX: the original guard ``multiplier >= 1 is not True``
            # was a chained comparison equal to
            # ``(multiplier >= 1) and (1 is not True)``, which raised for
            # every valid multiplier. Reject only multipliers below 1.
            if multiplier < 1:
                raise ValueError(
                    'multiplier = {}. It has to be superior to 1.'.format(
                        multiplier
                    )
                )
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return func(*args, **kwargs)
                except exception_to_check as err:
                    message = "%s, retrying in %d seconds..." % (
                        str(err), mdelay)
                    print(message)
                    sleep(mdelay)
                    mtries -= 1
                    mdelay *= multiplier
            # Final attempt: any exception now propagates to the caller
            return func(*args, **kwargs)
        return func_retry
    return deco_retry
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_average_measure(dirname, measure_func, t_steady=None):
    """Measure a model output directory, averaged over steady-state times.

    Parameters
    ----------
    dirname: str
        Output directory
    measure_func: function
        Takes a :class:`Model` instance and returns the measure of interest
        together with its uncertainty.
    t_steady: None or float
        Time at which the model is considered steady. `None` means use only
        the most recent snapshot.

    Returns
    -------
    measure, measure_err:
        The (averaged) measure and its uncertainty. When averaging, the
        standard error over the steady-state samples is returned.
    """
    if t_steady is None:
        # Single snapshot: pass the measure and its uncertainty through
        return measure_func(get_recent_model(dirname))
    models = [filename_to_model(fname) for fname in get_filenames(dirname)]
    steady_models = [m for m in models if m.t > t_steady]
    meases, meas_errs = zip(*[measure_func(m) for m in steady_models])
    return np.mean(meases), sem(meases)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def measures(dirnames, measure_func, t_steady=None):
    """Collect a measure and its uncertainty over several output directories.

    Parameters
    ----------
    dirnames: list[str]
        Model output directory paths.
    measure_func: function
        Takes a :class:`Model` instance and returns (measure, uncertainty).
    t_steady: None or float
        Steady-state time over which to average; `None` means use only the
        latest snapshot of each directory.

    Returns
    -------
    measures: numpy.ndarray
    measure_errs: numpy.ndarray
    """
    pairs = [get_average_measure(dirname, measure_func, t_steady)
             for dirname in dirnames]
    meas_values = np.array([pair[0] for pair in pairs])
    meas_errors = np.array([pair[1] for pair in pairs])
    return meas_values, meas_errors
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def params(dirnames, param_func, t_steady=None):
    """Collect a model parameter from each output directory.

    Parameters
    ----------
    dirnames: list[str]
        Model output directory paths.
    param_func: function
        Takes a :class:`Model` instance and returns the parameter of
        interest.

    Returns
    -------
    params: numpy.ndarray
    """
    values = []
    for dirname in dirnames:
        values.append(param_func(get_recent_model(dirname)))
    return np.array(values)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def t_measures(dirname, time_func, measure_func):
    """Collect a measure over time for a single output directory.

    Parameters
    ----------
    dirname: str
        Path to a model output directory.
    time_func: function
        Takes a :class:`Model` instance and returns its time.
    measure_func: function
        Takes a :class:`Model` instance and returns the measure of interest
        and its uncertainty.

    Returns
    -------
    ts, measures, measure_errs: numpy.ndarray
        Times, measures and measure uncertainties.
    """
    times, values, errors = [], [], []
    for fname in get_filenames(dirname):
        model = filename_to_model(fname)
        times.append(time_func(model))
        value, error = measure_func(model)
        values.append(value)
        errors.append(error)
    return np.array(times), np.array(values), np.array(errors)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def group_by_key(dirnames, key):
    """Group output directories by the value of a model attribute.

    Parameters
    ----------
    dirnames: list[str]
        Output directories
    key: str
        Name of a :class:`Model` attribute to group by.

    Returns
    -------
    groups: dict
        Maps each attribute value seen to the list of directories whose most
        recent model has that value.
    """
    groups = defaultdict(list)
    for dirname in dirnames:
        key_value = get_recent_model(dirname).__dict__[key]
        groups[key_value].append(dirname)
    return dict(groups)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pearson_correlation(self):
    '''Return the squared Pearson correlation of self.data, as a percentage.'''
    x, y, dt = self.data
    series_a, series_b = np.array(x), np.array(y)
    # Centre and standardise each series in place
    for series in (series_a, series_b):
        series -= series.mean(0)
        series /= series.std(0)
    # Mean product of standardised series = Pearson r; square it and scale
    return (np.mean(series_a * series_b) ** 2) * 100
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def intervalTrees(reffh, scoreType=int, verbose=False):
  """Build a dictionary of interval trees, keyed by chrom, from a BED source.

  :param reffh: a filename (str) or a stream-like object in BED format.
  :param scoreType: data type for scores (the fifth column) in the BED file.
  :param verbose: output progress messages to sys.stderr if True.
  """
  fh = open(reffh) if type(reffh).__name__ == "str" else reffh

  # Load every region, bucketed into one list per chromosome
  elements = {}
  show_progress = verbose and fh != sys.stdin
  if show_progress:
    pind = ProgressIndicator(totalToDo=linesInFile(fh.name),
                             messagePrefix="completed",
                             messageSuffix="of loading " + fh.name)
  for element in BEDIterator(fh, scoreType=scoreType, verbose=verbose):
    elements.setdefault(element.chrom, []).append(element)
    if show_progress:
      pind.done += 1
      pind.showProgress()

  # Build one interval tree per chromosome list
  trees = {}
  if verbose:
    pind = ProgressIndicator(totalToDo=len(elements),
                             messagePrefix="completed",
                             messageSuffix="of making interval trees")
  for chrom in elements:
    trees[chrom] = IntervalTree(elements[chrom], openEnded=True)
    if verbose:
      pind.done += 1
      pind.showProgress()
  return trees
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def BEDIterator(filehandle, sortedby=None, verbose=False, scoreType=int, dropAfter=None):
""" Get an iterator for a BED file :param filehandle: this can be either a string, or a stream-like object. In the former case, it is treated as a filename. The format of the file/stream must be BED. :param sortedby: if None, order is not checked. if == ITERATOR_SORTED_START, elements in file must be sorted by chrom and start index (an exception is raised if they are not) if == ITERATOR_SORTED_END, element must be sorted by chrom and end index. :param verbose: if True, output additional progress messages to stderr :param scoreType: The data type for scores (the fifth column) in the BED file. :param dropAfter: an int indicating that any fields after and including this field should be ignored as they don't conform to the BED format. By default, None, meaning we use all fields. Index from zero. :return: iterator where subsequent calls to next() yield the next BED element in the stream as a GenomicInterval object. """ |
  # Track every chromosome already finished and the previous element so the
  # requested sort order can be validated while streaming.
  chromsSeen = set()
  prev = None
  # Accept either a filename or an already-open stream
  if type(filehandle).__name__ == "str":
    filehandle = open(filehandle)
  if verbose:
    try:
      pind = ProgressIndicator(totalToDo=os.path.getsize(filehandle.name),
                               messagePrefix="completed",
                               messageSuffix="of processing " +
                               filehandle.name)
    except (AttributeError, OSError) as e:
      # Not a named on-disk file (e.g. a pipe) -- progress cannot be shown
      sys.stderr.write("BEDIterator -- warning: " +
                       "unable to show progress for stream")
      verbose = False
  for line in filehandle:
    if verbose:
      pind.done = filehandle.tell()
      pind.showProgress()
    # Skip blank lines entirely
    if line.strip() == "":
      continue
    # NOTE(review): the except clause rebinds `e` (also used for the parsed
    # element below); harmless because it immediately re-raises.
    try:
      e = parseBEDString(line, scoreType, dropAfter=dropAfter)
    except GenomicIntervalError as e:
      raise BEDError(str(e) + " on line " + line)
    # sorting by name?
    if ((sortedby == ITERATOR_SORTED_NAME and prev is not None) and
        (prev.name > e.name)):
      raise BEDError("bed file " + filehandle.name +
                     " not sorted by element name" +
                     " found " + e.name + " after " +
                     prev.name)
    # first item
    if prev is None:
      chromsSeen.add(e.chrom)
    # on same chrom as the prev item, make sure order is right
    if prev is not None and sortedby is not None and e.chrom == prev.chrom:
      if sortedby == ITERATOR_SORTED_START and prev.start > e.start:
        raise BEDError("bed file " + filehandle.name +
                       " not sorted by start index - saw item " +
                       str(prev) + " before " + str(e))
      if sortedby == ITERATOR_SORTED_END and prev.end > e.end:
        raise BEDError("bed file " + filehandle.name +
                       " not sorted by end index - saw item " +
                       str(prev) + " before " + str(e))
    # starting a new chrom.. make sure we haven't already seen it
    if prev is not None and prev.chrom != e.chrom:
      if (sortedby == ITERATOR_SORTED_START or
          sortedby == ITERATOR_SORTED_END or
          sortedby == ITERATOR_SORTED_CHROM) and\
         (e.chrom in chromsSeen or prev.chrom > e.chrom):
        # The stream may be unnamed; fall back to a placeholder for the error
        try:
          e_fn = filehandle.name
        except AttributeError:
          e_fn = "UNNAMED STREAM"
        raise BEDError("BED file " + e_fn + " not sorted by chrom")
      chromsSeen.add(e.chrom)
    # all good..
    yield e
    prev = e
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_forum_votes(sender, **kwargs):
    """
    When a Vote is added, re-save the voted topic or post to refresh its
    cached vote count. Votes can target any content type, so anything
    outside the fretboard app's Topic/Post models is ignored.

    Deprecated 1-6-14 by storing score as cached property.
    """
    vote = kwargs['instance']
    content_type = vote.content_type
    if content_type.app_label != "fretboard":
        return
    if content_type.model == "topic":
        voted = get_model('fretboard', 'Topic').objects.get(id=vote.object.id)
    elif content_type.model == "post":
        voted = get_model('fretboard', 'Post').objects.get(id=vote.object.id)
    else:
        return
    voted.votes = voted.score()
    voted.save(update_fields=['votes'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def close(self):
        ''' terminate the connection '''
        # Evict this connection from the shared caches so it is not reused
        cache_key = self._cache_key()
        SSH_CONNECTION_CACHE.pop(cache_key, None)
        SFTP_CONNECTION_CACHE.pop(cache_key, None)
        # Close the SFTP channel first (it may never have been opened),
        # then the underlying SSH transport
        if self.sftp is not None:
            self.sftp.close()
        self.ssh.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, line=None):
    """Parse *line* (sys.argv when None) and dispatch to the matched handler."""
    namespace = self.parser.parse_args(args=line)
    handler = namespace.func
    return handler(namespace)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def vertical_layout(self, draw, slide):
""" Augment slide with vertical layout info """ |
        # Start below the top padding, then measure and place the heading
        padding = self.padding
        heading = slide['heading']
        width, height = draw.textsize(heading['text'])
        top = padding
        left = padding
        # Calculate size and location of heading
        heading.update(dict(
            width = width,
            height = height,
            top = self.padding,
            left = self.padding))
        top += height + padding
        # count how many rows just text and how many have image
        rows = slide['rows']
        text_rows = 0
        image_rows = 0
        # calculate size of all text objects
        # NOTE(review): text_rows is tallied but never used afterwards
        total_height = top
        for row in rows:
            row_height = 0
            images = 0
            for item in row['items']:
                if item.get('image'):
                    images += 1
                text = item.get('text')
                if text is None: continue
                # Measure the text and cache its size on the item
                width, height = draw.textsize(text)
                item.update(dict(
                    width = width,
                    height = height))
                row_height = max(row_height, height)
            if images:
                image_rows += 1
                row['images'] = images
            else:
                row['height'] = row_height
                text_rows += 1
            total_height += row_height + padding
        # Calculate average height for image rows
        # (remaining vertical space is split evenly between image rows)
        if image_rows:
            available = HEIGHT - total_height
            image_height = available // image_rows
            image_text_offset = image_height // 2
        # now spin through rows again setting top
        # (and height for images)
        for row in rows:
            text_top = top
            images = row.get('images', 0)
            if images:
                # Text in an image row is pushed down to mid-image height
                text_top += image_text_offset
            for item in row['items']:
                if item.get('text') is not None:
                    item['top'] = text_top
                else:
                    # image
                    item['top'] = top
                    item['height'] = image_height
                    row['height'] = image_height
            top += row.get('height', 0) + padding
        return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def horizontal_layout(self, draw, slide):
""" Augment slide with horizontal layout info """ |
        # Heading height was computed by the vertical pass; skip past it
        padding = self.padding
        heading = slide['heading']
        top = padding
        left = padding
        top += heading['height'] + padding
        rows = slide['rows']
        for row in rows:
            images = row.get('images', 0)
            items = row['items']
            # Width already consumed by measured (text) items in this row
            used_width = sum(x.get('width', 0) for x in items)
            available_width = WIDTH - (
                used_width + ((1 + len(items)) * padding))
            if images:
                # Split the leftover width evenly between this row's images
                image_width = available_width // images
            # OK, now set left for all items and image_width for images
            left = padding
            for item in row['items']:
                if item.get('image'):
                    item['width'] = image_width
                item['left'] = left
                left += item['width'] + padding
        return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gp_xfac():
"""example using QM12 enhancement factors - uses `gpcalls` kwarg to reset xtics - numpy.loadtxt needs reshaping for input files w/ only one datapoint - according poster presentations see QM12_ & NSD_ review .. _QM12: http://indico.cern.ch/getFile.py/access?contribId=268&sessionId=10&resId=0&materialId=slides&confId=181055 .. _NSD: http://rnc.lbl.gov/~xdong/RNC/DirectorReview2012/posters/Huck.pdf .. image:: pics/xfac.png :width: 450 px :ivar key: translates filename into legend/key label :ivar shift: slightly shift selected data points """ |
  # NOTE(review): Python 2 code (uses `xrange`); depends on module-level
  # helpers getWorkDirs/getOpts/make_plot and the `shift` mapping.
  # prepare data
  inDir, outDir = getWorkDirs()
  data = OrderedDict()
  # TODO: "really" reproduce plot using spectral data
  for file in os.listdir(inDir):
    # Build a legend key from the filename: experiment info + energy range
    info = os.path.splitext(file)[0].split('_')
    key = ' '.join(info[:2] + [':',
      ' - '.join([
        str(float(s)/1e3) for s in info[-1][:7].split('-')
      ]) + ' GeV'
    ])
    file_url = os.path.join(inDir, file)
    # reshape((-1,5)): numpy.loadtxt needs reshaping for single-row files
    data[key] = np.loadtxt(open(file_url, 'rb')).reshape((-1,5))
    # Slightly shift selected data points along x to avoid overlap
    data[key][:, 0] *= shift.get(key, 1)
  logging.debug(data) # shown if --log flag given on command line
  # generate plot
  nSets = len(data)
  make_plot(
    data = data.values(),
    properties = [ getOpts(i) for i in xrange(nSets) ],
    titles = data.keys(), # use data keys as legend titles
    name = os.path.join(outDir, 'xfac'),
    key = [ 'top center', 'maxcols 2', 'width -7', 'font ",20"' ],
    ylabel = 'LMR Enhancement Factor',
    xlabel = '{/Symbol \326}s_{NN} (GeV)',
    yr = [0.5, 6.5], size = '8.5in,8in',
    rmargin = 0.99, tmargin = 0.98, bmargin = 0.14,
    xlog = True, gpcalls = [
      'format x "%g"',
      'xtics (20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
      'boxwidth 0.015 absolute'
    ],
    labels = { 'STAR Preliminary': [0.5, 0.5, False] },
    lines = { 'x=1': 'lc 0 lw 4 lt 2' }
  )
  return 'done'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify(value, msg):
    """C-style validator.

    :param value: dictionary to validate (required)
    :param msg: the protobuf schema to validate against (required)
    :returns: True for valid input, False otherwise
    """
    if not value:
        return False
    # Short-circuit through the checks, as the original boolean chain did
    return (converts_to_proto(value, msg)
            and successfuly_encodes(msg)
            and special_typechecking(value, msg))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def converts_to_proto(value, msg, raise_err=False):
    """Report whether a dictionary can convert into the proto's schema.

    :param value: <dict>
    :param msg: <proto object>
    :param raise_err: <bool> (default False) re-raise for troubleshooting
    :return: <bool> whether the dict can convert
    """
    try:
        dict_to_protobuf.dict_to_protobuf(value, msg)
    except TypeError as type_error:
        if raise_err:
            raise type_error
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def successfuly_encodes(msg, raise_err=False):
    """Report whether *msg* holds enough information to serialize.

    :param msg: <proto object>
    :param raise_err: <bool> re-raise the EncodeError for troubleshooting
    :return: <bool>
    """
    try:
        msg.SerializeToString()
    except EncodeError as encode_error:
        if raise_err:
            raise encode_error
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def strip(value, msg):
    """Drop all non-essential keys from *value* given the schema of *msg*.

    Raises ValueError if *value* is missing any required key.

    :param value: <dict> with arbitrary keys
    :param msg: <protobuf> with a defined schema
    :return: NEW <dict> containing only keys defined by msg
    """
    dict_to_protobuf.dict_to_protobuf(value, msg)
    try:
        # Serialization fails when required fields are absent
        msg.SerializeToString()
    except EncodeError as encode_error:
        raise ValueError(str(encode_error))
    return dict_to_protobuf.protobuf_to_dict(msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean_german_number(x):
    """Convert German-formatted number strings into dot-decimal strings.

    Parameters
    ----------
    x : str, list, tuple, numpy.ndarray, pandas.DataFrame
        A string with a German-formatted number, or an array of such
        strings (list, tuple, ndarray, DataFrame).

    Returns
    -------
    y : str, list, numpy.ndarray, pandas.DataFrame, or None
        Cleaned string(s) convertible to a numeric type, None for
        unparseable elements/inputs.

    Example
    -------
        '1.234'     => '1234'
        '1234'      => '1234'
        '1.234,56'  => '1234.56'
        '1.234.560' => '1234560'
        '+123'      => '123'
        '-123'      => '-123'

    Behavior
    --------
    - Non-string elements yield None.
    - Strings are assumed to be German numbers; no further validity check.
    - No conversion to a numeric data type (do that afterwards).
    """
    import numpy as np
    import pandas as pd
    import re

    def proc_elem(e):
        # Only strings can be parsed
        if not isinstance(e, str):
            return None
        # Keep digits, ".", "," and "-" only
        # (raw strings: '\.'/'\,' in plain strings are invalid escapes)
        s = re.sub(r'[^0-9.,\-]+', '', e)
        if len(s) == 0:
            return None
        # Handle a leading minus sign; a lone "-" is invalid
        sign = ""
        if s[0] == "-":          # BUG FIX: was identity check `is "-"`
            if len(s) > 1:
                sign = "-"
                s = s[1:]
            else:
                return None
        # Remove any remaining "-" characters
        s = re.sub(r'[^0-9.,]+', '', s)
        if len(s) == 0:
            return None
        # More than one decimal separator "," is invalid
        if s.count(',') > 1:
            return None
        # The decimal sep "," must come after every thousands sep "."
        if s.count('.') > 0 and s.count(',') > 0:
            rev = s[::-1]
            if rev.find(",") > rev.find("."):
                return None
        # Remove thousands separators, convert decimal comma to dot
        s = s.replace('.', '')
        s = s.replace(',', '.')
        # BUG FIX: inputs such as "." collapsed to "" after separator
        # removal and were returned as '' (the old `if s == "."` check was
        # dead code); treat them as unparseable.
        if s in ("", "."):
            return None
        return sign + s

    def proc_list(x):
        # Element-wise cleaning for list/tuple input
        return [proc_elem(e) for e in x]

    def proc_ndarray(x):
        # Flatten, clean, and restore the original shape
        tmp = proc_list(list(x.reshape((x.size,))))
        return np.array(tmp).reshape(x.shape)

    # Dispatch on input type: string, list/tuple, ndarray, DataFrame
    if isinstance(x, str):
        return proc_elem(x)
    elif isinstance(x, (list, tuple)):
        return proc_list(x)
    elif isinstance(x, np.ndarray):
        return proc_ndarray(x)
    elif isinstance(x, pd.DataFrame):
        return pd.DataFrame(proc_ndarray(x.values),
                            columns=x.columns, index=x.index)
    else:
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def score(package_path):
    """Run pylint over every ``*.py`` file in a package and return a score.

    Lower scores are better.

    :param package_path: path of the package to score
    :return: weighted score -- the sum of ``SCORING_VALUES`` weights times
        the number of occurrences of each message category, divided by 5
    """
    python_files = find_files(package_path, '*.py')
    total_counter = Counter()
    for python_file in python_files:
        output = run_pylint(python_file)
        counter = parse_pylint_output(output)
        total_counter += counter
    score_value = 0
    # BUG FIX: the original iterated ``enumerate(total_counter)``, which
    # multiplied each weight by the key's *position* in the Counter rather
    # than by how often that message category actually occurred.
    for stat, count in total_counter.items():
        score_value += SCORING_VALUES[stat] * count
    return score_value / 5
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def discover(url, options=None):
    """Retrieve the API definition from *url* and construct a Patchboard.

    :param url: location of the API description document
    :param options: optional dict handed through to ``Patchboard``.
        Defaults to a fresh empty dict per call -- the original used a
        shared mutable default argument (``options={}``).
    :raises PatchboardError: if the URL cannot be fetched or the response
        body is not valid JSON
    :returns: a ``Patchboard`` handle for the discovered API
    """
    if options is None:
        options = {}
    try:
        resp = requests.get(url, headers=Patchboard.default_headers)
    except Exception as e:
        raise PatchboardError("Problem discovering API: {0}".format(e))
    # Parse as JSON (Requests uses json.loads())
    try:
        api_spec = resp.json()
    except ValueError as e:
        raise PatchboardError("Unparseable API description: {0}".format(e))
    # Return core handle object
    return Patchboard(api_spec, options)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spawn(self, context=None):
    """Create a Client bound to a context.

    :param context: a mapping, or a callable returning one; defaults to
        ``self.default_context``
    :raises PatchboardError: if no mapping can be derived
    :returns: a new ``Client``
    """
    # BUG FIX: ``collections.Callable``/``collections.Mapping`` were
    # removed in Python 3.10; use the ``callable`` builtin and import the
    # Mapping ABC from its canonical location (with a py2 fallback).
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2 fallback
        from collections import Mapping
    if context is None:
        context = self.default_context
    if callable(context):
        context = context()
    if not isinstance(context, Mapping):
        raise PatchboardError('Cannot determine a valid context')
    return Client(self, context, self.api, self.endpoint_classes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_version(form='short'):
    """Return the version string.

    :param form: one of
        * ``short``   -- ``B.b.t``
        * ``normal``  -- ``B.b.t <type> <type_num>``
        * ``verbose`` -- ``B.b.t <type> <type_num> @<git_sha>``
        * ``all``     -- dict of all of the above
    :returns: the requested version string (or dict for ``all``); None for
        an unrecognised *form*
    """
    versions = {}
    branch = "%s.%s" % (VERSION[0], VERSION[1])
    tertiary = VERSION[2]
    type_ = VERSION[3]
    type_num = VERSION[4]
    versions["branch"] = branch
    v = versions["branch"]
    if tertiary:
        versions["tertiary"] = "." + str(tertiary)
        v += versions["tertiary"]
    versions['short'] = v
    # BUG FIX: the original compared with ``form is "short"`` etc. --
    # identity comparison against string literals only works by accident
    # of CPython interning; use equality.
    if form == "short":
        return v
    v += " " + type_ + " " + str(type_num)
    versions["normal"] = v
    if form == "normal":
        return v
    v += " @" + git_sha()
    versions["verbose"] = v
    if form == "verbose":
        return v
    if form == "all":
        return versions
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _download_article(self, article_number, max_retries=10):
    """Download a given article from this group.

    :type article_number: str
    :param article_number: the article number to download.
    :type max_retries: int
    :param max_retries: give up after this many failed attempts (NNTP
        errors 430/423 short-circuit to this limit immediately).
    :returns: nntplib article response object if successful, else False.
    """
    log.debug('downloading article {0} from {1}'.format(article_number, self.name))
    _connection = self.session.connections.get()
    try:
        i = 0
        while True:
            if i >= max_retries:
                return False
            try:
                _connection.group(self.name)
                resp = _connection.article(article_number)
                log.debug('downloaded article {0} from {1}'.format(article_number,
                                                                   self.name))
                return resp
            # Connection closed, transient error, retry forever.
            except EOFError:
                log.warning('EOFError, refreshing connection retrying -- '
                            'article={0}, group={1}'.format(article_number, self.name))
                self.session.refresh_connection(_connection)
                time.sleep(2)
                _connection = self.session.connections.get()
            # NNTP Error.
            except nntplib.NNTPError as exc:
                log.warning('NNTPError: {0} -- article={1}, '
                            'group={2}'.format(exc, article_number, self.name))
                if any(s in exc.response for s in ['430', '423']):
                    # Don't retry, article probably doesn't exist.
                    i = max_retries
                else:
                    i += 1
            # BUG FIX: this was a bare ``except:`` which would also swallow
            # KeyboardInterrupt/SystemExit; catch Exception instead.
            # NOTE(review): like the EOFError branch this retries without
            # incrementing ``i`` -- confirm the unbounded retry is intended.
            except Exception:
                self.session.refresh_connection(_connection)
                time.sleep(2)
                _connection = self.session.connections.get()
    # Always return connection back to the pool!
    finally:
        self.session.connections.put(_connection)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def index_article(self, msg_str, article_number, start, length):
    """Add an article to the tab-separated index file. (Python 2 code:
    uses ``cStringIO`` and ``rfc822``.)

    :type msg_str: str
    :param msg_str: the message string to index.
    :type article_number: str
    :param article_number: the article number to index.
    :type start: int
    :param start: byte-offset where the message starts in the mbox file.
    :type length: int
    :param length: byte-length of the message.
    :rtype: bool
    :returns: True
    """
    f = cStringIO.StringIO(msg_str)
    message = rfc822.Message(f)
    f.close()
    # Normalise header values: falsy values become '', and embedded tabs
    # or newlines become spaces (they would corrupt the TSV index).
    h = dict()
    for key in message.dict:
        # BUG FIX: the original fell through after storing '' and
        # immediately overwrote it (re-encoding the falsy value);
        # skip to the next header instead.
        if not message.dict[key]:
            h[key] = ''
            continue
        h[key] = utf8_encode_str(message.dict[key])
        if '\n' in h[key]:
            h[key] = h[key].replace('\n', ' ')
        if '\t' in h[key]:
            h[key] = h[key].replace('\t', ' ')
    date = h.get('NNTP-Posting-Date')
    if not date:
        date = h.get('date', '')
    date = get_utc_iso_date(date)
    idx_line = (date, h.get('message-id'), h.get('from'), h.get('newsgroups'),
                h.get('subject'), h.get('references', ''), start, length)
    idx_fname = '{name}.{date}.mbox.csv'.format(**self.__dict__)
    s = cStringIO.StringIO()
    writer = csv.writer(s, dialect='excel-tab')
    writer.writerow(idx_line)
    # Serialise appends across threads.
    with self._idx_lock:
        with open(idx_fname, 'a') as fp:
            fp.write(s.getvalue())
    s.close()
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compress_and_sort_index(self):
    """Sort the index file by date, add a header row, and gzip-compress it.

    The uncompressed index is deleted on success.

    :rtype: bool
    :returns: True on success, False when the index file does not exist.
    """
    idx_fname = '{name}.{date}.mbox.csv'.format(**self.__dict__)
    try:
        idx_file = open(idx_fname)
    except IOError:
        return False
    # BUG FIX: the original never closed the file handle it opened for
    # reading; read the rows inside a ``with`` block instead.
    with idx_file:
        reader = csv.reader(idx_file, dialect='excel-tab')
        index = [x for x in reader if x]
    # Sort rows by the leading date column.
    sorted_index = sorted(index, key=itemgetter(0))
    gzip_idx_fname = idx_fname + '.gz'
    # Include UTF-8 BOM in header.
    header = [
        '\xef\xbb\xbf#date', 'msg_id', 'from', 'newsgroups', 'subject', 'references',
        'start', 'length',
    ]
    s = cStringIO.StringIO()
    writer = csv.writer(s, dialect='excel-tab')
    writer.writerow(header)
    for line in sorted_index:
        writer.writerow(line)
    compressed_index = inline_compress_chunk(s.getvalue())
    s.close()
    with open(gzip_idx_fname, 'ab') as fp:
        fp.write(compressed_index)
    os.remove(idx_fname)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, data):
    """Save a document or list of documents.

    :param data: a plain dict (created as a new document), a
        ``couchdb.Document`` (updated in place), or a list of documents
        (bulk-updated)
    :returns: True on success, False when *data* is empty
    :raises Exception: if no database is selected
    """
    if not self.is_connected:
        raise Exception("No database selected")
    if not data:
        return False
    # BUG FIX: ``couchdb.Document`` subclasses ``dict``, so the Document
    # branch must be checked *before* the plain-dict branch -- otherwise
    # existing documents were always re-created instead of updated.
    if isinstance(data, couchdb.Document):
        self.db.update(data)
    elif isinstance(data, dict):
        doc = couchdb.Document()
        doc.update(data)
        self.db.create(doc)
    elif isinstance(data, list):
        self.db.update(data)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def win_register():
    """Register the ``austere.HTTP`` protocol handler in the Windows
    registry, pointing its shell ``open`` command at this script.

    Writes under HKEY_CLASSES_ROOT, so it needs administrator rights;
    failures are logged rather than raised.
    """
    try:
        with winreg.CreateKey(winreg.HKEY_CLASSES_ROOT, "austere.HTTP") as k:
            # winreg.SetValue(k, None, winreg.REG_SZ, "{} austere".format(sys.argv[0]))
            # Raw strings below: "\s", "\o" and "\c" are invalid escape
            # sequences (SyntaxWarning on modern Python); the logged text
            # is byte-identical to the original.
            logger.debug(r"\shell")
            with winreg.CreateKey(k, "shell") as shellkey:
                logger.debug(r"\open")
                with winreg.CreateKey(shellkey, "open") as openkey:
                    logger.debug(r"\command")
                    with winreg.CreateKey(openkey, "command") as cmdkey:
                        winreg.SetValue(cmdkey, None, winreg.REG_SZ, '"{} austere" "%1"'.format(sys.argv[0]))
        # with winreg.CreateKey(winreg.HKEY_CLASSES_ROOT, "austere.HTTPS") as kssl:
        #     winreg.SetValue(kssl, None, winreg.REG_SZ, "{} austere".format(sys.argv[0]))
    except OSError as e:
        logger.error(e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_config(path=None, defaults=None):
    """Load and parse an INI-style configuration file.

    If *path* is given it is opened and parsed (raising on failure).
    *defaults* is a list of file names tried first, silently skipping any
    that cannot be read; it falls back to ``DEFAULT_FILES`` when None.
    Pass an empty list (or ``False``) to disable default loading entirely.

    :returns: the populated ``configparser.ConfigParser`` instance
    """
    if defaults is None:
        defaults = DEFAULT_FILES
    # ``SafeConfigParser`` and ``readfp`` are deprecated aliases (removed
    # in Python 3.12); ``ConfigParser``/``read_file`` are the supported
    # spellings with identical behaviour.
    config = configparser.ConfigParser(allow_no_value=True)
    if defaults:
        config.read(defaults)
    if path:
        with open(path) as fh:
            config.read_file(fh)
    return config
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_value_from_schema(v):
    """Load a value from a scheme-prefixed string.

    Parameters
    ----------
    v : str
        URL-style string whose scheme selects the target type and whose
        netloc carries the raw payload, e.g. ``int://42``.

    Returns
    -------
    object or None
        The converted value, or None when the scheme is not recognised.
    """
    # Split into scheme ("int", "float", ...) and netloc (the payload).
    x = urllib.parse.urlparse(v)
    if x.scheme.lower() == 'decimal':
        v = Decimal(x.netloc)
    elif x.scheme.lower() in ['int', 'integer']:
        v = int(x.netloc)
    elif x.scheme.lower() == 'float':
        v = float(x.netloc)
    elif x.scheme.lower() in ['s', 'str', 'string']:
        v = str(x.netloc)
    elif x.scheme.lower() in ['u', 'unicode']:
        v = six.u(x.netloc)
    elif x.scheme.lower() == 'email':
        v = six.u(x.netloc)
    elif x.scheme.lower() == 'bool':
        # NOTE(review): bool() of any non-empty netloc is True, so
        # ``bool://false`` also yields True -- confirm this is intended.
        v = bool(x.netloc)
    elif x.scheme.lower() in ['b', 'bytes']:
        v = six.b(x.netloc)
    elif x.scheme.lower() in ['ts.iso8601', 'timestamp.iso8601']:
        v = MayaDT.from_iso8601(x.netloc).datetime()
    elif x.scheme.lower() in ['ts.rfc2822', 'timestamp.rfc2822']:
        v = MayaDT.from_rfc2822(x.netloc).datetime()
    # NOTE(review): 'timestamp.rfx3339' looks like a typo for
    # 'timestamp.rfc3339' -- left unchanged to keep accepted input stable.
    elif x.scheme.lower() in ['ts.rfc3339', 'timestamp.rfx3339']:
        v = MayaDT.from_rfc3339(x.netloc).datetime()
    elif x.scheme.lower() in ['ts', 'timestamp']:
        v = maya.parse(x.netloc).datetime()
    elif x.scheme.lower() == 'date':
        # Payload is a Unix timestamp in seconds.
        v = datetime.date.fromtimestamp(float(x.netloc))
    elif x.scheme.lower() == 'time':
        v = time.gmtime(float(x.netloc))
    else:
        # Unknown scheme: signal "no conversion" to the caller.
        v = None
    return v
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_value(v, parser, config, description):
    """Convert a string received on the command-line into a value or None.

    :param str v: The value to parse.
    :param parser: Fallback callable used when scheme-based loading
        yields nothing.
    :param dict config: The config to use; only ``config['required']`` is
        read here, for error reporting.
    :param str description: Description (for debugging/error messages).
    :return: The parsed value; None for an empty string or when *v* is None.
    :rtype: object
    """
    val = None
    # Empty string: nothing to parse (implicit None return).
    if v == '':
        return
    if v is not None:
        try:
            # First attempt the "scheme://payload" loader.
            val = load_value_from_schema(v)
        except Exception as e:
            # Scheme loader blew up: wrap as a certifier type error,
            # chaining the original exception for context.
            six.raise_from(
                CertifierTypeError(
                    message='{kind}'.format(
                        description=description,
                        kind=type(v).__name__,
                    ),
                    required=config['required'],
                    value=v,
                ),
                e)
        else:
            # try/except/ELSE: only reached when the scheme loader did not
            # raise. A None result means "scheme not recognised", so fall
            # back to the plain parser.
            if val is None:
                try:
                    return parser(v)
                except CertifierTypeError:
                    raise
                except CertifierValueError:
                    raise
                except TypeError as e:
                    # Re-wrap generic TypeError in the certifier hierarchy.
                    six.raise_from(
                        CertifierTypeError(
                            message='{kind}'.format(
                                description=description,
                                kind=type(v).__name__,
                            ),
                            required=config['required'],
                            value=v,
                        ),
                        e)
                except ValueError as e:
                    # Re-wrap generic ValueError in the certifier hierarchy.
                    six.raise_from(
                        CertifierValueError(
                            message='{value}'.format(
                                description=description,
                                value=v,
                            ),
                            required=config['required'],
                            value=v,
                        ),
                        e)
    return val
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_simple_date(datestring):
    """Shorten a date string such as ``7.9.2017`` to ``07.09.``.

    Expects a ``d.M``-shaped fragment (a year may follow); returns the
    zero-padded ``dd.MM.`` form, or the string ``"Failed"`` when no valid
    date can be extracted.
    """
    match = re.search(r"\d{1,2}(\.)\d{1,2}", datestring)
    if match:
        day, month = match.group().split(".")
        if len(day) == 1:
            day = add_zero(day)
        if len(month) == 1:
            month = add_zero(month)
        if date_is_valid([day, month]):
            return day + "." + month + "."
    return "Failed"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_month(datestring):
    """Transform a written month into its zero-padded month number.

    E.g. ``November`` -> ``11``, ``May`` -> ``05``.

    :param datestring: a string possibly containing an (abbreviated)
        English month name
    :returns: the month number as a string; note that when no month is
        found this returns the literal string ``"None"`` (``str(None)``),
        not ``None`` itself
    """
    convert_written = re.compile(r"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec", re.IGNORECASE)
    month = convert_written.search(datestring)
    month_number = None
    # If there's a match, convert the month to its corresponding number
    if month:
        # %b parses the abbreviated month name (locale-dependent --
        # assumes an English locale; TODO confirm).
        month_number = strptime(month.group(), "%b").tm_mon
        if month_number < 10:
            # add_zero pads single digits, e.g. 5 -> "05".
            month_number = add_zero(month_number)
    return str(month_number)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_day_of_month(datestring):
    """Transform an ordinal day into a plain zero-padded number.

    E.g. ``3rd`` -> ``03``, ``12th`` -> ``12``.

    :param datestring: a string containing a (possibly ordinal) day number
    :returns: the day as a string; the literal string ``"None"`` when no
        digits are found
    """
    # Capture the digits and the optional ordinal suffix separately.
    # BUG FIX: the original tested the character class ``[st|nd|rd|th]``
    # (which matches any single one of those letters, not the suffixes)
    # and then sliced two characters off the end; using the capture group
    # is both correct and simpler, with identical results.
    match = re.search(r"(\d{1,2})(st|nd|rd|th)?", datestring, re.IGNORECASE)
    the_day = None
    if match:
        the_day = match.group(1)
        if int(the_day) < 10:
            # add_zero pads single digits, e.g. "5" -> "05".
            the_day = add_zero(the_day)
    return str(the_day)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def strip_string(self, string, *args):
    """Strip every matching regular expression from *string*.

    :param string: the input to clean up
    :param args: regex strings, each removed in turn (case-insensitive,
        multiline)
    :returns: the stripped result
    """
    result = string
    for pattern in args:
        result = re.sub(pattern, "", result.strip(),
                        flags=re.IGNORECASE | re.MULTILINE)
    return result.strip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def strip_between(self, string, start, end):
    """Remove every span matching ``start ... end`` (plus any trailing
    whitespace) from *string* and return the result."""
    pattern = start + r'.*?' + end + r'\s*'
    flags = re.DOTALL | re.IGNORECASE | re.MULTILINE
    return re.sub(pattern, '', string, flags=flags)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def distance_between(self, string, start, end):
    """Count lines from the first *start* match (inclusive) up to the
    next *end* match (exclusive); 0 when *end* is hit first or never."""
    lines_seen = 0
    counting = False
    for row in string.split("\n"):
        if not counting and self.scan_line(row, start):
            counting = True
        if self.scan_line(row, end):
            return lines_seen
        if counting:
            lines_seen += 1
    return lines_seen
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scan_line(self, line, regex):
    """Return True when *regex* matches anywhere in *line*
    (case-insensitive)."""
    return re.search(regex, line, flags=re.IGNORECASE) is not None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scan_message(self, message, regex):
    """Return the first line of *message* matching *regex*.

    :param message: a (long) string, e.g. an email body, scanned line
        by line
    :param regex: pattern tested against each line (case-insensitive)
    :returns: the first matching line, or "" when nothing matches
    """
    flags = re.IGNORECASE | re.MULTILINE
    hits = (row for row in message.split("\n")
            if re.search(regex, row, flags=flags))
    return next(hits, "")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_date(self, dl_string):
    """Normalise various date spellings to ``dd.MM.``.

    Examples: "January 15th" -> "15.01.", "15.01.2017" -> "15.01.",
    "15.1." -> "15.01.". Yields "None.None." when nothing parses.
    """
    simple = get_simple_date(dl_string)
    if simple and simple != "Failed":
        return simple
    return "{0}.{1}.".format(get_day_of_month(dl_string), get_month(dl_string))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_user(self, user_id, custom_properties=None, headers=None, endpoint_url=None):
    """Create a new identified user if one does not already exist.

    :param str user_id: identified user's ID
    :param dict custom_properties: optional user properties
    :param dict headers: custom request headers (defaults used otherwise)
    :param str endpoint_url: request target (defaults used otherwise)
    :return: Response
    """
    base = endpoint_url or self._endpoint_url
    request_headers = headers or self._default_headers()
    body = {"user_id": user_id}
    if custom_properties is not None:
        body["user_properties"] = custom_properties
    return requests.post(base + '/users', headers=request_headers, json=body)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_event(self, user_id, event_name, event_properties=None, headers=None, endpoint_url=None):
    """Send an identified event, creating the user when needed.

    :param str user_id: identified user's ID
    :param str event_name: event name (e.g. "visit_website")
    :param dict event_properties: details describing the event
    :param dict headers: custom request headers (defaults used otherwise)
    :param str endpoint_url: request target (defaults used otherwise)
    :return: Response
    """
    base = endpoint_url or self._endpoint_url
    target = "{0}/users/{1}/events".format(base, user_id)
    request_headers = headers or self._default_headers()
    body = {
        "event_name": event_name,
        "custom_properties": event_properties or {}
    }
    return requests.post(target, headers=request_headers, json=body)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decrease_user_property(self, user_id, property_name, value=0, headers=None, endpoint_url=None):
    """Decrease a user's property by a value.

    (Docstring fix: the original said "property name to increase".)

    :param str user_id: identified user's ID
    :param str property_name: user property name to decrease
    :param number value: amount by which to decrease the property
    :param dict headers: custom request headers (defaults used otherwise)
    :param str endpoint_url: request target (defaults used otherwise)
    :return: Response
    """
    endpoint_url = endpoint_url or self._endpoint_url
    # str(value) replaces the unidiomatic dunder call ``value.__str__()``;
    # the URL produced is byte-identical.
    url = "{0}/users/{1}/properties/{2}/decrease/{3}".format(
        endpoint_url, user_id, property_name, str(value))
    headers = headers or self._default_headers(content_type="")
    return requests.post(url, headers=headers)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_user_properties(self, user_id, user_properties, headers=None, endpoint_url=None):
    """Update a user's properties with the values in *user_properties*.

    :param str user_id: identified user's ID
    :param dict user_properties: properties to update with new values
    :param dict headers: custom request headers (defaults used otherwise)
    :param str endpoint_url: request target (defaults used otherwise)
    :return: Response
    """
    base = endpoint_url or self._endpoint_url
    target = "{0}/users/{1}/properties".format(base, user_id)
    request_headers = headers or self._default_headers()
    return requests.put(target, headers=request_headers, json=user_properties)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def link_user_to_group(self, user_id, group_id, headers=None, endpoint_url=None):
    """Link a user to a group.

    :param str user_id: identified user's ID
    :param str group_id: group ID
    :param dict headers: custom request headers (defaults used otherwise)
    :param str endpoint_url: request target (defaults used otherwise)
    :return: Response
    """
    base = endpoint_url or self._endpoint_url
    target = "{0}/groups/{1}/link/{2}".format(base, group_id, user_id)
    request_headers = headers or self._default_headers()
    return requests.post(target, headers=request_headers)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_group_properties(self, group_id, group_properties, headers=None, endpoint_url=None):
    """Update a group's properties with the values in *group_properties*.

    :param str group_id: group ID
    :param dict group_properties: properties to update with new values
    :param dict headers: custom request headers (defaults used otherwise)
    :param str endpoint_url: request target (defaults used otherwise)
    :return: Response
    """
    base = endpoint_url or self._endpoint_url
    target = "{0}/groups/{1}/properties".format(base, group_id)
    request_headers = headers or self._default_headers()
    return requests.put(target, headers=request_headers, json=group_properties)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def timing(self, stats, value):
    """Record *value* as timing data for the given stat name(s)."""
    self.update_stats(stats, value, self.SC_TIMING)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count(self, stats, value, sample_rate=1):
    """Add *value* to one or more counter stats, optionally sampled."""
    self.update_stats(stats, value, self.SC_COUNT, sample_rate)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def timeit(self, metric, func, *args, **kwargs):
    """Run *func*, record its duration in ms under *metric*, and return
    the function's result. (The bare ``timeit`` call resolves to the
    module-level helper, not this method.)"""
    result, seconds = timeit(func, *args, **kwargs)
    self.timing(metric, seconds * 1000.0)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format(keys, value, _type, prefix=""):
    """Build a metric payload mapping each (prefixed) key to "value|type".

    Examples: {'example.format': '2|T'},
    {'prefix.example.format': '2|T'}.
    """
    # TODO: Allow any iterable except strings
    if not isinstance(keys, (list, tuple)):
        keys = [keys]
    packed = "{0}|{1}".format(value, _type)
    return {prefix + key: packed for key in keys}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jars(self, absolute=True):
    '''
    List the jar files found in the jar path.

    :param absolute: when True (the default) return absolute paths,
        otherwise the raw glob results.
    '''
    found = glob(os.path.join(self._jar_path, '*.jar'))
    # BUG FIX: the original returned the raw glob results for
    # absolute=True and the absolutised paths for absolute=False --
    # exactly inverted. (It also returned a lazy ``map`` object on
    # Python 3; both branches now return a list.)
    if absolute:
        return [os.path.abspath(j) for j in found]
    return found
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_jars_to(cls, folder):
    '''
    Download any jars from JARS that are missing in *folder*, creating
    the folder first when necessary.
    '''
    if not os.path.exists(folder):
        os.makedirs(folder)
    for spec in JARS:
        jar = MavenJar(spec[0], spec[1], spec[2])
        target = os.path.join(folder, jar.filename)
        if os.path.isfile(target):
            print("Skipping already downloaded file: %s" % jar.filename)
        else:
            print("Downloading %s..." % jar.filename)
            jar.download_to(folder)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def attributes_from_dict(document):
    """Deserialise attribute instances from their Json representation.

    Parameters
    ----------
    document : Json object
        Iterable of ``{'name': ..., 'value': ...}`` objects

    Returns
    -------
    dict(Attribute)
        Dictionary of attribute instances keyed by their name
    """
    pairs = ((str(attr['name']), attr['value']) for attr in document)
    return {name: Attribute(name, value) for name, value in pairs}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def attributes_to_dict(attributes):
    """Serialise a dict of attribute instances into a list of key-value
    pairs.

    Parameters
    ----------
    attributes : dict(Attribute)
        Dictionary of attribute instances (each exposing ``.value``)

    Returns
    -------
    list
        List of ``{'name': ..., 'value': ...}`` pairs
    """
    return [
        {'name': key, 'value': attributes[key].value}
        for key in attributes
    ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_dict(attributes, definitions):
    """Create a dictionary of attributes from a list of key-value pairs.

    Each element may be either a dict with 'name' and 'value' keys (the
    value string is parsed via the definition's data type) or an existing
    attribute object (whose value is type-checked instead). Raises
    ValueError for malformed elements, unknown attribute names, values of
    the wrong type, or duplicate attribute names. A None *attributes*
    yields an empty dictionary.

    Parameters
    ----------
    attributes : list()
        List of attribute objects or of {'name', 'value'} dictionaries
    definitions : list
        List of attribute definitions naming the valid attributes

    Returns
    -------
    dict
        Dictionary of attribute instances keyed by their name
    """
    # Build a lookup of valid parameter names -> their definitions.
    valid_names = {}
    for para in definitions:
        valid_names[para.identifier] = para
    result = {}
    if not attributes is None:
        for element in attributes:
            if isinstance(element, dict):
                # Create attribute from dictionary
                for key in ['name', 'value']:
                    if not key in element:
                        raise ValueError('object has no key ' + key + ': ' + str(element))
                name = str(element['name'])
                if not name in valid_names:
                    raise ValueError('invalid parameter name: ' + name)
                try:
                    # Parse the raw string with the declared data type.
                    value = valid_names[name].data_type.from_string(
                        element['value']
                    )
                except ValueError as ex:
                    raise ValueError(str(ex))
                attr = Attribute(name, value)
            else:
                # Element is expected to be an attribute object
                attr = element
                # Make sure that the attribute name is valid and that its
                # value is of the declared type.
                if attr.name in valid_names:
                    valid_names[attr.name].data_type.test_value(attr.value)
                else:
                    raise ValueError('invalid parameter name: ' + attr.name)
            # Reject duplicate definitions of the same attribute.
            if attr.name in result:
                raise ValueError('duplicate attribute: ' + attr.name)
            result[attr.name] = attr
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_dict(document):
    """Create an attribute definition from its Json-like representation.

    Parameters
    ----------
    document : dict
        Json-like object with ``id``, ``name``, ``description``,
        ``type`` and an optional ``default``

    Returns
    -------
    AttributeDefinition
    """
    return AttributeDefinition(
        document['id'],
        document['name'],
        document['description'],
        AttributeType.from_dict(document['type']),
        default=document.get('default')
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
    """Serialise this attribute definition into a Json-like dictionary.

    The ``default`` key is only present when a default value is set.

    Returns
    -------
    dict
        Json-like dictionary representation of the attribute definition
    """
    serialised = {
        'id' : self.identifier,
        'name' : self.name,
        'description' : self.description,
        'type' : self.data_type.to_dict()
    }
    if self.default is not None:
        serialised['default'] = self.default
    return serialised
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_dict(document):
    """Instantiate the attribute type named in *document*.

    Parameters
    ----------
    document : dict
        Json-like object with a ``name`` key (and ``values`` for enums)

    Returns
    -------
    AttributeType

    Raises
    ------
    ValueError
        For an unknown type name
    """
    type_name = document['name']
    if type_name == ATTR_TYPE_INT:
        return IntType()
    if type_name == ATTR_TYPE_FLOAT:
        return FloatType()
    if type_name == ATTR_TYPE_ENUM:
        return EnumType(document['values'])
    if type_name == ATTR_TYPE_DICT:
        return DictType()
    if type_name == ATTR_TYPE_LIST:
        return ListType()
    raise ValueError('invalid attribute type: ' + str(type_name))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_string(self, value):
    """Parse a string like ``{1: 2.5, 3: 4}`` into a dict mapping the
    integer keys (as strings) to float values.

    Raises ValueError on a malformed ``key:value`` entry.
    """
    # Strip an optional surrounding {} -- checked on the raw input,
    # before whitespace stripping, to preserve the original behaviour.
    if value.startswith('{') and value.endswith('}'):
        text = value[1:-1].strip()
    else:
        text = value.strip()
    result = {}
    # Each comma-separated entry must be a single <int>:<float> pair.
    for entry in text.split(','):
        pieces = entry.split(':')
        if len(pieces) != 2:
            raise ValueError('invalid entry in dictionary: ' + entry)
        key, number = pieces
        result[str(int(key.strip()))] = float(number.strip())
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.