hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2ec0f20aafdc10453e7ffdfe838dced43ab3ef | 50,440 | py | Python | wagtail/admin/views/pages.py | kjartansverrisson/wagtail | d202195333e11faf5e1c42fc9a154cbe88d5e689 | [
"BSD-3-Clause"
] | null | null | null | wagtail/admin/views/pages.py | kjartansverrisson/wagtail | d202195333e11faf5e1c42fc9a154cbe88d5e689 | [
"BSD-3-Clause"
] | 2 | 2020-05-01T06:02:28.000Z | 2020-09-24T09:27:08.000Z | wagtail/admin/views/pages.py | kjartansverrisson/wagtail | d202195333e11faf5e1c42fc9a154cbe88d5e689 | [
"BSD-3-Clause"
] | 1 | 2020-03-09T08:05:56.000Z | 2020-03-09T08:05:56.000Z | from time import time
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator
from django.db import transaction
from django.db.models import Count
from django.http import Http404, HttpResponse, JsonResponse
from django.http.request import QueryDict
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.html import format_html
from django.utils.http import is_safe_url, urlquote
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET, require_POST
from django.views.decorators.vary import vary_on_headers
from django.views.generic import View
from wagtail.admin import messages, signals
from wagtail.admin.action_menu import PageActionMenu
from wagtail.admin.auth import user_has_any_page_permission, user_passes_test
from wagtail.admin.forms.pages import CopyForm
from wagtail.admin.forms.search import SearchForm
from wagtail.admin.mail import send_notification
from wagtail.admin.navigation import get_explorable_root_page
from wagtail.core import hooks
from wagtail.core.models import Page, PageRevision, UserPagePermissionsProxy
from wagtail.search.query import MATCH_ALL
def get_valid_next_url_from_request(request):
    """Return the 'next' URL from POST or GET data, or '' if absent or unsafe.

    Only URLs pointing back at this request's own host are considered safe,
    preventing open-redirect attacks via the 'next' parameter.
    """
    candidate = request.POST.get('next') or request.GET.get('next')
    if candidate and is_safe_url(url=candidate, allowed_hosts={request.get_host()}):
        return candidate
    return ''
@user_passes_test(user_has_any_page_permission)
def index(request, parent_page_id=None):
    """Explorer listing of the children of ``parent_page_id`` (or the tree root).

    Sorting is controlled by the ``ordering`` GET parameter and pagination by
    ``p``. The special ``ord`` ordering preserves the tree's native order and
    disables pagination so all children are visible for drag-and-drop
    reordering.
    """
    if parent_page_id:
        parent_page = get_object_or_404(Page, id=parent_page_id)
    else:
        parent_page = Page.get_first_root_node()
    # This will always succeed because of the @user_passes_test above.
    root_page = get_explorable_root_page(request.user)
    # If this page isn't a descendant of the user's explorable root page,
    # then redirect to that explorable root page instead.
    if not (
        parent_page.pk == root_page.pk
        or parent_page.is_descendant_of(root_page)
    ):
        return redirect('wagtailadmin_explore', root_page.pk)
    parent_page = parent_page.specific
    user_perms = UserPagePermissionsProxy(request.user)
    # Restrict the child listing to pages this user is allowed to explore.
    pages = (
        parent_page.get_children().prefetch_related(
            "content_type", "sites_rooted_here"
        )
        & user_perms.explorable_pages()
    )
    # Get page ordering; anything not in the whitelist falls back to
    # newest-edited-first.
    ordering = request.GET.get('ordering', '-latest_revision_created_at')
    if ordering not in [
        'title',
        '-title',
        'content_type',
        '-content_type',
        'live', '-live',
        'latest_revision_created_at',
        '-latest_revision_created_at',
        'ord'
    ]:
        ordering = '-latest_revision_created_at'
    if ordering == 'ord':
        # preserve the native ordering from get_children()
        pass
    elif ordering == 'latest_revision_created_at':
        # order by oldest revision first.
        # Special case NULL entries - these should go at the top of the list.
        # Do this by annotating with Count('latest_revision_created_at'),
        # which returns 0 for these
        pages = pages.annotate(
            null_position=Count('latest_revision_created_at')
        ).order_by('null_position', 'latest_revision_created_at')
    elif ordering == '-latest_revision_created_at':
        # order by newest revision first.
        # Special case NULL entries - these should go at the end of the list
        # (Count returns 0 for NULLs, so '-null_position' sorts them last).
        pages = pages.annotate(
            null_position=Count('latest_revision_created_at')
        ).order_by('-null_position', '-latest_revision_created_at')
    else:
        pages = pages.order_by(ordering)
    # Don't paginate if sorting by page order - all pages must be shown to
    # allow drag-and-drop reordering
    do_paginate = ordering != 'ord'
    if do_paginate or pages.count() < 100:
        # Retrieve pages in their most specific form, so that custom
        # get_admin_display_title and get_url_parts methods on subclasses are respected.
        # However, skip this on unpaginated listings with >100 child pages as this could
        # be a significant performance hit. (This should only happen on the reorder view,
        # and hopefully no-one is having to do manual reordering on listings that large...)
        pages = pages.specific(defer=True)
    # allow hooks to modify the queryset
    for hook in hooks.get_hooks('construct_explorer_page_queryset'):
        pages = hook(parent_page, pages, request)
    # Pagination
    if do_paginate:
        paginator = Paginator(pages, per_page=50)
        pages = paginator.get_page(request.GET.get('p'))
    return render(request, 'wagtailadmin/pages/index.html', {
        'parent_page': parent_page.specific,
        'ordering': ordering,
        'pagination_query_params': "ordering=%s" % ordering,
        'pages': pages,
        'do_paginate': do_paginate,
    })
def add_subpage(request, parent_page_id):
    """Show the "choose page type" screen for creating a child of a page.

    When exactly one page type is creatable here, skip the chooser and
    redirect straight to the create form for that type.
    """
    parent_page = get_object_or_404(Page, id=parent_page_id).specific
    if not parent_page.permissions_for_user(request.user).can_add_subpage():
        raise PermissionDenied
    creatable = []
    for model in type(parent_page).creatable_subpage_models():
        if model.can_create_at(parent_page):
            creatable.append(
                (model.get_verbose_name(), model._meta.app_label, model._meta.model_name)
            )
    # Order alphabetically by verbose name, case-insensitively.
    page_types = sorted(creatable, key=lambda entry: entry[0].lower())
    if len(page_types) == 1:
        # Single candidate - no point making the user choose.
        verbose_name, app_label, model_name = page_types[0]
        return redirect('wagtailadmin_pages:add', app_label, model_name, parent_page.id)
    return render(request, 'wagtailadmin/pages/add_subpage.html', {
        'parent_page': parent_page,
        'page_types': page_types,
        'next': get_valid_next_url_from_request(request),
    })
def content_type_use(request, content_type_app_name, content_type_model_name):
    """List all pages of a given page type (looked up by content-type natural key)."""
    try:
        content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
    except ContentType.DoesNotExist:
        raise Http404
    page_class = content_type.model_class()
    # Only genuine Page subclasses may be listed here; reject any other model.
    if not issubclass(page_class, Page):
        raise Http404
    paginator = Paginator(page_class.objects.all(), per_page=10)
    pages = paginator.get_page(request.GET.get('p'))
    return render(request, 'wagtailadmin/pages/content_type_use.html', {
        'pages': pages,
        'app_name': content_type_app_name,
        'content_type': content_type,
        'page_class': page_class,
    })
def create(request, content_type_app_name, content_type_model_name, parent_page_id):
    """Display / process the create form for a new child of ``parent_page_id``.

    The page model is resolved from the content type's natural key. On POST,
    the new page may be saved as a draft, submitted for moderation
    ('action-submit') or published immediately ('action-publish'), depending
    on which action button was pressed and the user's permissions.
    """
    parent_page = get_object_or_404(Page, id=parent_page_id).specific
    parent_page_perms = parent_page.permissions_for_user(request.user)
    if not parent_page_perms.can_add_subpage():
        raise PermissionDenied
    try:
        content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
    except ContentType.DoesNotExist:
        raise Http404
    # Get class
    page_class = content_type.model_class()
    # Make sure the class is a descendant of Page
    if not issubclass(page_class, Page):
        raise Http404
    # page must be in the list of allowed subpage types for this parent ID
    if page_class not in parent_page.creatable_subpage_models():
        raise PermissionDenied
    if not page_class.can_create_at(parent_page):
        raise PermissionDenied
    # Hooks may short-circuit the view by returning an HTTP response.
    for fn in hooks.get_hooks('before_create_page'):
        result = fn(request, parent_page, page_class)
        if hasattr(result, 'status_code'):
            return result
    page = page_class(owner=request.user)
    edit_handler = page_class.get_edit_handler()
    edit_handler = edit_handler.bind_to(request=request, instance=page)
    form_class = edit_handler.get_form_class()
    next_url = get_valid_next_url_from_request(request)
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=page,
                          parent_page=parent_page)
        if form.is_valid():
            page = form.save(commit=False)
            # Publishing additionally requires publish permission on the parent.
            is_publishing = bool(request.POST.get('action-publish')) and parent_page_perms.can_publish_subpage()
            is_submitting = bool(request.POST.get('action-submit'))
            if not is_publishing:
                page.live = False
            # Save page
            parent_page.add_child(instance=page)
            # Save revision
            revision = page.save_revision(
                user=request.user,
                submitted_for_moderation=is_submitting,
            )
            # Publish
            if is_publishing:
                revision.publish()
            # Notifications
            if is_publishing:
                # A future go_live_at means the publish was scheduled, not immediate.
                if page.go_live_at and page.go_live_at > timezone.now():
                    messages.success(request, _("Page '{0}' created and scheduled for publishing.").format(page.get_admin_display_title()), buttons=[
                        messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
                    ])
                else:
                    buttons = []
                    if page.url is not None:
                        buttons.append(messages.button(page.url, _('View live'), new_window=True))
                    buttons.append(messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit')))
                    messages.success(request, _("Page '{0}' created and published.").format(page.get_admin_display_title()), buttons=buttons)
            elif is_submitting:
                messages.success(
                    request,
                    _("Page '{0}' created and submitted for moderation.").format(page.get_admin_display_title()),
                    buttons=[
                        messages.button(
                            reverse('wagtailadmin_pages:view_draft', args=(page.id,)),
                            _('View draft'),
                            new_window=True
                        ),
                        messages.button(
                            reverse('wagtailadmin_pages:edit', args=(page.id,)),
                            _('Edit')
                        )
                    ]
                )
                if not send_notification(page.get_latest_revision().id, 'submitted', request.user.pk):
                    messages.error(request, _("Failed to send notifications to moderators"))
            else:
                messages.success(request, _("Page '{0}' created.").format(page.get_admin_display_title()))
            for fn in hooks.get_hooks('after_create_page'):
                result = fn(request, page)
                if hasattr(result, 'status_code'):
                    return result
            if is_publishing or is_submitting:
                # we're done here
                if next_url:
                    # redirect back to 'next' url if present
                    return redirect(next_url)
                # redirect back to the explorer
                return redirect('wagtailadmin_explore', page.get_parent().id)
            else:
                # Just saving - remain on edit page for further edits
                target_url = reverse('wagtailadmin_pages:edit', args=[page.id])
                if next_url:
                    # Ensure the 'next' url is passed through again if present
                    target_url += '?next=%s' % urlquote(next_url)
                return redirect(target_url)
        else:
            messages.validation_error(
                request, _("The page could not be created due to validation errors"), form
            )
            has_unsaved_changes = True
    else:
        # Initial GET: let listeners (e.g. to prefill fields) act on the new page.
        signals.init_new_page.send(sender=create, page=page, parent=parent_page)
        form = form_class(instance=page, parent_page=parent_page)
        has_unsaved_changes = False
    edit_handler = edit_handler.bind_to(form=form)
    return render(request, 'wagtailadmin/pages/create.html', {
        'content_type': content_type,
        'page_class': page_class,
        'parent_page': parent_page,
        'edit_handler': edit_handler,
        'action_menu': PageActionMenu(request, view='create', parent_page=parent_page),
        'preview_modes': page.preview_modes,
        'form': form,
        'next': next_url,
        'has_unsaved_changes': has_unsaved_changes,
    })
def edit(request, page_id):
    """Display / process the edit form for an existing page.

    The form is bound to the page's *latest revision*, which is not
    necessarily the live version. On POST, depending on the action button,
    saving creates a draft revision, submits it for moderation
    ('action-submit'), publishes it ('action-publish'), or - when a
    ``revision`` id is posted - records a revert to that earlier revision.
    Saving is refused while the page is locked by another user.
    """
    real_page_record = get_object_or_404(Page, id=page_id)
    latest_revision = real_page_record.get_latest_revision()
    page = real_page_record.get_latest_revision_as_page()
    parent = page.get_parent()
    content_type = ContentType.objects.get_for_model(page)
    page_class = content_type.model_class()
    page_perms = page.permissions_for_user(request.user)
    if not page_perms.can_edit():
        raise PermissionDenied
    # Hooks may short-circuit the view by returning an HTTP response.
    for fn in hooks.get_hooks('before_edit_page'):
        result = fn(request, page)
        if hasattr(result, 'status_code'):
            return result
    edit_handler = page_class.get_edit_handler()
    edit_handler = edit_handler.bind_to(instance=page, request=request)
    form_class = edit_handler.get_form_class()
    # Surface lock status up front: a lock held by this user is a warning;
    # a lock held by someone else is an error (and blocks saving below).
    if page_perms.user_has_lock():
        if page.locked_at:
            lock_message = format_html(_("<b>Page '{}' was locked</b> by <b>you</b> on <b>{}</b>."), page.get_admin_display_title(), page.locked_at.strftime("%d %b %Y %H:%M"))
        else:
            lock_message = format_html(_("<b>Page '{}' is locked</b> by <b>you</b>."), page.get_admin_display_title())
        messages.warning(request, lock_message, extra_tags='lock')
    elif page_perms.page_locked():
        if page.locked_by and page.locked_at:
            lock_message = format_html(_("<b>Page '{}' was locked</b> by <b>{}</b> on <b>{}</b>."), page.get_admin_display_title(), str(page.locked_by), page.locked_at.strftime("%d %b %Y %H:%M"))
        else:
            # Page was probably locked with an old version of Wagtail, or a script
            lock_message = format_html(_("<b>Page '{}' is locked</b>."), page.get_admin_display_title())
        messages.error(request, lock_message, extra_tags='lock')
    next_url = get_valid_next_url_from_request(request)
    errors_debug = None
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=page,
                          parent_page=parent)
        if form.is_valid() and not page_perms.page_locked():
            page = form.save(commit=False)
            # Publishing also requires publish permission on this page.
            is_publishing = bool(request.POST.get('action-publish')) and page_perms.can_publish()
            is_submitting = bool(request.POST.get('action-submit'))
            is_reverting = bool(request.POST.get('revision'))
            # If a revision ID was passed in the form, get that revision so its
            # date can be referenced in notification messages
            if is_reverting:
                previous_revision = get_object_or_404(page.revisions, id=request.POST.get('revision'))
            # Save revision
            revision = page.save_revision(
                user=request.user,
                submitted_for_moderation=is_submitting,
            )
            # store submitted go_live_at for messaging below
            go_live_at = page.go_live_at
            # Publish
            if is_publishing:
                revision.publish()
                # Need to reload the page because the URL may have changed, and we
                # need the up-to-date URL for the "View Live" button.
                page = page.specific_class.objects.get(pk=page.pk)
            # Notifications
            if is_publishing:
                if go_live_at and go_live_at > timezone.now():
                    # Page has been scheduled for publishing in the future
                    if is_reverting:
                        message = _(
                            "Revision from {0} of page '{1}' has been scheduled for publishing."
                        ).format(
                            previous_revision.created_at.strftime("%d %b %Y %H:%M"),
                            page.get_admin_display_title()
                        )
                    else:
                        if page.live:
                            message = _(
                                "Page '{0}' is live and this revision has been scheduled for publishing."
                            ).format(
                                page.get_admin_display_title()
                            )
                        else:
                            message = _(
                                "Page '{0}' has been scheduled for publishing."
                            ).format(
                                page.get_admin_display_title()
                            )
                    messages.success(request, message, buttons=[
                        messages.button(
                            reverse('wagtailadmin_pages:edit', args=(page.id,)),
                            _('Edit')
                        )
                    ])
                else:
                    # Page is being published now
                    if is_reverting:
                        message = _(
                            "Revision from {0} of page '{1}' has been published."
                        ).format(
                            previous_revision.created_at.strftime("%d %b %Y %H:%M"),
                            page.get_admin_display_title()
                        )
                    else:
                        message = _(
                            "Page '{0}' has been published."
                        ).format(
                            page.get_admin_display_title()
                        )
                    buttons = []
                    if page.url is not None:
                        buttons.append(messages.button(page.url, _('View live'), new_window=True))
                    buttons.append(messages.button(reverse('wagtailadmin_pages:edit', args=(page_id,)), _('Edit')))
                    messages.success(request, message, buttons=buttons)
            elif is_submitting:
                message = _(
                    "Page '{0}' has been submitted for moderation."
                ).format(
                    page.get_admin_display_title()
                )
                messages.success(request, message, buttons=[
                    messages.button(
                        reverse('wagtailadmin_pages:view_draft', args=(page_id,)),
                        _('View draft'),
                        new_window=True
                    ),
                    messages.button(
                        reverse('wagtailadmin_pages:edit', args=(page_id,)),
                        _('Edit')
                    )
                ])
                if not send_notification(page.get_latest_revision().id, 'submitted', request.user.pk):
                    messages.error(request, _("Failed to send notifications to moderators"))
            else:  # Saving
                if is_reverting:
                    message = _(
                        "Page '{0}' has been replaced with revision from {1}."
                    ).format(
                        page.get_admin_display_title(),
                        previous_revision.created_at.strftime("%d %b %Y %H:%M")
                    )
                else:
                    message = _(
                        "Page '{0}' has been updated."
                    ).format(
                        page.get_admin_display_title()
                    )
                messages.success(request, message)
            for fn in hooks.get_hooks('after_edit_page'):
                result = fn(request, page)
                if hasattr(result, 'status_code'):
                    return result
            if is_publishing or is_submitting:
                # we're done here - redirect back to the explorer
                if next_url:
                    # redirect back to 'next' url if present
                    return redirect(next_url)
                # redirect back to the explorer
                return redirect('wagtailadmin_explore', page.get_parent().id)
            else:
                # Just saving - remain on edit page for further edits
                target_url = reverse('wagtailadmin_pages:edit', args=[page.id])
                if next_url:
                    # Ensure the 'next' url is passed through again if present
                    target_url += '?next=%s' % urlquote(next_url)
                return redirect(target_url)
        else:
            if page_perms.page_locked():
                messages.error(request, _("The page could not be saved as it is locked"))
            else:
                messages.validation_error(
                    request, _("The page could not be saved due to validation errors"), form
                )
            # Debug string of form + formset errors, surfaced in the template.
            errors_debug = (
                repr(form.errors)
                + repr([
                    (name, formset.errors)
                    for (name, formset) in form.formsets.items()
                    if formset.errors
                ])
            )
            has_unsaved_changes = True
    else:
        form = form_class(instance=page, parent_page=parent)
        has_unsaved_changes = False
    edit_handler = edit_handler.bind_to(form=form)
    # Check for revisions still undergoing moderation and warn
    if latest_revision and latest_revision.submitted_for_moderation:
        buttons = []
        if page.live:
            buttons.append(messages.button(
                reverse('wagtailadmin_pages:revisions_compare', args=(page.id, 'live', latest_revision.id)),
                _('Compare with live version')
            ))
        messages.warning(request, _("This page is currently awaiting moderation"), buttons=buttons)
    if page.live and page.has_unpublished_changes:
        # Page status needs to present the version of the page containing the correct live URL
        page_for_status = real_page_record.specific
    else:
        page_for_status = page
    return render(request, 'wagtailadmin/pages/edit.html', {
        'page': page,
        'page_for_status': page_for_status,
        'content_type': content_type,
        'edit_handler': edit_handler,
        'errors_debug': errors_debug,
        'action_menu': PageActionMenu(request, view='edit', page=page),
        'preview_modes': page.preview_modes,
        'form': form,
        'next': next_url,
        'has_unsaved_changes': has_unsaved_changes,
        'page_locked': page_perms.page_locked(),
    })
def delete(request, page_id):
    """Confirm (GET) and perform (POST) deletion of a page and its descendants."""
    page = get_object_or_404(Page, id=page_id).specific
    if not page.permissions_for_user(request.user).can_delete():
        raise PermissionDenied
    # Wrap hook execution and the deletion in one transaction so that the
    # 'before_delete_page' hooks' database effects and the delete itself
    # commit or roll back together.
    with transaction.atomic():
        for fn in hooks.get_hooks('before_delete_page'):
            result = fn(request, page)
            if hasattr(result, 'status_code'):
                return result
        next_url = get_valid_next_url_from_request(request)
        if request.method == 'POST':
            # Capture the parent before deleting so we can redirect to it.
            parent_id = page.get_parent().id
            page.delete()
            messages.success(request, _("Page '{0}' deleted.").format(page.get_admin_display_title()))
            for fn in hooks.get_hooks('after_delete_page'):
                result = fn(request, page)
                if hasattr(result, 'status_code'):
                    return result
            if next_url:
                return redirect(next_url)
            return redirect('wagtailadmin_explore', parent_id)
        return render(request, 'wagtailadmin/pages/confirm_delete.html', {
            'page': page,
            'descendant_count': page.get_descendant_count(),
            'next': next_url,
        })
def view_draft(request, page_id):
    """Render a preview of the page's latest draft revision.

    Visible to anyone who may edit or publish the page.
    """
    page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
    perms = page.permissions_for_user(request.user)
    if not (perms.can_edit() or perms.can_publish()):
        raise PermissionDenied
    return page.make_preview_request(request, page.default_preview_mode)
class PreviewOnEdit(View):
    """Preview a page using form data stashed in the session.

    POST stores the urlencoded form data under a per-page session key and
    reports form validity as JSON; GET rebuilds the form from that stored
    data and renders the page's preview. Stored entries expire after
    ``preview_expiration_timeout`` seconds.
    """
    http_method_names = ('post', 'get')
    preview_expiration_timeout = 60 * 60 * 24  # seconds
    session_key_prefix = 'wagtail-preview-'
    def remove_old_preview_data(self):
        """Drop stored preview entries older than the expiration timeout."""
        expiration = time() - self.preview_expiration_timeout
        # Each session value is a (post_data, timestamp) pair; v[1] is the timestamp.
        expired_keys = [
            k for k, v in self.request.session.items()
            if k.startswith(self.session_key_prefix) and v[1] < expiration]
        # Removes the session key gracefully
        for k in expired_keys:
            self.request.session.pop(k)
    @property
    def session_key(self):
        # One session entry per combination of URL args (i.e. per page).
        return self.session_key_prefix + ','.join(self.args)
    def get_page(self):
        """Return the page being previewed, at its latest draft revision."""
        return get_object_or_404(Page,
                                 id=self.args[0]).get_latest_revision_as_page()
    def get_form(self, page, query_dict):
        """Build the page's edit form, bound to query_dict if preview data exists."""
        form_class = page.get_edit_handler().get_form_class()
        parent_page = page.get_parent().specific
        if self.session_key not in self.request.session:
            # Session key not in session, returning null form
            return form_class(instance=page, parent_page=parent_page)
        return form_class(query_dict, instance=page, parent_page=parent_page)
    def post(self, request, *args, **kwargs):
        """Stash the posted form data and report whether it validates."""
        # TODO: Handle request.FILES.
        request.session[self.session_key] = request.POST.urlencode(), time()
        self.remove_old_preview_data()
        form = self.get_form(self.get_page(), request.POST)
        return JsonResponse({'is_valid': form.is_valid()})
    def error_response(self, page):
        # Shown when the stored form data no longer validates on GET.
        return render(self.request, 'wagtailadmin/pages/preview_error.html',
                      {'page': page})
    def get(self, request, *args, **kwargs):
        """Rebuild the form from stored preview data and render the preview."""
        page = self.get_page()
        post_data, timestamp = self.request.session.get(self.session_key,
                                                        (None, None))
        if not isinstance(post_data, str):
            post_data = ''
        form = self.get_form(page, QueryDict(post_data))
        if not form.is_valid():
            return self.error_response(page)
        # Apply the edits to the in-memory page without writing to the DB.
        form.save(commit=False)
        preview_mode = request.GET.get('mode', page.default_preview_mode)
        return page.make_preview_request(request, preview_mode)
class PreviewOnCreate(PreviewOnEdit):
    """Preview variant for a page that does not exist in the database yet.

    Builds a transient page instance of the requested type, faking enough
    tree state (path/depth) for validation and URL generation to work.
    """
    def get_page(self):
        """Construct an unsaved page instance positioned under the target parent."""
        (content_type_app_name, content_type_model_name,
         parent_page_id) = self.args
        try:
            content_type = ContentType.objects.get_by_natural_key(
                content_type_app_name, content_type_model_name)
        except ContentType.DoesNotExist:
            raise Http404
        page = content_type.model_class()()
        parent_page = get_object_or_404(Page, id=parent_page_id).specific
        # We need to populate treebeard's path / depth fields in order to
        # pass validation. We can't make these 100% consistent with the rest
        # of the tree without making actual database changes (such as
        # incrementing the parent's numchild field), but by calling treebeard's
        # internal _get_path method, we can set a 'realistic' value that will
        # hopefully enable tree traversal operations
        # to at least partially work.
        page.depth = parent_page.depth + 1
        # Puts the page at the maximum possible path
        # for a child of `parent_page`.
        page.path = Page._get_children_path_interval(parent_page.path)[1]
        return page
    def get_form(self, page, query_dict):
        """As the parent class, but also assign a URL path to the unsaved page."""
        form = super().get_form(page, query_dict)
        if form.is_valid():
            # Ensures our unsaved page has a suitable url.
            form.instance.set_url_path(form.parent_page)
            form.instance.full_clean()
        return form
def unpublish(request, page_id):
    """Confirm (GET) and perform (POST) unpublishing of a page.

    When "include_descendants" is checked, every live descendant that the
    user has permission to unpublish is unpublished too.
    """
    page = get_object_or_404(Page, id=page_id).specific
    user_perms = UserPagePermissionsProxy(request.user)
    if not user_perms.for_page(page).can_unpublish():
        raise PermissionDenied
    next_url = get_valid_next_url_from_request(request)
    if request.method != 'POST':
        # Confirmation screen.
        return render(request, 'wagtailadmin/pages/confirm_unpublish.html', {
            'page': page,
            'next': next_url,
            'live_descendant_count': page.get_descendants().live().count(),
        })
    include_descendants = request.POST.get("include_descendants", False)
    page.unpublish()
    if include_descendants:
        for descendant in page.get_descendants().live().specific():
            if user_perms.for_page(descendant).can_unpublish():
                descendant.unpublish()
    messages.success(request, _("Page '{0}' unpublished.").format(page.get_admin_display_title()), buttons=[
        messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
    ])
    if next_url:
        return redirect(next_url)
    return redirect('wagtailadmin_explore', page.get_parent().id)
def move_choose_destination(request, page_to_move_id, viewed_page_id=None):
    """Browse the page tree (starting at ``viewed_page_id`` or the root) to
    pick a destination for moving ``page_to_move_id``.

    Each listed child is annotated with whether it can be chosen as the new
    parent and whether the browser may descend into it.
    """
    page_to_move = get_object_or_404(Page, id=page_to_move_id)
    page_perms = page_to_move.permissions_for_user(request.user)
    if not page_perms.can_move():
        raise PermissionDenied
    if viewed_page_id:
        viewed_page = get_object_or_404(Page, id=viewed_page_id)
    else:
        viewed_page = Page.get_first_root_node()
    viewed_page.can_choose = page_perms.can_move_to(viewed_page)
    child_pages = []
    for target in viewed_page.get_children():
        # can't move the page into itself or its descendants
        target.can_choose = page_perms.can_move_to(target)
        is_self_or_descendant = (
            target == page_to_move or target.is_child_of(page_to_move)
        )
        target.can_descend = (
            not is_self_or_descendant and target.get_children_count()
        )
        child_pages.append(target)
    # Paginate the child listing
    paginator = Paginator(child_pages, per_page=50)
    child_pages = paginator.get_page(request.GET.get('p'))
    return render(request, 'wagtailadmin/pages/move_choose_destination.html', {
        'page_to_move': page_to_move,
        'viewed_page': viewed_page,
        'child_pages': child_pages,
    })
def move_confirm(request, page_to_move_id, destination_id):
    """Confirm (GET) and perform (POST) moving a page under a new parent.

    Redirects back to the destination chooser with an error message when the
    page's slug is already taken under the chosen parent.
    """
    page_to_move = get_object_or_404(Page, id=page_to_move_id).specific
    destination = get_object_or_404(Page, id=destination_id)
    if not page_to_move.permissions_for_user(request.user).can_move_to(destination):
        raise PermissionDenied
    if not Page._slug_is_available(page_to_move.slug, destination, page=page_to_move):
        # Bug fix: .format() must be applied to the *translated* string.
        # Previously the interpolation ran inside _(), so the msgid passed to
        # gettext contained the concrete slug and never matched any catalogue
        # entry, making this message untranslatable.
        messages.error(
            request,
            _("The slug '{0}' is already in use at the selected parent page. Make sure the slug is unique and try again").format(page_to_move.slug)
        )
        return redirect('wagtailadmin_pages:move_choose_destination', page_to_move.id, destination.id)
    # Hooks may short-circuit the view by returning an HTTP response.
    for fn in hooks.get_hooks('before_move_page'):
        result = fn(request, page_to_move, destination)
        if hasattr(result, 'status_code'):
            return result
    if request.method == 'POST':
        # any invalid moves *should* be caught by the permission check above,
        # so don't bother to catch InvalidMoveToDescendant
        page_to_move.move(destination, pos='last-child')
        messages.success(request, _("Page '{0}' moved.").format(page_to_move.get_admin_display_title()), buttons=[
            messages.button(reverse('wagtailadmin_pages:edit', args=(page_to_move.id,)), _('Edit'))
        ])
        for fn in hooks.get_hooks('after_move_page'):
            result = fn(request, page_to_move)
            if hasattr(result, 'status_code'):
                return result
        return redirect('wagtailadmin_explore', destination.id)
    return render(request, 'wagtailadmin/pages/confirm_move.html', {
        'page_to_move': page_to_move,
        'destination': destination,
    })
def set_page_position(request, page_to_move_id):
    """Reorder a page among its siblings (endpoint for drag-and-drop ordering).

    The target index arrives in the ``position`` querystring parameter (even
    on POST); omitting it, or pointing past the end, moves the page to the
    end of the sibling list. Always responds with an empty 200.
    """
    page_to_move = get_object_or_404(Page, id=page_to_move_id)
    parent_page = page_to_move.get_parent()
    if not parent_page.permissions_for_user(request.user).can_reorder_children():
        raise PermissionDenied
    if request.method == 'POST':
        position = request.GET.get('position', None)
        # Locate whichever sibling currently occupies the target slot.
        position_page = None
        if position is not None:
            try:
                position_page = parent_page.get_children()[int(position)]
            except IndexError:
                pass  # slot beyond the end - treated as "move to end"
        # The permission check above covers invalid moves, so there is no
        # need to catch InvalidMoveToDescendant here.
        if position_page:
            # Insert left or right of the occupant depending on whether the
            # page moved up or down the list.
            current_index = list(parent_page.get_children()).index(page_to_move)
            if int(position) < current_index:
                page_to_move.move(position_page, pos='left')
            elif int(position) > current_index:
                page_to_move.move(position_page, pos='right')
        else:
            # No occupant: append at the end of the sibling list.
            page_to_move.move(parent_page, pos='last-child')
    return HttpResponse('')
@user_passes_test(user_has_any_page_permission)
def copy(request, page_id):
    """Display / process the "copy page" form.

    Copies the page (optionally with its subtree) to a chosen parent, with a
    new title and slug; copies are kept live only when the user may publish
    under the new parent and requested it.
    """
    page = Page.objects.get(id=page_id)
    # Parent page defaults to parent of source page
    parent_page = page.get_parent()
    # Check if the user has permission to publish subpages on the parent
    can_publish = parent_page.permissions_for_user(request.user).can_publish_subpage()
    # Create the form
    form = CopyForm(request.POST or None, user=request.user, page=page, can_publish=can_publish)
    next_url = get_valid_next_url_from_request(request)
    # Hooks may short-circuit the view by returning an HTTP response.
    for fn in hooks.get_hooks('before_copy_page'):
        result = fn(request, page)
        if hasattr(result, 'status_code'):
            return result
    # Check if user is submitting
    if request.method == 'POST':
        # Prefill parent_page in case the form is invalid (as prepopulated value for the form field,
        # because ModelChoiceField seems to not fall back to the user given value)
        parent_page = Page.objects.get(id=request.POST['new_parent_page'])
        if form.is_valid():
            # Receive the parent page (this should never be empty)
            if form.cleaned_data['new_parent_page']:
                parent_page = form.cleaned_data['new_parent_page']
            if not page.permissions_for_user(request.user).can_copy_to(parent_page,
                                                                       form.cleaned_data.get('copy_subpages')):
                raise PermissionDenied
            # Re-check if the user has permission to publish subpages on the new parent
            can_publish = parent_page.permissions_for_user(request.user).can_publish_subpage()
            # Copy the page
            new_page = page.specific.copy(
                recursive=form.cleaned_data.get('copy_subpages'),
                to=parent_page,
                update_attrs={
                    'title': form.cleaned_data['new_title'],
                    'slug': form.cleaned_data['new_slug'],
                },
                keep_live=(can_publish and form.cleaned_data.get('publish_copies')),
                user=request.user,
            )
            # Give a success message back to the user
            if form.cleaned_data.get('copy_subpages'):
                messages.success(
                    request,
                    _("Page '{0}' and {1} subpages copied.").format(page.get_admin_display_title(), new_page.get_descendants().count())
                )
            else:
                messages.success(request, _("Page '{0}' copied.").format(page.get_admin_display_title()))
            for fn in hooks.get_hooks('after_copy_page'):
                result = fn(request, page, new_page)
                if hasattr(result, 'status_code'):
                    return result
            # Redirect to explore of parent page
            if next_url:
                return redirect(next_url)
            return redirect('wagtailadmin_explore', parent_page.id)
    return render(request, 'wagtailadmin/pages/copy.html', {
        'page': page,
        'form': form,
        'next': next_url,
    })
@vary_on_headers('X-Requested-With')
@user_passes_test(user_has_any_page_permission)
def search(request):
    """Search across all pages in the admin.

    Query parameters:
        q            -- search terms (defaults to matching everything)
        content_type -- "app_label.model_name" to filter results by page type
        ordering     -- one of the whitelisted orderings below
        p            -- page number for pagination

    Returns the results partial for AJAX requests, otherwise the full
    search page.
    """
    pages = all_pages = Page.objects.all().prefetch_related('content_type').specific()
    q = MATCH_ALL
    content_types = []
    pagination_query_params = QueryDict({}, mutable=True)
    ordering = None

    if 'ordering' in request.GET:
        # Only accept orderings from a fixed whitelist of sortable fields
        if request.GET['ordering'] in ['title', '-title', 'latest_revision_created_at', '-latest_revision_created_at', 'live', '-live']:
            ordering = request.GET['ordering']
            if ordering == 'title':
                pages = pages.order_by('title')
            elif ordering == '-title':
                pages = pages.order_by('-title')
            elif ordering == 'latest_revision_created_at':
                pages = pages.order_by('latest_revision_created_at')
            elif ordering == '-latest_revision_created_at':
                pages = pages.order_by('-latest_revision_created_at')
            elif ordering == 'live':
                pages = pages.order_by('live')
            elif ordering == '-live':
                pages = pages.order_by('-live')

    if 'content_type' in request.GET:
        pagination_query_params['content_type'] = request.GET['content_type']
        try:
            # A malformed value (wrong number of dots) is a client error,
            # not a server error - raise 404 instead of an unhandled ValueError
            app_label, model_name = request.GET['content_type'].split('.')
        except ValueError:
            raise Http404
        try:
            selected_content_type = ContentType.objects.get_by_natural_key(app_label, model_name)
        except ContentType.DoesNotExist:
            raise Http404
        pages = pages.filter(content_type=selected_content_type)
    else:
        selected_content_type = None

    if 'q' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            q = form.cleaned_data['q']
            pagination_query_params['q'] = q
            # Search both the filtered and unfiltered querysets: the
            # unfiltered one feeds the per-content-type facet counts
            all_pages = all_pages.search(q, order_by_relevance=not ordering, operator='and')
            pages = pages.search(q, order_by_relevance=not ordering, operator='and')
            if pages.supports_facet:
                content_types = [
                    (ContentType.objects.get(id=content_type_id), count)
                    for content_type_id, count in all_pages.facet('content_type_id').items()
                ]
    else:
        form = SearchForm()

    paginator = Paginator(pages, per_page=20)
    pages = paginator.get_page(request.GET.get('p'))

    if request.is_ajax():
        return render(request, "wagtailadmin/pages/search_results.html", {
            'pages': pages,
            'all_pages': all_pages,
            'query_string': q,
            'content_types': content_types,
            'selected_content_type': selected_content_type,
            'ordering': ordering,
            'pagination_query_params': pagination_query_params.urlencode(),
        })
    else:
        return render(request, "wagtailadmin/pages/search.html", {
            'search_form': form,
            'pages': pages,
            'all_pages': all_pages,
            'query_string': q,
            'content_types': content_types,
            'selected_content_type': selected_content_type,
            'ordering': ordering,
            'pagination_query_params': pagination_query_params.urlencode(),
        })
def approve_moderation(request, revision_id):
    """Publish a page revision that was submitted for moderation."""
    revision = get_object_or_404(PageRevision, id=revision_id)
    if not revision.page.permissions_for_user(request.user).can_publish():
        raise PermissionDenied
    if not revision.submitted_for_moderation:
        messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.get_admin_display_title()))
        return redirect('wagtailadmin_home')

    if request.method == 'POST':
        revision.approve_moderation()

        success_buttons = []
        if revision.page.url is not None:
            success_buttons.append(
                messages.button(revision.page.url, _('View live'), new_window=True)
            )
        success_buttons.append(
            messages.button(reverse('wagtailadmin_pages:edit', args=(revision.page.id,)), _('Edit'))
        )
        messages.success(
            request,
            _("Page '{0}' published.").format(revision.page.get_admin_display_title()),
            buttons=success_buttons,
        )

        if not send_notification(revision.id, 'approved', request.user.pk):
            messages.error(request, _("Failed to send approval notifications"))

    return redirect('wagtailadmin_home')
def reject_moderation(request, revision_id):
    """Reject a page revision that was submitted for moderation."""
    revision = get_object_or_404(PageRevision, id=revision_id)
    if not revision.page.permissions_for_user(request.user).can_publish():
        raise PermissionDenied
    if not revision.submitted_for_moderation:
        messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.get_admin_display_title()))
        return redirect('wagtailadmin_home')

    if request.method == 'POST':
        revision.reject_moderation()

        edit_button = messages.button(
            reverse('wagtailadmin_pages:edit', args=(revision.page.id,)), _('Edit')
        )
        messages.success(
            request,
            _("Page '{0}' rejected for publication.").format(revision.page.get_admin_display_title()),
            buttons=[edit_button],
        )

        if not send_notification(revision.id, 'rejected', request.user.pk):
            messages.error(request, _("Failed to send rejection notifications"))

    return redirect('wagtailadmin_home')
@require_GET
def preview_for_moderation(request, revision_id):
    """Render a front-end preview of a revision awaiting moderation."""
    revision = get_object_or_404(PageRevision, id=revision_id)
    if not revision.page.permissions_for_user(request.user).can_publish():
        raise PermissionDenied
    if not revision.submitted_for_moderation:
        messages.error(
            request,
            _("The page '{0}' is not currently awaiting moderation.").format(
                revision.page.get_admin_display_title()
            ),
        )
        return redirect('wagtailadmin_home')

    preview_page = revision.as_page_object()
    # Expose the revision id to the preview machinery via the request
    return preview_page.make_preview_request(
        request,
        preview_page.default_preview_mode,
        extra_request_attrs={'revision_id': revision_id},
    )
@require_POST
def lock(request, page_id):
    """Lock a page against edits by other users, then redirect."""
    page = get_object_or_404(Page, id=page_id).specific
    if not page.permissions_for_user(request.user).can_lock():
        raise PermissionDenied

    if not page.locked:
        # Record who locked the page and when
        page.locked = True
        page.locked_by = request.user
        page.locked_at = timezone.now()
        page.save()

    # Honour a safe 'next' URL if one was posted; otherwise fall back to the
    # explorer listing of the parent page
    next_url = request.POST.get('next', None)
    if next_url and is_safe_url(url=next_url, allowed_hosts={request.get_host()}):
        return redirect(next_url)
    return redirect('wagtailadmin_explore', page.get_parent().id)
@require_POST
def unlock(request, page_id):
    """Release the lock on a page, then redirect."""
    page = get_object_or_404(Page, id=page_id).specific
    if not page.permissions_for_user(request.user).can_unlock():
        raise PermissionDenied

    if page.locked:
        # Clear all lock bookkeeping fields together
        page.locked = False
        page.locked_by = None
        page.locked_at = None
        page.save()
        messages.success(request, _("Page '{0}' is now unlocked.").format(page.get_admin_display_title()), extra_tags='unlock')

    # Honour a safe 'next' URL if one was posted; otherwise fall back to the
    # explorer listing of the parent page
    next_url = request.POST.get('next', None)
    if next_url and is_safe_url(url=next_url, allowed_hosts={request.get_host()}):
        return redirect(next_url)
    return redirect('wagtailadmin_explore', page.get_parent().id)
@user_passes_test(user_has_any_page_permission)
def revisions_index(request, page_id):
    """List the revision history of a page, newest first by default."""
    page = get_object_or_404(Page, id=page_id).specific

    # Only creation-date orderings are allowed; anything else falls back
    ordering = request.GET.get('ordering', '-created_at')
    if ordering not in ('created_at', '-created_at'):
        ordering = '-created_at'

    paginator = Paginator(page.revisions.order_by(ordering), per_page=20)
    revisions = paginator.get_page(request.GET.get('p'))

    return render(request, 'wagtailadmin/pages/revisions/index.html', {
        'page': page,
        'ordering': ordering,
        'pagination_query_params': "ordering=%s" % ordering,
        'revisions': revisions,
    })
def revisions_revert(request, page_id, revision_id):
    """Show the edit form pre-populated from an older revision of a page."""
    page = get_object_or_404(Page, id=page_id).specific
    if not page.permissions_for_user(request.user).can_edit():
        raise PermissionDenied

    revision = get_object_or_404(page.revisions, id=revision_id)
    revision_page = revision.as_page_object()

    content_type = ContentType.objects.get_for_model(page)
    page_class = content_type.model_class()

    # Bind the edit handler to the historic page object, then to an unbound
    # form built from it, so the form renders the revision's field values
    edit_handler = page_class.get_edit_handler().bind_to(
        instance=revision_page, request=request
    )
    form = edit_handler.get_form_class()(instance=revision_page)
    edit_handler = edit_handler.bind_to(form=form)

    user_avatar = render_to_string('wagtailadmin/shared/user_avatar.html', {'user': revision.user})
    messages.warning(request, mark_safe(
        _("You are viewing a previous revision of this page from <b>%(created_at)s</b> by %(user)s") % {
            'created_at': revision.created_at.strftime("%d %b %Y %H:%M"),
            'user': user_avatar,
        }
    ))

    return render(request, 'wagtailadmin/pages/edit.html', {
        'page': page,
        'revision': revision,
        'is_revision': True,
        'content_type': content_type,
        'edit_handler': edit_handler,
        'errors_debug': None,
        'action_menu': PageActionMenu(request, view='revisions_revert', page=page),
        'preview_modes': page.preview_modes,
        'form': form,  # Used in unit tests
    })
@user_passes_test(user_has_any_page_permission)
def revisions_view(request, page_id, revision_id):
    """Render a front-end preview of a specific revision of a page."""
    page = get_object_or_404(Page, id=page_id).specific
    page_perms = page.permissions_for_user(request.user)
    # Either publish or edit permission suffices to preview a revision
    if not page_perms.can_publish() and not page_perms.can_edit():
        raise PermissionDenied

    revision = get_object_or_404(page.revisions, id=revision_id)
    return revision.as_page_object().make_preview_request(request, page.default_preview_mode)
def revisions_compare(request, page_id, revision_id_a, revision_id_b):
    """Render a field-by-field comparison between two versions of a page.

    ``revision_id_a`` / ``revision_id_b`` may each be a revision primary key,
    or one of the special values: 'live' (either side), 'earliest' (a only),
    'latest' (b only). Raises Http404 when the requested version does not
    exist (e.g. 'live' on an unpublished page, or no revisions at all).
    """
    page = get_object_or_404(Page, id=page_id).specific

    # Get revision to compare from
    if revision_id_a == 'live':
        if not page.live:
            raise Http404
        revision_a = page
        revision_a_heading = _("Live")
    elif revision_id_a == 'earliest':
        revision_a = page.revisions.order_by('created_at', 'id').first()
        if revision_a:
            revision_a = revision_a.as_page_object()
            revision_a_heading = _("Earliest")
        else:
            raise Http404
    else:
        # Fetch the revision once and reuse it for both the page object and
        # the heading (the previous code issued two identical queries)
        revision = get_object_or_404(page.revisions, id=revision_id_a)
        revision_a = revision.as_page_object()
        revision_a_heading = str(revision.created_at)

    # Get revision to compare to
    if revision_id_b == 'live':
        if not page.live:
            raise Http404
        revision_b = page
        revision_b_heading = _("Live")
    elif revision_id_b == 'latest':
        revision_b = page.revisions.order_by('created_at', 'id').last()
        if revision_b:
            revision_b = revision_b.as_page_object()
            revision_b_heading = _("Latest")
        else:
            raise Http404
    else:
        revision = get_object_or_404(page.revisions, id=revision_id_b)
        revision_b = revision.as_page_object()
        revision_b_heading = str(revision.created_at)

    # Build per-field comparison objects and keep only fields that changed
    comparison = page.get_edit_handler().get_comparison()
    comparison = [comp(revision_a, revision_b) for comp in comparison]
    comparison = [comp for comp in comparison if comp.has_changed()]

    return render(request, 'wagtailadmin/pages/revisions/compare.html', {
        'page': page,
        'revision_a_heading': revision_a_heading,
        'revision_a': revision_a,
        'revision_b_heading': revision_b_heading,
        'revision_b': revision_b,
        'comparison': comparison,
    })
def revisions_unschedule(request, page_id, revision_id):
    """Cancel the scheduled publishing of a page revision."""
    page = get_object_or_404(Page, id=page_id).specific

    user_perms = UserPagePermissionsProxy(request.user)
    if not user_perms.for_page(page).can_unschedule():
        raise PermissionDenied

    revision = get_object_or_404(page.revisions, id=revision_id)
    next_url = get_valid_next_url_from_request(request)
    subtitle = _('revision {0} of "{1}"').format(revision.id, page.get_admin_display_title())

    if request.method != 'POST':
        # GET: show the confirmation page
        return render(request, 'wagtailadmin/pages/revisions/confirm_unschedule.html', {
            'page': page,
            'revision': revision,
            'next': next_url,
            'subtitle': subtitle
        })

    # Clearing approved_go_live_at cancels the scheduled publish
    revision.approved_go_live_at = None
    revision.save(update_fields=['approved_go_live_at'])

    messages.success(request, _('Revision {0} of "{1}" unscheduled.').format(revision.id, page.get_admin_display_title()), buttons=[
        messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
    ])

    if next_url:
        return redirect(next_url)
    return redirect('wagtailadmin_pages:revisions_index', page.id)
| 38.979907 | 195 | 0.628886 | from time import time
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator
from django.db import transaction
from django.db.models import Count
from django.http import Http404, HttpResponse, JsonResponse
from django.http.request import QueryDict
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.html import format_html
from django.utils.http import is_safe_url, urlquote
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET, require_POST
from django.views.decorators.vary import vary_on_headers
from django.views.generic import View
from wagtail.admin import messages, signals
from wagtail.admin.action_menu import PageActionMenu
from wagtail.admin.auth import user_has_any_page_permission, user_passes_test
from wagtail.admin.forms.pages import CopyForm
from wagtail.admin.forms.search import SearchForm
from wagtail.admin.mail import send_notification
from wagtail.admin.navigation import get_explorable_root_page
from wagtail.core import hooks
from wagtail.core.models import Page, PageRevision, UserPagePermissionsProxy
from wagtail.search.query import MATCH_ALL
def get_valid_next_url_from_request(request):
    """Return the 'next' URL from POST or GET if it is safe to redirect to.

    Returns an empty string when the parameter is absent or points at a
    foreign host.
    """
    next_url = request.POST.get('next') or request.GET.get('next')
    if next_url and is_safe_url(url=next_url, allowed_hosts={request.get_host()}):
        return next_url
    return ''
@user_passes_test(user_has_any_page_permission)
def index(request, parent_page_id=None):
    """Explorer view: list the children of ``parent_page_id`` (or the tree
    root when omitted) that the current user may explore, with configurable
    ordering and optional pagination."""
    if parent_page_id:
        parent_page = get_object_or_404(Page, id=parent_page_id)
    else:
        parent_page = Page.get_first_root_node()

    # The user's explorable root may sit below the tree root; if the
    # requested page is outside it, redirect to the explorable root instead
    root_page = get_explorable_root_page(request.user)
    if not (
        parent_page.pk == root_page.pk
        or parent_page.is_descendant_of(root_page)
    ):
        return redirect('wagtailadmin_explore', root_page.pk)

    parent_page = parent_page.specific

    # Intersect the children with the set of pages this user may explore
    user_perms = UserPagePermissionsProxy(request.user)
    pages = (
        parent_page.get_children().prefetch_related(
            "content_type", "sites_rooted_here"
        )
        & user_perms.explorable_pages()
    )

    # Get page ordering; unknown values fall back to newest-edited-first
    ordering = request.GET.get('ordering', '-latest_revision_created_at')
    if ordering not in [
        'title',
        '-title',
        'content_type',
        '-content_type',
        'live', '-live',
        'latest_revision_created_at',
        '-latest_revision_created_at',
        'ord'
    ]:
        ordering = '-latest_revision_created_at'

    if ordering == 'ord':
        # 'ord' means tree order - keep the queryset's native ordering
        pass
    elif ordering == 'latest_revision_created_at':
        # Count() of the column is 0 for NULL rows, so sorting on the
        # annotation first places never-edited pages before the rest
        pages = pages.annotate(
            null_position=Count('latest_revision_created_at')
        ).order_by('null_position', 'latest_revision_created_at')
    elif ordering == '-latest_revision_created_at':
        # ...and in descending mode never-edited pages sink to the bottom
        pages = pages.annotate(
            null_position=Count('latest_revision_created_at')
        ).order_by('-null_position', '-latest_revision_created_at')
    else:
        pages = pages.order_by(ordering)

    # Don't paginate when sorting by page order - all pages must be shown to
    # allow drag-and-drop reordering
    do_paginate = ordering != 'ord'

    if do_paginate or pages.count() < 100:
        # Retrieve pages in their most specific form, so that custom
        # get_admin_display_title and get_url_parts methods on subclasses are respected.
        # However, skip this on unpaginated listings with >100 child pages as this could
        # be a significant performance hit. (This should only happen on the reorder view,
        # and hopefully no-one is having to do manual reordering on listings that large...)
        pages = pages.specific(defer=True)

    # allow hooks to modify the queryset
    for hook in hooks.get_hooks('construct_explorer_page_queryset'):
        pages = hook(parent_page, pages, request)

    # Pagination
    if do_paginate:
        paginator = Paginator(pages, per_page=50)
        pages = paginator.get_page(request.GET.get('p'))

    return render(request, 'wagtailadmin/pages/index.html', {
        'parent_page': parent_page.specific,
        'ordering': ordering,
        'pagination_query_params': "ordering=%s" % ordering,
        'pages': pages,
        'do_paginate': do_paginate,
    })
def add_subpage(request, parent_page_id):
    """Let the user choose a page type to create under the given parent."""
    parent_page = get_object_or_404(Page, id=parent_page_id).specific
    if not parent_page.permissions_for_user(request.user).can_add_subpage():
        raise PermissionDenied

    # Collect (verbose name, app label, model name) for each creatable type,
    # sorted case-insensitively by verbose name
    page_types = sorted(
        (
            (model.get_verbose_name(), model._meta.app_label, model._meta.model_name)
            for model in type(parent_page).creatable_subpage_models()
            if model.can_create_at(parent_page)
        ),
        key=lambda page_type: page_type[0].lower(),
    )

    if len(page_types) == 1:
        # No choice to make - jump straight to the create form
        verbose_name, app_label, model_name = page_types[0]
        return redirect('wagtailadmin_pages:add', app_label, model_name, parent_page.id)

    return render(request, 'wagtailadmin/pages/add_subpage.html', {
        'parent_page': parent_page,
        'page_types': page_types,
        'next': get_valid_next_url_from_request(request),
    })
def content_type_use(request, content_type_app_name, content_type_model_name):
    """Paginated listing of all pages of a given content type."""
    try:
        content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
    except ContentType.DoesNotExist:
        raise Http404

    page_class = content_type.model_class()
    # Reject content types that are not Page subclasses
    if not issubclass(page_class, Page):
        raise Http404

    paginator = Paginator(page_class.objects.all(), per_page=10)
    pages = paginator.get_page(request.GET.get('p'))

    return render(request, 'wagtailadmin/pages/content_type_use.html', {
        'pages': pages,
        'app_name': content_type_app_name,
        'content_type': content_type,
        'page_class': page_class,
    })
def create(request, content_type_app_name, content_type_model_name, parent_page_id):
    """Create a new page of the given content type under ``parent_page_id``.

    GET renders an empty form; POST validates, adds the page to the tree,
    saves an initial revision, and optionally publishes it or submits it for
    moderation depending on which action button was pressed.
    """
    parent_page = get_object_or_404(Page, id=parent_page_id).specific
    parent_page_perms = parent_page.permissions_for_user(request.user)
    if not parent_page_perms.can_add_subpage():
        raise PermissionDenied

    try:
        content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
    except ContentType.DoesNotExist:
        raise Http404

    # Get class
    page_class = content_type.model_class()

    # Make sure the class is a descendant of Page
    if not issubclass(page_class, Page):
        raise Http404

    # page must be in the list of allowed subpage types for this parent ID
    if page_class not in parent_page.creatable_subpage_models():
        raise PermissionDenied

    if not page_class.can_create_at(parent_page):
        raise PermissionDenied

    # Hooks may short-circuit the view with their own response
    for fn in hooks.get_hooks('before_create_page'):
        result = fn(request, parent_page, page_class)
        if hasattr(result, 'status_code'):
            return result

    page = page_class(owner=request.user)
    edit_handler = page_class.get_edit_handler()
    edit_handler = edit_handler.bind_to(request=request, instance=page)
    form_class = edit_handler.get_form_class()

    next_url = get_valid_next_url_from_request(request)

    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=page,
                          parent_page=parent_page)
        if form.is_valid():
            page = form.save(commit=False)

            # Which action button was pressed determines publish/submit state;
            # publishing additionally requires the subpage-publish permission
            is_publishing = bool(request.POST.get('action-publish')) and parent_page_perms.can_publish_subpage()
            is_submitting = bool(request.POST.get('action-submit'))

            if not is_publishing:
                page.live = False

            # Save page
            parent_page.add_child(instance=page)

            # Save revision
            revision = page.save_revision(
                user=request.user,
                submitted_for_moderation=is_submitting,
            )

            # Publish
            if is_publishing:
                revision.publish()

            # Notifications
            if is_publishing:
                if page.go_live_at and page.go_live_at > timezone.now():
                    # go_live_at in the future: publish is scheduled, not live yet
                    messages.success(request, _("Page '{0}' created and scheduled for publishing.").format(page.get_admin_display_title()), buttons=[
                        messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
                    ])
                else:
                    buttons = []
                    if page.url is not None:
                        buttons.append(messages.button(page.url, _('View live'), new_window=True))
                    buttons.append(messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit')))
                    messages.success(request, _("Page '{0}' created and published.").format(page.get_admin_display_title()), buttons=buttons)
            elif is_submitting:
                messages.success(
                    request,
                    _("Page '{0}' created and submitted for moderation.").format(page.get_admin_display_title()),
                    buttons=[
                        messages.button(
                            reverse('wagtailadmin_pages:view_draft', args=(page.id,)),
                            _('View draft'),
                            new_window=True
                        ),
                        messages.button(
                            reverse('wagtailadmin_pages:edit', args=(page.id,)),
                            _('Edit')
                        )
                    ]
                )
                if not send_notification(page.get_latest_revision().id, 'submitted', request.user.pk):
                    messages.error(request, _("Failed to send notifications to moderators"))
            else:
                messages.success(request, _("Page '{0}' created.").format(page.get_admin_display_title()))

            for fn in hooks.get_hooks('after_create_page'):
                result = fn(request, page)
                if hasattr(result, 'status_code'):
                    return result

            if is_publishing or is_submitting:
                # we're done here
                if next_url:
                    return redirect(next_url)
                return redirect('wagtailadmin_explore', page.get_parent().id)
            else:
                # Draft saved only - return to the edit view for further edits,
                # carrying the 'next' URL through
                target_url = reverse('wagtailadmin_pages:edit', args=[page.id])
                if next_url:
                    target_url += '?next=%s' % urlquote(next_url)
                return redirect(target_url)
        else:
            messages.validation_error(
                request, _("The page could not be created due to validation errors"), form
            )
            has_unsaved_changes = True
    else:
        # GET: let listeners initialise fields on the fresh page object
        signals.init_new_page.send(sender=create, page=page, parent=parent_page)
        form = form_class(instance=page, parent_page=parent_page)
        has_unsaved_changes = False

    edit_handler = edit_handler.bind_to(form=form)

    return render(request, 'wagtailadmin/pages/create.html', {
        'content_type': content_type,
        'page_class': page_class,
        'parent_page': parent_page,
        'edit_handler': edit_handler,
        'action_menu': PageActionMenu(request, view='create', parent_page=parent_page),
        'preview_modes': page.preview_modes,
        'form': form,
        'next': next_url,
        'has_unsaved_changes': has_unsaved_changes,
    })
def edit(request, page_id):
    """Edit an existing page.

    Operates on the latest revision of the page (which may differ from the
    live version). POST validates, saves a new revision, and optionally
    publishes it or submits it for moderation depending on which action
    button was pressed. Saving is rejected while another user holds a lock
    on the page.
    """
    real_page_record = get_object_or_404(Page, id=page_id)
    latest_revision = real_page_record.get_latest_revision()
    page = real_page_record.get_latest_revision_as_page()
    parent = page.get_parent()

    content_type = ContentType.objects.get_for_model(page)
    page_class = content_type.model_class()

    page_perms = page.permissions_for_user(request.user)
    if not page_perms.can_edit():
        raise PermissionDenied

    # Hooks may short-circuit the view with their own response
    for fn in hooks.get_hooks('before_edit_page'):
        result = fn(request, page)
        if hasattr(result, 'status_code'):
            return result

    edit_handler = page_class.get_edit_handler()
    edit_handler = edit_handler.bind_to(instance=page, request=request)
    form_class = edit_handler.get_form_class()

    # Show a banner when the page is locked, by this user or someone else
    if page_perms.user_has_lock():
        if page.locked_at:
            lock_message = format_html(_("<b>Page '{}' was locked</b> by <b>you</b> on <b>{}</b>."), page.get_admin_display_title(), page.locked_at.strftime("%d %b %Y %H:%M"))
        else:
            lock_message = format_html(_("<b>Page '{}' is locked</b> by <b>you</b>."), page.get_admin_display_title())
        messages.warning(request, lock_message, extra_tags='lock')
    elif page_perms.page_locked():
        if page.locked_by and page.locked_at:
            lock_message = format_html(_("<b>Page '{}' was locked</b> by <b>{}</b> on <b>{}</b>."), page.get_admin_display_title(), str(page.locked_by), page.locked_at.strftime("%d %b %Y %H:%M"))
        else:
            # locked_by/locked_at unavailable - fall back to a generic message
            lock_message = format_html(_("<b>Page '{}' is locked</b>."), page.get_admin_display_title())
        messages.error(request, lock_message, extra_tags='lock')

    next_url = get_valid_next_url_from_request(request)

    errors_debug = None

    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=page,
                          parent_page=parent)

        if form.is_valid() and not page_perms.page_locked():
            page = form.save(commit=False)

            is_publishing = bool(request.POST.get('action-publish')) and page_perms.can_publish()
            is_submitting = bool(request.POST.get('action-submit'))
            is_reverting = bool(request.POST.get('revision'))

            # If a revision ID was passed in the form, fetch it so its date
            # can be referenced in the success messages below
            if is_reverting:
                previous_revision = get_object_or_404(page.revisions, id=request.POST.get('revision'))

            # Save revision
            revision = page.save_revision(
                user=request.user,
                submitted_for_moderation=is_submitting,
            )
            # Remember go_live_at before the page object is re-fetched below
            go_live_at = page.go_live_at

            # Publish
            if is_publishing:
                revision.publish()
                # Re-fetch the page so the messages below reflect the
                # published state (e.g. the live URL)
                page = page.specific_class.objects.get(pk=page.pk)

            # Notifications
            if is_publishing:
                if go_live_at and go_live_at > timezone.now():
                    # go_live_at in the future: publish is scheduled, not live yet
                    if is_reverting:
                        message = _(
                            "Revision from {0} of page '{1}' has been scheduled for publishing."
                        ).format(
                            previous_revision.created_at.strftime("%d %b %Y %H:%M"),
                            page.get_admin_display_title()
                        )
                    else:
                        if page.live:
                            message = _(
                                "Page '{0}' is live and this revision has been scheduled for publishing."
                            ).format(
                                page.get_admin_display_title()
                            )
                        else:
                            message = _(
                                "Page '{0}' has been scheduled for publishing."
                            ).format(
                                page.get_admin_display_title()
                            )
                    messages.success(request, message, buttons=[
                        messages.button(
                            reverse('wagtailadmin_pages:edit', args=(page.id,)),
                            _('Edit')
                        )
                    ])
                else:
                    # Published immediately
                    if is_reverting:
                        message = _(
                            "Revision from {0} of page '{1}' has been published."
                        ).format(
                            previous_revision.created_at.strftime("%d %b %Y %H:%M"),
                            page.get_admin_display_title()
                        )
                    else:
                        message = _(
                            "Page '{0}' has been published."
                        ).format(
                            page.get_admin_display_title()
                        )
                    buttons = []
                    if page.url is not None:
                        buttons.append(messages.button(page.url, _('View live'), new_window=True))
                    buttons.append(messages.button(reverse('wagtailadmin_pages:edit', args=(page_id,)), _('Edit')))
                    messages.success(request, message, buttons=buttons)
            elif is_submitting:
                message = _(
                    "Page '{0}' has been submitted for moderation."
                ).format(
                    page.get_admin_display_title()
                )
                messages.success(request, message, buttons=[
                    messages.button(
                        reverse('wagtailadmin_pages:view_draft', args=(page_id,)),
                        _('View draft'),
                        new_window=True
                    ),
                    messages.button(
                        reverse('wagtailadmin_pages:edit', args=(page_id,)),
                        _('Edit')
                    )
                ])
                if not send_notification(page.get_latest_revision().id, 'submitted', request.user.pk):
                    messages.error(request, _("Failed to send notifications to moderators"))
            else:
                # Draft saved only
                if is_reverting:
                    message = _(
                        "Page '{0}' has been replaced with revision from {1}."
                    ).format(
                        page.get_admin_display_title(),
                        previous_revision.created_at.strftime("%d %b %Y %H:%M")
                    )
                else:
                    message = _(
                        "Page '{0}' has been updated."
                    ).format(
                        page.get_admin_display_title()
                    )
                messages.success(request, message)

            for fn in hooks.get_hooks('after_edit_page'):
                result = fn(request, page)
                if hasattr(result, 'status_code'):
                    return result

            if is_publishing or is_submitting:
                if next_url:
                    # redirect back to 'next' url if present
                    return redirect(next_url)
                # redirect back to the explorer
                return redirect('wagtailadmin_explore', page.get_parent().id)
            else:
                # Just saving - remain on edit page for further edits
                target_url = reverse('wagtailadmin_pages:edit', args=[page.id])
                if next_url:
                    # Ensure the 'next' url is passed through again if present
                    target_url += '?next=%s' % urlquote(next_url)
                return redirect(target_url)
        else:
            if page_perms.page_locked():
                messages.error(request, _("The page could not be saved as it is locked"))
            else:
                messages.validation_error(
                    request, _("The page could not be saved due to validation errors"), form
                )
            # Form and formset errors are surfaced in the template for debugging
            errors_debug = (
                repr(form.errors)
                + repr([
                    (name, formset.errors)
                    for (name, formset) in form.formsets.items()
                    if formset.errors
                ])
            )
            has_unsaved_changes = True
    else:
        form = form_class(instance=page, parent_page=parent)
        has_unsaved_changes = False

    edit_handler = edit_handler.bind_to(form=form)

    # Check for revisions still undergoing moderation and warn
    if latest_revision and latest_revision.submitted_for_moderation:
        buttons = []
        if page.live:
            buttons.append(messages.button(
                reverse('wagtailadmin_pages:revisions_compare', args=(page.id, 'live', latest_revision.id)),
                _('Compare with live version')
            ))
        messages.warning(request, _("This page is currently awaiting moderation"), buttons=buttons)

    if page.live and page.has_unpublished_changes:
        # Page status needs to present the version of the page containing the correct live URL
        page_for_status = real_page_record.specific
    else:
        page_for_status = page

    return render(request, 'wagtailadmin/pages/edit.html', {
        'page': page,
        'page_for_status': page_for_status,
        'content_type': content_type,
        'edit_handler': edit_handler,
        'errors_debug': errors_debug,
        'action_menu': PageActionMenu(request, view='edit', page=page),
        'preview_modes': page.preview_modes,
        'form': form,
        'next': next_url,
        'has_unsaved_changes': has_unsaved_changes,
        'page_locked': page_perms.page_locked(),
    })
def delete(request, page_id):
    """Delete a page (and its descendants) after a confirmation step.

    GET renders the confirmation template; POST performs the deletion.
    The hooks and the deletion run inside one transaction, so a failure
    partway through rolls everything back.
    """
    page = get_object_or_404(Page, id=page_id).specific
    if not page.permissions_for_user(request.user).can_delete():
        raise PermissionDenied

    with transaction.atomic():
        # Hooks may short-circuit the deletion with their own response
        for fn in hooks.get_hooks('before_delete_page'):
            result = fn(request, page)
            if hasattr(result, 'status_code'):
                return result

        next_url = get_valid_next_url_from_request(request)

        if request.method == 'POST':
            # Remember the parent before deletion so we can redirect to it
            parent_id = page.get_parent().id
            page.delete()

            messages.success(request, _("Page '{0}' deleted.").format(page.get_admin_display_title()))

            for fn in hooks.get_hooks('after_delete_page'):
                result = fn(request, page)
                if hasattr(result, 'status_code'):
                    return result

            if next_url:
                return redirect(next_url)
            return redirect('wagtailadmin_explore', parent_id)

        return render(request, 'wagtailadmin/pages/confirm_delete.html', {
            'page': page,
            'descendant_count': page.get_descendant_count(),
            'next': next_url,
        })
def view_draft(request, page_id):
    """Render a front-end preview of the latest draft revision of a page."""
    page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
    page_perms = page.permissions_for_user(request.user)
    # Either publish or edit permission suffices to view the draft
    if not page_perms.can_publish() and not page_perms.can_edit():
        raise PermissionDenied
    return page.make_preview_request(request, page.default_preview_mode)
class PreviewOnEdit(View):
    """Preview support for the page edit view.

    POST stashes the submitted (urlencoded) form data in the session;
    GET replays that data into the page form and renders the preview.
    """
    http_method_names = ('post', 'get')
    preview_expiration_timeout = 60 * 60 * 24  # seconds
    session_key_prefix = 'wagtail-preview-'

    def remove_old_preview_data(self):
        """Drop stashed preview data older than ``preview_expiration_timeout``."""
        expiration = time() - self.preview_expiration_timeout
        # Each session value is a (post_data, timestamp) pair; v[1] is the
        # time the data was stashed
        expired_keys = [
            k for k, v in self.request.session.items()
            if k.startswith(self.session_key_prefix) and v[1] < expiration]
        # Removes the session key gracefully
        for k in expired_keys:
            self.request.session.pop(k)

    @property
    def session_key(self):
        """Session key for this view's URL args (here: the page id)."""
        return self.session_key_prefix + ','.join(self.args)

    def get_page(self):
        """Return the page being previewed, as its latest draft revision."""
        return get_object_or_404(Page,
                                 id=self.args[0]).get_latest_revision_as_page()

    def get_form(self, page, query_dict):
        """Build the page form, bound to ``query_dict`` when preview data
        has been stashed for this page, unbound otherwise."""
        form_class = page.get_edit_handler().get_form_class()
        parent_page = page.get_parent().specific
        if self.session_key not in self.request.session:
            # Session key not in session, returning null form
            return form_class(instance=page, parent_page=parent_page)
        return form_class(query_dict, instance=page, parent_page=parent_page)

    def post(self, request, *args, **kwargs):
        """Stash the submitted form data and report whether it validates."""
        # TODO: Handle request.FILES.
        request.session[self.session_key] = request.POST.urlencode(), time()
        self.remove_old_preview_data()
        form = self.get_form(self.get_page(), request.POST)
        return JsonResponse({'is_valid': form.is_valid()})

    def error_response(self, page):
        """Render the fallback template shown when the form is invalid."""
        return render(self.request, 'wagtailadmin/pages/preview_error.html',
                      {'page': page})

    def get(self, request, *args, **kwargs):
        """Replay the stashed form data into the page and render the preview."""
        page = self.get_page()
        post_data, timestamp = self.request.session.get(self.session_key,
                                                        (None, None))
        if not isinstance(post_data, str):
            # No (or malformed) stashed data - fall back to an empty querystring
            post_data = ''
        form = self.get_form(page, QueryDict(post_data))
        if not form.is_valid():
            return self.error_response(page)
        # Apply the form to the in-memory page without touching the database
        form.save(commit=False)
        preview_mode = request.GET.get('mode', page.default_preview_mode)
        return page.make_preview_request(request, preview_mode)
class PreviewOnCreate(PreviewOnEdit):
    """Preview support for the page create view.

    Unlike PreviewOnEdit, the page does not exist yet, so a transient
    instance is constructed from the URL args and given a plausible tree
    position.
    """
    def get_page(self):
        """Build an unsaved page instance of the requested type under the
        requested parent."""
        (content_type_app_name, content_type_model_name,
         parent_page_id) = self.args
        try:
            content_type = ContentType.objects.get_by_natural_key(
                content_type_app_name, content_type_model_name)
        except ContentType.DoesNotExist:
            raise Http404
        page = content_type.model_class()()
        parent_page = get_object_or_404(Page, id=parent_page_id).specific
        # We need to populate treebeard's path / depth fields to give the
        # unsaved page a position in the tree without making actual database
        # changes (such as incrementing the parent's numchild field). By
        # setting a 'realistic' value we can hopefully enable tree traversal
        # operations to at least partially work.
        page.depth = parent_page.depth + 1
        # Puts the page at the maximum possible path
        # for a child of `parent_page`.
        page.path = Page._get_children_path_interval(parent_page.path)[1]
        return page

    def get_form(self, page, query_dict):
        """Build the form as in PreviewOnEdit, then, when valid, give the
        unsaved instance a URL path so previews can resolve its URL."""
        form = super().get_form(page, query_dict)
        if form.is_valid():
            # Ensures our unsaved page has a suitable url.
            form.instance.set_url_path(form.parent_page)
            form.instance.full_clean()
        return form
def unpublish(request, page_id):
    """Confirm (GET) and perform (POST) unpublishing a page.

    POST may carry ``include_descendants`` to also unpublish any live
    descendants the user has permission to unpublish.
    """
    page = get_object_or_404(Page, id=page_id).specific
    user_perms = UserPagePermissionsProxy(request.user)
    if not user_perms.for_page(page).can_unpublish():
        raise PermissionDenied
    next_url = get_valid_next_url_from_request(request)
    if request.method == 'POST':
        include_descendants = request.POST.get("include_descendants", False)
        page.unpublish()
        if include_descendants:
            live_descendant_pages = page.get_descendants().live().specific()
            for live_descendant_page in live_descendant_pages:
                # Only unpublish descendants the user is allowed to.
                if user_perms.for_page(live_descendant_page).can_unpublish():
                    live_descendant_page.unpublish()
        messages.success(request, _("Page '{0}' unpublished.").format(page.get_admin_display_title()), buttons=[
            messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
        ])
        if next_url:
            return redirect(next_url)
        return redirect('wagtailadmin_explore', page.get_parent().id)
    return render(request, 'wagtailadmin/pages/confirm_unpublish.html', {
        'page': page,
        'next': next_url,
        'live_descendant_count': page.get_descendants().live().count(),
    })
def move_choose_destination(request, page_to_move_id, viewed_page_id=None):
    """Browse the page tree to choose a destination for moving a page.

    ``viewed_page_id`` is the subtree currently being browsed (defaults to
    the root node). Each child is annotated with ``can_choose`` (valid move
    target) and ``can_descend`` (browser may drill into it).
    """
    page_to_move = get_object_or_404(Page, id=page_to_move_id)
    page_perms = page_to_move.permissions_for_user(request.user)
    if not page_perms.can_move():
        raise PermissionDenied
    if viewed_page_id:
        viewed_page = get_object_or_404(Page, id=viewed_page_id)
    else:
        viewed_page = Page.get_first_root_node()
    viewed_page.can_choose = page_perms.can_move_to(viewed_page)
    child_pages = []
    for target in viewed_page.get_children():
        # can't move the page into itself or its descendants
        target.can_choose = page_perms.can_move_to(target)
        target.can_descend = (
            not(target == page_to_move
                or target.is_child_of(page_to_move))
            and target.get_children_count()
        )
        child_pages.append(target)
    paginator = Paginator(child_pages, per_page=50)
    child_pages = paginator.get_page(request.GET.get('p'))
    return render(request, 'wagtailadmin/pages/move_choose_destination.html', {
        'page_to_move': page_to_move,
        'viewed_page': viewed_page,
        'child_pages': child_pages,
    })
def move_confirm(request, page_to_move_id, destination_id):
    """Confirm (GET) and perform (POST) moving a page under a new parent.

    Runs the ``before_move_page`` / ``after_move_page`` hooks, either of
    which may short-circuit the view by returning a response.
    """
    page_to_move = get_object_or_404(Page, id=page_to_move_id).specific
    destination = get_object_or_404(Page, id=destination_id)
    if not page_to_move.permissions_for_user(request.user).can_move_to(destination):
        raise PermissionDenied

    if not Page._slug_is_available(page_to_move.slug, destination, page=page_to_move):
        # Bug fix: interpolate *after* translation. Previously the literal
        # was formatted first and then passed to _(), so the interpolated
        # string never matched any entry in the translation catalogue.
        messages.error(
            request,
            _("The slug '{0}' is already in use at the selected parent page. Make sure the slug is unique and try again").format(page_to_move.slug)
        )
        return redirect('wagtailadmin_pages:move_choose_destination', page_to_move.id, destination.id)

    for fn in hooks.get_hooks('before_move_page'):
        result = fn(request, page_to_move, destination)
        if hasattr(result, 'status_code'):
            return result

    if request.method == 'POST':
        page_to_move.move(destination, pos='last-child')

        messages.success(request, _("Page '{0}' moved.").format(page_to_move.get_admin_display_title()), buttons=[
            messages.button(reverse('wagtailadmin_pages:edit', args=(page_to_move.id,)), _('Edit'))
        ])

        for fn in hooks.get_hooks('after_move_page'):
            result = fn(request, page_to_move)
            if hasattr(result, 'status_code'):
                return result

        return redirect('wagtailadmin_explore', destination.id)

    return render(request, 'wagtailadmin/pages/confirm_move.html', {
        'page_to_move': page_to_move,
        'destination': destination,
    })
def set_page_position(request, page_to_move_id):
    """Reorder a page among its siblings (POST, used by drag-and-drop).

    The target index comes from the ``position`` query parameter; with no
    valid target the page is moved to the end of its siblings.
    """
    page_to_move = get_object_or_404(Page, id=page_to_move_id)
    parent_page = page_to_move.get_parent()
    if not parent_page.permissions_for_user(request.user).can_reorder_children():
        raise PermissionDenied
    if request.method == 'POST':
        # Get position parameter
        position = request.GET.get('position', None)
        # Find the page that's already in this position
        position_page = None
        if position is not None:
            try:
                position_page = parent_page.get_children()[int(position)]
            except IndexError:
                pass # No page in this position
        # Move page
        # any invalid moves *should* be caught by the permission check above,
        # so don't bother to catch InvalidMoveToDescendant
        if position_page:
            # Place left or right of the occupant depending on the
            # direction of travel, so the page lands exactly at `position`.
            old_position = list(parent_page.get_children()).index(page_to_move)
            if int(position) < old_position:
                page_to_move.move(position_page, pos='left')
            elif int(position) > old_position:
                page_to_move.move(position_page, pos='right')
        else:
            page_to_move.move(parent_page, pos='last-child')
    return HttpResponse('')
@user_passes_test(user_has_any_page_permission)
def copy(request, page_id):
    """Confirm (GET) and perform (POST) copying a page, optionally with its
    subtree, to a chosen parent.

    Runs the ``before_copy_page`` / ``after_copy_page`` hooks, either of
    which may short-circuit the view by returning a response.
    """
    # Fix: use get_object_or_404 (rather than Page.objects.get) so an
    # unknown page id yields a 404 instead of an unhandled DoesNotExist,
    # matching every other view in this module.
    page = get_object_or_404(Page, id=page_id)

    parent_page = page.get_parent()
    can_publish = parent_page.permissions_for_user(request.user).can_publish_subpage()
    form = CopyForm(request.POST or None, user=request.user, page=page, can_publish=can_publish)

    next_url = get_valid_next_url_from_request(request)

    for fn in hooks.get_hooks('before_copy_page'):
        result = fn(request, page)
        if hasattr(result, 'status_code'):
            return result

    if request.method == 'POST':
        # Look up the requested parent from the raw POST data;
        # form.cleaned_data may override it below.
        parent_page = get_object_or_404(Page, id=request.POST['new_parent_page'])

        if form.is_valid():
            if form.cleaned_data['new_parent_page']:
                parent_page = form.cleaned_data['new_parent_page']

            if not page.permissions_for_user(request.user).can_copy_to(parent_page,
                                                                       form.cleaned_data.get('copy_subpages')):
                raise PermissionDenied

            # Re-check publish permission against the *chosen* parent.
            can_publish = parent_page.permissions_for_user(request.user).can_publish_subpage()

            new_page = page.specific.copy(
                recursive=form.cleaned_data.get('copy_subpages'),
                to=parent_page,
                update_attrs={
                    'title': form.cleaned_data['new_title'],
                    'slug': form.cleaned_data['new_slug'],
                },
                # Copies may only stay live if the user can publish here
                # and asked for it.
                keep_live=(can_publish and form.cleaned_data.get('publish_copies')),
                user=request.user,
            )

            if form.cleaned_data.get('copy_subpages'):
                messages.success(
                    request,
                    _("Page '{0}' and {1} subpages copied.").format(page.get_admin_display_title(), new_page.get_descendants().count())
                )
            else:
                messages.success(request, _("Page '{0}' copied.").format(page.get_admin_display_title()))

            for fn in hooks.get_hooks('after_copy_page'):
                result = fn(request, page, new_page)
                if hasattr(result, 'status_code'):
                    return result

            if next_url:
                return redirect(next_url)
            return redirect('wagtailadmin_explore', parent_page.id)

    return render(request, 'wagtailadmin/pages/copy.html', {
        'page': page,
        'form': form,
        'next': next_url,
    })
@vary_on_headers('X-Requested-With')
@user_passes_test(user_has_any_page_permission)
def search(request):
    """Search all pages in the admin, with optional explicit ordering and
    content-type filtering; AJAX requests get a results-only partial.
    """
    pages = all_pages = Page.objects.all().prefetch_related('content_type').specific()
    q = MATCH_ALL
    content_types = []
    pagination_query_params = QueryDict({}, mutable=True)
    ordering = None
    if 'ordering' in request.GET:
        # Only a whitelist of orderings is honoured.
        if request.GET['ordering'] in ['title', '-title', 'latest_revision_created_at', '-latest_revision_created_at', 'live', '-live']:
            ordering = request.GET['ordering']
            if ordering == 'title':
                pages = pages.order_by('title')
            elif ordering == '-title':
                pages = pages.order_by('-title')
            if ordering == 'latest_revision_created_at':
                pages = pages.order_by('latest_revision_created_at')
            elif ordering == '-latest_revision_created_at':
                pages = pages.order_by('-latest_revision_created_at')
            if ordering == 'live':
                pages = pages.order_by('live')
            elif ordering == '-live':
                pages = pages.order_by('-live')
    if 'content_type' in request.GET:
        pagination_query_params['content_type'] = request.GET['content_type']
        # Expected form: "app_label.model_name".
        app_label, model_name = request.GET['content_type'].split('.')
        try:
            selected_content_type = ContentType.objects.get_by_natural_key(app_label, model_name)
        except ContentType.DoesNotExist:
            raise Http404
        pages = pages.filter(content_type=selected_content_type)
    else:
        selected_content_type = None
    if 'q' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            q = form.cleaned_data['q']
            pagination_query_params['q'] = q
            # When an explicit ordering is chosen, relevance ordering
            # from the search backend is disabled.
            all_pages = all_pages.search(q, order_by_relevance=not ordering, operator='and')
            pages = pages.search(q, order_by_relevance=not ordering, operator='and')
            if pages.supports_facet:
                # Per-content-type hit counts, derived from search facets.
                content_types = [
                    (ContentType.objects.get(id=content_type_id), count)
                    for content_type_id, count in all_pages.facet('content_type_id').items()
                ]
    else:
        form = SearchForm()
    paginator = Paginator(pages, per_page=20)
    pages = paginator.get_page(request.GET.get('p'))
    if request.is_ajax():
        return render(request, "wagtailadmin/pages/search_results.html", {
            'pages': pages,
            'all_pages': all_pages,
            'query_string': q,
            'content_types': content_types,
            'selected_content_type': selected_content_type,
            'ordering': ordering,
            'pagination_query_params': pagination_query_params.urlencode(),
        })
    else:
        return render(request, "wagtailadmin/pages/search.html", {
            'search_form': form,
            'pages': pages,
            'all_pages': all_pages,
            'query_string': q,
            'content_types': content_types,
            'selected_content_type': selected_content_type,
            'ordering': ordering,
            'pagination_query_params': pagination_query_params.urlencode(),
        })
def approve_moderation(request, revision_id):
    """Approve a revision awaiting moderation (POST), publishing the page."""
    revision = get_object_or_404(PageRevision, id=revision_id)
    if not revision.page.permissions_for_user(request.user).can_publish():
        raise PermissionDenied
    if not revision.submitted_for_moderation:
        messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.get_admin_display_title()))
        return redirect('wagtailadmin_home')
    if request.method == 'POST':
        revision.approve_moderation()
        message = _("Page '{0}' published.").format(revision.page.get_admin_display_title())
        buttons = []
        # A 'View live' button only makes sense when the page has a URL.
        if revision.page.url is not None:
            buttons.append(messages.button(revision.page.url, _('View live'), new_window=True))
        buttons.append(messages.button(reverse('wagtailadmin_pages:edit', args=(revision.page.id,)), _('Edit')))
        messages.success(request, message, buttons=buttons)
        # Surface a warning if the notification could not be sent.
        if not send_notification(revision.id, 'approved', request.user.pk):
            messages.error(request, _("Failed to send approval notifications"))
    return redirect('wagtailadmin_home')
def reject_moderation(request, revision_id):
    """Reject a revision awaiting moderation (POST)."""
    revision = get_object_or_404(PageRevision, id=revision_id)
    if not revision.page.permissions_for_user(request.user).can_publish():
        raise PermissionDenied
    if not revision.submitted_for_moderation:
        messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.get_admin_display_title()))
        return redirect('wagtailadmin_home')
    if request.method == 'POST':
        revision.reject_moderation()
        messages.success(request, _("Page '{0}' rejected for publication.").format(revision.page.get_admin_display_title()), buttons=[
            messages.button(reverse('wagtailadmin_pages:edit', args=(revision.page.id,)), _('Edit'))
        ])
        # Surface a warning if the notification could not be sent.
        if not send_notification(revision.id, 'rejected', request.user.pk):
            messages.error(request, _("Failed to send rejection notifications"))
    return redirect('wagtailadmin_home')
@require_GET
def preview_for_moderation(request, revision_id):
    """Render a preview of a revision that is awaiting moderation."""
    revision = get_object_or_404(PageRevision, id=revision_id)
    if not revision.page.permissions_for_user(request.user).can_publish():
        raise PermissionDenied
    if not revision.submitted_for_moderation:
        messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.get_admin_display_title()))
        return redirect('wagtailadmin_home')
    page = revision.as_page_object()
    # Attach the revision id to the generated preview request.
    return page.make_preview_request(request, page.default_preview_mode, extra_request_attrs={
        'revision_id': revision_id
    })
@require_POST
def lock(request, page_id):
    """Lock a page against editing, recording who locked it and when."""
    page = get_object_or_404(Page, id=page_id).specific
    if not page.permissions_for_user(request.user).can_lock():
        raise PermissionDenied

    if not page.locked:
        # Already-locked pages are left untouched (idempotent).
        page.locked = True
        page.locked_by = request.user
        page.locked_at = timezone.now()
        page.save()

    target = request.POST.get('next')
    if target and is_safe_url(url=target, allowed_hosts={request.get_host()}):
        return redirect(target)
    return redirect('wagtailadmin_explore', page.get_parent().id)
@require_POST
def unlock(request, page_id):
    """Release the edit lock on a page and confirm with a flash message."""
    page = get_object_or_404(Page, id=page_id).specific
    if not page.permissions_for_user(request.user).can_unlock():
        raise PermissionDenied

    if page.locked:
        # Clear all lock bookkeeping fields together.
        page.locked = False
        page.locked_by = None
        page.locked_at = None
        page.save()

        messages.success(request, _("Page '{0}' is now unlocked.").format(page.get_admin_display_title()), extra_tags='unlock')

    target = request.POST.get('next')
    if target and is_safe_url(url=target, allowed_hosts={request.get_host()}):
        return redirect(target)
    return redirect('wagtailadmin_explore', page.get_parent().id)
@user_passes_test(user_has_any_page_permission)
def revisions_index(request, page_id):
    """List the revision history for a page, newest first by default."""
    page = get_object_or_404(Page, id=page_id).specific

    # Only the created_at orderings are honoured; anything else falls
    # back to newest-first.
    ordering = request.GET.get('ordering', '-created_at')
    if ordering not in ('created_at', '-created_at'):
        ordering = '-created_at'

    revision_list = page.revisions.order_by(ordering)
    paginator = Paginator(revision_list, per_page=20)
    revisions = paginator.get_page(request.GET.get('p'))

    return render(request, 'wagtailadmin/pages/revisions/index.html', {
        'page': page,
        'ordering': ordering,
        'pagination_query_params': "ordering=%s" % ordering,
        'revisions': revisions,
    })
def revisions_revert(request, page_id, revision_id):
    """Show the edit form pre-populated with the content of an old revision.

    Saving the rendered form effectively reverts the page to that revision.
    """
    page = get_object_or_404(Page, id=page_id).specific
    page_perms = page.permissions_for_user(request.user)
    if not page_perms.can_edit():
        raise PermissionDenied
    revision = get_object_or_404(page.revisions, id=revision_id)
    revision_page = revision.as_page_object()
    content_type = ContentType.objects.get_for_model(page)
    page_class = content_type.model_class()
    edit_handler = page_class.get_edit_handler()
    edit_handler = edit_handler.bind_to(instance=revision_page,
                                        request=request)
    form_class = edit_handler.get_form_class()
    form = form_class(instance=revision_page)
    # Bind the form as well, so the handler renders bound fields.
    edit_handler = edit_handler.bind_to(form=form)
    user_avatar = render_to_string('wagtailadmin/shared/user_avatar.html', {'user': revision.user})
    messages.warning(request, mark_safe(
        _("You are viewing a previous revision of this page from <b>%(created_at)s</b> by %(user)s") % {
            'created_at': revision.created_at.strftime("%d %b %Y %H:%M"),
            'user': user_avatar,
        }
    ))
    return render(request, 'wagtailadmin/pages/edit.html', {
        'page': page,
        'revision': revision,
        'is_revision': True,
        'content_type': content_type,
        'edit_handler': edit_handler,
        'errors_debug': None,
        'action_menu': PageActionMenu(request, view='revisions_revert', page=page),
        'preview_modes': page.preview_modes,
        'form': form,
    })
@user_passes_test(user_has_any_page_permission)
def revisions_view(request, page_id, revision_id):
    """Render a preview of one specific past revision of a page."""
    page = get_object_or_404(Page, id=page_id).specific
    page_perms = page.permissions_for_user(request.user)
    if not (page_perms.can_publish() or page_perms.can_edit()):
        raise PermissionDenied
    revision = get_object_or_404(page.revisions, id=revision_id)
    return revision.as_page_object().make_preview_request(
        request, page.default_preview_mode)
def revisions_compare(request, page_id, revision_id_a, revision_id_b):
    """Show a field-by-field comparison between two versions of a page.

    Each revision id may also be a special token: ``'live'`` (the published
    page), ``'earliest'`` (oldest revision, side A only) or ``'latest'``
    (newest revision, side B only).
    """
    page = get_object_or_404(Page, id=page_id).specific

    # Resolve side A.
    if revision_id_a == 'live':
        if not page.live:
            raise Http404
        revision_a = page
        revision_a_heading = _("Live")
    elif revision_id_a == 'earliest':
        revision_a = page.revisions.order_by('created_at', 'id').first()
        if revision_a:
            revision_a = revision_a.as_page_object()
            revision_a_heading = _("Earliest")
        else:
            raise Http404
    else:
        # Fix: fetch the revision once. Previously this issued two
        # identical database queries (one for the page object, one for
        # the heading).
        revision_a_record = get_object_or_404(page.revisions, id=revision_id_a)
        revision_a = revision_a_record.as_page_object()
        revision_a_heading = str(revision_a_record.created_at)

    # Resolve side B.
    if revision_id_b == 'live':
        if not page.live:
            raise Http404
        revision_b = page
        revision_b_heading = _("Live")
    elif revision_id_b == 'latest':
        revision_b = page.revisions.order_by('created_at', 'id').last()
        if revision_b:
            revision_b = revision_b.as_page_object()
            revision_b_heading = _("Latest")
        else:
            raise Http404
    else:
        revision_b_record = get_object_or_404(page.revisions, id=revision_id_b)
        revision_b = revision_b_record.as_page_object()
        revision_b_heading = str(revision_b_record.created_at)

    # Build the list of changed fields from the page's edit handler.
    comparison = page.get_edit_handler().get_comparison()
    comparison = [comp(revision_a, revision_b) for comp in comparison]
    comparison = [comp for comp in comparison if comp.has_changed()]

    return render(request, 'wagtailadmin/pages/revisions/compare.html', {
        'page': page,
        'revision_a_heading': revision_a_heading,
        'revision_a': revision_a,
        'revision_b_heading': revision_b_heading,
        'revision_b': revision_b,
        'comparison': comparison,
    })
def revisions_unschedule(request, page_id, revision_id):
    """Confirm (GET) and perform (POST) cancelling a scheduled publish.

    Clearing ``approved_go_live_at`` stops the revision from going live at
    its scheduled time.
    """
    page = get_object_or_404(Page, id=page_id).specific
    user_perms = UserPagePermissionsProxy(request.user)
    if not user_perms.for_page(page).can_unschedule():
        raise PermissionDenied
    revision = get_object_or_404(page.revisions, id=revision_id)
    next_url = get_valid_next_url_from_request(request)
    subtitle = _('revision {0} of "{1}"').format(revision.id, page.get_admin_display_title())
    if request.method == 'POST':
        revision.approved_go_live_at = None
        revision.save(update_fields=['approved_go_live_at'])
        messages.success(request, _('Revision {0} of "{1}" unscheduled.').format(revision.id, page.get_admin_display_title()), buttons=[
            messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
        ])
        if next_url:
            return redirect(next_url)
        return redirect('wagtailadmin_pages:revisions_index', page.id)
    return render(request, 'wagtailadmin/pages/revisions/confirm_unschedule.html', {
        'page': page,
        'revision': revision,
        'next': next_url,
        'subtitle': subtitle
    })
| true | true |
1c2ec2a5a869df996ee9bf32ef07179dc62555f2 | 2,633 | py | Python | src/test/parser/template/node_tests/test_base.py | narnikgamarnikus/program-y | 777b9a8a75ec787c037de9f11a8527875ff450b1 | [
"MIT"
] | null | null | null | src/test/parser/template/node_tests/test_base.py | narnikgamarnikus/program-y | 777b9a8a75ec787c037de9f11a8527875ff450b1 | [
"MIT"
] | null | null | null | src/test/parser/template/node_tests/test_base.py | narnikgamarnikus/program-y | 777b9a8a75ec787c037de9f11a8527875ff450b1 | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.word import TemplateWordNode
from programy.parser.template.nodes.id import TemplateIdNode
from programy.parser.template.nodes.srai import TemplateSRAINode
from test.parser.template.base import TemplateTestsBaseClass
######################################################################################################################
#
class TemplateNodeBasicTests(TemplateTestsBaseClass):
def test_node(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
def test_node_children(self):
node = TemplateNode()
node.append(TemplateWordNode("Word1"))
self.assertEqual(len(node.children), 1)
node.append(TemplateWordNode("Word2"))
self.assertEqual(len(node.children), 2)
self.assertEqual("Word1 Word2", node.resolve_children_to_string(None, None))
self.assertEqual("Word1 Word2", node.resolve(None, None))
self.assertEqual("[NODE]", node.to_string())
def test_to_xml_simple(self):
node = TemplateNode()
node.append(TemplateWordNode("Word1"))
node.append(TemplateWordNode("Word2"))
self.assertEqual("Word1 Word2", node.to_xml(None, None))
def test_to_xml_composite(self):
node = TemplateNode()
node.append(TemplateWordNode("Word1"))
node.append(TemplateIdNode())
srai = TemplateSRAINode()
srai.append(TemplateWordNode("Srai1"))
node.append(srai)
node.append(TemplateWordNode("Word2"))
self.assertEqual("Word1 <id /> <srai>Srai1</srai> Word2", node.to_xml(None, None))
def test_xml_tree_simple(self):
node = TemplateNode()
node.append(TemplateWordNode("Word1"))
node.append(TemplateWordNode("Word2"))
xml = node.xml_tree(None, None)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual("<template>Word1 Word2</template>", xml_str)
def test_xml_tree_simple_composite(self):
node = TemplateNode()
node.append(TemplateWordNode("Word1"))
node.append(TemplateIdNode())
srai = TemplateSRAINode()
srai.append(TemplateWordNode("Srai1"))
node.append(srai)
node.append(TemplateWordNode("Word2"))
xml = node.xml_tree(None, None)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual("<template>Word1 <id /> <srai>Srai1</srai> Word2</template>", xml_str)
| 40.507692 | 118 | 0.64793 | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.word import TemplateWordNode
from programy.parser.template.nodes.id import TemplateIdNode
from programy.parser.template.nodes.srai import TemplateSRAINode
from test.parser.template.base import TemplateTestsBaseClass
| true | true |
1c2ec2a610c90310555eabd176019815f61ca306 | 422 | py | Python | pollster/urls.py | Timoh97/eVote-Intell | 20044a41b41a5437eebc6c704592c0b0bf85d92a | [
"MIT"
] | null | null | null | pollster/urls.py | Timoh97/eVote-Intell | 20044a41b41a5437eebc6c704592c0b0bf85d92a | [
"MIT"
] | null | null | null | pollster/urls.py | Timoh97/eVote-Intell | 20044a41b41a5437eebc6c704592c0b0bf85d92a | [
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
# Project URL routing: pages app at the root, polls under /polls/,
# the Django admin, and the users app (auth views) also at the root.
urlpatterns = [
    path('', include('pages.urls')),
    path('polls/', include('polls.urls')),
    path('admin/', admin.site.urls),
    path('', include('users.urls')),
]
# Serve user-uploaded media directly in development only; in production
# the web server is expected to handle MEDIA_URL.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 26.375 | 80 | 0.71327 |
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('pages.urls')),
path('polls/', include('polls.urls')),
path('admin/', admin.site.urls),
path('', include('users.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true | true |
1c2ec34f43ca0917576c97710652026196d402a3 | 445 | py | Python | helpers.py | pawangeek/Innovacer-Intern | 1b3e239a63244670b72a2c0c0513d75c0d95cd86 | [
"MIT"
] | null | null | null | helpers.py | pawangeek/Innovacer-Intern | 1b3e239a63244670b72a2c0c0513d75c0d95cd86 | [
"MIT"
] | 1 | 2021-06-02T00:45:03.000Z | 2021-06-02T00:45:03.000Z | helpers.py | pawangeek/Innovacer-Intern | 1b3e239a63244670b72a2c0c0513d75c0d95cd86 | [
"MIT"
] | null | null | null | from twilio.rest import Client
import smtplib
def sendmail(message, sender, receiver, password):
    """Send *message* from *sender* to *receiver* via Gmail SMTP.

    Uses STARTTLS on port 587 and logs in with *password*.

    Fix: the previous version only called quit() on the success path, so a
    failed login or send leaked the connection. The context manager sends
    QUIT and closes the socket even when an exception is raised.
    """
    with smtplib.SMTP('smtp.gmail.com', 587) as server:
        server.starttls()
        server.login(sender, password)
        server.sendmail(sender, receiver, message)
def sendmsg(message, receiver):
    """Send *message* as an SMS to *receiver* via Twilio.

    NOTE(review): account_sid / auth_token are empty placeholders and must
    be supplied (ideally from environment variables, never hard-coded)
    before this function can work.

    Returns the created Twilio message object so callers can inspect its
    sid/status (previously the result was assigned and discarded).
    """
    account_sid = ''
    auth_token = ''
    client = Client(account_sid, auth_token)
    return client.messages.create(body=message, from_='+12055288894', to=receiver)
| 26.176471 | 84 | 0.703371 | from twilio.rest import Client
import smtplib
def sendmail(message,sender,receiver,password):
s = smtplib.SMTP('smtp.gmail.com', 587)
s.starttls()
s.login(sender, password)
s.sendmail(sender, receiver, message)
s.quit()
def sendmsg(message,receiver):
account_sid = ''
auth_token = ''
client = Client(account_sid, auth_token)
message = client.messages.create(body=message,from_='+12055288894',to=receiver)
| true | true |
1c2ec438c82f5d97cc085888116ae9b52dc90aef | 3,927 | py | Python | videos-master/_2018/eop/chapter1/million_flips.py | samsmusa/My-manim-master | a79266ea21fbb7e84d0133030146549f381c31cb | [
"MIT"
] | 5 | 2021-03-18T02:28:07.000Z | 2021-04-10T03:40:24.000Z | videos-master/_2018/eop/chapter1/million_flips.py | samsmusa/My-manim-master | a79266ea21fbb7e84d0133030146549f381c31cb | [
"MIT"
] | null | null | null | videos-master/_2018/eop/chapter1/million_flips.py | samsmusa/My-manim-master | a79266ea21fbb7e84d0133030146549f381c31cb | [
"MIT"
] | 1 | 2022-02-16T03:22:47.000Z | 2022-02-16T03:22:47.000Z | from manim_imports_ext import *
from _2018.eop.reusable_imports import *
class MillionFlips(Scene):
    """Animate 1,000,000 coin flips via two levels of 100x aggregation.

    Stage 1 reveals 100 individual coin mobjects; stage 2 reveals 100
    squares, each standing for 100 flips; stage 3 reveals 100 squares,
    each standing for 10,000 flips. Running counters track the total
    number of flips and heads throughout.
    """
    def construct(self):
        title = TexText("1{,}000{,}000 flips")
        title.to_edge(UP)
        self.add(title)
        # Pause between successive reveals: 15 items per second.
        small_wait_time = 1.0 / 15
        # Running "# Flips" / "# Heads" counters at the right edge.
        n_flips_label = TexText("\\# Flips: ")
        n_heads_label = TexText("\\# Heads: ")
        n_flips_count = Integer(0)
        n_heads_count = Integer(0)
        n_heads_label.to_edge(RIGHT, buff=2 * LARGE_BUFF)
        n_flips_label.next_to(n_heads_label, DOWN, aligned_edge=LEFT)
        n_flips_count.next_to(n_flips_label[-1], RIGHT)
        n_heads_count.next_to(n_heads_label[-1], RIGHT)
        VGroup(n_flips_count, n_heads_count).shift(0.5 * SMALL_BUFF * UP)
        self.add(n_flips_label, n_heads_label, n_flips_count, n_heads_count)
        # 100 fair individual flips, shown as heads/tails coin mobjects.
        coins = VGroup(*[
            FlatHeads() if random.random() < 0.5 else FlatTails()
            for x in range(100)
        ])
        self.organize_group(coins)
        # Heads-proportions for blocks of 100 flips: the sample proportion
        # of a fair coin has standard deviation 0.5 / sqrt(n) = 0.05.
        proportions = np.random.normal(0.5, 0.5 * 0.1, 100)
        hundred_boxes = VGroup(*[
            Square(
                stroke_width=1,
                stroke_color=WHITE,
                fill_opacity=1,
                # Fill interpolates between the heads and tails colours
                # according to the block's heads-proportion.
                fill_color=interpolate_color(COLOR_HEADS, COLOR_TAILS, prop)
            )
            for prop in proportions
        ])
        self.organize_group(hundred_boxes)
        # Same for blocks of 10,000 flips (std dev 0.5 / 100 = 0.005).
        ten_k_proportions = np.random.normal(0.5, 0.5 * 0.01, 100)
        ten_k_boxes = VGroup(*[
            Square(
                stroke_width=1,
                stroke_color=WHITE,
                fill_opacity=1,
                fill_color=interpolate_color(COLOR_HEADS, COLOR_TAILS, prop)
            )
            for prop in ten_k_proportions
        ])
        self.organize_group(ten_k_boxes)
        # Animations
        # Stage 1: reveal individual coins one at a time, counting heads.
        for coin in coins:
            self.add(coin)
            self.increment(n_flips_count)
            if isinstance(coin, FlatHeads):
                self.increment(n_heads_count)
            self.wait(small_wait_time)
        # Collapse the 100 coins into the first hundred-flip square.
        self.play(
            FadeIn(hundred_boxes[0]),
            coins.set_stroke, {"width": 0},
            coins.replace, hundred_boxes[0]
        )
        hundred_boxes[0].add(coins)
        # Stage 2: reveal the remaining hundred-flip squares.
        for box, prop in list(zip(hundred_boxes, proportions))[1:]:
            self.add(box)
            self.increment(n_flips_count, 100)
            self.increment(n_heads_count, int(np.round(prop * 100)))
            self.wait(small_wait_time)
        # Collapse the hundred-flip squares into the first 10k square.
        self.play(
            FadeIn(ten_k_boxes[0]),
            hundred_boxes.set_stroke, {"width": 0},
            hundred_boxes.replace, ten_k_boxes[0]
        )
        ten_k_boxes[0].add(hundred_boxes)
        # Stage 3: reveal the remaining 10k-flip squares, reaching 1M.
        for box, prop in list(zip(ten_k_boxes, ten_k_proportions))[1:]:
            self.add(box)
            self.increment(n_flips_count, 10000)
            self.increment(n_heads_count, int(np.round(prop * 10000)))
            self.wait(small_wait_time)
        self.wait()

    def organize_group(self, group):
        """Lay a 100-element group out as a 10x10 grid below the title."""
        group.arrange_in_grid(10)
        group.set_height(5)
        group.shift(DOWN + 2 * LEFT)

    def increment(self, integer_mob, value=1):
        """Advance a counter mobject in place by *value*.

        Replaces the mobject's glyphs with those of a freshly built
        Integer so existing references to the counter keep working.
        """
        new_int = Integer(integer_mob.number + value)
        new_int.move_to(integer_mob, DL)
        integer_mob.number += value
        integer_mob.submobjects = new_int.submobjects
class PropHeadsWithinThousandth(Scene):
    """Static slide: P(499,000 <= #heads <= 501,000) ~ 0.9545
    (within about two standard deviations of the mean)."""
    def construct(self):
        prob = Tex(
            "P(499{,}000 \\le", "\\# \\text{H}", "\\le 501{,}000)",
            "\\approx", "0.9545",
        )
        # Highlight the random variable and the probability value.
        prob[1].set_color(RED)
        prob[-1].set_color(YELLOW)
        self.add(prob)
class PropHeadsWithinHundredth(Scene):
    """Static slide: P(490,000 <= #heads <= 510,000) ~ 0.99999999...
    (a +/- 1% band around the mean is essentially certain)."""
    def construct(self):
        prob = Tex(
            "P(490{,}000 \\le", "\\# \\text{H}", "\\le 510{,}000)",
            "\\approx", "0.99999999\\dots",
        )
        # Highlight the random variable and the probability value.
        prob[1].set_color(RED)
        prob[-1].set_color(YELLOW)
        self.add(prob)
| 33 | 76 | 0.569646 | from manim_imports_ext import *
from _2018.eop.reusable_imports import *
class MillionFlips(Scene):
def construct(self):
title = TexText("1{,}000{,}000 flips")
title.to_edge(UP)
self.add(title)
small_wait_time = 1.0 / 15
n_flips_label = TexText("\\# Flips: ")
n_heads_label = TexText("\\# Heads: ")
n_flips_count = Integer(0)
n_heads_count = Integer(0)
n_heads_label.to_edge(RIGHT, buff=2 * LARGE_BUFF)
n_flips_label.next_to(n_heads_label, DOWN, aligned_edge=LEFT)
n_flips_count.next_to(n_flips_label[-1], RIGHT)
n_heads_count.next_to(n_heads_label[-1], RIGHT)
VGroup(n_flips_count, n_heads_count).shift(0.5 * SMALL_BUFF * UP)
self.add(n_flips_label, n_heads_label, n_flips_count, n_heads_count)
coins = VGroup(*[
FlatHeads() if random.random() < 0.5 else FlatTails()
for x in range(100)
])
self.organize_group(coins)
proportions = np.random.normal(0.5, 0.5 * 0.1, 100)
hundred_boxes = VGroup(*[
Square(
stroke_width=1,
stroke_color=WHITE,
fill_opacity=1,
fill_color=interpolate_color(COLOR_HEADS, COLOR_TAILS, prop)
)
for prop in proportions
])
self.organize_group(hundred_boxes)
ten_k_proportions = np.random.normal(0.5, 0.5 * 0.01, 100)
ten_k_boxes = VGroup(*[
Square(
stroke_width=1,
stroke_color=WHITE,
fill_opacity=1,
fill_color=interpolate_color(COLOR_HEADS, COLOR_TAILS, prop)
)
for prop in ten_k_proportions
])
self.organize_group(ten_k_boxes)
for coin in coins:
self.add(coin)
self.increment(n_flips_count)
if isinstance(coin, FlatHeads):
self.increment(n_heads_count)
self.wait(small_wait_time)
self.play(
FadeIn(hundred_boxes[0]),
coins.set_stroke, {"width": 0},
coins.replace, hundred_boxes[0]
)
hundred_boxes[0].add(coins)
for box, prop in list(zip(hundred_boxes, proportions))[1:]:
self.add(box)
self.increment(n_flips_count, 100)
self.increment(n_heads_count, int(np.round(prop * 100)))
self.wait(small_wait_time)
self.play(
FadeIn(ten_k_boxes[0]),
hundred_boxes.set_stroke, {"width": 0},
hundred_boxes.replace, ten_k_boxes[0]
)
ten_k_boxes[0].add(hundred_boxes)
for box, prop in list(zip(ten_k_boxes, ten_k_proportions))[1:]:
self.add(box)
self.increment(n_flips_count, 10000)
self.increment(n_heads_count, int(np.round(prop * 10000)))
self.wait(small_wait_time)
self.wait()
def organize_group(self, group):
group.arrange_in_grid(10)
group.set_height(5)
group.shift(DOWN + 2 * LEFT)
def increment(self, integer_mob, value=1):
new_int = Integer(integer_mob.number + value)
new_int.move_to(integer_mob, DL)
integer_mob.number += value
integer_mob.submobjects = new_int.submobjects
class PropHeadsWithinThousandth(Scene):
def construct(self):
prob = Tex(
"P(499{,}000 \\le", "\\# \\text{H}", "\\le 501{,}000)",
"\\approx", "0.9545",
)
prob[1].set_color(RED)
prob[-1].set_color(YELLOW)
self.add(prob)
class PropHeadsWithinHundredth(Scene):
def construct(self):
prob = Tex(
"P(490{,}000 \\le", "\\# \\text{H}", "\\le 510{,}000)",
"\\approx", "0.99999999\\dots",
)
prob[1].set_color(RED)
prob[-1].set_color(YELLOW)
self.add(prob)
| true | true |
1c2ec67d2fe3ccd4f635ade02be7acc0755fd5d1 | 1,381 | py | Python | node-client/CasperLabs.py | wimel/CasperLabs | 40be04ada8c718eebf765519f25b86d381276bd4 | [
"Apache-2.0"
] | null | null | null | node-client/CasperLabs.py | wimel/CasperLabs | 40be04ada8c718eebf765519f25b86d381276bd4 | [
"Apache-2.0"
] | null | null | null | node-client/CasperLabs.py | wimel/CasperLabs | 40be04ada8c718eebf765519f25b86d381276bd4 | [
"Apache-2.0"
] | null | null | null | '''CasperLabs node client
Usage:
python CasperLabs.py contract1.rho
python CasperLabs.py -c 'new x in { x!(1 + 1) }'
We assume the CasperLabs node is running and that it is listening on port
50000. Double-check that you see this message in the logs:
Server started, listening on 50000
The output should be something like:
Storage Contents:
@{15a23988-03df-4835-9c55-fb9fbf843a47}!(2) |
for( x0, x1 <= @{\"stdoutAck\"} ) { Nil } |
for( x0 <= @{\"stdout\"} ) { Nil } |
for( x0, x1 <= @{\"stderrAck\"} ) { Nil } |
for( x0 <= @{\"stderr\"} ) { Nil }"
'''
from __future__ import print_function
# cribbed from https://grpc.io/docs/tutorials/basic/python.html
import repl_pb2
import repl_pb2_grpc
def main(argv, stdout, insecure_channel,
         host='127.0.0.1',
         port=50000):
    """Evaluate a Rholang file (argv[1]) or an inline command (-c) against
    a running CasperLabs node and print the node's output to *stdout*.

    The gRPC channel factory is injected so the function is testable.
    """
    channel = insecure_channel('%s:%s' % (host, port))
    repl = repl_pb2_grpc.ReplStub(channel)

    if '-c' in argv:
        # Inline command: the source text is the last argument.
        request = repl_pb2.CmdRequest(line=argv[-1])
        result = repl.Run(request).output
    else:
        # Otherwise argv[1] names a file for the node to evaluate.
        request = repl_pb2.EvalRequest(fileName=argv[1])
        result = repl.Eval(request).output

    print(result, file=stdout)
if __name__ == '__main__':
    def _script():
        # Deferred imports keep main() usable with injected dependencies
        # (argv/stdout/channel factory) when imported as a module.
        from sys import argv, stdout
        from grpc import insecure_channel
        main(argv, stdout, insecure_channel)
    _script()
| 25.574074 | 73 | 0.642288 |
from __future__ import print_function
import repl_pb2
import repl_pb2_grpc
def main(argv, stdout, insecure_channel,
host='127.0.0.1',
port=50000):
channel = insecure_channel('%s:%s' % (host, port))
replCh = repl_pb2_grpc.ReplStub(channel)
if '-c' in argv:
line = argv[-1]
req = repl_pb2.CmdRequest(line=line)
output = replCh.Run(req).output
else:
fileName = argv[1]
req = repl_pb2.EvalRequest(fileName=fileName)
output = replCh.Eval(req).output
print(output, file=stdout)
if __name__ == '__main__':
    def _script():
        # Wrapped in a function so these imports stay out of module scope.
        from sys import argv, stdout
        from grpc import insecure_channel
        main(argv, stdout, insecure_channel)
    _script()
| true | true |
1c2ec69d36b4278d011acfe99a84032bb18b7934 | 5,259 | py | Python | maskrcnn_benchmark/data/datasets/coco.py | Iamal1/maskrcnn-benchmark | d53c1986e72c6a647179f5bf0e060db1160a1a42 | [
"MIT"
] | null | null | null | maskrcnn_benchmark/data/datasets/coco.py | Iamal1/maskrcnn-benchmark | d53c1986e72c6a647179f5bf0e060db1160a1a42 | [
"MIT"
] | null | null | null | maskrcnn_benchmark/data/datasets/coco.py | Iamal1/maskrcnn-benchmark | d53c1986e72c6a647179f5bf0e060db1160a1a42 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torchvision
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
from torch.distributions.beta import Beta
from PIL import Image
import logging
logger = logging.getLogger("maskrcnn_benchmark.coco")
import numpy
min_keypoints_per_image = 10
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def has_valid_annotation(anno):
    """Decide whether an image's COCO annotations are usable for training."""
    # An image with no annotations carries no supervision signal.
    if not anno:
        return False
    # Likewise when every box is degenerate (width or height <= 1 px).
    if _has_only_empty_bbox(anno):
        return False
    # Box/mask tasks: any non-degenerate box is enough.
    if "keypoints" not in anno[0]:
        return True
    # Keypoint tasks additionally need enough visible keypoints.
    return _count_visible_keypoints(anno) >= min_keypoints_per_image
class COCODataset(torchvision.datasets.coco.CocoDetection):
    """COCO-style detection dataset.

    Wraps torchvision's CocoDetection and converts each image's raw COCO
    annotations into a BoxList target carrying labels, segmentation masks
    and (for person-keypoint annotations) keypoints.
    """
    def __init__(
        self, ann_file, root, remove_images_without_annotations, transforms=None
    ):
        super(COCODataset, self).__init__(root, ann_file)
        # sort indices for reproducible results
        self.ids = sorted(self.ids)
        # filter images without detection annotations
        if remove_images_without_annotations:
            ids = []
            for img_id in self.ids:
                ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
                anno = self.coco.loadAnns(ann_ids)
                if has_valid_annotation(anno):
                    ids.append(img_id)
            self.ids = ids
        # COCO category ids are sparse; map them onto contiguous ids 1..N
        # (0 is left for background) and keep the inverse mapping.
        self.json_category_id_to_contiguous_id = {
            v: i + 1 for i, v in enumerate(self.coco.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k for k, v in self.json_category_id_to_contiguous_id.items()
        }
        self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
        self.transforms = transforms
    def __getitem__(self, idx):
        """Return (image, BoxList target, index) for dataset position *idx*."""
        # NOTE(review): the commented-out block below is a leftover
        # mixup-style experiment (blending two samples); it is dead code.
        # '''
        # img is tensor now
        # '''
        # img_a, target_a, idx_a = self.get_one_item(idx)
        # img_b, target_b, idx_b = self.get_one_item((idx+1) % len(self.ids))
        # #merge them
        # #merge img
        # m = Beta(torch.tensor([1.5]), torch.tensor([1.5]))
        # cof_a = m.sample()
        # #cof_a = 0.5
        # c,ha,wa = img_a.shape
        # c,hb,wb = img_b.shape
        # h,w = (max(ha,hb),max(wa,wb))
        # img = img_a.new_zeros((c,h,w))
        # img[:,:ha,:wa] = cof_a * img_a
        # img[:,:hb,:wb] = (1-cof_a) * img_b
        # #merge labels and masks
        # boxes = torch.cat([target_a.bbox,target_b.bbox],dim=0)
        # target = BoxList(boxes, (w,h), mode="xyxy")
        # classes = torch.cat([target_a.get_field('labels'),target_b.get_field('labels')],dim=0)
        # target.add_field("labels", classes)
        # masks = target_a.get_field("masks").instances.polygons + target_b.get_field("masks").instances.polygons
        # masks = SegmentationMask(masks, (w,h), mode='poly')
        # target.add_field("masks", masks)
        # # #add marks
        # # marks = [1]*target_a.bbox.size(0) + [0] * target_b.bbox.size(0)
        # # target.add_field("marks", torch.tensor(marks))
        # cofs = [cof_a]*target_a.bbox.size(0) + [1-cof_a] * target_b.bbox.size(0)
        # target.add_field('cofs',torch.tensor(cofs))
        # return img, target, idx
        # def get_one_item(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)
        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]
        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        # COCO boxes are [x, y, w, h]; downstream code expects xyxy.
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)
        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img.size, mode='poly')
        target.add_field("masks", masks)
        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = PersonKeypoints(keypoints, img.size)
            target.add_field("keypoints", keypoints)
        # Clamp boxes to the image and drop any that became empty.
        target = target.clip_to_image(remove_empty=True)
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target, idx
    def get_img_info(self, index):
        """Return the raw COCO image record for dataset position *index*."""
        img_id = self.id_to_img_map[index]
        img_data = self.coco.imgs[img_id]
        return img_data
| 36.520833 | 113 | 0.630728 |
import torch
import torchvision
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
from torch.distributions.beta import Beta
from PIL import Image
import logging
logger = logging.getLogger("maskrcnn_benchmark.coco")
import numpy
min_keypoints_per_image = 10
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def has_valid_annotation(anno):
    """Return True when *anno* provides enough signal to train on."""
    # Need at least one annotation and at least one non-degenerate box.
    usable_boxes = len(anno) > 0 and not _has_only_empty_bbox(anno)
    if not usable_boxes:
        return False
    if "keypoints" in anno[0]:
        # Keypoint images must also carry enough labelled keypoints.
        return _count_visible_keypoints(anno) >= min_keypoints_per_image
    return True
class COCODataset(torchvision.datasets.coco.CocoDetection):
    """COCO detection dataset producing BoxList targets.

    Wraps torchvision's CocoDetection and converts each image's raw COCO
    annotations into a BoxList carrying labels, masks and, when present,
    person keypoints.
    """

    def __init__(
        self, ann_file, root, remove_images_without_annotations, transforms=None
    ):
        super(COCODataset, self).__init__(root, ann_file)
        # Deterministic ordering of image ids across runs.
        self.ids = sorted(self.ids)

        if remove_images_without_annotations:
            kept = []
            for img_id in self.ids:
                ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
                if has_valid_annotation(self.coco.loadAnns(ann_ids)):
                    kept.append(img_id)
            self.ids = kept

        # Sparse COCO category ids -> contiguous ids starting at 1, plus the
        # inverse mapping for converting predictions back.
        self.json_category_id_to_contiguous_id = {
            cat_id: idx + 1 for idx, cat_id in enumerate(self.coco.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            contiguous: json_id
            for json_id, contiguous in self.json_category_id_to_contiguous_id.items()
        }
        self.id_to_img_map = dict(enumerate(self.ids))
        self.transforms = transforms

    def __getitem__(self, idx):
        """Return (image, BoxList target, index) for dataset position *idx*."""
        img, anno = super(COCODataset, self).__getitem__(idx)

        # Crowd regions are not used as training targets.
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        # reshape(-1, 4) guards against images with no boxes at all.
        boxes = torch.as_tensor([obj["bbox"] for obj in anno]).reshape(-1, 4)
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        labels = torch.tensor(
            [self.json_category_id_to_contiguous_id[obj["category_id"]] for obj in anno]
        )
        target.add_field("labels", labels)

        segmentations = [obj["segmentation"] for obj in anno]
        target.add_field("masks", SegmentationMask(segmentations, img.size, mode='poly'))

        if anno and "keypoints" in anno[0]:
            kps = [obj["keypoints"] for obj in anno]
            target.add_field("keypoints", PersonKeypoints(kps, img.size))

        # Clamp boxes to the image and drop any that became empty.
        target = target.clip_to_image(remove_empty=True)

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, idx

    def get_img_info(self, index):
        """Return the raw COCO image record for dataset position *index*."""
        return self.coco.imgs[self.id_to_img_map[index]]
| true | true |
1c2ec7b68935225eff86132b336f0e0e3d582fbb | 518 | py | Python | tests/extension/thread_/stream_ringbuffer_stall/test_thread_stream_ringbuffer_stall.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | 232 | 2015-09-01T16:07:48.000Z | 2022-03-28T14:53:28.000Z | tests/extension/thread_/stream_ringbuffer_stall/test_thread_stream_ringbuffer_stall.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | 34 | 2015-08-21T09:13:03.000Z | 2022-03-21T23:52:44.000Z | tests/extension/thread_/stream_ringbuffer_stall/test_thread_stream_ringbuffer_stall.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | 46 | 2015-09-24T14:39:57.000Z | 2022-02-23T21:59:56.000Z | from __future__ import absolute_import
from __future__ import print_function
import os
import veriloggen
import thread_stream_ringbuffer_stall
def test(request):
    """Run the Veriloggen simulation and check the verifier's verdict."""
    veriloggen.reset()
    sim = request.config.getoption('--sim')
    out_name = os.path.splitext(os.path.basename(__file__))[0] + '.out'
    result = thread_stream_ringbuffer_stall.run(filename=None, simtype=sim,
                                                outputfile=out_name)
    # The simulator prints its verdict on the final output line.
    last_line = result.splitlines()[-1]
    assert last_line == '# verify: PASSED'
| 27.263158 | 114 | 0.69112 | from __future__ import absolute_import
from __future__ import print_function
import os
import veriloggen
import thread_stream_ringbuffer_stall
def test(request):
    """Simulate the design and assert the testbench reported PASSED."""
    veriloggen.reset()
    simtype = request.config.getoption('--sim')
    basename = os.path.basename(__file__)
    outputfile = os.path.splitext(basename)[0] + '.out'
    rslt = thread_stream_ringbuffer_stall.run(
        filename=None, simtype=simtype, outputfile=outputfile)
    # The last output line carries the verification verdict.
    assert rslt.splitlines()[-1] == '# verify: PASSED'
| true | true |
1c2eca595410e26023ed5a93ba28e9991119cccd | 12,483 | py | Python | 2021/Problem 24.py | christopherliu/advent-of-code | d18b54c538e7af608ba2efd92bf469b28ad5fe98 | [
"MIT"
] | null | null | null | 2021/Problem 24.py | christopherliu/advent-of-code | d18b54c538e7af608ba2efd92bf469b28ad5fe98 | [
"MIT"
] | null | null | null | 2021/Problem 24.py | christopherliu/advent-of-code | d18b54c538e7af608ba2efd92bf469b28ad5fe98 | [
"MIT"
] | null | null | null | import itertools
from dataclasses import dataclass
@dataclass
class Instruction:
    """One parsed ALU instruction: an opcode and up to two operands."""
    name: "str" = None   # opcode: inp / add / mul / div / mod / eql
    argA: "str" = None   # destination register: w, x, y or z
    argB: "str" = None   # source register name or integer literal (None for inp)
class MONAD():
    """Interpreter and symbolic evaluator for AoC 2021 day 24 ALU programs.

    Instructions are objects with ``name`` (inp/add/mul/div/mod/eql),
    ``argA`` (destination register) and ``argB`` (source register name or
    integer literal; unused for ``inp``).
    """

    def __init__(self, instructions):
        self.instructions = instructions

    def reset(self):
        """Zero the four ALU registers."""
        self.variables = {
            "w": 0,
            "x": 0,
            "y": 0,
            "z": 0,
        }

    def get(self, value):
        """Resolve an operand: a register name yields its current value,
        anything else is parsed as an integer literal."""
        if value in self.variables:
            return self.variables[value]
        else:
            return int(value)

    def run(self, input):
        """Execute the program; each ``inp`` consumes the next item of
        *input*. Call reset() first (is_valid() does this for you)."""
        cursor = 0
        for instruction in self.instructions:
            if instruction.name == "inp":
                self.variables[instruction.argA] = input[cursor]
                cursor += 1
            elif instruction.name == "add":
                self.variables[instruction.argA] += self.get(instruction.argB)
            elif instruction.name == "mul":
                self.variables[instruction.argA] *= self.get(instruction.argB)
            elif instruction.name == "div":
                self.variables[instruction.argA] //= self.get(instruction.argB)
            elif instruction.name == "mod":
                self.variables[instruction.argA] %= self.get(instruction.argB)
            elif instruction.name == "eql":
                self.variables[instruction.argA] = 1 if self.variables[instruction.argA] == self.get(instruction.argB) else 0

    def get_abstraction(self, value):
        """Operand lookup against the symbolic environment built by
        compile_to_algebra()."""
        if value in self.abstract_variables:
            return self.abstract_variables[value]
        else:
            return int(value)

    def compile_to_algebra(self):
        """Symbolically execute the program, constant-folding where possible.

        Each register ends up holding either an int (fully folded) or an
        infix-expression string over input placeholders X0, X1, ...
        Returns the mapping of register name to symbolic value.
        """
        # Baby's first optimizing compiler
        self.abstract_variables = {
            "w": 0,
            "x": 0,
            "y": 0,
            "z": 0,
        }
        cursor = 0
        for instruction in self.instructions:
            if instruction.name == "inp":
                # Each input digit becomes a fresh placeholder X<n>.
                self.abstract_variables[instruction.argA] = "X%s" % cursor
                cursor += 1
            elif instruction.name == "add":
                # x + 0 == x; 0 + y == y; fold int + int; otherwise emit text.
                if self.get_abstraction(instruction.argB) == 0:
                    continue
                elif self.abstract_variables[instruction.argA] == 0:
                    self.abstract_variables[instruction.argA] = self.get_abstraction(instruction.argB)
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] += self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s+%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "mul":
                # x * 0 == 0; x * 1 == x; fold int * int; otherwise emit text.
                if self.abstract_variables[instruction.argA] == 0 or self.get_abstraction(instruction.argB) == 0:
                    self.abstract_variables[instruction.argA] = 0
                    continue
                elif self.get_abstraction(instruction.argB) == 1:
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] *= self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s*%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "div":
                # 0 / y == 0; x / 1 == x; fold int / int; otherwise emit text.
                if self.abstract_variables[instruction.argA] == 0 or isinstance(self.get_abstraction(instruction.argB), int) and self.get_abstraction(instruction.argB) == 1:
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] //= self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s/%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "mod":
                # 0 % y == 0; fold int % int; otherwise emit text.
                if self.abstract_variables[instruction.argA] == 0:
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] %= self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s%%%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "eql":
                # Identical operands fold to 1; a bare input placeholder
                # (presumably a model-number digit 1..9) compared against a
                # constant > 10 folds to 0.
                if self.abstract_variables[instruction.argA] == self.get_abstraction(instruction.argB):
                    self.abstract_variables[instruction.argA] = 1
                elif isinstance(self.abstract_variables[instruction.argA], str) and self.abstract_variables[instruction.argA].startswith("X") and isinstance(self.get_abstraction(instruction.argB), int) and self.get_abstraction(instruction.argB) > 10:
                    self.abstract_variables[instruction.argA] = 0
                elif isinstance(self.get_abstraction(instruction.argB), str) and self.get_abstraction(instruction.argB).startswith("X") and isinstance(self.abstract_variables[instruction.argA], int) and self.abstract_variables[instruction.argA] > 10:
                    self.abstract_variables[instruction.argA] = 0
                else:
                    self.abstract_variables[instruction.argA] = "(%s==%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
        return self.abstract_variables

    def compile_to_ast(self):
        """Unfinished AST compiler.

        The original body was a copy of compile_to_algebra() referencing an
        ``InputVariable`` class that is not defined anywhere, so any call
        crashed with NameError on the first ``inp`` instruction. Fail fast
        with an explicit error until the AST node types exist.
        """
        raise NotImplementedError(
            "compile_to_ast is unfinished: the InputVariable AST node type was never defined")

    def is_valid(self, model_number):
        """True when running the program on *model_number* leaves z == 0."""
        self.reset()
        self.run(model_number)
        return self.variables["z"] == 0

    @classmethod
    def from_file(cls, filename):
        """Parse a program file: one whitespace-separated instruction per line."""
        with open(filename, "r") as program:
            return cls([Instruction(*line.strip().split(" ")) for line in program.readlines()])
# Load the puzzle's ALU program; the loop below searches for valid model numbers.
my_monad = MONAD.from_file("Day 24 input.txt")
# Method 1: Too slow
# valid_model_numbers = [range(9,0,-1) for _ in range(0, 14)]
# cursor = 0
# for model_number in itertools.product(*valid_model_numbers):
#     cursor += 1
#     if cursor % 1000000 == 0:
#         print("Progress: Testing %s" % "".join([str(d) for d in model_number]))
#     if my_monad.is_valid(model_number):
#         print("Found a valid model number: %s" % model_number)
#         break
# Method 2: Try it myself (generates too long of a string, but gives us some idea of what it does)
# print(my_monad.compile_to_algebra()["z"])
# Biggest: 99919765949498
# NOTE(review): range(1,9) yields digits 1..8 only; digit 9 is never tried.
# This looks like an off-by-one (range(1,10)?) — confirm against the puzzle.
valid_model_numbers = [range(1,9) for _ in range(0, 14)]
# cursor = 0
# for model_number in itertools.product(*valid_model_numbers):
#     # Apply constraints retrieved from analysis
#     if model_number[1] != 4: continue
#     if model_number[2] != 8 + model_number[3]: continue
#     if model_number[4] != 2 + model_number[5]: continue
#     if model_number[8] < 6: continue
#     if model_number[9] + 5 != model_number[10]: continue
#     cursor += 1
#     if cursor % 1000000 == 0:
#         print("Progress: Testing %s" % "".join([str(d) for d in model_number]))
#     if my_monad.is_valid(model_number):
#         print("Found a valid model number: %s" % "".join([str(d) for d in model_number]))
#         break
# The first four digits were fixed by manual analysis of the program.
first_numbers = [2,4,9,1]
valid_ex_numbers = [range(1,9) for _ in range(0, 10)]
cursor = 0
for model_number_ex in itertools.product(*valid_ex_numbers):
# Apply constraints retrieved from analysis
model_number = first_numbers + list(model_number_ex)
if model_number[4] != 2 + model_number[5]: continue
if model_number[9] + 5 != model_number[10]: continue
cursor += 1
if cursor % 1000000 == 0:
print("Progress: Testing %s" % "".join([str(d) for d in model_number]))
if my_monad.is_valid(model_number):
print("Found a valid model number: %s" % "".join([str(d) for d in model_number]))
break | 54.991189 | 250 | 0.621325 | import itertools
from dataclasses import dataclass
@dataclass
class Instruction:
    """A single ALU instruction parsed from the puzzle input."""
    name: "str" = None   # opcode (inp, add, mul, div, mod, eql)
    argA: "str" = None   # destination register (w/x/y/z)
    argB: "str" = None   # source register or literal; None for inp
class MONAD():
    """ALU interpreter and symbolic evaluator for the day-24 MONAD program."""

    def __init__(self, instructions):
        # instructions: sequence of objects with .name/.argA/.argB fields.
        self.instructions = instructions

    def reset(self):
        """Zero the four ALU registers."""
        self.variables = {
            "w": 0,
            "x": 0,
            "y": 0,
            "z": 0,
        }

    def get(self, value):
        """Operand lookup: a register name yields its value, else int literal."""
        if value in self.variables:
            return self.variables[value]
        else:
            return int(value)

    def run(self, input):
        """Execute the program; each ``inp`` consumes the next item of *input*."""
        cursor = 0
        for instruction in self.instructions:
            if instruction.name == "inp":
                self.variables[instruction.argA] = input[cursor]
                cursor += 1
            elif instruction.name == "add":
                self.variables[instruction.argA] += self.get(instruction.argB)
            elif instruction.name == "mul":
                self.variables[instruction.argA] *= self.get(instruction.argB)
            elif instruction.name == "div":
                self.variables[instruction.argA] //= self.get(instruction.argB)
            elif instruction.name == "mod":
                self.variables[instruction.argA] %= self.get(instruction.argB)
            elif instruction.name == "eql":
                self.variables[instruction.argA] = 1 if self.variables[instruction.argA] == self.get(instruction.argB) else 0

    def get_abstraction(self, value):
        """Operand lookup against the symbolic environment."""
        if value in self.abstract_variables:
            return self.abstract_variables[value]
        else:
            return int(value)

    def compile_to_algebra(self):
        """Symbolic execution with constant folding.

        Registers end up as ints (fully folded) or infix-expression strings
        over input placeholders X0, X1, ...; returns that mapping.
        """
        self.abstract_variables = {
            "w": 0,
            "x": 0,
            "y": 0,
            "z": 0,
        }
        cursor = 0
        for instruction in self.instructions:
            if instruction.name == "inp":
                # Each input digit becomes a fresh placeholder X<n>.
                self.abstract_variables[instruction.argA] = "X%s" % cursor
                cursor += 1
            elif instruction.name == "add":
                # x + 0 == x; 0 + y == y; fold int + int; else emit text.
                if self.get_abstraction(instruction.argB) == 0:
                    continue
                elif self.abstract_variables[instruction.argA] == 0:
                    self.abstract_variables[instruction.argA] = self.get_abstraction(instruction.argB)
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] += self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s+%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "mul":
                # x * 0 == 0; x * 1 == x; fold int * int; else emit text.
                if self.abstract_variables[instruction.argA] == 0 or self.get_abstraction(instruction.argB) == 0:
                    self.abstract_variables[instruction.argA] = 0
                    continue
                elif self.get_abstraction(instruction.argB) == 1:
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] *= self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s*%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "div":
                # 0 / y == 0; x / 1 == x; fold int / int; else emit text.
                if self.abstract_variables[instruction.argA] == 0 or isinstance(self.get_abstraction(instruction.argB), int) and self.get_abstraction(instruction.argB) == 1:
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] //= self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s/%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "mod":
                # 0 % y == 0; fold int % int; else emit text.
                if self.abstract_variables[instruction.argA] == 0:
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] %= self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s%%%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "eql":
                # Identical operands fold to 1; a bare placeholder compared
                # against a constant > 10 folds to 0 (inputs are digits).
                if self.abstract_variables[instruction.argA] == self.get_abstraction(instruction.argB):
                    self.abstract_variables[instruction.argA] = 1
                elif isinstance(self.abstract_variables[instruction.argA], str) and self.abstract_variables[instruction.argA].startswith("X") and isinstance(self.get_abstraction(instruction.argB), int) and self.get_abstraction(instruction.argB) > 10:
                    self.abstract_variables[instruction.argA] = 0
                elif isinstance(self.get_abstraction(instruction.argB), str) and self.get_abstraction(instruction.argB).startswith("X") and isinstance(self.abstract_variables[instruction.argA], int) and self.abstract_variables[instruction.argA] > 10:
                    self.abstract_variables[instruction.argA] = 0
                else:
                    self.abstract_variables[instruction.argA] = "(%s==%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
        return self.abstract_variables

    def compile_to_ast(self):
        # NOTE(review): unfinished copy of compile_to_algebra(). The
        # `InputVariable` name below is not defined anywhere in this file,
        # so calling this method raises NameError on the first `inp`.
        self.abstract_variables = {
            "w": 0,
            "x": 0,
            "y": 0,
            "z": 0,
        }
        cursor = 0
        for instruction in self.instructions:
            if instruction.name == "inp":
                self.abstract_variables[instruction.argA] = InputVariable(cursor)
                cursor += 1
            elif instruction.name == "add":
                if self.get_abstraction(instruction.argB) == 0:
                    continue
                elif self.abstract_variables[instruction.argA] == 0:
                    self.abstract_variables[instruction.argA] = self.get_abstraction(instruction.argB)
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] += self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s+%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "mul":
                if self.abstract_variables[instruction.argA] == 0 or self.get_abstraction(instruction.argB) == 0:
                    self.abstract_variables[instruction.argA] = 0
                    continue
                elif self.get_abstraction(instruction.argB) == 1:
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] *= self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s*%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "div":
                if self.abstract_variables[instruction.argA] == 0 or isinstance(self.get_abstraction(instruction.argB), int) and self.get_abstraction(instruction.argB) == 1:
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] //= self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s/%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "mod":
                if self.abstract_variables[instruction.argA] == 0:
                    continue
                elif isinstance(self.abstract_variables[instruction.argA], int) and isinstance(self.get_abstraction(instruction.argB), int):
                    self.abstract_variables[instruction.argA] %= self.get_abstraction(instruction.argB)
                else:
                    self.abstract_variables[instruction.argA] = "(%s%%%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
            elif instruction.name == "eql":
                if self.abstract_variables[instruction.argA] == self.get_abstraction(instruction.argB):
                    self.abstract_variables[instruction.argA] = 1
                elif isinstance(self.abstract_variables[instruction.argA], str) and self.abstract_variables[instruction.argA].startswith("X") and isinstance(self.get_abstraction(instruction.argB), int) and self.get_abstraction(instruction.argB) > 10:
                    self.abstract_variables[instruction.argA] = 0
                elif isinstance(self.get_abstraction(instruction.argB), str) and self.get_abstraction(instruction.argB).startswith("X") and isinstance(self.abstract_variables[instruction.argA], int) and self.abstract_variables[instruction.argA] > 10:
                    self.abstract_variables[instruction.argA] = 0
                else:
                    self.abstract_variables[instruction.argA] = "(%s==%s)" % (self.abstract_variables[instruction.argA], self.get_abstraction(instruction.argB))
        return self.abstract_variables

    def is_valid(self, model_number):
        """True when the program leaves register z == 0 for *model_number*."""
        self.reset()
        self.run(model_number)
        return self.variables["z"] == 0

    @classmethod
    def from_file(cls, filename):
        """Parse one whitespace-separated instruction per line of *filename*."""
        # NOTE(review): the file handle is never closed; consider `with open(...)`.
        return MONAD([Instruction(*line.strip().split(" ")) for line in open(filename, "r").readlines()])
# Load the compiled ALU program; the search loop below tests candidates.
my_monad = MONAD.from_file("Day 24 input.txt")
# Method 1: Too slow
# valid_model_numbers = [range(9,0,-1) for _ in range(0, 14)]
# cursor = 0
# for model_number in itertools.product(*valid_model_numbers):
#     cursor += 1
#     if cursor % 1000000 == 0:
#         print("Progress: Testing %s" % "".join([str(d) for d in model_number]))
#     if my_monad.is_valid(model_number):
#         print("Found a valid model number: %s" % model_number)
#         break
# Method 2: Try it myself (generates too long of a string, but gives us some idea of what it does)
# print(my_monad.compile_to_algebra()["z"])
# Biggest: 99919765949498
# NOTE(review): range(1,9) covers digits 1..8 only — digit 9 is never tried;
# probably meant range(1,10).
valid_model_numbers = [range(1,9) for _ in range(0, 14)]
# cursor = 0
# for model_number in itertools.product(*valid_model_numbers):
#     # Apply constraints retrieved from analysis
#     if model_number[1] != 4: continue
#     if model_number[2] != 8 + model_number[3]: continue
#     if model_number[4] != 2 + model_number[5]: continue
#     if model_number[8] < 6: continue
#     if model_number[9] + 5 != model_number[10]: continue
#     cursor += 1
#     if cursor % 1000000 == 0:
#         print("Progress: Testing %s" % "".join([str(d) for d in model_number]))
#     if my_monad.is_valid(model_number):
#         print("Found a valid model number: %s" % "".join([str(d) for d in model_number]))
#         break
# The first four digits were fixed by manual analysis of the program.
first_numbers = [2,4,9,1]
valid_ex_numbers = [range(1,9) for _ in range(0, 10)]
cursor = 0
for model_number_ex in itertools.product(*valid_ex_numbers):
# Apply constraints retrieved from analysis
model_number = first_numbers + list(model_number_ex)
if model_number[4] != 2 + model_number[5]: continue
if model_number[9] + 5 != model_number[10]: continue
cursor += 1
if cursor % 1000000 == 0:
print("Progress: Testing %s" % "".join([str(d) for d in model_number]))
if my_monad.is_valid(model_number):
print("Found a valid model number: %s" % "".join([str(d) for d in model_number]))
break | true | true |
1c2eca9be52290e0ec2122ab12558d3900b0af13 | 145 | py | Python | functions/cmd_print.py | morozoffnor/govnoed_grisha_rewritten | 6a34336cede03a081954479f998d5a8162e1a31d | [
"Apache-2.0"
] | null | null | null | functions/cmd_print.py | morozoffnor/govnoed_grisha_rewritten | 6a34336cede03a081954479f998d5a8162e1a31d | [
"Apache-2.0"
] | null | null | null | functions/cmd_print.py | morozoffnor/govnoed_grisha_rewritten | 6a34336cede03a081954479f998d5a8162e1a31d | [
"Apache-2.0"
] | null | null | null | async def cmd_print(type, msg):
if type == 'debug':
print('[DEBUG] ' + msg)
elif type == 'error':
print('[ERROR] ' + msg) | 29 | 31 | 0.510345 | async def cmd_print(type, msg):
if type == 'debug':
print('[DEBUG] ' + msg)
elif type == 'error':
print('[ERROR] ' + msg) | true | true |
1c2ecbe1f6b05a7858b30f9647bf59ee17958586 | 6,952 | py | Python | robonomics_liability/src/robonomics_liability/LiabilityExecutionsPersistence.py | Vourhey/robonomics_comm | 1b7c6dc85985909cb925d82b1081ec556423029e | [
"BSD-3-Clause"
] | 16 | 2017-11-15T15:20:34.000Z | 2021-08-05T03:08:13.000Z | robonomics_liability/src/robonomics_liability/LiabilityExecutionsPersistence.py | aang1985/robonomics_comm | 4f7a339e01cbd00fc0f51405c77d89d6ae5e0d7d | [
"BSD-3-Clause"
] | 80 | 2018-02-08T22:44:41.000Z | 2021-07-15T10:12:09.000Z | robonomics_liability/src/robonomics_liability/LiabilityExecutionsPersistence.py | aang1985/robonomics_comm | 4f7a339e01cbd00fc0f51405c77d89d6ae5e0d7d | [
"BSD-3-Clause"
] | 13 | 2018-02-08T14:22:26.000Z | 2021-11-20T00:29:14.000Z | import rospy
import shelve
import time
from threading import Lock, Timer
from robonomics_liability.msg import Liability, LiabilityExecutionTimestamp
from robonomics_liability.srv import PersistenceContainsLiability, PersistenceContainsLiabilityResponse, PersistenceLiabilityTimestamp, PersistenceLiabilityTimestampResponse
from persistent_queue import PersistentQueue
class LiabilityExecutionsPersistence:
    """ROS node that keeps liability executions and their progress timestamps
    on disk so that in-flight executions survive a node restart.

    Three durable stores are used:
      * a shelf of Liability messages currently being executed,
      * a shelf mapping liability address -> last execution timestamp,
      * a persistent queue of pending timestamp updates, drained by spin().
    """

    def __init__(self):
        """
        Robonomics liability persistence node initialisation.
        """
        rospy.init_node('robonomics_liability_persistence')
        # Period (seconds) between drains of the timestamp update queue in spin().
        self.persistence_update_interval = rospy.get_param('~persistence_update_interval', 0.1)

        # One lock per shelf.  When both are needed, the executions lock is
        # taken before the timestamps lock (see __add_liability); keep that
        # ordering everywhere to avoid deadlock.
        self.__liability_executions_lock = Lock()
        self.__liability_timestamps_lock = Lock()

        self.__liability_executions = shelve.open("robonomics_liability_executions.persistence")
        self.__liability_executions_timestamps = shelve.open("robonomics_liability_executions_timestamps.persistence")
        self.__liability_executions_timestamps_queue = PersistentQueue('robonomics_liability_executions_timestamps.queue')

        rospy.Subscriber('persistence/add', Liability, self.__add_liability)
        rospy.Subscriber('persistence/del', Liability, self.__del_liability)
        rospy.Subscriber("persistence/update_timestamp", LiabilityExecutionTimestamp,
                         self.__update_liability_execution_timestamp_handler)

        rospy.Service("persistence/exists", PersistenceContainsLiability, self.__liability_exists)
        rospy.Service("persistence/get_liability_timestamp", PersistenceLiabilityTimestamp, self.__get_liability_timestamp)

        self.__incoming_liability_topic = rospy.Publisher('incoming', Liability, queue_size=10)
        self.__restore_executions()

    def __update_liability_execution_timestamp_handler(self, msg):
        # Only enqueue here; the durable queue is drained asynchronously by spin().
        self.__liability_executions_timestamps_queue.push(msg)

    def __update_liability_execution_timestamp(self, msg):
        """Write one queued timestamp update into the timestamps shelf."""
        rospy.logdebug("update liability %s execution timestamp", msg.address.address)
        if msg.address.address not in self.__liability_executions_timestamps:
            rospy.logwarn("liability %s already unregistered from timestamps persistence",
                          msg.address.address)
            return
        # "with" guarantees the lock is released even if the shelf raises;
        # the original manual acquire()/release() pairs offered the same
        # guarantee only via try/finally boilerplate.
        with self.__liability_timestamps_lock:
            self.__liability_executions_timestamps[msg.address.address] = msg.timestamp
            self.__liability_executions_timestamps.sync()
        rospy.logdebug("Persistence liability %s timestamp %s",
                       msg.address.address,
                       self.__liability_executions_timestamps[msg.address.address])

    def __liability_exists(self, msg):
        """Service handler: is the liability currently persisted?"""
        return PersistenceContainsLiabilityResponse(msg.address.address in self.__liability_executions)

    def __register_liability_in_timestamp_persistence(self, msg):
        """Ensure the liability has a timestamp entry (zero if new)."""
        with self.__liability_timestamps_lock:
            if msg.address.address not in self.__liability_executions_timestamps:
                self.__liability_executions_timestamps[msg.address.address] = rospy.Time.from_sec(0)
            rospy.loginfo("Timestamps persistence contains %s value for liability %s",
                          self.__liability_executions_timestamps[msg.address.address],
                          msg.address.address)
            self.__liability_executions_timestamps.sync()

    def __add_liability(self, msg):
        """Topic handler: persist a newly started liability execution."""
        with self.__liability_executions_lock:
            self.__liability_executions[msg.address.address] = msg
            # Takes the timestamps lock while the executions lock is held
            # (executions -> timestamps ordering, see __init__).
            self.__register_liability_in_timestamp_persistence(msg)
            self.__liability_executions.sync()
            rospy.loginfo("Liability %s added to liabilities executions persistence store", msg.address.address)

    def __del_liability(self, msg):
        """Topic handler: drop a finished liability from both shelves."""
        with self.__liability_executions_lock:
            try:
                del self.__liability_executions[msg.address.address]
                self.__liability_executions.sync()
                rospy.loginfo("Liability %s deleted from liabilities executions persistence store", msg.address.address)
            except KeyError:
                rospy.logwarn("Liability %s not found in liabilities executions persistence store", msg.address.address)
        with self.__liability_timestamps_lock:
            try:
                del self.__liability_executions_timestamps[msg.address.address]
                self.__liability_executions_timestamps.sync()
                rospy.loginfo("Liability %s deleted from liabilities timestamps persistence store", msg.address.address)
            except KeyError:
                rospy.logwarn("Liability %s not found in liabilities timestamps persistence store", msg.address.address)

    def __restore_executions(self):
        """Re-publish every persisted liability after a node restart."""
        with self.__liability_executions_lock:
            executions = list(self.__liability_executions.values())
        # Grace period so subscribers of 'incoming' can (re)connect first.
        time.sleep(5)
        for liability in executions:
            self.__incoming_liability_topic.publish(liability)
            rospy.logwarn("Liability %s received from liabilities executions persistence store",
                          liability.address.address)

    def __get_liability_timestamp(self, msg):
        """Service handler: return the last known execution timestamp.

        Falls back to a zero timestamp when the liability is unknown.
        """
        timestamp = rospy.Time.from_sec(0)
        liability_address = msg.address.address
        # Busy-wait until spin() has drained all pending updates so the
        # value returned is current.
        while self.__liability_executions_timestamps_queue.peek() is not None:
            time.sleep(0.1)
        with self.__liability_timestamps_lock:
            try:
                timestamp = self.__liability_executions_timestamps[liability_address]
            except KeyError:
                rospy.logwarn("Unable to get known execution timestamp for liability %s", liability_address)
        return PersistenceLiabilityTimestampResponse(timestamp)

    def spin(self):
        """Run the node: periodically drain one queued timestamp update."""
        def update_liability_timestamp_queue_handler():
            entry = self.__liability_executions_timestamps_queue.peek()
            if entry is not None:
                self.__update_liability_execution_timestamp(entry)
                # Pop only after a successful write so updates are not lost.
                self.__liability_executions_timestamps_queue.pop()
            # Re-arm: each run schedules the next one.
            Timer(self.persistence_update_interval, update_liability_timestamp_queue_handler).start()
        update_liability_timestamp_queue_handler()
        rospy.spin()
| 49.657143 | 173 | 0.710875 | import rospy
import shelve
import time
from threading import Lock, Timer
from robonomics_liability.msg import Liability, LiabilityExecutionTimestamp
from robonomics_liability.srv import PersistenceContainsLiability, PersistenceContainsLiabilityResponse, PersistenceLiabilityTimestamp, PersistenceLiabilityTimestampResponse
from persistent_queue import PersistentQueue
class LiabilityExecutionsPersistence:
def __init__(self):
rospy.init_node('robonomics_liability_persistence')
self.persistence_update_interval = rospy.get_param('~persistence_update_interval', 0.1)
self.__liability_executions_lock = Lock()
self.__liability_timestamps_lock = Lock()
self.__liability_executions = shelve.open("robonomics_liability_executions.persistence")
self.__liability_executions_timestamps = shelve.open("robonomics_liability_executions_timestamps.persistence")
self.__liability_executions_timestamps_queue = PersistentQueue('robonomics_liability_executions_timestamps.queue')
rospy.Subscriber('persistence/add', Liability, self.__add_liability)
rospy.Subscriber('persistence/del', Liability, self.__del_liability)
rospy.Subscriber("persistence/update_timestamp", LiabilityExecutionTimestamp,
self.__update_liability_execution_timestamp_handler)
rospy.Service("persistence/exists", PersistenceContainsLiability, self.__liability_exists)
rospy.Service("persistence/get_liability_timestamp", PersistenceLiabilityTimestamp, self.__get_liability_timestamp)
self.__incoming_liability_topic = rospy.Publisher('incoming', Liability, queue_size=10)
self.__restore_executions()
def __update_liability_execution_timestamp_handler(self, msg):
self.__liability_executions_timestamps_queue.push(msg)
def __update_liability_execution_timestamp(self, msg):
rospy.logdebug("update liability %s execution timestamp", msg.address.address)
if msg.address.address not in self.__liability_executions_timestamps:
rospy.logwarn("liability %s already unregistered from timestamps persistence",
msg.address.address)
return
try:
self.__liability_timestamps_lock.acquire()
self.__liability_executions_timestamps[msg.address.address] = msg.timestamp
self.__liability_executions_timestamps.sync()
finally:
self.__liability_timestamps_lock.release()
rospy.logdebug("Persistence liability %s timestamp %s",
msg.address.address,
self.__liability_executions_timestamps[msg.address.address])
def __liability_exists(self, msg):
return PersistenceContainsLiabilityResponse(msg.address.address in self.__liability_executions)
def __register_liability_in_timestamp_persistence(self, msg):
try:
self.__liability_timestamps_lock.acquire()
if msg.address.address not in self.__liability_executions_timestamps:
self.__liability_executions_timestamps[msg.address.address] = rospy.Time.from_sec(0)
rospy.loginfo("Timestamps persistence contains %s value for liability %s",
self.__liability_executions_timestamps[msg.address.address],
msg.address.address)
self.__liability_executions_timestamps.sync()
finally:
self.__liability_timestamps_lock.release()
def __add_liability(self, msg):
try:
self.__liability_executions_lock.acquire()
self.__liability_executions[msg.address.address] = msg
self.__register_liability_in_timestamp_persistence(msg)
self.__liability_executions.sync()
rospy.loginfo("Liability %s added to liabilities executions persistence store", msg.address.address)
finally:
self.__liability_executions_lock.release()
def __del_liability(self, msg):
try:
self.__liability_executions_lock.acquire()
del self.__liability_executions[msg.address.address]
self.__liability_executions.sync()
rospy.loginfo("Liability %s deleted from liabilities executions persistence store", msg.address.address)
except KeyError:
rospy.logwarn("Liability %s not found in liabilities executions persistence store", msg.address.address)
finally:
self.__liability_executions_lock.release()
try:
self.__liability_timestamps_lock.acquire()
del self.__liability_executions_timestamps[msg.address.address]
self.__liability_executions_timestamps.sync()
rospy.loginfo("Liability %s deleted from liabilities timestamps persistence store", msg.address.address)
except KeyError:
rospy.logwarn("Liability %s not found in liabilities timestamps persistence store", msg.address.address)
finally:
self.__liability_timestamps_lock.release()
def __restore_executions(self):
try:
self.__liability_executions_lock.acquire()
executions = list(self.__liability_executions.values())
finally:
self.__liability_executions_lock.release()
time.sleep(5)
for liability in executions:
self.__incoming_liability_topic.publish(liability)
rospy.logwarn("Liability %s received from liabilities executions persistence store",
liability.address.address)
def __get_liability_timestamp(self, msg):
timestamp = rospy.Time.from_sec(0)
liability_address = msg.address.address
queue_entry = self.__liability_executions_timestamps_queue.peek()
while queue_entry is not None:
time.sleep(0.1)
queue_entry = self.__liability_executions_timestamps_queue.peek()
try:
self.__liability_timestamps_lock.acquire()
timestamp = self.__liability_executions_timestamps[liability_address]
except KeyError as e:
rospy.logwarn("Unable to get known execution timestamp for liability %s", liability_address)
finally:
self.__liability_timestamps_lock.release()
return PersistenceLiabilityTimestampResponse(timestamp)
def spin(self):
def update_liability_timestamp_queue_handler():
entry = self.__liability_executions_timestamps_queue.peek()
if entry is not None:
self.__update_liability_execution_timestamp(entry)
self.__liability_executions_timestamps_queue.pop()
Timer(self.persistence_update_interval, update_liability_timestamp_queue_handler).start()
update_liability_timestamp_queue_handler()
rospy.spin()
| true | true |
1c2ecd001c90904790561e0bd820cdd3c213c213 | 3,282 | py | Python | frameworks/Python/fastapi/app.py | tommilligan/FrameworkBenchmarks | a5bad622429f14f13d872589d7054aefaa75002d | [
"BSD-3-Clause"
] | 5,300 | 2015-01-02T08:04:20.000Z | 2022-03-31T10:08:33.000Z | frameworks/Python/fastapi/app.py | tommilligan/FrameworkBenchmarks | a5bad622429f14f13d872589d7054aefaa75002d | [
"BSD-3-Clause"
] | 3,075 | 2015-01-01T05:11:45.000Z | 2022-03-31T23:56:33.000Z | frameworks/Python/fastapi/app.py | tommilligan/FrameworkBenchmarks | a5bad622429f14f13d872589d7054aefaa75002d | [
"BSD-3-Clause"
] | 2,151 | 2015-01-02T14:16:09.000Z | 2022-03-30T00:15:26.000Z | import asyncio
import asyncpg
import os
import jinja2
from fastapi import FastAPI
from starlette.responses import HTMLResponse, UJSONResponse, PlainTextResponse
from random import randint
from operator import itemgetter
from urllib.parse import parse_qs
READ_ROW_SQL = 'SELECT "randomnumber", "id" FROM "world" WHERE id = $1'
WRITE_ROW_SQL = 'UPDATE "world" SET "randomnumber"=$1 WHERE id=$2'
ADDITIONAL_ROW = [0, 'Additional fortune added at request time.']
async def setup_database():
    """Create the shared asyncpg pool used by all request handlers.

    Stores the pool in the module-level ``connection_pool`` global.
    Credentials fall back to the TechEmpower benchmark defaults.
    """
    global connection_pool
    connection_pool = await asyncpg.create_pool(
        user=os.getenv('PGUSER', 'benchmarkdbuser'),
        password=os.getenv('PGPASS', 'benchmarkdbpass'),
        database='hello_world',
        host='tfb-database',
        port=5432
    )
def load_fortunes_template():
    """Read templates/fortune.html and compile it into a Jinja2 template.

    The file is decoded explicitly as UTF-8 so the result does not depend
    on the platform's default locale encoding.
    """
    path = os.path.join('templates', 'fortune.html')
    with open(path, 'r', encoding='utf-8') as template_file:
        template_text = template_file.read()
    return jinja2.Template(template_text)
def get_num_queries(queries):
    """Parse the ``queries`` request parameter and clamp it to [1, 500].

    Anything that is not parseable as an int (including None) maps to 1.
    """
    try:
        count = int(queries)
    except (ValueError, TypeError):
        return 1
    # Clamp instead of branching: <1 -> 1, >500 -> 500, otherwise unchanged.
    return min(max(count, 1), 500)
# Module-level setup, executed once at import time.
connection_pool = None  # populated by setup_database() below
# Fortune rows are (id, message); sort by the message text.
sort_fortunes_key = itemgetter(1)
template = load_fortunes_template()
loop = asyncio.get_event_loop()
loop.run_until_complete(setup_database())
app = FastAPI()
@app.get('/json')
async def json_serialization():
    """JSON test endpoint: serialize a constant object."""
    return UJSONResponse({'message': 'Hello, world!'})
@app.get('/db')
async def single_database_query():
    """Fetch one random World row (id in 1..10000) and return it as JSON."""
    row_id = randint(1, 10000)
    async with connection_pool.acquire() as connection:
        number = await connection.fetchval(READ_ROW_SQL, row_id)
    return UJSONResponse({'id': row_id, 'randomNumber': number})
@app.get('/queries')
async def multiple_database_queries(queries = None):
    """Fetch ``queries`` (clamped to 1..500) random World rows as JSON."""
    num_queries = get_num_queries(queries)
    row_ids = [randint(1, 10000) for _ in range(num_queries)]
    worlds = []
    async with connection_pool.acquire() as connection:
        # Prepare once, then reuse the statement for every row id.
        statement = await connection.prepare(READ_ROW_SQL)
        for row_id in row_ids:
            number = await statement.fetchval(row_id)
            worlds.append({'id': row_id, 'randomNumber': number})
    return UJSONResponse(worlds)
@app.get('/fortunes')
async def fortunes():
    """Render all fortunes plus one added at request time, sorted by text."""
    async with connection_pool.acquire() as connection:
        fortunes = await connection.fetch('SELECT * FROM Fortune')
    fortunes.append(ADDITIONAL_ROW)
    fortunes.sort(key=sort_fortunes_key)
    content = template.render(fortunes=fortunes)
    return HTMLResponse(content)
@app.get('/updates')
async def database_updates(queries = None):
    """Read and then update ``queries`` random World rows, returning the new values."""
    num_queries = get_num_queries(queries)
    # (row_id, new_random_number) pairs.
    updates = [(randint(1, 10000), randint(1, 10000)) for _ in range(num_queries)]
    worlds = [{'id': row_id, 'randomNumber': number} for row_id, number in updates]
    async with connection_pool.acquire() as connection:
        statement = await connection.prepare(READ_ROW_SQL)
        # Each row is read before being written; the fetched value is
        # intentionally discarded (presumably a TechEmpower rule — confirm).
        for row_id, number in updates:
            await statement.fetchval(row_id)
        await connection.executemany(WRITE_ROW_SQL, updates)
    return UJSONResponse(worlds)
@app.get('/plaintext')
async def plaintext():
    """Plaintext test endpoint: constant byte string."""
    return PlainTextResponse(b'Hello, world!')
| 27.579832 | 83 | 0.704144 | import asyncio
import asyncpg
import os
import jinja2
from fastapi import FastAPI
from starlette.responses import HTMLResponse, UJSONResponse, PlainTextResponse
from random import randint
from operator import itemgetter
from urllib.parse import parse_qs
READ_ROW_SQL = 'SELECT "randomnumber", "id" FROM "world" WHERE id = $1'
WRITE_ROW_SQL = 'UPDATE "world" SET "randomnumber"=$1 WHERE id=$2'
ADDITIONAL_ROW = [0, 'Additional fortune added at request time.']
async def setup_database():
global connection_pool
connection_pool = await asyncpg.create_pool(
user=os.getenv('PGUSER', 'benchmarkdbuser'),
password=os.getenv('PGPASS', 'benchmarkdbpass'),
database='hello_world',
host='tfb-database',
port=5432
)
def load_fortunes_template():
path = os.path.join('templates', 'fortune.html')
with open(path, 'r') as template_file:
template_text = template_file.read()
return jinja2.Template(template_text)
def get_num_queries(queries):
try:
query_count = int(queries)
except (ValueError, TypeError):
return 1
if query_count < 1:
return 1
if query_count > 500:
return 500
return query_count
connection_pool = None
sort_fortunes_key = itemgetter(1)
template = load_fortunes_template()
loop = asyncio.get_event_loop()
loop.run_until_complete(setup_database())
app = FastAPI()
@app.get('/json')
async def json_serialization():
return UJSONResponse({'message': 'Hello, world!'})
@app.get('/db')
async def single_database_query():
row_id = randint(1, 10000)
async with connection_pool.acquire() as connection:
number = await connection.fetchval(READ_ROW_SQL, row_id)
return UJSONResponse({'id': row_id, 'randomNumber': number})
@app.get('/queries')
async def multiple_database_queries(queries = None):
num_queries = get_num_queries(queries)
row_ids = [randint(1, 10000) for _ in range(num_queries)]
worlds = []
async with connection_pool.acquire() as connection:
statement = await connection.prepare(READ_ROW_SQL)
for row_id in row_ids:
number = await statement.fetchval(row_id)
worlds.append({'id': row_id, 'randomNumber': number})
return UJSONResponse(worlds)
@app.get('/fortunes')
async def fortunes():
async with connection_pool.acquire() as connection:
fortunes = await connection.fetch('SELECT * FROM Fortune')
fortunes.append(ADDITIONAL_ROW)
fortunes.sort(key=sort_fortunes_key)
content = template.render(fortunes=fortunes)
return HTMLResponse(content)
@app.get('/updates')
async def database_updates(queries = None):
num_queries = get_num_queries(queries)
updates = [(randint(1, 10000), randint(1, 10000)) for _ in range(num_queries)]
worlds = [{'id': row_id, 'randomNumber': number} for row_id, number in updates]
async with connection_pool.acquire() as connection:
statement = await connection.prepare(READ_ROW_SQL)
for row_id, number in updates:
await statement.fetchval(row_id)
await connection.executemany(WRITE_ROW_SQL, updates)
return UJSONResponse(worlds)
@app.get('/plaintext')
async def plaintext():
return PlainTextResponse(b'Hello, world!')
| true | true |
1c2ecd7374ac4b43cc0c12a94a556e95164106a8 | 190 | py | Python | tests/test_train.py | hugobb/sgda | 69dcda47bb2c5b76d46ead32eb46ab5fb5e5e6d3 | [
"MIT"
] | 1 | 2022-02-16T04:20:02.000Z | 2022-02-16T04:20:02.000Z | tests/test_train.py | hugobb/sgda | 69dcda47bb2c5b76d46ead32eb46ab5fb5e5e6d3 | [
"MIT"
] | null | null | null | tests/test_train.py | hugobb/sgda | 69dcda47bb2c5b76d46ead32eb46ab5fb5e5e6d3 | [
"MIT"
] | null | null | null | import unittest
from gamesopt.train import train, TrainConfig
class TestOptimizer(unittest.TestCase):
def test_sgda(self):
config = TrainConfig(num_iter=2)
train(config) | 27.142857 | 45 | 0.736842 | import unittest
from gamesopt.train import train, TrainConfig
class TestOptimizer(unittest.TestCase):
def test_sgda(self):
config = TrainConfig(num_iter=2)
train(config) | true | true |
1c2ecdc1af37318f9cd7610ded59ac8671542db2 | 1,589 | py | Python | cinema/mainapp/models.py | Floou/cinema | 83a921ff802abaa632c336db4f9e5f4ca2907199 | [
"Apache-2.0"
] | null | null | null | cinema/mainapp/models.py | Floou/cinema | 83a921ff802abaa632c336db4f9e5f4ca2907199 | [
"Apache-2.0"
] | null | null | null | cinema/mainapp/models.py | Floou/cinema | 83a921ff802abaa632c336db4f9e5f4ca2907199 | [
"Apache-2.0"
] | null | null | null | from django.db import models, transaction, DatabaseError
from authapp.models import UserProfile
class Film(models.Model):
    """Film that can be screened; removal is a soft delete.

    delete() deactivates the film and its screenings and prefixes the
    title with '_' (title is unique, so the marker frees the name for
    re-use); restore() reverses that.
    """
    title = models.CharField(max_length=128, unique=True)
    description = models.TextField()
    # image = models.ImageField(upload_to='')
    audience = models.ManyToManyField('authapp.UserProfile', related_name='audience')
    # Soft-delete flag; indexed because queries filter on it.
    is_active = models.BooleanField(default=True, db_index=True)

    def __str__(self):
        return f'{self.title}'

    def restore(self):
        """Reactivate the film and its screenings; returns self."""
        self.is_active = True
        # Strip the '_' marker only when it is present, so calling
        # restore() on a film that was never soft-deleted cannot corrupt
        # the title (the original unconditionally dropped a character).
        if self.title.startswith('_'):
            self.title = self.title[1:]
        self.schedule_set.all().update(is_active=True)
        self.save()
        return self

    def delete(self, using=None, keep_parents=False):
        """Soft delete: atomically deactivate the film and its screenings.

        Mimics Django's delete() return shape (rows_deleted, details).
        """
        with transaction.atomic():
            self.is_active = False
            self.schedule_set.all().update(is_active=False)
            self.title = f'_{self.title}'
            self.save()
        return 1, {}

    class Meta:
        # NOTE(review): 'Название' is singular and 'Названия' plural in
        # Russian — the two values look swapped; confirm before changing.
        verbose_name_plural = 'Название'
        verbose_name = 'Названия'
class Schedule(models.Model):
    """A screening of a film at a specific date and time."""
    film = models.ForeignKey(Film, on_delete=models.CASCADE)
    date_time = models.DateTimeField()
    # Soft-delete flag, toggled in bulk by Film.delete()/restore().
    is_active = models.BooleanField(default=True, db_index=True)
    def __str__(self):
        return f'{self.film}: {self.date_time}'
    class Meta:
        # NOTE(review): singular/plural values look swapped (cf. Film.Meta) — confirm.
        verbose_name_plural = 'Расписание'
        verbose_name = 'Расписания'
class Seat(models.Model):
    """A single seat for one screening."""
    # Seat number within the hall; no uniqueness constraint is declared here.
    seat_no = models.IntegerField()
    screening = models.ForeignKey(Schedule, on_delete=models.CASCADE)
    is_active = models.BooleanField(default=True, db_index=True)
| 30.557692 | 85 | 0.670233 | from django.db import models, transaction, DatabaseError
from authapp.models import UserProfile
class Film(models.Model):
title = models.CharField(max_length=128, unique=True)
description = models.TextField()
audience = models.ManyToManyField('authapp.UserProfile', related_name='audience')
is_active = models.BooleanField(default=True, db_index=True)
def __str__(self):
return f'{self.title}'
def restore(self):
self.is_active = True
self.title = self.title[1:]
self.schedule_set.all().update(is_active=True)
self.save()
return self
def delete(self, using=None, keep_parents=False):
with transaction.atomic() as _:
self.is_active = False
self.schedule_set.all().update(is_active=False)
self.title = f'_{self.title}'
self.save()
return 1, {}
class Meta:
verbose_name_plural = 'Название'
verbose_name = 'Названия'
class Schedule(models.Model):
film = models.ForeignKey(Film, on_delete=models.CASCADE)
date_time = models.DateTimeField()
is_active = models.BooleanField(default=True, db_index=True)
def __str__(self):
return f'{self.film}: {self.date_time}'
class Meta:
verbose_name_plural = 'Расписание'
verbose_name = 'Расписания'
class Seat(models.Model):
seat_no = models.IntegerField()
screening = models.ForeignKey(Schedule, on_delete=models.CASCADE)
is_active = models.BooleanField(default=True, db_index=True)
| true | true |
1c2eceafcca534e953adf57c5c2827527a921f25 | 3,936 | py | Python | data/scripts/make_aga_db.py | flovo/goratings | 50b5443b73daae64306e256205eabee8f4815c65 | [
"MIT"
] | 13 | 2020-07-02T16:43:12.000Z | 2021-12-12T00:12:48.000Z | data/scripts/make_aga_db.py | flovo/goratings | 50b5443b73daae64306e256205eabee8f4815c65 | [
"MIT"
] | 13 | 2020-07-05T10:06:42.000Z | 2022-02-27T10:03:24.000Z | data/scripts/make_aga_db.py | flovo/goratings | 50b5443b73daae64306e256205eabee8f4815c65 | [
"MIT"
] | 2 | 2020-07-04T11:19:37.000Z | 2021-01-15T16:46:32.000Z | #!/usr/bin/env pypy3
import csv
import gzip
import json
import sqlite3
import sys
from collections import defaultdict
from math import isnan
from dateutil import parser
AGA_OFFSET = 2000000000
"""
SELECT
0 `Game_ID`,
1 `Tournament_Code`,
2 `Game_Date`,
3 `Round`,
4 `Pin_Player_1`,
5 `Color_1`,
6 `Rank_1`,
7 `Pin_Player_2`,
8 `Color_2`,
9 `Rank_2`,
10 `Handicap`,
11 `Komi`,
12 `Result`,
13 `Online`,
14 `Exclude`,
15 `Rated`,
16 `Elab_Date`
INTO OUTFILE 'games.csv'
FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
LINES TERMINATED BY '\n'
FROM `games`;
2,"albu405","1994-04-30",1,3794,"W","6d",407,"B","4d",2,0,"B",0,0,1,"1994-05-07"
SELECT
`Pin_Player`,
'' as `Name`,
`Rating`,
`Sigma`,
`Elab_Date`
INTO OUTFILE 'players.csv'
FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
LINES TERMINATED BY '\n'
FROM `players`;
3,"",-2.09381,0.42635,"2004-11-01"
SELECT
`Pin_Player`,
`Rating`,
`Sigma`,
`Elab_Date`
INTO OUTFILE 'ratings.csv'
FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
LINES TERMINATED BY '\n'
FROM `ratings`;
10459,0.00000,0.00000,"0000-00-00"
24698,1.41766,0.96060,"2019-07-13"
SELECT
`Tournament_Code`,
`Tournament_Descr`,
`Tournament_Date`
INTO OUTFILE 'tournaments.csv'
FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
LINES TERMINATED BY '\n'
FROM `tournaments`;
"albu405","Albuquerque Spring Tournament,","1994-04-30"
"""
conn = sqlite3.connect("aga-data.db")
c = conn.cursor()
c.execute("DROP TABLE IF EXISTS game_records")
c.execute(
"""
CREATE TABLE IF NOT EXISTS game_records
(
id INTEGER PRIMARY KEY,
black_id INTEGER,
white_id INTEGER,
handicap INTEGER,
winner_id INTEGER,
ended INTEGER
);
"""
)
##
## Import games
##
# Slurp the whole AGA games CSV into memory, showing a progress counter
# every 1000 rows.
ct = 0
rows = []
with open("aga/games.csv", "rt") as games_f:
    games_csv = csv.reader(games_f, delimiter=",")
    for row in games_csv:
        ct += 1
        if ct % 1000 == 0:
            sys.stdout.write("%d\r" % ct)
            sys.stdout.flush()
        rows.append(row)
# Sort by (date, round, game id) via a zero-padded sortable string key.
rows = sorted(rows, key=lambda x: "%s-%02d-%04d" % (x[2], int(x[3]), int(x[0])))  # sort by date, round, game_id
last_manual_rank = {}
game_id = AGA_OFFSET
print('')
ct = 0
# Walk the sorted rows, skipping excluded games, and insert one
# game_records row per game.  Column indices follow the games.csv
# export documented in the module docstring above.
for row in rows:
    ct += 1
    if ct % 1000 == 0:
        sys.stdout.write("%d\r" % ct)
        sys.stdout.flush()
    exclude = int(row[14])
    if exclude:
        continue
    game_id += 1  # we use our own id's so by id they are ordered by date, round, game id
    # Game end time as a Unix timestamp.
    ended = parser.parse(row[2]).timestamp()
    # AGA pins are shifted by AGA_OFFSET to keep them distinct from other ids.
    p1_id = int(row[4]) + AGA_OFFSET
    p1_color = row[5]
    p2_id = int(row[7]) + AGA_OFFSET
    handicap = int(row[10])
    winner = row[12]
    # Map the winning color onto player 1 / player 2.
    if winner == "B" or winner == "W":
        winner = 1 if p1_color == winner else 2
    else:
        raise Exception("Invalid winner value: " + winner)
    if p1_color == 'B':
        black_id = p1_id
        white_id = p2_id
    elif p1_color == 'W':
        white_id = p1_id
        black_id = p2_id
    else:
        raise Exception("Bad p1 color: " + p1_color)
    winner_id = p1_id if winner == 1 else p2_id
    c.execute(
        """
        INSERT INTO game_records
        (
            id,
            black_id,
            white_id,
            handicap,
            winner_id,
            ended
        )
        VALUES
        (
            ?,
            ?,
            ?,
            ?,
            ?,
            ?
        )
        """,
        (
            game_id,
            black_id,
            white_id,
            handicap,
            winner_id,
            ended,
        ),
    )
c.execute(
"""
CREATE INDEX black_ended ON game_records (black_id, -ended);
"""
)
c.execute(
"""
CREATE INDEX white_ended ON game_records (white_id, -ended);
"""
)
conn.commit()
c.close()
conn.execute("VACUUM")
conn.close()
| 18.222222 | 113 | 0.559705 |
import csv
import gzip
import json
import sqlite3
import sys
from collections import defaultdict
from math import isnan
from dateutil import parser
AGA_OFFSET = 2000000000
conn = sqlite3.connect("aga-data.db")
c = conn.cursor()
c.execute("DROP TABLE IF EXISTS game_records")
c.execute(
"""
CREATE TABLE IF NOT EXISTS game_records
(
id INTEGER PRIMARY KEY,
black_id INTEGER,
white_id INTEGER,
handicap INTEGER,
winner_id INTEGER,
ended INTEGER
);
"""
)
= []
with open("aga/games.csv", "rt") as games_f:
games_csv = csv.reader(games_f, delimiter=",")
for row in games_csv:
ct += 1
if ct % 1000 == 0:
sys.stdout.write("%d\r" % ct)
sys.stdout.flush()
rows.append(row)
rows = sorted(rows, key=lambda x: "%s-%02d-%04d" % (x[2], int(x[3]), int(x[0])))
last_manual_rank = {}
game_id = AGA_OFFSET
print('')
ct = 0
for row in rows:
ct += 1
if ct % 1000 == 0:
sys.stdout.write("%d\r" % ct)
sys.stdout.flush()
exclude = int(row[14])
if exclude:
continue
game_id += 1
ended = parser.parse(row[2]).timestamp()
p1_id = int(row[4]) + AGA_OFFSET
p1_color = row[5]
p2_id = int(row[7]) + AGA_OFFSET
handicap = int(row[10])
winner = row[12]
if winner == "B" or winner == "W":
winner = 1 if p1_color == winner else 2
else:
raise Exception("Invalid winner value: " + winner)
if p1_color == 'B':
black_id = p1_id
white_id = p2_id
elif p1_color == 'W':
white_id = p1_id
black_id = p2_id
else:
raise Exception("Bad p1 color: " + p1_color)
winner_id = p1_id if winner == 1 else p2_id
c.execute(
"""
INSERT INTO game_records
(
id,
black_id,
white_id,
handicap,
winner_id,
ended
)
VALUES
(
?,
?,
?,
?,
?,
?
)
""",
(
game_id,
black_id,
white_id,
handicap,
winner_id,
ended,
),
)
c.execute(
"""
CREATE INDEX black_ended ON game_records (black_id, -ended);
"""
)
c.execute(
"""
CREATE INDEX white_ended ON game_records (white_id, -ended);
"""
)
conn.commit()
c.close()
conn.execute("VACUUM")
conn.close()
| true | true |
1c2ed0aa04526ddcb2922cc6b0d30d24707b557f | 1,396 | py | Python | ciphers/transposition_cipher.py | KirilBangachev/Python | 7ad45a46e02edda86a45969de8768f26ef44b306 | [
"MIT"
] | 6 | 2019-03-30T14:09:34.000Z | 2020-07-26T02:45:22.000Z | ciphers/transposition_cipher.py | KirilBangachev/Python | 7ad45a46e02edda86a45969de8768f26ef44b306 | [
"MIT"
] | 1 | 2019-09-01T06:43:06.000Z | 2019-09-01T06:44:55.000Z | ciphers/transposition_cipher.py | KirilBangachev/Python | 7ad45a46e02edda86a45969de8768f26ef44b306 | [
"MIT"
] | 7 | 2018-11-26T05:48:16.000Z | 2021-05-15T17:12:08.000Z | import math
def main():
    """Interactive driver: read message, key and mode, then print the result."""
    message = input('Enter message: ')
    key = int(input('Enter key [2-%s]: ' % (len(message) - 1)))
    mode = input('Encryption/Decryption [e/d]: ')
    if mode.lower().startswith('e'):
        text = encryptMessage(key, message)
    elif mode.lower().startswith('d'):
        text = decryptMessage(key, message)
    # NOTE(review): any other mode leaves `text` unbound and the print
    # below raises NameError — consider validating the mode.
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print('Output:\n%s' %(text + '|'))
def encryptMessage(key, message):
    """Columnar-transposition encrypt: column c is every key-th character
    starting at offset c.

    >>> encryptMessage(6, 'Harshil Darji')
    'Hlia rDsahrij'
    """
    # message[c::key] is exactly the column the original built by walking
    # a pointer in steps of `key`.
    return ''.join(message[col::key] for col in range(key))
def decryptMessage(key, message):
    """Reverse encryptMessage: rebuild the plaintext columns.

    >>> decryptMessage(6, 'Hlia rDsahrij')
    'Harshil Darji'
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    # Cells of the num_rows x num_cols grid that held no character.
    num_shaded = (num_cols * num_rows) - len(message)
    columns = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        columns[col] += symbol
        col += 1
        # Wrap to the next row at the row's end — one column early once we
        # reach the rows that contain a shaded (empty) trailing cell.
        if col == num_cols or (col == num_cols - 1 and row >= num_rows - num_shaded):
            col = 0
            row += 1
    return "".join(columns)
if __name__ == '__main__':
    import doctest
    # Run the embedded doctests before dropping into the interactive CLI.
    doctest.testmod()
    main()
| 25.851852 | 90 | 0.567335 | import math
def main():
message = input('Enter message: ')
key = int(input('Enter key [2-%s]: ' % (len(message) - 1)))
mode = input('Encryption/Decryption [e/d]: ')
if mode.lower().startswith('e'):
text = encryptMessage(key, message)
elif mode.lower().startswith('d'):
text = decryptMessage(key, message)
print('Output:\n%s' %(text + '|'))
def encryptMessage(key, message):
cipherText = [''] * key
for col in range(key):
pointer = col
while pointer < len(message):
cipherText[col] += message[pointer]
pointer += key
return ''.join(cipherText)
def decryptMessage(key, message):
numCols = math.ceil(len(message) / key)
numRows = key
numShadedBoxes = (numCols * numRows) - len(message)
plainText = [""] * numCols
col = 0; row = 0;
for symbol in message:
plainText[col] += symbol
col += 1
if (col == numCols) or (col == numCols - 1) and (row >= numRows - numShadedBoxes):
col = 0
row += 1
return "".join(plainText)
if __name__ == '__main__':
import doctest
doctest.testmod()
main()
| true | true |
1c2ed0f7dd19b2b52848f7ccb350aac69ffa9104 | 1,606 | py | Python | tests/menu_test_9.py | sourcery-ai-bot/Qprompt | baa4810d7a2db450c659983179ff051706b6dadd | [
"MIT"
] | 49 | 2017-01-20T04:57:27.000Z | 2022-01-11T17:35:45.000Z | tests/menu_test_9.py | sourcery-ai-bot/Qprompt | baa4810d7a2db450c659983179ff051706b6dadd | [
"MIT"
] | 14 | 2016-02-19T05:53:12.000Z | 2020-01-11T16:08:16.000Z | tests/menu_test_9.py | sourcery-ai-bot/Qprompt | baa4810d7a2db450c659983179ff051706b6dadd | [
"MIT"
] | 7 | 2018-06-16T14:30:26.000Z | 2020-06-03T23:28:14.000Z | """Tests menu 'result' option return."""
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
from qprompt import Menu
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
TOTAL = 0
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(unittest.TestCase):
    """Exercises qprompt Menu selection via the global TOTAL counter."""
    def setUp(self):
        # Reset the counter and build a fresh two-entry menu (inc/dec)
        # before every test.
        global TOTAL
        TOTAL = 0
        self.menu = Menu(inc, dec)
    def test_menu_1(self):
        # Selecting 'i' once runs inc() exactly once.
        global TOTAL
        setinput("i")
        result = self.menu.show()
        self.assertEqual(1, TOTAL)
    def test_menu_2(self):
        # Selecting 'd' once runs dec() exactly once.
        global TOTAL
        setinput("d")
        result = self.menu.show()
        self.assertEqual(-1, TOTAL)
    def test_menu_3(self):
        # Looping menu: three incs and one dec net to 2.
        global TOTAL
        setinput("i\ni\nd\ni\n")
        result = self.menu.main(loop=True)
        self.assertEqual(2, TOTAL)
def inc():
    """Menu entry callback: increment the shared TOTAL counter."""
    global TOTAL
    TOTAL += 1
def dec():
    """Menu entry callback: decrement the shared TOTAL counter."""
    global TOTAL
    TOTAL -= 1
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
    # Discover and run the TestCase methods defined above.
    unittest.main()
| 26.766667 | 65 | 0.339352 |
lf.menu.main(loop=True)
self.assertEqual(2, TOTAL)
def inc():
global TOTAL
TOTAL += 1
def dec():
global TOTAL
TOTAL -= 1
| true | true |
1c2ed11906e34fdfd3a4357898cc9db0b3e19171 | 11,105 | py | Python | octavia/amphorae/backends/agent/api_server/plug.py | zjchao/octavia | e07031fa78604568c6e2112cb4cb147661bc57d7 | [
"Apache-2.0"
] | null | null | null | octavia/amphorae/backends/agent/api_server/plug.py | zjchao/octavia | e07031fa78604568c6e2112cb4cb147661bc57d7 | [
"Apache-2.0"
] | null | null | null | octavia/amphorae/backends/agent/api_server/plug.py | zjchao/octavia | e07031fa78604568c6e2112cb4cb147661bc57d7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import os
import socket
import stat
import subprocess
import jinja2
from oslo_config import cfg
from oslo_log import log as logging
import pyroute2
import six
import webob
from werkzeug import exceptions
import netifaces
from octavia.common import constants as consts
CONF = cfg.CONF
# Jinja2 template names for the per-interface network config files rendered
# below (VIP interface and plugged member-network ports).
ETH_X_VIP_CONF = 'plug_vip_ethX.conf.j2'
ETH_X_PORT_CONF = 'plug_port_ethX.conf.j2'
LOG = logging.getLogger(__name__)
# Template environment rooted at the agent API templates directory shipped
# next to this module.
j2_env = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader(
    os.path.dirname(os.path.realpath(__file__)) + consts.AGENT_API_TEMPLATES))
template_port = j2_env.get_template(ETH_X_PORT_CONF)
template_vip = j2_env.get_template(ETH_X_VIP_CONF)
class Plug(object):
    """Plugs VIP and member-network interfaces into the amphora's network
    namespace, rendering their config files and moving them via pyroute2.
    """

    def __init__(self, osutils):
        # OS-specific helper used to write interface files and bring
        # interfaces up/down.
        self._osutils = osutils

    def plug_vip(self, vip, subnet_cidr, gateway,
                 mac_address, mtu=None, vrrp_ip=None, host_routes=None):
        """Plug the VIP port into the amphora namespace as eth1.

        Validates the addresses, writes the VIP interface config, creates
        the namespace (and loads sysctl/ip_vs settings inside it), then
        moves the interface identified by ``mac_address`` into it.

        Returns a webob.Response: 202 on success, 400 for invalid
        addresses, 409 if the interface is already plugged.
        """
        # Validate vip and subnet_cidr, calculate broadcast address and netmask
        try:
            render_host_routes = []
            ip = ipaddress.ip_address(
                vip if isinstance(vip, six.text_type) else six.u(vip))
            network = ipaddress.ip_network(
                subnet_cidr if isinstance(subnet_cidr, six.text_type)
                else six.u(subnet_cidr))
            vip = ip.exploded
            broadcast = network.broadcast_address.exploded
            # IPv6 config uses the prefix length; IPv4 uses a dotted netmask.
            netmask = (network.prefixlen if ip.version == 6
                       else network.netmask.exploded)
            vrrp_version = None
            if vrrp_ip:
                vrrp_ip_obj = ipaddress.ip_address(
                    vrrp_ip if isinstance(vrrp_ip, six.text_type)
                    else six.u(vrrp_ip)
                )
                vrrp_version = vrrp_ip_obj.version
            if host_routes:
                for hr in host_routes:
                    network = ipaddress.ip_network(
                        hr['destination'] if isinstance(
                            hr['destination'], six.text_type) else
                        six.u(hr['destination']))
                    render_host_routes.append({'network': network,
                                               'gw': hr['nexthop']})
        except ValueError:
            return webob.Response(json=dict(message="Invalid VIP"),
                                  status=400)
        # Check if the interface is already in the network namespace
        # Do not attempt to re-plug the VIP if it is already in the
        # network namespace
        if self._netns_interface_exists(mac_address):
            return webob.Response(
                json=dict(message="Interface already exists"), status=409)
        # This is the interface prior to moving into the netns
        default_netns_interface = self._interface_by_mac(mac_address)
        # Always put the VIP interface as eth1
        primary_interface = consts.NETNS_PRIMARY_INTERFACE
        secondary_interface = "{interface}:0".format(
            interface=primary_interface)
        interface_file_path = self._osutils.get_network_interface_file(
            primary_interface)
        self._osutils.create_netns_dir()
        self._osutils.write_interfaces_file()
        self._osutils.write_vip_interface_file(
            interface_file_path=interface_file_path,
            primary_interface=primary_interface,
            vip=vip,
            ip=ip,
            broadcast=broadcast,
            netmask=netmask,
            gateway=gateway,
            mtu=mtu,
            vrrp_ip=vrrp_ip,
            vrrp_version=vrrp_version,
            render_host_routes=render_host_routes)
        # Update the list of interfaces to add to the namespace
        # This is used in the amphora reboot case to re-establish the namespace
        self._update_plugged_interfaces_file(primary_interface, mac_address)
        # Create the namespace
        netns = pyroute2.NetNS(consts.AMPHORA_NAMESPACE, flags=os.O_CREAT)
        netns.close()
        # Load sysctl in new namespace
        sysctl = pyroute2.NSPopen(consts.AMPHORA_NAMESPACE,
                                  [consts.SYSCTL_CMD, '--system'],
                                  stdout=subprocess.PIPE)
        sysctl.communicate()
        sysctl.wait()
        sysctl.release()
        cmd_list = [['modprobe', 'ip_vs'],
                    [consts.SYSCTL_CMD, '-w', 'net.ipv4.vs.conntrack=1']]
        if ip.version == 4:
            # For lvs function, enable ip_vs kernel module, enable ip_forward
            # conntrack in amphora network namespace.
            cmd_list.append([consts.SYSCTL_CMD, '-w', 'net.ipv4.ip_forward=1'])
        elif ip.version == 6:
            cmd_list.append([consts.SYSCTL_CMD, '-w',
                             'net.ipv6.conf.all.forwarding=1'])
        for cmd in cmd_list:
            ns_exec = pyroute2.NSPopen(consts.AMPHORA_NAMESPACE, cmd,
                                       stdout=subprocess.PIPE)
            ns_exec.wait()
            ns_exec.release()
        with pyroute2.IPRoute() as ipr:
            # Move the interfaces into the namespace
            idx = ipr.link_lookup(ifname=default_netns_interface)[0]
            ipr.link('set', index=idx, net_ns_fd=consts.AMPHORA_NAMESPACE,
                     IFLA_IFNAME=primary_interface)
        # bring interfaces up
        self._osutils.bring_interfaces_up(
            ip, primary_interface, secondary_interface)
        return webob.Response(json=dict(
            message="OK",
            details="VIP {vip} plugged on interface {interface}".format(
                vip=vip, interface=primary_interface)), status=202)

    def _check_ip_addresses(self, fixed_ips):
        """Raise socket.error if any fixed IP is neither valid IPv4 nor
        valid IPv6 (IPv4 is tried first, then IPv6)."""
        if fixed_ips:
            for ip in fixed_ips:
                try:
                    socket.inet_pton(socket.AF_INET, ip.get('ip_address'))
                except socket.error:
                    socket.inet_pton(socket.AF_INET6, ip.get('ip_address'))

    def plug_network(self, mac_address, fixed_ips, mtu=None):
        """Plug a member-network port into the amphora namespace.

        The in-namespace name is chosen as eth<N> based on how many links
        the namespace already holds. Returns a webob.Response: 202 on
        success, 400 for invalid fixed IPs, 409 if already plugged.
        """
        # Check if the interface is already in the network namespace
        # Do not attempt to re-plug the network if it is already in the
        # network namespace
        if self._netns_interface_exists(mac_address):
            return webob.Response(json=dict(
                message="Interface already exists"), status=409)
        # This is the interface as it was initially plugged into the
        # default network namespace, this will likely always be eth1
        try:
            self._check_ip_addresses(fixed_ips=fixed_ips)
        except socket.error:
            return webob.Response(json=dict(
                message="Invalid network port"), status=400)
        default_netns_interface = self._interface_by_mac(mac_address)
        # We need to determine the interface name when inside the namespace
        # to avoid name conflicts
        with pyroute2.NetNS(consts.AMPHORA_NAMESPACE,
                            flags=os.O_CREAT) as netns:
            # 1 means just loopback, but we should already have a VIP. This
            # works for the add/delete/add case as we don't delete interfaces
            # Note, eth0 is skipped because that is the VIP interface
            netns_interface = 'eth{0}'.format(len(netns.get_links()))
        LOG.info('Plugged interface %s will become %s in the namespace %s',
                 default_netns_interface, netns_interface,
                 consts.AMPHORA_NAMESPACE)
        interface_file_path = self._osutils.get_network_interface_file(
            netns_interface)
        self._osutils.write_port_interface_file(
            netns_interface=netns_interface,
            fixed_ips=fixed_ips,
            mtu=mtu,
            interface_file_path=interface_file_path)
        # Update the list of interfaces to add to the namespace
        self._update_plugged_interfaces_file(netns_interface, mac_address)
        with pyroute2.IPRoute() as ipr:
            # Move the interfaces into the namespace
            idx = ipr.link_lookup(ifname=default_netns_interface)[0]
            ipr.link('set', index=idx,
                     net_ns_fd=consts.AMPHORA_NAMESPACE,
                     IFLA_IFNAME=netns_interface)
        self._osutils._bring_if_down(netns_interface)
        self._osutils._bring_if_up(netns_interface, 'network')
        return webob.Response(json=dict(
            message="OK",
            details="Plugged on interface {interface}".format(
                interface=netns_interface)), status=202)

    def _interface_by_mac(self, mac):
        """Return the default-namespace interface name matching ``mac``.

        On a miss, asks the kernel to rescan the PCI bus (the hot-plugged
        port may not be enumerated yet) and raises a 404 HTTPException.
        """
        for interface in netifaces.interfaces():
            if netifaces.AF_LINK in netifaces.ifaddresses(interface):
                for link in netifaces.ifaddresses(
                        interface)[netifaces.AF_LINK]:
                    if link.get('addr', '').lower() == mac.lower():
                        return interface
        # Poke the kernel to re-enumerate the PCI bus.
        # We have had cases where nova hot plugs the interface but
        # the kernel doesn't get the memo.
        filename = '/sys/bus/pci/rescan'
        flags = os.O_WRONLY
        if os.path.isfile(filename):
            with os.fdopen(os.open(filename, flags), 'w') as rescan_file:
                rescan_file.write('1')
        raise exceptions.HTTPException(
            response=webob.Response(json=dict(
                details="No suitable network interface found"), status=404))

    def _update_plugged_interfaces_file(self, interface, mac_address):
        """Record ``mac_address interface`` in the plugged-interfaces file
        (used to re-establish the namespace after a reboot)."""
        # write interfaces to plugged_interfaces file and prevent duplicates
        plug_inf_file = consts.PLUGGED_INTERFACES
        flags = os.O_RDWR | os.O_CREAT
        # mode 0644
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        with os.fdopen(os.open(plug_inf_file, flags, mode), 'r+') as text_file:
            inf_list = [inf.split()[0].rstrip() for inf in text_file]
            if mac_address not in inf_list:
                text_file.write("{mac_address} {interface}\n".format(
                    mac_address=mac_address, interface=interface))

    def _netns_interface_exists(self, mac_address):
        """Return True if a link with ``mac_address`` already exists inside
        the amphora namespace."""
        with pyroute2.NetNS(consts.AMPHORA_NAMESPACE,
                            flags=os.O_CREAT) as netns:
            for link in netns.get_links():
                for attr in link['attrs']:
                    if attr[0] == 'IFLA_ADDRESS' and attr[1] == mac_address:
                        return True
        return False
| 41.12963 | 79 | 0.622242 |
import ipaddress
import os
import socket
import stat
import subprocess
import jinja2
from oslo_config import cfg
from oslo_log import log as logging
import pyroute2
import six
import webob
from werkzeug import exceptions
import netifaces
from octavia.common import constants as consts
CONF = cfg.CONF
ETH_X_VIP_CONF = 'plug_vip_ethX.conf.j2'
ETH_X_PORT_CONF = 'plug_port_ethX.conf.j2'
LOG = logging.getLogger(__name__)
j2_env = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader(
os.path.dirname(os.path.realpath(__file__)) + consts.AGENT_API_TEMPLATES))
template_port = j2_env.get_template(ETH_X_PORT_CONF)
template_vip = j2_env.get_template(ETH_X_VIP_CONF)
class Plug(object):
def __init__(self, osutils):
self._osutils = osutils
def plug_vip(self, vip, subnet_cidr, gateway,
mac_address, mtu=None, vrrp_ip=None, host_routes=None):
try:
render_host_routes = []
ip = ipaddress.ip_address(
vip if isinstance(vip, six.text_type) else six.u(vip))
network = ipaddress.ip_network(
subnet_cidr if isinstance(subnet_cidr, six.text_type)
else six.u(subnet_cidr))
vip = ip.exploded
broadcast = network.broadcast_address.exploded
netmask = (network.prefixlen if ip.version == 6
else network.netmask.exploded)
vrrp_version = None
if vrrp_ip:
vrrp_ip_obj = ipaddress.ip_address(
vrrp_ip if isinstance(vrrp_ip, six.text_type)
else six.u(vrrp_ip)
)
vrrp_version = vrrp_ip_obj.version
if host_routes:
for hr in host_routes:
network = ipaddress.ip_network(
hr['destination'] if isinstance(
hr['destination'], six.text_type) else
six.u(hr['destination']))
render_host_routes.append({'network': network,
'gw': hr['nexthop']})
except ValueError:
return webob.Response(json=dict(message="Invalid VIP"),
status=400)
if self._netns_interface_exists(mac_address):
return webob.Response(
json=dict(message="Interface already exists"), status=409)
default_netns_interface = self._interface_by_mac(mac_address)
primary_interface = consts.NETNS_PRIMARY_INTERFACE
secondary_interface = "{interface}:0".format(
interface=primary_interface)
interface_file_path = self._osutils.get_network_interface_file(
primary_interface)
self._osutils.create_netns_dir()
self._osutils.write_interfaces_file()
self._osutils.write_vip_interface_file(
interface_file_path=interface_file_path,
primary_interface=primary_interface,
vip=vip,
ip=ip,
broadcast=broadcast,
netmask=netmask,
gateway=gateway,
mtu=mtu,
vrrp_ip=vrrp_ip,
vrrp_version=vrrp_version,
render_host_routes=render_host_routes)
self._update_plugged_interfaces_file(primary_interface, mac_address)
netns = pyroute2.NetNS(consts.AMPHORA_NAMESPACE, flags=os.O_CREAT)
netns.close()
sysctl = pyroute2.NSPopen(consts.AMPHORA_NAMESPACE,
[consts.SYSCTL_CMD, '--system'],
stdout=subprocess.PIPE)
sysctl.communicate()
sysctl.wait()
sysctl.release()
cmd_list = [['modprobe', 'ip_vs'],
[consts.SYSCTL_CMD, '-w', 'net.ipv4.vs.conntrack=1']]
if ip.version == 4:
cmd_list.append([consts.SYSCTL_CMD, '-w', 'net.ipv4.ip_forward=1'])
elif ip.version == 6:
cmd_list.append([consts.SYSCTL_CMD, '-w',
'net.ipv6.conf.all.forwarding=1'])
for cmd in cmd_list:
ns_exec = pyroute2.NSPopen(consts.AMPHORA_NAMESPACE, cmd,
stdout=subprocess.PIPE)
ns_exec.wait()
ns_exec.release()
with pyroute2.IPRoute() as ipr:
idx = ipr.link_lookup(ifname=default_netns_interface)[0]
ipr.link('set', index=idx, net_ns_fd=consts.AMPHORA_NAMESPACE,
IFLA_IFNAME=primary_interface)
self._osutils.bring_interfaces_up(
ip, primary_interface, secondary_interface)
return webob.Response(json=dict(
message="OK",
details="VIP {vip} plugged on interface {interface}".format(
vip=vip, interface=primary_interface)), status=202)
def _check_ip_addresses(self, fixed_ips):
if fixed_ips:
for ip in fixed_ips:
try:
socket.inet_pton(socket.AF_INET, ip.get('ip_address'))
except socket.error:
socket.inet_pton(socket.AF_INET6, ip.get('ip_address'))
def plug_network(self, mac_address, fixed_ips, mtu=None):
if self._netns_interface_exists(mac_address):
return webob.Response(json=dict(
message="Interface already exists"), status=409)
try:
self._check_ip_addresses(fixed_ips=fixed_ips)
except socket.error:
return webob.Response(json=dict(
message="Invalid network port"), status=400)
default_netns_interface = self._interface_by_mac(mac_address)
with pyroute2.NetNS(consts.AMPHORA_NAMESPACE,
flags=os.O_CREAT) as netns:
# Note, eth0 is skipped because that is the VIP interface
netns_interface = 'eth{0}'.format(len(netns.get_links()))
LOG.info('Plugged interface %s will become %s in the namespace %s',
default_netns_interface, netns_interface,
consts.AMPHORA_NAMESPACE)
interface_file_path = self._osutils.get_network_interface_file(
netns_interface)
self._osutils.write_port_interface_file(
netns_interface=netns_interface,
fixed_ips=fixed_ips,
mtu=mtu,
interface_file_path=interface_file_path)
# Update the list of interfaces to add to the namespace
self._update_plugged_interfaces_file(netns_interface, mac_address)
with pyroute2.IPRoute() as ipr:
# Move the interfaces into the namespace
idx = ipr.link_lookup(ifname=default_netns_interface)[0]
ipr.link('set', index=idx,
net_ns_fd=consts.AMPHORA_NAMESPACE,
IFLA_IFNAME=netns_interface)
self._osutils._bring_if_down(netns_interface)
self._osutils._bring_if_up(netns_interface, 'network')
return webob.Response(json=dict(
message="OK",
details="Plugged on interface {interface}".format(
interface=netns_interface)), status=202)
def _interface_by_mac(self, mac):
for interface in netifaces.interfaces():
if netifaces.AF_LINK in netifaces.ifaddresses(interface):
for link in netifaces.ifaddresses(
interface)[netifaces.AF_LINK]:
if link.get('addr', '').lower() == mac.lower():
return interface
# Poke the kernel to re-enumerate the PCI bus.
# We have had cases where nova hot plugs the interface but
# the kernel doesn't get the memo.
filename = '/sys/bus/pci/rescan'
flags = os.O_WRONLY
if os.path.isfile(filename):
with os.fdopen(os.open(filename, flags), 'w') as rescan_file:
rescan_file.write('1')
raise exceptions.HTTPException(
response=webob.Response(json=dict(
details="No suitable network interface found"), status=404))
def _update_plugged_interfaces_file(self, interface, mac_address):
plug_inf_file = consts.PLUGGED_INTERFACES
flags = os.O_RDWR | os.O_CREAT
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
with os.fdopen(os.open(plug_inf_file, flags, mode), 'r+') as text_file:
inf_list = [inf.split()[0].rstrip() for inf in text_file]
if mac_address not in inf_list:
text_file.write("{mac_address} {interface}\n".format(
mac_address=mac_address, interface=interface))
def _netns_interface_exists(self, mac_address):
with pyroute2.NetNS(consts.AMPHORA_NAMESPACE,
flags=os.O_CREAT) as netns:
for link in netns.get_links():
for attr in link['attrs']:
if attr[0] == 'IFLA_ADDRESS' and attr[1] == mac_address:
return True
return False
| true | true |
1c2ed1198a770c6d0b0fcfb644dd93fa1250cc4c | 1,382 | py | Python | imgdata.py | DrNH4CK3R/Img-Forensic | 223c0fe37735a30b18b626e894a5aff384a60e37 | [
"MIT"
] | 1 | 2021-09-23T05:38:32.000Z | 2021-09-23T05:38:32.000Z | imgdata.py | DrNH4CK3R/Img-Forensic | 223c0fe37735a30b18b626e894a5aff384a60e37 | [
"MIT"
] | null | null | null | imgdata.py | DrNH4CK3R/Img-Forensic | 223c0fe37735a30b18b626e894a5aff384a60e37 | [
"MIT"
] | null | null | null |
from PIL import Image
from PIL.ExifTags import TAGS
import os
import sys  # NOTE(review): imported but never used in this excerpt
import time

# ANSI escape sequences used to colour the terminal output.
lgreen = '\033[92m'
cyan = '\033[96m'
bold = '\033[01m'
red = '\033[31m'
# Clear the terminal before printing the banner (POSIX `clear`; will not
# work on plain Windows cmd — TODO confirm target platforms).
os.system("clear")
print(bold+red+"""

 ___                 ____        _        
|_ _|_ __ ___   __ _|  _ \ __ _| |_ __ _ 
 | || '_ ` _ \ / _` |_____| | | |/ _` | __/ _` |
 | || | | | | | (_| |_____| |_| | (_| | || (_| |
|___|_| |_| |_|\__, |     |____/ \__,_|\__\__,_|
               |___/                                         
               
            Python Anti-Malware Toolkit
"""+red+bold)
print(cyan+"""
    ------------------------------------------------------------------
"""+cyan)
print(" ")
# path to image
imagename = input(lgreen+">> Enter Path to Image or Video : "+lgreen)
print(" ")
print(lgreen+"Extractng EXIF Data....."+lgreen)
# The sleeps are purely cosmetic (simulated "work" for the UI).
time.sleep(2)
print(" ")
print(lgreen+"Converting EXIF Data"+lgreen)
time.sleep(1)
print(" ")
# read the image data using PIL
# NOTE(review): the file handle opened here is never explicitly closed;
# a `with Image.open(...)` block would be safer.
image = Image.open(imagename)
# extract EXIF data
exifdata = image.getexif()
# iterating over all EXIF data fields
for tag_id in exifdata:
    # Map the numeric EXIF tag id to its human-readable name (falls back
    # to the raw id when the tag is unknown to Pillow).
    tag = TAGS.get(tag_id, tag_id)
    data = exifdata.get(tag_id)
    # Some EXIF values arrive as raw bytes; decode them for display.
    if isinstance(data, bytes):
        data = data.decode()
    print(cyan+bold+f"{tag:25}: {data}"+bold+cyan)
| 21.59375 | 69 | 0.471056 |
from PIL import Image
from PIL.ExifTags import TAGS
import os
import sys
import time
lgreen = '\033[92m'
cyan = '\033[96m'
bold = '\033[01m'
red = '\033[31m'
os.system("clear")
print(bold+red+"""
___ ____ _
|_ _|_ __ ___ __ _ | _ \ __ _| |_ __ _
| || '_ ` _ \ / _` |_____| | | |/ _` | __/ _` |
| || | | | | | (_| |_____| |_| | (_| | || (_| |
|___|_| |_| |_|\__, | |____/ \__,_|\__\__,_|
|___/
Author: DrNH4CK3R
"""+red+bold)
print(cyan+"""
------------------------------------------------------------------
"""+cyan)
print(" ")
# path to image
imagename = input(lgreen+">> Enter Path to Image or Video : "+lgreen)
print(" ")
print(lgreen+"Extractng EXIF Data....."+lgreen)
time.sleep(2)
print(" ")
print(lgreen+"Converting EXIF Data"+lgreen)
time.sleep(1)
print(" ")
# read the image data using PIL
image = Image.open(imagename)
# extract EXIF data
exifdata = image.getexif()
# iterating over all EXIF data fields
for tag_id in exifdata:
tag = TAGS.get(tag_id, tag_id)
data = exifdata.get(tag_id)
if isinstance(data, bytes):
data = data.decode()
print(cyan+bold+f"{tag:25}: {data}"+bold+cyan)
| true | true |
1c2ed220c9c1e6a8dc2d7c4db9055675dd77049b | 4,607 | py | Python | django/contrib/auth/management/__init__.py | kkoralsky/django | 924af638e4d4fb8eb46a19ac0cafcb2e83480cf3 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/contrib/auth/management/__init__.py | kkoralsky/django | 924af638e4d4fb8eb46a19ac0cafcb2e83480cf3 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/contrib/auth/management/__init__.py | kkoralsky/django | 924af638e4d4fb8eb46a19ac0cafcb2e83480cf3 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-02-06T10:31:51.000Z | 2020-02-06T10:31:51.000Z | """
Creates permissions for all installed apps that need permissions.
"""
import getpass
import unicodedata
from django.apps import apps as global_apps
from django.contrib.auth import get_permission_codename
from django.core import exceptions
from django.db import DEFAULT_DB_ALIAS, router
def _get_all_permissions(opts):
    """
    Returns (codename, name) for all permissions in the given opts,
    combining the autogenerated permissions with any custom ones declared
    in ``opts.permissions``.
    """
    return _get_builtin_permissions(opts) + list(opts.permissions)
def _get_builtin_permissions(opts):
    """
    Returns (codename, name) for all autogenerated permissions.
    By default, this is ('add', 'change', 'delete')
    """
    return [
        (get_permission_codename(action, opts),
         'Can %s %s' % (action, opts.verbose_name_raw))
        for action in opts.default_permissions
    ]
def create_permissions(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, apps=global_apps, **kwargs):
    """
    Create any missing Permission rows for all models in ``app_config``.

    Looks up the ContentType and Permission models through ``apps`` (so it
    works with a migration-state registry), compares the permissions that
    should exist against those already in the ``using`` database, and
    bulk-creates the difference. Silently returns if the app has no models
    module, the required models are unavailable, or the router disallows
    migrating Permission on this database.
    """
    if not app_config.models_module:
        return
    app_label = app_config.label
    try:
        app_config = apps.get_app_config(app_label)
        ContentType = apps.get_model('contenttypes', 'ContentType')
        Permission = apps.get_model('auth', 'Permission')
    except LookupError:
        return
    if not router.allow_migrate_model(using, Permission):
        return
    # This will hold the permissions we're looking for as
    # (content_type, (codename, name))
    searched_perms = list()
    # The codenames and ctypes that should exist.
    ctypes = set()
    for klass in app_config.get_models():
        # Force looking up the content types in the current database
        # before creating foreign keys to them.
        ctype = ContentType.objects.db_manager(using).get_for_model(klass)
        ctypes.add(ctype)
        for perm in _get_all_permissions(klass._meta):
            searched_perms.append((ctype, perm))
    # Find all the Permissions that have a content_type for a model we're
    # looking for. We don't need to check for codenames since we already have
    # a list of the ones we're going to create.
    all_perms = set(Permission.objects.using(using).filter(
        content_type__in=ctypes,
    ).values_list(
        "content_type", "codename"
    ))
    # Only create the permissions that are not already present.
    perms = [
        Permission(codename=codename, name=name, content_type=ct)
        for ct, (codename, name) in searched_perms
        if (ct.pk, codename) not in all_perms
    ]
    Permission.objects.using(using).bulk_create(perms)
    if verbosity >= 2:
        for perm in perms:
            print("Adding permission '%s'" % perm)
def get_system_username():
"""
Return the current system user's username, or an empty string if the
username could not be determined.
"""
try:
result = getpass.getuser()
except (ImportError, KeyError):
# KeyError will be raised by os.getpwuid() (called by getuser())
# if there is no corresponding entry in the /etc/passwd file
# (a very restricted chroot environment, for example).
return ''
return result
def get_default_username(check_db=True):
    """
    Try to determine the current system user's username to use as a default.

    :param check_db: If ``True``, requires that the username does not match an
        existing ``auth.User`` (otherwise returns an empty string).
    :returns: The username, or an empty string if no username can be
        determined.
    """
    # This file is used in apps.py, it should not trigger models import.
    from django.contrib.auth import models as auth_app
    # If the User model has been swapped out, we can't make any assumptions
    # about the default user name.
    if auth_app.User._meta.swapped:
        return ''
    default_username = get_system_username()
    try:
        # ASCII-fold the system username (NFKD-decompose, drop non-ASCII,
        # strip spaces, lowercase) so it has a chance of passing the
        # username validator below.
        default_username = (
            unicodedata.normalize('NFKD', default_username)
            .encode('ascii', 'ignore').decode('ascii')
            .replace(' ', '').lower()
        )
    except UnicodeDecodeError:
        return ''
    # Run the username validator
    try:
        auth_app.User._meta.get_field('username').run_validators(default_username)
    except exceptions.ValidationError:
        return ''
    # Don't return the default username if it is already taken.
    if check_db and default_username:
        try:
            auth_app.User._default_manager.get(username=default_username)
        except auth_app.User.DoesNotExist:
            pass
        else:
            return ''
    return default_username
| 32.443662 | 118 | 0.666594 | import getpass
import unicodedata
from django.apps import apps as global_apps
from django.contrib.auth import get_permission_codename
from django.core import exceptions
from django.db import DEFAULT_DB_ALIAS, router
def _get_all_permissions(opts):
builtin = _get_builtin_permissions(opts)
custom = list(opts.permissions)
return builtin + custom
def _get_builtin_permissions(opts):
perms = []
for action in opts.default_permissions:
perms.append((
get_permission_codename(action, opts),
'Can %s %s' % (action, opts.verbose_name_raw)
))
return perms
def create_permissions(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, apps=global_apps, **kwargs):
if not app_config.models_module:
return
app_label = app_config.label
try:
app_config = apps.get_app_config(app_label)
ContentType = apps.get_model('contenttypes', 'ContentType')
Permission = apps.get_model('auth', 'Permission')
except LookupError:
return
if not router.allow_migrate_model(using, Permission):
return
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_config.get_models():
# Force looking up the content types in the current database
# before creating foreign keys to them.
ctype = ContentType.objects.db_manager(using).get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a content_type for a model we're
# a list of the ones we're going to create.
all_perms = set(Permission.objects.using(using).filter(
content_type__in=ctypes,
).values_list(
"content_type", "codename"
))
perms = [
Permission(codename=codename, name=name, content_type=ct)
for ct, (codename, name) in searched_perms
if (ct.pk, codename) not in all_perms
]
Permission.objects.using(using).bulk_create(perms)
if verbosity >= 2:
for perm in perms:
print("Adding permission '%s'" % perm)
def get_system_username():
try:
result = getpass.getuser()
except (ImportError, KeyError):
return ''
return result
def get_default_username(check_db=True):
from django.contrib.auth import models as auth_app
# about the default user name.
if auth_app.User._meta.swapped:
return ''
default_username = get_system_username()
try:
default_username = (
unicodedata.normalize('NFKD', default_username)
.encode('ascii', 'ignore').decode('ascii')
.replace(' ', '').lower()
)
except UnicodeDecodeError:
return ''
# Run the username validator
try:
auth_app.User._meta.get_field('username').run_validators(default_username)
except exceptions.ValidationError:
return ''
# Don't return the default username if it is already taken.
if check_db and default_username:
try:
auth_app.User._default_manager.get(username=default_username)
except auth_app.User.DoesNotExist:
pass
else:
return ''
return default_username
| true | true |
1c2ed251c6ebe0f19446db9655fb31fc40f8030c | 2,927 | py | Python | ci/create_codewind_index.py | josiemundi/stacks | e9e90a93d17a2719e9f26ee7cac05abe697fddd7 | [
"Apache-2.0"
] | 96 | 2019-06-19T14:47:05.000Z | 2022-02-20T09:31:14.000Z | ci/create_codewind_index.py | josiemundi/stacks | e9e90a93d17a2719e9f26ee7cac05abe697fddd7 | [
"Apache-2.0"
] | 591 | 2019-06-24T20:49:42.000Z | 2022-02-20T12:26:28.000Z | ci/create_codewind_index.py | josiemundi/stacks | e9e90a93d17a2719e9f26ee7cac05abe697fddd7 | [
"Apache-2.0"
] | 127 | 2019-06-21T10:02:15.000Z | 2021-08-10T11:55:18.000Z | #!/usr/bin/env python3
import yaml
import json
import os
import fnmatch
from collections import OrderedDict
import argparse
from argparse import ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-n", "--namePrefix", help="Display name prefix.", default="Appsody")
parser.add_argument("-f", "--file", help="Absolute or relative path, to a yaml file or directory of yaml files.", default=os.getcwd())
args = parser.parse_args()
displayNamePrefix = args.namePrefix
yaml_dir = os.path.normpath(args.file)
def generate_json():
with open(inputFile, 'r') as yamlFile, open(inputFile.rsplit('.', 1)[0] + ".json", 'wb') as jsonFile:
try:
doc = yaml.safe_load(yamlFile)
list = []
if (doc['stacks'] != None):
for item in doc['stacks']:
# get template name
for n in range(0, len(item['templates'])):
if len(item['templates'])==1:
template = ""
else:
template = " " + item['templates'][n]['id']
# populate stack details
res = (OrderedDict([
("displayName", displayNamePrefix + " " + item['name'] + template + " template"),
("description", item['description']),
("language", item['language']),
("projectType", "appsodyExtension"),
("projectStyle", "Appsody"),
("location", item['templates'][n]['url']),
("links", OrderedDict([
("self", "/devfiles/" +
item['id'] + "/devfile.yaml")
]))
]))
if ('deprecated' in item):
res.update([("displayName", "[Deprecated] " + displayNamePrefix + " " + item['name'] + template + " template"),
("deprecated", item['deprecated'])])
list.append(res)
jsonFile.write(json.dumps(list, indent=4, ensure_ascii=False).encode('utf8'))
print("Generated: " + inputFile.rsplit('.', 1)[0] + ".json")
except yaml.YAMLError as exc:
print(exc)
if os.path.isdir(yaml_dir):
for file in os.listdir(yaml_dir):
if fnmatch.fnmatch(file, '*.yaml'):
inputFile = yaml_dir + "/" + file
generate_json()
else:
inputFile = yaml_dir
generate_json() | 41.814286 | 143 | 0.460198 |
import yaml
import json
import os
import fnmatch
from collections import OrderedDict
import argparse
from argparse import ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-n", "--namePrefix", help="Display name prefix.", default="Appsody")
parser.add_argument("-f", "--file", help="Absolute or relative path, to a yaml file or directory of yaml files.", default=os.getcwd())
args = parser.parse_args()
displayNamePrefix = args.namePrefix
yaml_dir = os.path.normpath(args.file)
def generate_json():
with open(inputFile, 'r') as yamlFile, open(inputFile.rsplit('.', 1)[0] + ".json", 'wb') as jsonFile:
try:
doc = yaml.safe_load(yamlFile)
list = []
if (doc['stacks'] != None):
for item in doc['stacks']:
for n in range(0, len(item['templates'])):
if len(item['templates'])==1:
template = ""
else:
template = " " + item['templates'][n]['id']
res = (OrderedDict([
("displayName", displayNamePrefix + " " + item['name'] + template + " template"),
("description", item['description']),
("language", item['language']),
("projectType", "appsodyExtension"),
("projectStyle", "Appsody"),
("location", item['templates'][n]['url']),
("links", OrderedDict([
("self", "/devfiles/" +
item['id'] + "/devfile.yaml")
]))
]))
if ('deprecated' in item):
res.update([("displayName", "[Deprecated] " + displayNamePrefix + " " + item['name'] + template + " template"),
("deprecated", item['deprecated'])])
list.append(res)
jsonFile.write(json.dumps(list, indent=4, ensure_ascii=False).encode('utf8'))
print("Generated: " + inputFile.rsplit('.', 1)[0] + ".json")
except yaml.YAMLError as exc:
print(exc)
if os.path.isdir(yaml_dir):
for file in os.listdir(yaml_dir):
if fnmatch.fnmatch(file, '*.yaml'):
inputFile = yaml_dir + "/" + file
generate_json()
else:
inputFile = yaml_dir
generate_json() | true | true |
1c2ed2577fc97422b4a279da2c5778ffedaef873 | 2,333 | py | Python | PAMT.py | PuffinDev/PAMT | a398e049c40920e58cebc0c502f1e7020aa82d04 | [
"MIT"
] | null | null | null | PAMT.py | PuffinDev/PAMT | a398e049c40920e58cebc0c502f1e7020aa82d04 | [
"MIT"
] | null | null | null | PAMT.py | PuffinDev/PAMT | a398e049c40920e58cebc0c502f1e7020aa82d04 | [
"MIT"
] | null | null | null | import sys
import os
import time
import json
import fnmatch
from progress.bar import Bar
# Python Anti-Malware Toolkit
root = "/" #'/' for linux 'C:\' for windows
patterns = ['*.py', '*.sh']
matching_files = []
dangerous_files = []
bad_content = b'rm -rf' #Files that contain this text will be blacklisted
banner = \
'\u001b[34;1m' + """
-------------------------------
| __ \ /\ | \/ |__ __|
| |__) / \ | \ / | | |
| ___/ /\ \ | |\/| | | |
| | / ____ \| | | | | |
|_| /_/ \_\_| |_| |_|
Python Anti-Malware Toolkit
-------------------------------
""" + '\u001b[0m'
print(banner)
def scan():
    """Walk the filesystem from `root`, collect every file whose name
    matches one of `patterns` into `matching_files`, then hand the list
    to scan_files() for signature matching.

    Walks the tree twice: once to count files so the progress bar has a
    total, and once to do the matching.
    """
    # First pass: count files for the progress bar.
    print("Initialising...")
    filecount = 0
    for path, subdirs, files in os.walk(root):
        filecount += len(files)
    print('\n')
    bar = Bar('Scanning filesystem', max=filecount)
    # Second pass: record matching files. any() appends each file at most
    # once even if it matches several patterns (the old per-pattern append
    # could duplicate entries).
    for path, subdirs, files in os.walk(root):
        for name in files:
            if any(fnmatch.fnmatch(name, pattern) for pattern in patterns):
                matching_files.append(os.path.join(path, name))
            bar.next()
    print('\n')
    scan_files(matching_files)
def scan_files(files):
    """Check each file in `files` against the signature database.

    Loads database/lines.json once; each value is expected to look like
    [[signature, ...], ...] and the first string of the first entry is
    used as a byte signature. Hits are appended to the global
    `dangerous_files` list and written to output.txt as
    "<signature> --> <path>" lines.

    Raises FileNotFoundError if the signature database itself is missing
    (a scanner with no signatures should fail loudly, not report clean).
    """
    # Load and decode the signature database once, not once per scanned file.
    with open("database/lines.json", 'r') as f2:
        database = json.load(f2)
    signatures = [bytes(entry[0][0], 'utf-8') for entry in database.values()]
    bar2 = Bar('Identifying threats', max=len(files))
    for path in files:
        try:
            with open(path, 'rb') as f:
                # Read the file once. The previous code called f.read()
                # inside the signature loop, which returns b'' after the
                # first call and silently skipped all but one signature.
                contents = f.read()
            for bad_content in signatures:
                if bad_content in contents:
                    dangerous_files.append([bad_content.decode('utf-8'), path])
        except FileNotFoundError:
            # Files can vanish between the walk and the scan; skip them.
            pass
        bar2.next()
    with open("output.txt", 'w+') as f:
        for hit in dangerous_files:
            f.write(hit[0] + " --> " + hit[1] + '\n')
    print('\u001b[33m' + '\n\n' + str(len(dangerous_files)) + " Malicious files detected" + '\u001b[0m')
    print("See the full list of files in output.txt")
scan() | 25.922222 | 104 | 0.520789 | import sys
import os
import time
import json
import fnmatch
from progress.bar import Bar
root = "/"
patterns = ['*.py', '*.sh']
matching_files = []
dangerous_files = []
bad_content = b'rm -rf'
banner = \
'\u001b[34;1m' + """
-------------------------------
| __ \ /\ | \/ |__ __|
| |__) / \ | \ / | | |
| ___/ /\ \ | |\/| | | |
| | / ____ \| | | | | |
|_| /_/ \_\_| |_| |_|
Python Anti-Malware Toolkit
-------------------------------
""" + '\u001b[0m'
print(banner)
def scan():
global files
filecount = 0
print("Initialising...")
for path, subdirs, files in os.walk(root):
for name in files:
filecount += 1
print('\n')
bar = Bar('Scanning filesystem', max=filecount)
previous = ""
for path, subdirs, files in os.walk(root):
for name in files:
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
matching_files.append(os.path.join(path, name))
bar.next()
print('\n')
scan_files(matching_files)
def scan_files(files):
bar2 = Bar('Identifying threats', max=len(files))
for file in files:
try:
with open(file, 'rb') as f:
with open("database/lines.json", 'r') as f2:
database = json.load(f2)
for bad_content in database.values():
bad_content = bad_content[0][0]
bad_content = bytes(bad_content, 'utf-8')
if bytes(bad_content) in f.read():
dangerous_files.append([bad_content.decode('utf-8'), file])
except FileNotFoundError:
pass
bar2.next()
with open("output.txt", 'w+') as f:
for file in dangerous_files:
f.write(file[0] + " --> " + file[1] + '\n')
print('\u001b[33m' + '\n\n' + str(len(dangerous_files)) + " Malicious files detected" + '\u001b[0m')
print("See the full list of files in output.txt")
scan() | true | true |
1c2ed25db044f12f0682b55f85596a61dca543f5 | 8,696 | py | Python | src/ebay_rest/api/sell_account/models/fulfillment_policy_response.py | matecsaj/ebay_rest | dd23236f39e05636eff222f99df1e3699ce47d4a | [
"MIT"
] | 3 | 2021-12-12T04:28:03.000Z | 2022-03-10T03:29:18.000Z | src/ebay_rest/api/sell_account/models/fulfillment_policy_response.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | [
"MIT"
] | 33 | 2021-06-16T20:44:36.000Z | 2022-03-30T14:55:06.000Z | src/ebay_rest/api/sell_account/models/fulfillment_policy_response.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | [
"MIT"
] | 7 | 2021-06-03T09:30:23.000Z | 2022-03-08T19:51:33.000Z | # coding: utf-8
"""
Account API
The <b>Account API</b> gives sellers the ability to configure their eBay seller accounts, including the seller's policies (seller-defined custom policies and eBay business policies), opt in and out of eBay seller programs, configure sales tax tables, and get account information. <br><br>For details on the availability of the methods in this API, see <a href=\"/api-docs/sell/account/overview.html#requirements\">Account API requirements and restrictions</a>. # noqa: E501
OpenAPI spec version: v1.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FulfillmentPolicyResponse(object):
    """Paged collection of a seller's fulfillment (shipping) policies.

    Swagger-generated model for the eBay Account API. `swagger_types` and
    `attribute_map` describe the wire format; the paging fields (`href`,
    `limit`, `next`, `offset`, `prev`) are reserved for future use by the
    API.
    """

    # Attribute name -> declared Swagger type.
    swagger_types = {
        'fulfillment_policies': 'list[FulfillmentPolicy]',
        'href': 'str',
        'limit': 'int',
        'next': 'str',
        'offset': 'int',
        'prev': 'str',
        'total': 'int'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'fulfillment_policies': 'fulfillmentPolicies',
        'href': 'href',
        'limit': 'limit',
        'next': 'next',
        'offset': 'offset',
        'prev': 'prev',
        'total': 'total'
    }

    def __init__(self, fulfillment_policies=None, href=None, limit=None, next=None, offset=None, prev=None, total=None):  # noqa: E501
        """FulfillmentPolicyResponse - a model defined in Swagger."""
        self._fulfillment_policies = None
        self._href = None
        self._limit = None
        self._next = None
        self._offset = None
        self._prev = None
        self._total = None
        self.discriminator = None
        # Route only the arguments that were actually supplied through
        # their property setters, mirroring the generated code's checks.
        supplied = {
            'fulfillment_policies': fulfillment_policies,
            'href': href,
            'limit': limit,
            'next': next,
            'offset': offset,
            'prev': prev,
            'total': total,
        }
        for attr_name, attr_value in supplied.items():
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def fulfillment_policies(self):
        """list[FulfillmentPolicy]: the seller's fulfillment policies."""
        return self._fulfillment_policies

    @fulfillment_policies.setter
    def fulfillment_policies(self, fulfillment_policies):
        self._fulfillment_policies = fulfillment_policies

    @property
    def href(self):
        """str: reserved for future use by the API."""
        return self._href

    @href.setter
    def href(self, href):
        self._href = href

    @property
    def limit(self):
        """int: reserved for future use by the API."""
        return self._limit

    @limit.setter
    def limit(self, limit):
        self._limit = limit

    @property
    def next(self):
        """str: reserved for future use by the API."""
        return self._next

    @next.setter
    def next(self, next):
        self._next = next

    @property
    def offset(self):
        """int: reserved for future use by the API."""
        return self._offset

    @offset.setter
    def offset(self, offset):
        self._offset = offset

    @property
    def prev(self):
        """str: reserved for future use by the API."""
        return self._prev

    @prev.setter
    def prev(self, prev):
        self._prev = prev

    @property
    def total(self):
        """int: number of items in the result set (0 when none found)."""
        return self._total

    @total.setter
    def total(self, total):
        self._total = total

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Values exposing a `to_dict` method are converted recursively;
        lists and dicts are converted one level deep, matching the
        behavior of the generated code.
        """
        def _convert(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr))
                  for attr in self.swagger_types}
        # Dead in practice (this class does not subclass dict); kept for
        # parity with the generated template.
        if issubclass(FulfillmentPolicyResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two responses are equal iff their attribute dicts match."""
        return (isinstance(other, FulfillmentPolicyResponse)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of `__eq__`."""
        return not self == other
| 30.946619 | 479 | 0.598436 |
import pprint
import re
import six
class FulfillmentPolicyResponse(object):
swagger_types = {
'fulfillment_policies': 'list[FulfillmentPolicy]',
'href': 'str',
'limit': 'int',
'next': 'str',
'offset': 'int',
'prev': 'str',
'total': 'int'
}
attribute_map = {
'fulfillment_policies': 'fulfillmentPolicies',
'href': 'href',
'limit': 'limit',
'next': 'next',
'offset': 'offset',
'prev': 'prev',
'total': 'total'
}
def __init__(self, fulfillment_policies=None, href=None, limit=None, next=None, offset=None, prev=None, total=None):
self._fulfillment_policies = None
self._href = None
self._limit = None
self._next = None
self._offset = None
self._prev = None
self._total = None
self.discriminator = None
if fulfillment_policies is not None:
self.fulfillment_policies = fulfillment_policies
if href is not None:
self.href = href
if limit is not None:
self.limit = limit
if next is not None:
self.next = next
if offset is not None:
self.offset = offset
if prev is not None:
self.prev = prev
if total is not None:
self.total = total
@property
def fulfillment_policies(self):
return self._fulfillment_policies
@fulfillment_policies.setter
def fulfillment_policies(self, fulfillment_policies):
self._fulfillment_policies = fulfillment_policies
@property
def href(self):
return self._href
@href.setter
def href(self, href):
self._href = href
@property
def limit(self):
return self._limit
@limit.setter
def limit(self, limit):
self._limit = limit
@property
def next(self):
return self._next
@next.setter
def next(self, next):
self._next = next
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, offset):
self._offset = offset
@property
def prev(self):
return self._prev
@prev.setter
def prev(self, prev):
self._prev = prev
@property
def total(self):
return self._total
@total.setter
def total(self, total):
self._total = total
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FulfillmentPolicyResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, FulfillmentPolicyResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c2ed28d9edb990dad0e6868305d0ddaf594099a | 846 | py | Python | 02.translate-bots/kakao_bot.1.py | TTEarth/chatbothon | 091ab78e8fce3dd942cf4e829f198f70a835380f | [
"Apache-2.0"
] | 1 | 2018-11-25T01:46:30.000Z | 2018-11-25T01:46:30.000Z | 02.translate-bots/kakao_bot.1.py | TTEarth/chatbothon | 091ab78e8fce3dd942cf4e829f198f70a835380f | [
"Apache-2.0"
] | null | null | null | 02.translate-bots/kakao_bot.1.py | TTEarth/chatbothon | 091ab78e8fce3dd942cf4e829f198f70a835380f | [
"Apache-2.0"
] | 2 | 2018-11-18T02:49:16.000Z | 2018-11-25T03:05:17.000Z | # -*- coding: utf-8 -*-
from flask import Flask
from flask import request
from flask import jsonify
from flask import json
from googletrans import Translator
app = Flask(__name__)
@app.route("/keyboard")
def keyboard():
    """KakaoTalk bot keyboard endpoint: advertise a free-text input keyboard."""
    return jsonify(type="text")
@app.route("/message", methods=["POST"])
def message():
    """KakaoTalk bot message endpoint.

    Reads the user's text from the request body, translates it from
    Korean to English with googletrans, and returns it wrapped in the
    KakaoTalk response-message JSON format.
    """
    data = json.loads(request.data)
    content = data["content"]  # the raw text the user typed
    translator = Translator()
    # src/dest: translate Korean input into English.
    translated = translator.translate(content, dest="en", src="ko")
    response = {
        "message": {
            "text": translated.text
        }
    }
    # ensure_ascii=False keeps non-ASCII characters readable in the reply.
    response = json.dumps(response, ensure_ascii=False)
    return response
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
# app.run(host="localhost", port=80)
# app.run(host="127.0.0.1", port=80)
# flask 예시 : app.run(host="0.0.0.0", port=5000)
| 22.864865 | 67 | 0.635934 |
from flask import Flask
from flask import request
from flask import jsonify
from flask import json
from googletrans import Translator
app = Flask(__name__)
@app.route("/keyboard")
def keyboard():
return jsonify(type="text")
@app.route("/message", methods=["POST"])
def message():
data = json.loads(request.data)
content = data["content"]
translator = Translator()
translated = translator.translate(content, dest="en", src="ko")
response = {
"message": {
"text": translated.text
}
}
response = json.dumps(response, ensure_ascii=False)
return response
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
| true | true |
1c2ed813675282f66b63170ef08fc72abf9aefb4 | 53,809 | py | Python | tensorflow/python/keras/callbacks.py | takafreak/tensorflow | b85cb440e257a367fb70f8321ddaa669d1bd9fae | [
"Apache-2.0"
] | 2 | 2020-12-06T02:26:32.000Z | 2021-08-20T03:40:32.000Z | tensorflow/python/keras/callbacks.py | takafreak/tensorflow | b85cb440e257a367fb70f8321ddaa669d1bd9fae | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/callbacks.py | takafreak/tensorflow | b85cb440e257a367fb70f8321ddaa669d1bd9fae | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import csv
import io
import json
import os
import time
import numpy as np
import six
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.mode_keys import ModeKeys
from tensorflow.python.util.tf_export import keras_export
try:
import requests
except ImportError:
requests = None
# pylint: disable=protected-access
def configure_callbacks(callbacks,
                        model,
                        do_validation=False,
                        batch_size=None,
                        epochs=None,
                        steps_per_epoch=None,
                        samples=None,
                        verbose=1,
                        count_mode='steps',
                        mode=ModeKeys.TRAIN):
  """Configures callbacks for use in various training loops.

  Arguments:
      callbacks: List of Callbacks.
      model: Model being trained.
      do_validation: Whether or not validation loop will be run.
      batch_size: Number of samples per batch.
      epochs: Number of epoch to train.
      steps_per_epoch: Number of batches to run per training epoch.
      samples: Number of training samples.
      verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
      count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
      mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
        Which loop mode to configure callbacks for.

  Returns:
      Instance of CallbackList used to control all Callbacks.
  """
  # Check if callbacks have already been configured.
  # Idempotent: a CallbackList passed back in is returned unchanged.
  if isinstance(callbacks, CallbackList):
    return callbacks
  if not callbacks:
    callbacks = []
  # Add additional callbacks during training.
  if mode == ModeKeys.TRAIN:
    model.history = History()
    stateful_metric_names = None
    if hasattr(model, 'metrics_names'):
      stateful_metric_names = model.metrics_names[1:]  # Exclude `loss`
    # Order matters: BaseLogger runs first (accumulates epoch averages),
    # History runs last (records the final per-epoch logs).
    callbacks = [BaseLogger(stateful_metrics=stateful_metric_names)
                ] + (callbacks or []) + [model.history]
    if verbose:
      callbacks.append(
          ProgbarLogger(count_mode, stateful_metrics=stateful_metric_names))
  callback_list = CallbackList(callbacks)
  # Set callback model
  callback_model = model._get_callback_model()
  callback_list.set_model(callback_model)
  # Set callback parameters
  callback_metrics = []
  # When we have deferred build scenario with iterator input, we will compile
  # when we standardize first batch of data.
  if mode != ModeKeys.PREDICT and hasattr(model, 'metrics_names'):
    callback_metrics = copy.copy(model.metrics_names)
    if do_validation:
      # Validation metrics are reported alongside training metrics with a
      # `val_` prefix.
      callback_metrics += ['val_' + n for n in model.metrics_names]
  callback_params = {
      'batch_size': batch_size,
      'epochs': epochs,
      'steps': steps_per_epoch,
      'samples': samples,
      'verbose': verbose,
      'do_validation': do_validation,
      'metrics': callback_metrics,
  }
  callback_list.set_params(callback_params)
  # Reset the stop flag so EarlyStopping state from a prior fit() does not
  # leak into this run.
  callback_list.model.stop_training = False
  return callback_list
# pylint: enable=protected-access
def _is_generator_like(data):
"""Checks if data is a generator, Sequence, or Iterator."""
return (hasattr(data, 'next') or hasattr(data, '__next__') or isinstance(
data, (Sequence, iterator_ops.Iterator, iterator_ops.EagerIterator)))
def make_logs(model, logs, outputs, mode, prefix=''):
  """Computes logs for sending to `on_batch_end` methods."""
  if mode not in {ModeKeys.TRAIN, ModeKeys.TEST}:
    # Predict mode: there are no named metrics, pass outputs through.
    logs['outputs'] = outputs
    return logs
  if hasattr(model, 'metrics_names'):
    # Pair each metric name with its value, optionally prefixed
    # (e.g. 'val_' for validation results).
    for metric_name, metric_value in zip(model.metrics_names, outputs):
      logs[prefix + metric_name] = metric_value
  return logs
class CallbackList(object):
"""Container abstracting a list of callbacks.
Arguments:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
self.params = {}
self.model = None
self._reset_batch_timing()
def _reset_batch_timing(self):
self._delta_t_batch = 0.
self._delta_ts = collections.defaultdict(
lambda: collections.deque([], maxlen=self.queue_length))
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
self.params = params
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
self.model = model
for callback in self.callbacks:
callback.set_model(model)
def _call_batch_hook(self, mode, hook, batch, logs=None):
"""Helper function for all batch_{begin | end} methods."""
if not self.callbacks:
return
hook_name = 'on_{mode}_batch_{hook}'.format(mode=mode, hook=hook)
if hook == 'begin':
self._t_enter_batch = time.time()
if hook == 'end':
# Batch is ending, calculate batch time.
self._delta_t_batch = time.time() - self._t_enter_batch
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
batch_hook = getattr(callback, hook_name)
batch_hook(batch, logs)
self._delta_ts[hook_name].append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts[hook_name])
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
logging.warning(
'Method (%s) is slow compared '
'to the batch update (%f). Check your callbacks.', hook_name,
delta_t_median)
def _call_begin_hook(self, mode):
"""Helper function for on_{train|test|predict}_begin methods."""
if mode == ModeKeys.TRAIN:
self.on_train_begin()
elif mode == ModeKeys.TEST:
self.on_test_begin()
else:
self.on_predict_begin()
def _call_end_hook(self, mode):
"""Helper function for on_{train|test|predict}_end methods."""
if mode == ModeKeys.TRAIN:
self.on_train_end()
elif mode == ModeKeys.TEST:
self.on_test_end()
else:
self.on_predict_end()
def on_batch_begin(self, batch, logs=None):
self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
def on_batch_end(self, batch, logs=None):
self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
def on_epoch_begin(self, epoch, logs=None):
"""Calls the `on_epoch_begin` methods of its callbacks.
This function should only be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._reset_batch_timing()
def on_epoch_end(self, epoch, logs=None):
"""Calls the `on_epoch_end` methods of its callbacks.
This function should only be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_train_batch_begin(self, batch, logs=None):
"""Calls the `on_train_batch_begin` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
def on_train_batch_end(self, batch, logs=None):
"""Calls the `on_train_batch_end` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
def on_test_batch_begin(self, batch, logs=None):
"""Calls the `on_test_batch_begin` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
self._call_batch_hook(ModeKeys.TEST, 'begin', batch, logs=logs)
def on_test_batch_end(self, batch, logs=None):
"""Calls the `on_test_batch_end` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)
def on_predict_batch_begin(self, batch, logs=None):
"""Calls the `on_predict_batch_begin` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
self._call_batch_hook(ModeKeys.PREDICT, 'begin', batch, logs=logs)
def on_predict_batch_end(self, batch, logs=None):
"""Calls the `on_predict_batch_end` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)
def on_train_begin(self, logs=None):
"""Calls the `on_train_begin` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Calls the `on_train_end` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_train_end(logs)
def on_test_begin(self, logs=None):
"""Calls the `on_test_begin` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_test_begin(logs)
def on_test_end(self, logs=None):
"""Calls the `on_test_end` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_test_end(logs)
def on_predict_begin(self, logs=None):
"""Calls the 'on_predict_begin` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_predict_begin(logs)
def on_predict_end(self, logs=None):
"""Calls the `on_predict_end` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_predict_end(logs)
def __iter__(self):
return iter(self.callbacks)
@keras_export('keras.callbacks.Callback')
class Callback(object):
"""Abstract base class used to build new callbacks.
Attributes:
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Model` class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
"""
def __init__(self):
self.validation_data = None
self.model = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_batch_begin(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_begin`."""
def on_batch_end(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_end`."""
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`.
"""
def on_train_batch_begin(self, batch, logs=None):
"""Called at the beginning of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
# For backwards compatibility.
self.on_batch_begin(batch, logs=logs)
def on_train_batch_end(self, batch, logs=None):
"""Called at the end of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
# For backwards compatibility.
self.on_batch_end(batch, logs=logs)
def on_test_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `evaluate` methods.
Also called at the beginning of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
def on_test_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `evaluate` methods.
Also called at the end of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
def on_predict_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `predict` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
def on_predict_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `predict` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_train_end(self, logs=None):
"""Called at the end of training.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_test_begin(self, logs=None):
"""Called at the beginning of evaluation or validation.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_test_end(self, logs=None):
"""Called at the end of evaluation or validation.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_predict_begin(self, logs=None):
"""Called at the beginning of prediction.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_predict_end(self, logs=None):
"""Called at the end of prediction.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@keras_export('keras.callbacks.BaseLogger')
class BaseLogger(Callback):
  """Callback that accumulates per-epoch averages of metrics.

  Keras applies this callback automatically to every model. Metrics are
  averaged over the samples seen in the epoch, except for the names
  listed in `stateful_metrics`, which are passed through unchanged.

  Arguments:
    stateful_metrics: Iterable of string names of metrics that
      should *not* be averaged over an epoch. Metrics in this list
      are logged as-is in `on_epoch_end`; all others are averaged.
  """

  def __init__(self, stateful_metrics=None):
    super(BaseLogger, self).__init__()
    self.stateful_metrics = set(stateful_metrics or [])

  def on_epoch_begin(self, epoch, logs=None):
    # Reset running sums at the start of every epoch.
    self.seen = 0
    self.totals = {}

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    # With a distribution strategy several steps may run at once, so the
    # sample count must account for the number of steps in this batch.
    num_steps = logs.get('num_steps', 1)
    self.seen += batch_size * num_steps

    for name, value in logs.items():
      if name in self.stateful_metrics:
        # Stateful metrics carry their own running state; keep the latest.
        self.totals[name] = value
      else:
        # Accumulate a sample-weighted sum for averaging later.
        self.totals[name] = self.totals.get(name, 0) + value * batch_size

  def on_epoch_end(self, epoch, logs=None):
    if logs is None:
      return
    for name in self.params['metrics']:
      if name not in self.totals:
        continue
      # Make the value available to subsequent callbacks.
      if name in self.stateful_metrics:
        logs[name] = self.totals[name]
      else:
        logs[name] = self.totals[name] / self.seen
@keras_export('keras.callbacks.TerminateOnNaN')
class TerminateOnNaN(Callback):
  """Callback that stops training as soon as the loss becomes NaN or Inf.
  """

  def on_batch_end(self, batch, logs=None):
    loss = (logs or {}).get('loss')
    if loss is None:
      return
    # A non-finite loss means training has diverged; stop immediately.
    if np.isnan(loss) or np.isinf(loss):
      print('Batch %d: Invalid loss, terminating training' % (batch))
      self.model.stop_training = True
@keras_export('keras.callbacks.ProgbarLogger')
class ProgbarLogger(Callback):
  """Callback that prints metrics to stdout.

  Arguments:
      count_mode: One of "steps" or "samples".
          Whether the progress bar should
          count samples seen or steps (batches) seen.
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.
          Metrics in this list will be logged as-is.
          All others will be averaged over time (e.g. loss, etc).

  Raises:
      ValueError: In case of invalid `count_mode`.
  """

  def __init__(self, count_mode='samples', stateful_metrics=None):
    super(ProgbarLogger, self).__init__()
    # `use_steps` selects whether progress is measured in batches
    # ('steps') or in individual samples ('samples').
    if count_mode == 'samples':
      self.use_steps = False
    elif count_mode == 'steps':
      self.use_steps = True
    else:
      raise ValueError('Unknown `count_mode`: ' + str(count_mode))
    self.stateful_metrics = set(stateful_metrics or [])

  def on_train_begin(self, logs=None):
    # `self.params` is populated by `set_params` before training starts.
    self.verbose = self.params['verbose']
    self.epochs = self.params['epochs']

  def on_epoch_begin(self, epoch, logs=None):
    self.seen = 0
    # The progress-bar target is the expected total for this epoch,
    # expressed in the unit selected by `use_steps`.
    if self.use_steps:
      self.target = self.params['steps']
    else:
      self.target = self.params['samples']

    if self.verbose:
      if self.epochs > 1:
        print('Epoch %d/%d' % (epoch + 1, self.epochs))
    # A fresh Progbar is created per epoch so counts restart from zero.
    self.progbar = Progbar(
        target=self.target,
        verbose=self.verbose,
        stateful_metrics=self.stateful_metrics,
        unit_name='step' if self.use_steps else 'sample')

  def on_batch_begin(self, batch, logs=None):
    # Collects (name, value) pairs to display after this batch.
    self.log_values = []

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    # In case of distribution strategy we can potentially run multiple steps
    # at the same time, we should account for that in the `seen` calculation.
    num_steps = logs.get('num_steps', 1)
    if self.use_steps:
      self.seen += num_steps
    else:
      self.seen += batch_size * num_steps

    for k in self.params['metrics']:
      if k in logs:
        self.log_values.append((k, logs[k]))

    # Skip progbar update for the last batch;
    # will be handled by on_epoch_end.
    if self.verbose and (self.target is None or self.seen < self.target):
      self.progbar.update(self.seen, self.log_values)

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    for k in self.params['metrics']:
      if k in logs:
        self.log_values.append((k, logs[k]))
    if self.verbose:
      # Final update for the epoch, including the last batch's values.
      self.progbar.update(self.seen, self.log_values)
@keras_export('keras.callbacks.History')
class History(Callback):
  """Callback that records per-epoch events into a `History` object.

  Keras applies this callback automatically to every model; the
  resulting `History` object is returned by the model's `fit` method.
  """

  def on_train_begin(self, logs=None):
    # Start with empty records so a re-used instance resets cleanly.
    self.epoch = []
    self.history = {}

  def on_epoch_end(self, epoch, logs=None):
    self.epoch.append(epoch)
    # Append each logged value to its metric's list, creating it on
    # first use.
    for name, value in (logs or {}).items():
      self.history.setdefault(name, []).append(value)
@keras_export('keras.callbacks.ModelCheckpoint')
class ModelCheckpoint(Callback):
  """Save the model after every epoch.

  `filepath` can contain named formatting options,
  which will be filled the value of `epoch` and
  keys in `logs` (passed in `on_epoch_end`).

  For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
  then the model checkpoints will be saved with the epoch number and
  the validation loss in the filename.

  Arguments:
      filepath: string, path to save the model file.
      monitor: quantity to monitor.
      verbose: verbosity mode, 0 or 1.
      save_best_only: if `save_best_only=True`,
          the latest best model according to
          the quantity monitored will not be overwritten.
      mode: one of {auto, min, max}.
          If `save_best_only=True`, the decision
          to overwrite the current save file is made
          based on either the maximization or the
          minimization of the monitored quantity. For `val_acc`,
          this should be `max`, for `val_loss` this should
          be `min`, etc. In `auto` mode, the direction is
          automatically inferred from the name of the monitored quantity.
      save_weights_only: if True, then only the model's weights will be
          saved (`model.save_weights(filepath)`), else the full model
          is saved (`model.save(filepath)`).
      period: Interval (number of epochs) between checkpoints.
  """

  def __init__(self,
               filepath,
               monitor='val_loss',
               verbose=0,
               save_best_only=False,
               save_weights_only=False,
               mode='auto',
               period=1):
    super(ModelCheckpoint, self).__init__()
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.period = period
    self.epochs_since_last_save = 0

    if mode not in ['auto', 'min', 'max']:
      logging.warning('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'

    # NOTE: `np.inf` is used instead of the `np.Inf` alias, which was
    # removed in NumPy 2.0; behavior is identical.
    if mode == 'min':
      self.monitor_op = np.less
      self.best = np.inf
    elif mode == 'max':
      self.monitor_op = np.greater
      self.best = -np.inf
    else:
      # 'auto': infer the comparison direction from the metric name.
      if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
        self.monitor_op = np.greater
        self.best = -np.inf
      else:
        self.monitor_op = np.less
        self.best = np.inf

  def set_model(self, model):
    self.model = model
    # Use name matching rather than `isinstance` to avoid circular dependencies.
    # Subclassed (non-graph, non-Sequential) models cannot be saved whole,
    # so fall back to saving weights only.
    if (not self.save_weights_only and
        not model._is_graph_network and  # pylint: disable=protected-access
        model.__class__.__name__ != 'Sequential'):
      self.save_weights_only = True

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    self.epochs_since_last_save += 1
    if self.epochs_since_last_save >= self.period:
      self.epochs_since_last_save = 0
      # Fill in `{epoch}` and any logged metrics referenced in the path.
      filepath = self.filepath.format(epoch=epoch + 1, **logs)
      if self.save_best_only:
        current = logs.get(self.monitor)
        if current is None:
          logging.warning('Can save best model only with %s available, '
                          'skipping.', self.monitor)
        else:
          if self.monitor_op(current, self.best):
            if self.verbose > 0:
              print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
                    ' saving model to %s' % (epoch + 1, self.monitor, self.best,
                                             current, filepath))
            self.best = current
            if self.save_weights_only:
              self.model.save_weights(filepath, overwrite=True)
            else:
              self.model.save(filepath, overwrite=True)
          else:
            if self.verbose > 0:
              print('\nEpoch %05d: %s did not improve from %0.5f' %
                    (epoch + 1, self.monitor, self.best))
      else:
        if self.verbose > 0:
          print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
        if self.save_weights_only:
          self.model.save_weights(filepath, overwrite=True)
        else:
          self.model.save(filepath, overwrite=True)
@keras_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
  """Stop training when a monitored quantity has stopped improving.

  Arguments:
      monitor: Quantity to be monitored.
      min_delta: Minimum change in the monitored quantity
          to qualify as an improvement, i.e. an absolute
          change of less than min_delta, will count as no
          improvement.
      patience: Number of epochs with no improvement
          after which training will be stopped.
      verbose: verbosity mode.
      mode: One of `{"auto", "min", "max"}`. In `min` mode,
          training will stop when the quantity
          monitored has stopped decreasing; in `max`
          mode it will stop when the quantity
          monitored has stopped increasing; in `auto`
          mode, the direction is automatically inferred
          from the name of the monitored quantity.
      baseline: Baseline value for the monitored quantity.
          Training will stop if the model doesn't show improvement over the
          baseline.
      restore_best_weights: Whether to restore model weights from
          the epoch with the best value of the monitored quantity.
          If False, the model weights obtained at the last step of
          training are used.
  """

  def __init__(self,
               monitor='val_loss',
               min_delta=0,
               patience=0,
               verbose=0,
               mode='auto',
               baseline=None,
               restore_best_weights=False):
    super(EarlyStopping, self).__init__()

    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.baseline = baseline
    self.min_delta = abs(min_delta)
    self.wait = 0
    self.stopped_epoch = 0
    self.restore_best_weights = restore_best_weights
    self.best_weights = None

    if mode not in ['auto', 'min', 'max']:
      logging.warning('EarlyStopping mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'

    if mode == 'min':
      self.monitor_op = np.less
    elif mode == 'max':
      self.monitor_op = np.greater
    else:
      # 'auto': accuracy-like metrics should increase; everything else
      # (e.g. losses) should decrease.
      if 'acc' in self.monitor:
        self.monitor_op = np.greater
      else:
        self.monitor_op = np.less

    # Sign `min_delta` so `current - min_delta` always moves the value
    # in the "not yet an improvement" direction.
    if self.monitor_op == np.greater:
      self.min_delta *= 1
    else:
      self.min_delta *= -1

  def on_train_begin(self, logs=None):
    # Allow instances to be re-used
    self.wait = 0
    self.stopped_epoch = 0
    if self.baseline is not None:
      self.best = self.baseline
    else:
      # `np.inf` (not the removed NumPy 2.0 alias `np.Inf`): start from
      # the worst possible value for the monitored direction.
      self.best = np.inf if self.monitor_op == np.less else -np.inf

  def on_epoch_end(self, epoch, logs=None):
    current = self.get_monitor_value(logs)
    if current is None:
      return
    if self.monitor_op(current - self.min_delta, self.best):
      self.best = current
      self.wait = 0
      if self.restore_best_weights:
        self.best_weights = self.model.get_weights()
    else:
      self.wait += 1
      if self.wait >= self.patience:
        self.stopped_epoch = epoch
        self.model.stop_training = True
        if self.restore_best_weights:
          if self.verbose > 0:
            print('Restoring model weights from the end of the best epoch.')
          self.model.set_weights(self.best_weights)

  def on_train_end(self, logs=None):
    if self.stopped_epoch > 0 and self.verbose > 0:
      print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))

  def get_monitor_value(self, logs):
    """Returns the monitored metric from `logs`, warning if missing."""
    logs = logs or {}
    monitor_value = logs.get(self.monitor)
    if monitor_value is None:
      logging.warning('Early stopping conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    return monitor_value
@keras_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
  """Callback that streams epoch-end events to a remote server.

  Requires the `requests` library. Events are posted to
  `root + '/publish/epoch/end/'` by default as an HTTP POST whose `data`
  argument is a JSON-encoded dictionary of event data. When
  `send_as_json` is True, the request's content type is
  application/json; otherwise the serialized JSON is sent inside a form.

  Arguments:
      root: String; root url of the target server.
      path: String; path relative to `root` to which the events will be sent.
      field: String; JSON field under which the data will be stored.
          Only used when the payload is sent within a form
          (i.e. send_as_json is False).
      headers: Dictionary; optional custom HTTP headers.
      send_as_json: Boolean; whether the request should be
          sent as application/json.
  """

  def __init__(self,
               root='http://localhost:9000',
               path='/publish/epoch/end/',
               field='data',
               headers=None,
               send_as_json=False):
    super(RemoteMonitor, self).__init__()
    self.root = root
    self.path = path
    self.field = field
    self.headers = headers
    self.send_as_json = send_as_json

  def on_epoch_end(self, epoch, logs=None):
    if requests is None:
      raise ImportError('RemoteMonitor requires the `requests` library.')
    # Build the event payload: epoch index plus all logged values.
    send = {'epoch': epoch}
    send.update(logs or {})
    try:
      if self.send_as_json:
        requests.post(self.root + self.path, json=send, headers=self.headers)
      else:
        requests.post(
            self.root + self.path, {self.field: json.dumps(send)},
            headers=self.headers)
    except requests.exceptions.RequestException:
      # Monitoring is best-effort: log the failure but keep training.
      logging.warning('Warning: could not reach RemoteMonitor '
                      'root server at ' + str(self.root))
@keras_export('keras.callbacks.LearningRateScheduler')
class LearningRateScheduler(Callback):
  """Callback that sets the learning rate from a schedule each epoch.

  Arguments:
      schedule: a function that takes an epoch index as input
          (integer, indexed from 0) and returns a new
          learning rate as output (float).
      verbose: int. 0: quiet, 1: update messages.
  """

  def __init__(self, schedule, verbose=0):
    super(LearningRateScheduler, self).__init__()
    self.schedule = schedule
    self.verbose = verbose

  def on_epoch_begin(self, epoch, logs=None):
    if not hasattr(self.model.optimizer, 'lr'):
      raise ValueError('Optimizer must have a "lr" attribute.')
    try:
      # Prefer the newer two-argument signature: schedule(epoch, lr).
      current_lr = float(K.get_value(self.model.optimizer.lr))
      new_lr = self.schedule(epoch, current_lr)
    except TypeError:
      # Fall back to the legacy one-argument signature: schedule(epoch).
      new_lr = self.schedule(epoch)
    if not isinstance(new_lr, (float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function '
                       'should be float.')
    K.set_value(self.model.optimizer.lr, new_lr)
    if self.verbose > 0:
      print('\nEpoch %05d: LearningRateScheduler reducing learning '
            'rate to %s.' % (epoch + 1, new_lr))

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    # Expose the learning rate actually in effect for this epoch.
    logs['lr'] = K.get_value(self.model.optimizer.lr)
@keras_export('keras.callbacks.TensorBoard', v1=[])
class TensorBoard(Callback):
  # pylint: disable=line-too-long
  """TensorBoard basic visualizations.

  This callback writes a log for TensorBoard, which allows
  you to visualize dynamic graphs of your training and test
  metrics, as well as activation histograms for the different
  layers in your model.

  TensorBoard is a visualization tool provided with TensorFlow.
  If you have installed TensorFlow with pip, you should be able
  to launch TensorBoard from the command line:

  ```sh
  tensorboard --logdir=/full_path_to_your_logs
  ```

  You can find more information about TensorBoard
  [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).

  Arguments:
      log_dir: the path of the directory where to save the log files to be
        parsed by TensorBoard.
      histogram_freq: frequency (in epochs) at which to compute activation and
        weight histograms for the layers of the model. If set to 0, histograms
        won't be computed. Validation data (or split) must be specified for
        histogram visualizations.
      write_graph: whether to visualize the graph in TensorBoard. The log file
        can become quite large when write_graph is set to True.
      write_images: whether to write model weights to visualize as image in
        TensorBoard.
      update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
        writes the losses and metrics to TensorBoard after each batch. The same
        applies for `'epoch'`. If using an integer, let's say `1000`, the
        callback will write the metrics and losses to TensorBoard every 1000
        samples. Note that writing too frequently to TensorBoard can slow down
        your training.

  Raises:
      ValueError: If histogram_freq is set and no validation data is provided.
  """

  # pylint: enable=line-too-long

  def __init__(self,
               log_dir='./logs',
               histogram_freq=0,
               write_graph=True,
               write_images=False,
               update_freq='epoch',
               **kwargs):
    super(TensorBoard, self).__init__()
    self._validate_kwargs(kwargs)

    self.log_dir = log_dir
    self.histogram_freq = histogram_freq
    self.write_graph = write_graph
    self.write_images = write_images
    # Normalize 'batch' to the integer 1 so the update check below can
    # compare sample counts uniformly; 'epoch' is kept as a string.
    if update_freq == 'batch':
      self.update_freq = 1
    else:
      self.update_freq = update_freq

    # Counters used to decide when to emit batch-level summaries.
    self._samples_seen = 0
    self._samples_seen_at_last_write = 0
    self._current_batch = 0
    self._total_batches_seen = 0
    self._total_val_batches_seen = 0

  def _validate_kwargs(self, kwargs):
    """Handle arguments were supported in V1."""
    # V1-only options are accepted but ignored, with a warning.
    if kwargs.get('write_grads', False):
      logging.warning('`write_grads` will be ignored in TensorFlow 2.0 '
                      'for the `TensorBoard` Callback.')
    if kwargs.get('embeddings_freq', False):
      logging.warning('Embeddings will be ignored in TensorFlow 2.0 '
                      'for the `TensorBoard` Callback.')

    unrecognized_kwargs = set(kwargs.keys()) - {
        'write_grads', 'embeddings_freq', 'embeddings_layer_names',
        'embeddings_metadata', 'embeddings_data'
    }

    # Only allow kwargs that were supported in V1.
    if unrecognized_kwargs:
      raise ValueError('Unrecognized arguments in `TensorBoard` '
                       'Callback: ' + str(unrecognized_kwargs))

  def set_model(self, model):
    """Sets Keras model and writes graph if specified."""
    self.model = model
    # Summary writing uses the V2 (eager) summary ops.
    with context.eager_mode():
      self.writer = summary_ops_v2.create_file_writer(self.log_dir)
      if self.write_graph:
        if model.run_eagerly:
          logging.warning('TensorBoard Callback will ignore `write_graph=True`'
                          'when `Model.run_eagerly=True`.`')
        else:
          with self.writer.as_default():
            with summary_ops_v2.always_record_summaries():
              summary_ops_v2.graph(K.get_graph())

  def on_batch_end(self, batch, logs=None):
    """Writes scalar summaries for metrics on every training batch."""
    # Don't output batch_size and batch number as TensorBoard summaries
    logs = logs or {}
    self._samples_seen += logs.get('size', 1)
    samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
    # Only write when the configured number of samples has elapsed;
    # 'epoch' mode never writes per-batch.
    if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
      self._log_metrics(logs, prefix='batch_', step=self._total_batches_seen)
      self._samples_seen_at_last_write = self._samples_seen
    self._total_batches_seen += 1

  def on_epoch_end(self, epoch, logs=None):
    """Runs metrics and histogram summaries at epoch end."""
    # In 'epoch' mode the step axis is the epoch index; otherwise it is
    # the cumulative sample count, to line up with per-batch summaries.
    step = epoch if self.update_freq == 'epoch' else self._samples_seen
    self._log_metrics(logs, prefix='epoch_', step=step)

    if self.histogram_freq and epoch % self.histogram_freq == 0:
      self._log_weights(epoch)

  def on_train_end(self, logs=None):
    with context.eager_mode():
      self.writer.close()

  def _log_metrics(self, logs, prefix, step):
    """Writes metrics out as custom scalar summaries.

    Arguments:
        logs: Dict. Keys are scalar summary names, values are
            NumPy scalars.
        prefix: String. The prefix to apply to the scalar summary names.
        step: Int. The global step to use for TensorBoard.
    """
    if logs is None:
      logs = {}

    # Scrub non-metric items and assign batch or epoch prefix.
    metric_logs = {(prefix + k): v
                   for k, v in logs.items()
                   if k not in ['batch', 'size', 'num_steps']}

    with context.eager_mode(), \
          self.writer.as_default(), \
          summary_ops_v2.always_record_summaries():
      for name, value in metric_logs.items():
        summary_ops_v2.scalar(name, value, step=step)

  def _log_weights(self, epoch):
    """Logs the weights of the Model to TensorBoard."""
    with context.eager_mode(), \
        self.writer.as_default(), \
        summary_ops_v2.always_record_summaries():
      for layer in self.model.layers:
        for weight in layer.weights:
          # ':' is not a valid summary-name character.
          weight_name = weight.name.replace(':', '_')
          with ops.init_scope():
            weight = K.get_value(weight)
          summary_ops_v2.histogram(weight_name, weight, step=epoch)
          if self.write_images:
            self._log_weight_as_image(weight, weight_name, epoch)
      self.writer.flush()

  def _log_weight_as_image(self, weight, weight_name, epoch):
    """Logs a weight as a TensorBoard image."""
    w_img = array_ops.squeeze(weight)
    shape = K.int_shape(w_img)
    if len(shape) == 1:  # Bias case
      w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
    elif len(shape) == 2:  # Dense layer kernel case
      if shape[0] > shape[1]:
        # Orient the longer dimension horizontally for display.
        w_img = array_ops.transpose(w_img)
        shape = K.int_shape(w_img)
      w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
    elif len(shape) == 3:  # ConvNet case
      if K.image_data_format() == 'channels_last':
        # Switch to channels_first to display every kernel as a separate
        # image.
        w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
        shape = K.int_shape(w_img)
      w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])

    shape = K.int_shape(w_img)
    # Not possible to handle 3D convnets etc.
    if len(shape) == 4 and shape[-1] in [1, 3, 4]:
      summary_ops_v2.image(weight_name, w_img, step=epoch)
@keras_export('keras.callbacks.ReduceLROnPlateau')
class ReduceLROnPlateau(Callback):
  """Reduce learning rate when a metric has stopped improving.

  Models often benefit from reducing the learning rate by a factor
  of 2-10 once learning stagnates. This callback monitors a
  quantity and if no improvement is seen for a 'patience' number
  of epochs, the learning rate is reduced.

  Example:

  ```python
  reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                patience=5, min_lr=0.001)
  model.fit(X_train, Y_train, callbacks=[reduce_lr])
  ```

  Arguments:
      monitor: quantity to be monitored.
      factor: factor by which the learning rate will
          be reduced. new_lr = lr * factor
      patience: number of epochs with no improvement
          after which learning rate will be reduced.
      verbose: int. 0: quiet, 1: update messages.
      mode: one of {auto, min, max}. In `min` mode,
          lr will be reduced when the quantity
          monitored has stopped decreasing; in `max`
          mode it will be reduced when the quantity
          monitored has stopped increasing; in `auto`
          mode, the direction is automatically inferred
          from the name of the monitored quantity.
      min_delta: threshold for measuring the new optimum,
          to only focus on significant changes.
      cooldown: number of epochs to wait before resuming
          normal operation after lr has been reduced.
      min_lr: lower bound on the learning rate.
  """

  def __init__(self,
               monitor='val_loss',
               factor=0.1,
               patience=10,
               verbose=0,
               mode='auto',
               min_delta=1e-4,
               cooldown=0,
               min_lr=0,
               **kwargs):
    super(ReduceLROnPlateau, self).__init__()

    self.monitor = monitor
    if factor >= 1.0:
      raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.')
    # `epsilon` was the old name of `min_delta`; honor it with a warning.
    if 'epsilon' in kwargs:
      min_delta = kwargs.pop('epsilon')
      logging.warning('`epsilon` argument is deprecated and '
                      'will be removed, use `min_delta` instead.')
    self.factor = factor
    self.min_lr = min_lr
    self.min_delta = min_delta
    self.patience = patience
    self.verbose = verbose
    self.cooldown = cooldown
    self.cooldown_counter = 0  # Cooldown counter.
    self.wait = 0
    self.best = 0
    self.mode = mode
    self.monitor_op = None
    self._reset()

  def _reset(self):
    """Resets wait counter and cooldown counter.
    """
    if self.mode not in ['auto', 'min', 'max']:
      logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
                      'fallback to auto mode.', self.mode)
      self.mode = 'auto'
    # NOTE: `np.inf` is used instead of the `np.Inf` alias, which was
    # removed in NumPy 2.0; behavior is identical.
    if (self.mode == 'min' or
        (self.mode == 'auto' and 'acc' not in self.monitor)):
      self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
      self.best = np.inf
    else:
      self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
      self.best = -np.inf
    self.cooldown_counter = 0
    self.wait = 0

  def on_train_begin(self, logs=None):
    self._reset()

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    logs['lr'] = K.get_value(self.model.optimizer.lr)
    current = logs.get(self.monitor)
    if current is None:
      logging.warning('Reduce LR on plateau conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))

    else:
      if self.in_cooldown():
        self.cooldown_counter -= 1
        self.wait = 0

      if self.monitor_op(current, self.best):
        self.best = current
        self.wait = 0
      elif not self.in_cooldown():
        self.wait += 1
        if self.wait >= self.patience:
          old_lr = float(K.get_value(self.model.optimizer.lr))
          if old_lr > self.min_lr:
            # Reduce the rate, clamped to the configured floor.
            new_lr = old_lr * self.factor
            new_lr = max(new_lr, self.min_lr)
            K.set_value(self.model.optimizer.lr, new_lr)
            if self.verbose > 0:
              print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
                    'rate to %s.' % (epoch + 1, new_lr))
            self.cooldown_counter = self.cooldown
            self.wait = 0

  def in_cooldown(self):
    """Returns True while the post-reduction cooldown is in effect."""
    return self.cooldown_counter > 0
@keras_export('keras.callbacks.CSVLogger')
class CSVLogger(Callback):
  """Callback that streams epoch results to a csv file.

  Supports all values that can be represented as a string,
  including 1D iterables such as np.ndarray.

  Example:

  ```python
  csv_logger = CSVLogger('training.log')
  model.fit(X_train, Y_train, callbacks=[csv_logger])
  ```

  Arguments:
      filename: filename of the csv file, e.g. 'run/log.csv'.
      separator: string used to separate elements in the csv file.
      append: True: append if file exists (useful for continuing
          training). False: overwrite existing file,
  """

  def __init__(self, filename, separator=',', append=False):
    self.sep = separator
    self.filename = filename
    self.append = append
    self.writer = None
    self.keys = None
    self.append_header = True
    # Python 2 csv needs binary file mode; Python 3 needs newline=''-style
    # control, handled here via an explicit newline.
    if six.PY2:
      self.file_flags = 'b'
      self._open_args = {}
    else:
      self.file_flags = ''
      self._open_args = {'newline': '\n'}
    super(CSVLogger, self).__init__()

  def on_train_begin(self, logs=None):
    if self.append:
      if os.path.exists(self.filename):
        with open(self.filename, 'r' + self.file_flags) as f:
          # Only write a header if the existing file is empty.
          self.append_header = not bool(len(f.readline()))
      mode = 'a'
    else:
      mode = 'w'
    self.csv_file = io.open(self.filename,
                            mode + self.file_flags,
                            **self._open_args)

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}

    def handle_value(k):
      """Renders a logged value as a csv-safe string."""
      is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
      if isinstance(k, six.string_types):
        return k
      # NOTE: `collections.abc.Iterable` — the `collections.Iterable`
      # alias was deprecated in Python 3.3 and removed in 3.10.
      elif isinstance(k, collections.abc.Iterable) and not is_zero_dim_ndarray:
        return '"[%s]"' % (', '.join(map(str, k)))
      else:
        return k

    if self.keys is None:
      self.keys = sorted(logs.keys())

    if self.model.stop_training:
      # We set NA so that csv parsers do not fail for this last epoch.
      logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])

    if not self.writer:

      class CustomDialect(csv.excel):
        delimiter = self.sep

      fieldnames = ['epoch'] + self.keys
      if six.PY2:
        fieldnames = [unicode(x) for x in fieldnames]

      self.writer = csv.DictWriter(
          self.csv_file,
          fieldnames=fieldnames,
          dialect=CustomDialect)
      if self.append_header:
        self.writer.writeheader()

    row_dict = collections.OrderedDict({'epoch': epoch})
    row_dict.update((key, handle_value(logs[key])) for key in self.keys)
    self.writer.writerow(row_dict)
    self.csv_file.flush()

  def on_train_end(self, logs=None):
    self.csv_file.close()
    self.writer = None
@keras_export('keras.callbacks.LambdaCallback')
class LambdaCallback(Callback):
  r"""Callback for creating simple, custom callbacks on-the-fly.

  Each constructor argument, when supplied, replaces the corresponding
  `Callback` hook; omitted hooks default to no-ops. The callables must
  accept positional arguments, as:

  - `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
    `epoch`, `logs`
  - `on_batch_begin` and `on_batch_end` expect two positional arguments:
    `batch`, `logs`
  - `on_train_begin` and `on_train_end` expect one positional argument:
    `logs`

  Arguments:
      on_epoch_begin: called at the beginning of every epoch.
      on_epoch_end: called at the end of every epoch.
      on_batch_begin: called at the beginning of every batch.
      on_batch_end: called at the end of every batch.
      on_train_begin: called at the beginning of model training.
      on_train_end: called at the end of model training.

  Example:

  ```python
  # Print the batch number at the beginning of every batch.
  batch_print_callback = LambdaCallback(
      on_batch_begin=lambda batch, logs: print(batch))

  # Stream the epoch loss to a file in JSON format (one object per line).
  import json
  json_log = open('loss_log.json', mode='wt', buffering=1)
  json_logging_callback = LambdaCallback(
      on_epoch_end=lambda epoch, logs: json_log.write(
          json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
      on_train_end=lambda logs: json_log.close()
  )
  ```
  """

  def __init__(self,
               on_epoch_begin=None,
               on_epoch_end=None,
               on_batch_begin=None,
               on_batch_end=None,
               on_train_begin=None,
               on_train_end=None,
               **kwargs):
    super(LambdaCallback, self).__init__()
    # Extra keyword arguments become attributes on the instance.
    self.__dict__.update(kwargs)
    # Do-nothing hooks stand in for any callable the caller omitted.
    noop_indexed = lambda _index, logs: None
    noop_plain = lambda logs: None
    self.on_epoch_begin = (
        on_epoch_begin if on_epoch_begin is not None else noop_indexed)
    self.on_epoch_end = (
        on_epoch_end if on_epoch_end is not None else noop_indexed)
    self.on_batch_begin = (
        on_batch_begin if on_batch_begin is not None else noop_indexed)
    self.on_batch_end = (
        on_batch_end if on_batch_end is not None else noop_indexed)
    self.on_train_begin = (
        on_train_begin if on_train_begin is not None else noop_plain)
    self.on_train_end = (
        on_train_end if on_train_end is not None else noop_plain)
| 34.648422 | 80 | 0.657344 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import collections.abc
import copy
import csv
import io
import json
import os
import time

import numpy as np
import six

from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.mode_keys import ModeKeys
from tensorflow.python.util.tf_export import keras_export

try:
  import requests
except ImportError:
  requests = None
def configure_callbacks(callbacks,
                        model,
                        do_validation=False,
                        batch_size=None,
                        epochs=None,
                        steps_per_epoch=None,
                        samples=None,
                        verbose=1,
                        count_mode='steps',
                        mode=ModeKeys.TRAIN):
  """Wraps `callbacks` in a configured `CallbackList` for a training loop.

  In TRAIN mode, a `BaseLogger`, the model's `History`, and (when
  `verbose`) a `ProgbarLogger` are added around the user callbacks.
  The resulting list is given the model and a params dict describing
  the run.

  Arguments:
    callbacks: List of `Callback`s, or an already-configured
      `CallbackList` (returned unchanged).
    model: the Keras model being trained/evaluated/predicted.
    do_validation: whether validation will run (extends the metric list
      with `val_`-prefixed names).
    batch_size, epochs, steps_per_epoch, samples, verbose: run settings
      forwarded to the callbacks via `set_params`.
    count_mode: 'steps' or 'samples', forwarded to `ProgbarLogger`.
    mode: one of the `ModeKeys` values; controls which callbacks and
      metrics are configured.

  Returns:
    A `CallbackList` wrapping all configured callbacks.
  """
  # Already wrapped and configured; return as-is.
  if isinstance(callbacks, CallbackList):
    return callbacks

  if not callbacks:
    callbacks = []

  if mode == ModeKeys.TRAIN:
    model.history = History()
    stateful_metric_names = None
    if hasattr(model, 'metrics_names'):
      # Skips the first name — presumably the loss; verify against
      # `Model.metrics_names` ordering.
      stateful_metric_names = model.metrics_names[1:]
    # BaseLogger runs first (accumulates averages); History runs last.
    callbacks = [BaseLogger(stateful_metrics=stateful_metric_names)
                ] + (callbacks or []) + [model.history]
    if verbose:
      callbacks.append(
          ProgbarLogger(count_mode, stateful_metrics=stateful_metric_names))
  callback_list = CallbackList(callbacks)

  # Callbacks may act on a model distinct from the one being trained
  # (e.g. under a distribution strategy).
  callback_model = model._get_callback_model()
  callback_list.set_model(callback_model)

  # PREDICT mode has no metrics; otherwise copy the model's metric names
  # and, when validating, add `val_`-prefixed counterparts.
  callback_metrics = []
  if mode != ModeKeys.PREDICT and hasattr(model, 'metrics_names'):
    callback_metrics = copy.copy(model.metrics_names)
    if do_validation:
      callback_metrics += ['val_' + n for n in model.metrics_names]

  callback_params = {
      'batch_size': batch_size,
      'epochs': epochs,
      'steps': steps_per_epoch,
      'samples': samples,
      'verbose': verbose,
      'do_validation': do_validation,
      'metrics': callback_metrics,
  }
  callback_list.set_params(callback_params)

  callback_list.model.stop_training = False
  return callback_list
def _is_generator_like(data):
return (hasattr(data, 'next') or hasattr(data, '__next__') or isinstance(
data, (Sequence, iterator_ops.Iterator, iterator_ops.EagerIterator)))
def make_logs(model, logs, outputs, mode, prefix=''):
  """Computes logs for sending to `on_batch_end` methods."""
  if mode not in {ModeKeys.TRAIN, ModeKeys.TEST}:
    # Predict mode: raw model outputs are forwarded unchanged.
    logs['outputs'] = outputs
  elif hasattr(model, 'metrics_names'):
    # Pair each metric name with its value, optionally prefixed
    # (e.g. 'val_' for validation results).
    logs.update(
        (prefix + label, output)
        for label, output in zip(model.metrics_names, outputs))
  return logs
class CallbackList(object):
  """Container abstracting a list of callbacks.

  Arguments:
      callbacks: List of `Callback` instances.
      queue_length: Queue length for keeping running statistics over
          callback execution time.
  """
  def __init__(self, callbacks=None, queue_length=10):
    callbacks = callbacks or []
    self.callbacks = [c for c in callbacks]
    self.queue_length = queue_length
    self.params = {}
    self.model = None
    self._reset_batch_timing()
  def _reset_batch_timing(self):
    # Rolling per-hook timing stats, used to warn when callbacks are slow
    # relative to the batch computation itself.
    self._delta_t_batch = 0.
    self._delta_ts = collections.defaultdict(
        lambda: collections.deque([], maxlen=self.queue_length))
  def append(self, callback):
    self.callbacks.append(callback)
  def set_params(self, params):
    """Broadcasts training params to every contained callback."""
    self.params = params
    for callback in self.callbacks:
      callback.set_params(params)
  def set_model(self, model):
    """Broadcasts the (callback) model to every contained callback."""
    self.model = model
    for callback in self.callbacks:
      callback.set_model(model)
  def _call_batch_hook(self, mode, hook, batch, logs=None):
    """Helper function for all batch_{begin | end} methods."""
    if not self.callbacks:
      return
    hook_name = 'on_{mode}_batch_{hook}'.format(mode=mode, hook=hook)
    if hook == 'begin':
      self._t_enter_batch = time.time()
    if hook == 'end':
      # Batch is ending: measure how long the batch computation took.
      self._delta_t_batch = time.time() - self._t_enter_batch
    logs = logs or {}
    t_before_callbacks = time.time()
    for callback in self.callbacks:
      batch_hook = getattr(callback, hook_name)
      batch_hook(batch, logs)
    self._delta_ts[hook_name].append(time.time() - t_before_callbacks)
    delta_t_median = np.median(self._delta_ts[hook_name])
    # Warn if the median callback time dominates the batch time.
    if (self._delta_t_batch > 0. and
        delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
      logging.warning(
          'Method (%s) is slow compared '
          'to the batch update (%f). Check your callbacks.', hook_name,
          delta_t_median)
  def _call_begin_hook(self, mode):
    """Helper function for `on_{train|test|predict}_begin` methods."""
    if mode == ModeKeys.TRAIN:
      self.on_train_begin()
    elif mode == ModeKeys.TEST:
      self.on_test_begin()
    else:
      self.on_predict_begin()
  def _call_end_hook(self, mode):
    """Helper function for `on_{train|test|predict}_end` methods."""
    if mode == ModeKeys.TRAIN:
      self.on_train_end()
    elif mode == ModeKeys.TEST:
      self.on_test_end()
    else:
      self.on_predict_end()
  def on_batch_begin(self, batch, logs=None):
    # Legacy alias for the train-mode batch hook.
    self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
  def on_batch_end(self, batch, logs=None):
    # Legacy alias for the train-mode batch hook.
    self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
  def on_epoch_begin(self, epoch, logs=None):
    """Calls the `on_epoch_begin` methods of its callbacks."""
    logs = logs or {}
    for callback in self.callbacks:
      callback.on_epoch_begin(epoch, logs)
    self._reset_batch_timing()
  def on_epoch_end(self, epoch, logs=None):
    """Calls the `on_epoch_end` methods of its callbacks."""
    logs = logs or {}
    for callback in self.callbacks:
      callback.on_epoch_end(epoch, logs)
  def on_train_batch_begin(self, batch, logs=None):
    self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
  def on_train_batch_end(self, batch, logs=None):
    self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
  def on_test_batch_begin(self, batch, logs=None):
    self._call_batch_hook(ModeKeys.TEST, 'begin', batch, logs=logs)
  def on_test_batch_end(self, batch, logs=None):
    self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)
  def on_predict_batch_begin(self, batch, logs=None):
    self._call_batch_hook(ModeKeys.PREDICT, 'begin', batch, logs=logs)
  def on_predict_batch_end(self, batch, logs=None):
    self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)
  def on_train_begin(self, logs=None):
    for callback in self.callbacks:
      callback.on_train_begin(logs)
  def on_train_end(self, logs=None):
    for callback in self.callbacks:
      callback.on_train_end(logs)
  def on_test_begin(self, logs=None):
    for callback in self.callbacks:
      callback.on_test_begin(logs)
  def on_test_end(self, logs=None):
    for callback in self.callbacks:
      callback.on_test_end(logs)
  def on_predict_begin(self, logs=None):
    for callback in self.callbacks:
      callback.on_predict_begin(logs)
  def on_predict_end(self, logs=None):
    for callback in self.callbacks:
      callback.on_predict_end(logs)
  def __iter__(self):
    return iter(self.callbacks)
@keras_export('keras.callbacks.Callback')
class Callback(object):
  """Abstract base class used to build new callbacks.

  The `logs` dict passed to the hook methods contains quantities relevant
  to the current batch or epoch.

  Attributes:
      params: dict. Training parameters
          (e.g. verbosity, batch size, number of epochs...).
      model: Reference of the model being trained.
  """

  def __init__(self):
    self.validation_data = None
    self.model = None

  def set_params(self, params):
    self.params = params

  def set_model(self, model):
    self.model = model

  # The comment-stripped original left the no-op hooks below with empty
  # bodies, which is a SyntaxError; docstring bodies restore valid no-ops.
  def on_batch_begin(self, batch, logs=None):
    """A backwards compatibility alias for `on_train_batch_begin`."""

  def on_batch_end(self, batch, logs=None):
    """A backwards compatibility alias for `on_train_batch_end`."""

  def on_epoch_begin(self, epoch, logs=None):
    """Called at the start of an epoch (training only)."""

  def on_epoch_end(self, epoch, logs=None):
    """Called at the end of an epoch (training only)."""

  def on_train_batch_begin(self, batch, logs=None):
    """Called at the beginning of a training batch in `fit` methods."""
    # For backwards compatibility.
    self.on_batch_begin(batch, logs=logs)

  def on_train_batch_end(self, batch, logs=None):
    """Called at the end of a training batch in `fit` methods."""
    # For backwards compatibility.
    self.on_batch_end(batch, logs=logs)

  def on_test_batch_begin(self, batch, logs=None):
    """Called at the beginning of a batch in `evaluate` methods."""

  def on_test_batch_end(self, batch, logs=None):
    """Called at the end of a batch in `evaluate` methods."""

  def on_predict_batch_begin(self, batch, logs=None):
    """Called at the beginning of a batch in `predict` methods."""

  def on_predict_batch_end(self, batch, logs=None):
    """Called at the end of a batch in `predict` methods."""

  def on_train_begin(self, logs=None):
    """Called at the beginning of training."""

  def on_train_end(self, logs=None):
    """Called at the end of training."""

  def on_test_begin(self, logs=None):
    """Called at the beginning of evaluation or validation."""

  def on_test_end(self, logs=None):
    """Called at the end of evaluation or validation."""

  def on_predict_begin(self, logs=None):
    """Called at the beginning of prediction."""

  def on_predict_end(self, logs=None):
    """Called at the end of prediction."""
@keras_export('keras.callbacks.BaseLogger')
class BaseLogger(Callback):
  """Callback that accumulates epoch averages of metrics.

  Stateful metrics keep their latest reported value, while all other
  metrics are weighted by batch size and averaged over the epoch.
  """

  def __init__(self, stateful_metrics=None):
    super(BaseLogger, self).__init__()
    # Metric names that should NOT be averaged over an epoch.
    self.stateful_metrics = set(stateful_metrics or [])

  def on_epoch_begin(self, epoch, logs=None):
    # Reset running totals at the start of every epoch.
    self.seen = 0
    self.totals = {}

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    num_steps = logs.get('num_steps', 1)
    self.seen += batch_size * num_steps
    for metric, value in logs.items():
      if metric in self.stateful_metrics:
        # Stateful metrics already carry their running value.
        self.totals[metric] = value
      else:
        # Weight by batch size so the epoch-end division yields a mean.
        self.totals[metric] = self.totals.get(metric, 0) + value * batch_size

  def on_epoch_end(self, epoch, logs=None):
    if logs is None:
      return
    for metric in self.params['metrics']:
      if metric not in self.totals:
        continue
      if metric in self.stateful_metrics:
        logs[metric] = self.totals[metric]
      else:
        logs[metric] = self.totals[metric] / self.seen
@keras_export('keras.callbacks.TerminateOnNaN')
class TerminateOnNaN(Callback):
  """Callback that terminates training when a NaN or Inf loss occurs."""

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    loss = logs.get('loss')
    if loss is None:
      return
    # `isfinite` is False exactly for NaN and +/-Inf; stop immediately,
    # since continuing would only propagate invalid values.
    if not np.isfinite(loss):
      print('Batch %d: Invalid loss, terminating training' % (batch))
      self.model.stop_training = True
@keras_export('keras.callbacks.ProgbarLogger')
class ProgbarLogger(Callback):
  """Callback that prints metrics to stdout.

  Arguments:
      count_mode: One of "steps" or "samples" — whether the progress bar
          should count samples seen or steps (batches) seen.
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.

  Raises:
      ValueError: In case of invalid `count_mode`.
  """
  def __init__(self, count_mode='samples', stateful_metrics=None):
    super(ProgbarLogger, self).__init__()
    if count_mode == 'samples':
      self.use_steps = False
    elif count_mode == 'steps':
      self.use_steps = True
    else:
      raise ValueError('Unknown `count_mode`: ' + str(count_mode))
    self.stateful_metrics = set(stateful_metrics or [])
  def on_train_begin(self, logs=None):
    # Verbosity/epoch count come from the params set by CallbackList.
    self.verbose = self.params['verbose']
    self.epochs = self.params['epochs']
  def on_epoch_begin(self, epoch, logs=None):
    self.seen = 0
    # `target` is the total number of steps or samples for this epoch.
    if self.use_steps:
      self.target = self.params['steps']
    else:
      self.target = self.params['samples']
    if self.verbose:
      if self.epochs > 1:
        print('Epoch %d/%d' % (epoch + 1, self.epochs))
    self.progbar = Progbar(
        target=self.target,
        verbose=self.verbose,
        stateful_metrics=self.stateful_metrics,
        unit_name='step' if self.use_steps else 'sample')
  def on_batch_begin(self, batch, logs=None):
    # Metric values to display are (re)collected every batch.
    self.log_values = []
  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    num_steps = logs.get('num_steps', 1)
    if self.use_steps:
      self.seen += num_steps
    else:
      self.seen += batch_size * num_steps
    for k in self.params['metrics']:
      if k in logs:
        self.log_values.append((k, logs[k]))
    # Skip progbar update for the last batch; it is handled in
    # `on_epoch_end` after the epoch-level logs are known.
    if self.verbose and (self.target is None or self.seen < self.target):
      self.progbar.update(self.seen, self.log_values)
  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    for k in self.params['metrics']:
      if k in logs:
        self.log_values.append((k, logs[k]))
    if self.verbose:
      self.progbar.update(self.seen, self.log_values)
@keras_export('keras.callbacks.History')
class History(Callback):
  """Callback that records events into a `History` object.

  After training, `history` maps each metric name to the list of its
  per-epoch values, and `epoch` lists the epoch indices seen.
  """

  def on_train_begin(self, logs=None):
    # (Re)start recording from scratch at the beginning of training.
    self.epoch = []
    self.history = {}

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    self.epoch.append(epoch)
    for metric_name, metric_value in logs.items():
      values = self.history.setdefault(metric_name, [])
      values.append(metric_value)
@keras_export('keras.callbacks.ModelCheckpoint')
class ModelCheckpoint(Callback):
  """Save the model after every epoch.

  `filepath` can contain named formatting options which are filled with
  the epoch number and the keys in `logs`, e.g.
  `weights.{epoch:02d}-{val_loss:.2f}.hdf5`.

  Arguments:
      filepath: string, path to save the model file.
      monitor: quantity to monitor.
      verbose: verbosity mode, 0 or 1.
      save_best_only: if True, the latest best model according to the
          quantity monitored will not be overwritten.
      save_weights_only: if True, only the model's weights are saved
          (`model.save_weights(filepath)`), else the full model is saved
          (`model.save(filepath)`).
      mode: one of {'auto', 'min', 'max'}; direction of "improvement".
      period: Interval (number of epochs) between checkpoints.
  """
  def __init__(self,
               filepath,
               monitor='val_loss',
               verbose=0,
               save_best_only=False,
               save_weights_only=False,
               mode='auto',
               period=1):
    super(ModelCheckpoint, self).__init__()
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.period = period
    self.epochs_since_last_save = 0
    if mode not in ['auto', 'min', 'max']:
      logging.warning('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'
    if mode == 'min':
      self.monitor_op = np.less
      self.best = np.Inf
    elif mode == 'max':
      self.monitor_op = np.greater
      self.best = -np.Inf
    else:
      # 'auto': infer the improvement direction from the metric name.
      if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
        self.monitor_op = np.greater
        self.best = -np.Inf
      else:
        self.monitor_op = np.less
        self.best = np.Inf
  def set_model(self, model):
    self.model = model
    # Models that are neither graph networks nor Sequential are forced to
    # weights-only saving (Sequential is matched by class name here).
    if (not self.save_weights_only and
        not model._is_graph_network and
        model.__class__.__name__ != 'Sequential'):
      self.save_weights_only = True
  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    self.epochs_since_last_save += 1
    if self.epochs_since_last_save >= self.period:
      self.epochs_since_last_save = 0
      # Fill placeholders such as {epoch} and {val_loss} in the template.
      filepath = self.filepath.format(epoch=epoch + 1, **logs)
      if self.save_best_only:
        current = logs.get(self.monitor)
        if current is None:
          logging.warning('Can save best model only with %s available, '
                          'skipping.', self.monitor)
        else:
          if self.monitor_op(current, self.best):
            if self.verbose > 0:
              print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
                    ' saving model to %s' % (epoch + 1, self.monitor, self.best,
                                             current, filepath))
            self.best = current
            if self.save_weights_only:
              self.model.save_weights(filepath, overwrite=True)
            else:
              self.model.save(filepath, overwrite=True)
          else:
            if self.verbose > 0:
              print('\nEpoch %05d: %s did not improve from %0.5f' %
                    (epoch + 1, self.monitor, self.best))
      else:
        if self.verbose > 0:
          print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
        if self.save_weights_only:
          self.model.save_weights(filepath, overwrite=True)
        else:
          self.model.save(filepath, overwrite=True)
@keras_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
  """Stop training when a monitored quantity has stopped improving.

  Arguments:
      monitor: Quantity to be monitored.
      min_delta: Minimum change in the monitored quantity to qualify as
          an improvement.
      patience: Number of epochs with no improvement after which training
          will be stopped.
      verbose: Verbosity mode.
      mode: One of {'auto', 'min', 'max'}; direction of "improvement".
      baseline: Baseline value for the monitored quantity; training stops
          if the model doesn't improve over it.
      restore_best_weights: Whether to restore model weights from the
          epoch with the best value of the monitored quantity.
  """
  def __init__(self,
               monitor='val_loss',
               min_delta=0,
               patience=0,
               verbose=0,
               mode='auto',
               baseline=None,
               restore_best_weights=False):
    super(EarlyStopping, self).__init__()
    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.baseline = baseline
    self.min_delta = abs(min_delta)
    self.wait = 0
    self.stopped_epoch = 0
    self.restore_best_weights = restore_best_weights
    self.best_weights = None
    if mode not in ['auto', 'min', 'max']:
      logging.warning('EarlyStopping mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'
    if mode == 'min':
      self.monitor_op = np.less
    elif mode == 'max':
      self.monitor_op = np.greater
    else:
      # 'auto': accuracy-like metrics improve upward, everything else down.
      if 'acc' in self.monitor:
        self.monitor_op = np.greater
      else:
        self.monitor_op = np.less
    # Sign of min_delta follows the improvement direction so that
    # `current - min_delta` is compared against `best` consistently.
    if self.monitor_op == np.greater:
      self.min_delta *= 1
    else:
      self.min_delta *= -1
  def on_train_begin(self, logs=None):
    # Allow instances to be re-used across multiple `fit` calls.
    self.wait = 0
    self.stopped_epoch = 0
    if self.baseline is not None:
      self.best = self.baseline
    else:
      self.best = np.Inf if self.monitor_op == np.less else -np.Inf
  def on_epoch_end(self, epoch, logs=None):
    current = self.get_monitor_value(logs)
    if current is None:
      return
    if self.monitor_op(current - self.min_delta, self.best):
      self.best = current
      self.wait = 0
      if self.restore_best_weights:
        self.best_weights = self.model.get_weights()
    else:
      self.wait += 1
      if self.wait >= self.patience:
        self.stopped_epoch = epoch
        self.model.stop_training = True
        if self.restore_best_weights:
          if self.verbose > 0:
            print('Restoring model weights from the end of the best epoch.')
          self.model.set_weights(self.best_weights)
  def on_train_end(self, logs=None):
    if self.stopped_epoch > 0 and self.verbose > 0:
      print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
  def get_monitor_value(self, logs):
    """Fetches the monitored value from `logs`, warning if absent."""
    logs = logs or {}
    monitor_value = logs.get(self.monitor)
    if monitor_value is None:
      logging.warning('Early stopping conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    return monitor_value
@keras_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
  """Callback used to stream epoch-end events to a server.

  Requires the `requests` library. Events are POSTed to `root + path`:
  either as a form field named `field` holding a JSON dump of the logs,
  or (if `send_as_json` is True) as an `application/json` payload.
  """

  def __init__(self,
               root='http://localhost:9000',
               path='/publish/epoch/end/',
               field='data',
               headers=None,
               send_as_json=False):
    super(RemoteMonitor, self).__init__()
    self.root = root
    self.path = path
    self.field = field
    self.headers = headers
    self.send_as_json = send_as_json

  def on_epoch_end(self, epoch, logs=None):
    if requests is None:
      raise ImportError('RemoteMonitor requires the `requests` library.')
    logs = logs or {}
    send = {'epoch': epoch}
    send.update(logs)
    url = self.root + self.path
    try:
      if self.send_as_json:
        requests.post(url, json=send, headers=self.headers)
      else:
        requests.post(url, {self.field: json.dumps(send)},
                      headers=self.headers)
    except requests.exceptions.RequestException:
      # Monitoring is best-effort: warn and keep training.
      logging.warning('Warning: could not reach RemoteMonitor '
                      'root server at ' + str(self.root))
@keras_export('keras.callbacks.LearningRateScheduler')
class LearningRateScheduler(Callback):
  """Learning rate scheduler.

  Arguments:
      schedule: a function taking an epoch index (integer, indexed from 0)
          and optionally the current learning rate, and returning the new
          learning rate as a float.
      verbose: int. 0: quiet, 1: update messages.
  """
  def __init__(self, schedule, verbose=0):
    super(LearningRateScheduler, self).__init__()
    self.schedule = schedule
    self.verbose = verbose
  def on_epoch_begin(self, epoch, logs=None):
    if not hasattr(self.model.optimizer, 'lr'):
      raise ValueError('Optimizer must have a "lr" attribute.')
    try:
      # New-style API: schedule(epoch, current_lr).
      lr = float(K.get_value(self.model.optimizer.lr))
      lr = self.schedule(epoch, lr)
    except TypeError:
      # Old-style API for backward compatibility: schedule(epoch).
      lr = self.schedule(epoch)
    if not isinstance(lr, (float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function '
                       'should be float.')
    K.set_value(self.model.optimizer.lr, lr)
    if self.verbose > 0:
      print('\nEpoch %05d: LearningRateScheduler reducing learning '
            'rate to %s.' % (epoch + 1, lr))
  def on_epoch_end(self, epoch, logs=None):
    # Expose the effective learning rate in the epoch logs.
    logs = logs or {}
    logs['lr'] = K.get_value(self.model.optimizer.lr)
@keras_export('keras.callbacks.TensorBoard', v1=[])
class TensorBoard(Callback):
  """Enable visualizations for TensorBoard (TF 2.0 variant).

  Arguments:
      log_dir: directory where the summary event files are written.
      histogram_freq: frequency (in epochs) at which to log weight
          histograms; 0 disables them.
      write_graph: whether to log the model graph.
      write_images: whether to log model weights as images.
      update_freq: 'batch', 'epoch', or an integer number of samples
          between metric writes.
      **kwargs: legacy V1 arguments, validated (and ignored or rejected)
          by `_validate_kwargs`.
  """
  def __init__(self,
               log_dir='./logs',
               histogram_freq=0,
               write_graph=True,
               write_images=False,
               update_freq='epoch',
               **kwargs):
    super(TensorBoard, self).__init__()
    self._validate_kwargs(kwargs)
    self.log_dir = log_dir
    self.histogram_freq = histogram_freq
    self.write_graph = write_graph
    self.write_images = write_images
    if update_freq == 'batch':
      # 'batch' means "write after every sample batch", i.e. threshold 1.
      self.update_freq = 1
    else:
      self.update_freq = update_freq
    self._samples_seen = 0
    self._samples_seen_at_last_write = 0
    self._current_batch = 0
    self._total_batches_seen = 0
    self._total_val_batches_seen = 0
  def _validate_kwargs(self, kwargs):
    """Warns on V1 arguments that are ignored, rejects unknown ones."""
    if kwargs.get('write_grads', False):
      logging.warning('`write_grads` will be ignored in TensorFlow 2.0 '
                      'for the `TensorBoard` Callback.')
    if kwargs.get('embeddings_freq', False):
      logging.warning('Embeddings will be ignored in TensorFlow 2.0 '
                      'for the `TensorBoard` Callback.')
    unrecognized_kwargs = set(kwargs.keys()) - {
        'write_grads', 'embeddings_freq', 'embeddings_layer_names',
        'embeddings_metadata', 'embeddings_data'
    }
    # Only allow kwargs that were supported in V1.
    if unrecognized_kwargs:
      raise ValueError('Unrecognized arguments in `TensorBoard` '
                       'Callback: ' + str(unrecognized_kwargs))
  def set_model(self, model):
    """Sets Keras model and creates the summary file writer."""
    self.model = model
    with context.eager_mode():
      self.writer = summary_ops_v2.create_file_writer(self.log_dir)
      if self.write_graph:
        if model.run_eagerly:
          logging.warning('TensorBoard Callback will ignore `write_graph=True`'
                          'when `Model.run_eagerly=True`.`')
        else:
          with self.writer.as_default():
            with summary_ops_v2.always_record_summaries():
              summary_ops_v2.graph(K.get_graph())
  def on_batch_end(self, batch, logs=None):
    """Writes scalar summaries for metrics on every training batch."""
    logs = logs or {}
    self._samples_seen += logs.get('size', 1)
    samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
    # Only write when at least `update_freq` samples have accumulated.
    if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
      self._log_metrics(logs, prefix='batch_', step=self._total_batches_seen)
      self._samples_seen_at_last_write = self._samples_seen
    self._total_batches_seen += 1
  def on_epoch_end(self, epoch, logs=None):
    """Runs metrics and histogram summaries at epoch end."""
    step = epoch if self.update_freq == 'epoch' else self._samples_seen
    self._log_metrics(logs, prefix='epoch_', step=step)
    if self.histogram_freq and epoch % self.histogram_freq == 0:
      self._log_weights(epoch)
  def on_train_end(self, logs=None):
    with context.eager_mode():
      self.writer.close()
  def _log_metrics(self, logs, prefix, step):
    """Writes metrics out as custom scalar summaries."""
    if logs is None:
      logs = {}
    # Scrub non-metric items and assign batch or epoch prefix.
    metric_logs = {(prefix + k): v
                   for k, v in logs.items()
                   if k not in ['batch', 'size', 'num_steps']}
    with context.eager_mode(), \
         self.writer.as_default(), \
         summary_ops_v2.always_record_summaries():
      for name, value in metric_logs.items():
        summary_ops_v2.scalar(name, value, step=step)
  def _log_weights(self, epoch):
    """Logs the weights of the Model to TensorBoard."""
    with context.eager_mode(), \
         self.writer.as_default(), \
         summary_ops_v2.always_record_summaries():
      for layer in self.model.layers:
        for weight in layer.weights:
          # TensorBoard tags cannot contain ':'.
          weight_name = weight.name.replace(':', '_')
          with ops.init_scope():
            weight = K.get_value(weight)
          summary_ops_v2.histogram(weight_name, weight, step=epoch)
          if self.write_images:
            self._log_weight_as_image(weight, weight_name, epoch)
      self.writer.flush()
  def _log_weight_as_image(self, weight, weight_name, epoch):
    """Logs a weight as a TensorBoard image, reshaping to NHWC first."""
    w_img = array_ops.squeeze(weight)
    shape = K.int_shape(w_img)
    if len(shape) == 1:  # Bias case
      w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
    elif len(shape) == 2:  # Dense layer kernel case
      if shape[0] > shape[1]:
        w_img = array_ops.transpose(w_img)
        shape = K.int_shape(w_img)
      w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
    elif len(shape) == 3:  # ConvNet case
      if K.image_data_format() == 'channels_last':
        # Switch to channels_first to display every kernel as a separate
        # image.
        w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
        shape = K.int_shape(w_img)
      w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])
    shape = K.int_shape(w_img)
    # Not possible to handle 3D convnets etc.
    if len(shape) == 4 and shape[-1] in [1, 3, 4]:
      summary_ops_v2.image(weight_name, w_img, step=epoch)
@keras_export('keras.callbacks.ReduceLROnPlateau')
class ReduceLROnPlateau(Callback):
  """Reduce learning rate when a metric has stopped improving.

  Arguments:
      monitor: quantity to be monitored.
      factor: factor by which the learning rate will be reduced
          (`new_lr = lr * factor`), must be < 1.
      patience: number of epochs with no improvement after which the
          learning rate will be reduced.
      verbose: int. 0: quiet, 1: update messages.
      mode: one of {'auto', 'min', 'max'}; direction of "improvement".
      min_delta: threshold for measuring the new optimum.
      cooldown: number of epochs to wait before resuming normal operation
          after the lr has been reduced.
      min_lr: lower bound on the learning rate.
      **kwargs: supports the deprecated `epsilon` alias of `min_delta`.
  """
  def __init__(self,
               monitor='val_loss',
               factor=0.1,
               patience=10,
               verbose=0,
               mode='auto',
               min_delta=1e-4,
               cooldown=0,
               min_lr=0,
               **kwargs):
    super(ReduceLROnPlateau, self).__init__()
    self.monitor = monitor
    if factor >= 1.0:
      raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.')
    if 'epsilon' in kwargs:
      min_delta = kwargs.pop('epsilon')
      logging.warning('`epsilon` argument is deprecated and '
                      'will be removed, use `min_delta` instead.')
    self.factor = factor
    self.min_lr = min_lr
    self.min_delta = min_delta
    self.patience = patience
    self.verbose = verbose
    self.cooldown = cooldown
    self.cooldown_counter = 0  # Cooldown counter.
    self.wait = 0
    self.best = 0
    self.mode = mode
    self.monitor_op = None
    self._reset()
  def _reset(self):
    """Resets wait counter and cooldown counter."""
    if self.mode not in ['auto', 'min', 'max']:
      logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
                      'fallback to auto mode.', self.mode)
      self.mode = 'auto'
    # 'auto' treats accuracy-like metrics as maximized, others minimized;
    # `min_delta` widens the comparison to require a real improvement.
    if (self.mode == 'min' or
        (self.mode == 'auto' and 'acc' not in self.monitor)):
      self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
      self.best = np.Inf
    else:
      self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
      self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0
  def on_train_begin(self, logs=None):
    self._reset()
  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    # Expose the effective learning rate in the epoch logs.
    logs['lr'] = K.get_value(self.model.optimizer.lr)
    current = logs.get(self.monitor)
    if current is None:
      logging.warning('Reduce LR on plateau conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    else:
      if self.in_cooldown():
        self.cooldown_counter -= 1
        self.wait = 0
      if self.monitor_op(current, self.best):
        self.best = current
        self.wait = 0
      elif not self.in_cooldown():
        self.wait += 1
        if self.wait >= self.patience:
          old_lr = float(K.get_value(self.model.optimizer.lr))
          if old_lr > self.min_lr:
            new_lr = old_lr * self.factor
            new_lr = max(new_lr, self.min_lr)
            K.set_value(self.model.optimizer.lr, new_lr)
            if self.verbose > 0:
              print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
                    'rate to %s.' % (epoch + 1, new_lr))
            self.cooldown_counter = self.cooldown
            self.wait = 0
  def in_cooldown(self):
    return self.cooldown_counter > 0
@keras_export('keras.callbacks.CSVLogger')
class CSVLogger(Callback):
  """Callback that streams epoch results to a csv file.

  Supports all values that can be represented as a string, including 1D
  iterables such as np.ndarray.

  Arguments:
      filename: filename of the csv file, e.g. 'run/log.csv'.
      separator: string used to separate elements in the csv file.
      append: True: append if file exists (useful for continuing
          training). False: overwrite existing file.
  """

  def __init__(self, filename, separator=',', append=False):
    self.sep = separator
    self.filename = filename
    self.append = append
    self.writer = None
    self.keys = None
    self.append_header = True
    if six.PY2:
      self.file_flags = 'b'
      self._open_args = {}
    else:
      self.file_flags = ''
      self._open_args = {'newline': '\n'}
    super(CSVLogger, self).__init__()

  def on_train_begin(self, logs=None):
    if self.append:
      if os.path.exists(self.filename):
        with open(self.filename, 'r' + self.file_flags) as f:
          # Only write a header if the existing file is empty.
          self.append_header = not bool(len(f.readline()))
      mode = 'a'
    else:
      mode = 'w'
    self.csv_file = io.open(self.filename,
                            mode + self.file_flags,
                            **self._open_args)

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    # FIX: `collections.Iterable` was deprecated in Python 3.3 and removed
    # in Python 3.10; import the ABC from `collections.abc` when available.
    try:
      from collections.abc import Iterable  # pylint: disable=g-import-not-at-top
    except ImportError:  # Python 2 fallback.
      from collections import Iterable  # pylint: disable=g-import-not-at-top

    def handle_value(k):
      # Render 1D iterables as a quoted bracketed list; scalars pass
      # through unchanged (0-d ndarrays count as scalars).
      is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
      if isinstance(k, six.string_types):
        return k
      elif isinstance(k, Iterable) and not is_zero_dim_ndarray:
        return '"[%s]"' % (', '.join(map(str, k)))
      else:
        return k

    if self.keys is None:
      self.keys = sorted(logs.keys())

    if self.model.stop_training:
      # We set NA so that csv parsers do not fail for this last epoch.
      logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])

    if not self.writer:

      class CustomDialect(csv.excel):
        delimiter = self.sep

      fieldnames = ['epoch'] + self.keys
      if six.PY2:
        fieldnames = [unicode(x) for x in fieldnames]  # pylint: disable=undefined-variable

      self.writer = csv.DictWriter(
          self.csv_file,
          fieldnames=fieldnames,
          dialect=CustomDialect)
      if self.append_header:
        self.writer.writeheader()

    row_dict = collections.OrderedDict({'epoch': epoch})
    row_dict.update((key, handle_value(logs[key])) for key in self.keys)
    self.writer.writerow(row_dict)
    self.csv_file.flush()

  def on_train_end(self, logs=None):
    self.csv_file.close()
    self.writer = None
@keras_export('keras.callbacks.LambdaCallback')
class LambdaCallback(Callback):
  """Callback for creating simple, custom callbacks on-the-fly.

  Each supplied function is installed as the corresponding hook; hooks
  that are not supplied default to no-ops. Epoch and batch hooks take
  `(epoch_or_batch, logs)`, train-level hooks take `(logs,)`.
  """

  def __init__(self,
               on_epoch_begin=None,
               on_epoch_end=None,
               on_batch_begin=None,
               on_batch_end=None,
               on_train_begin=None,
               on_train_end=None,
               **kwargs):
    super(LambdaCallback, self).__init__()
    # Any extra kwargs become instance attributes, as before.
    self.__dict__.update(kwargs)
    # Shared no-op defaults with the two hook arities.
    noop_with_index = lambda index, logs: None
    noop = lambda logs: None
    self.on_epoch_begin = (
        on_epoch_begin if on_epoch_begin is not None else noop_with_index)
    self.on_epoch_end = (
        on_epoch_end if on_epoch_end is not None else noop_with_index)
    self.on_batch_begin = (
        on_batch_begin if on_batch_begin is not None else noop_with_index)
    self.on_batch_end = (
        on_batch_end if on_batch_end is not None else noop_with_index)
    self.on_train_begin = (
        on_train_begin if on_train_begin is not None else noop)
    self.on_train_end = on_train_end if on_train_end is not None else noop
| true | true |
1c2ed867e0956d8de08780e0ee8cf45c04524811 | 4,788 | py | Python | opsgenie_swagger/models/list_user_forwarding_rules_response.py | Logicworks/opsgenie-python-sdk | 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18 | [
"Apache-2.0"
] | null | null | null | opsgenie_swagger/models/list_user_forwarding_rules_response.py | Logicworks/opsgenie-python-sdk | 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18 | [
"Apache-2.0"
] | null | null | null | opsgenie_swagger/models/list_user_forwarding_rules_response.py | Logicworks/opsgenie-python-sdk | 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18 | [
"Apache-2.0"
] | 1 | 2020-11-07T11:27:13.000Z | 2020-11-07T11:27:13.000Z | # coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from opsgenie_swagger.models.base_response import BaseResponse # noqa: F401,E501
from opsgenie_swagger.models.forwarding_rule import ForwardingRule # noqa: F401,E501
class ListUserForwardingRulesResponse(object):
    """Response model for listing a user's forwarding rules.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in the definition.
    """

    swagger_types = {
        'request_id': 'str',
        'took': 'float',
        'data': 'list[ForwardingRule]'
    }

    attribute_map = {
        'request_id': 'requestId',
        'took': 'took',
        'data': 'data'
    }

    def __init__(self, request_id=None, took=0.0, data=None):
        """ListUserForwardingRulesResponse - a model defined in Swagger."""
        self._request_id = None
        self._took = None
        self._data = None
        self.discriminator = None
        # The property setters below enforce that these are not None.
        self.request_id = request_id
        self.took = took
        if data is not None:
            self.data = data

    @property
    def request_id(self):
        """Gets the request_id of this ListUserForwardingRulesResponse."""
        return self._request_id

    @request_id.setter
    def request_id(self, request_id):
        """Sets the request_id of this ListUserForwardingRulesResponse."""
        if request_id is None:
            raise ValueError("Invalid value for `request_id`, must not be `None`")  # noqa: E501
        self._request_id = request_id

    @property
    def took(self):
        """Gets the took of this ListUserForwardingRulesResponse."""
        return self._took

    @took.setter
    def took(self, took):
        """Sets the took of this ListUserForwardingRulesResponse."""
        if took is None:
            raise ValueError("Invalid value for `took`, must not be `None`")  # noqa: E501
        self._took = took

    @property
    def data(self):
        """Gets the data of this ListUserForwardingRulesResponse."""
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this ListUserForwardingRulesResponse."""
        self._data = data

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListUserForwardingRulesResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.164706 | 96 | 0.591061 |
import pprint
import re
import six
from opsgenie_swagger.models.base_response import BaseResponse
from opsgenie_swagger.models.forwarding_rule import ForwardingRule
class ListUserForwardingRulesResponse(object):
    """Response model for listing a user's forwarding rules.

    NOTE: auto-generated swagger model; the class maps JSON keys
    (`attribute_map`) to typed attributes (`swagger_types`).
    """
    # attribute name -> swagger type string
    swagger_types = {
        'request_id': 'str',
        'took': 'float',
        'data': 'list[ForwardingRule]'
    }
    # attribute name -> json key in the API payload
    attribute_map = {
        'request_id': 'requestId',
        'took': 'took',
        'data': 'data'
    }
    def __init__(self, request_id=None, took=0.0, data=None):
        """ListUserForwardingRulesResponse - a model defined in Swagger."""
        self._request_id = None
        self._took = None
        self._data = None
        self.discriminator = None
        # Property setters below enforce that request_id/took are not None.
        self.request_id = request_id
        self.took = took
        if data is not None:
            self.data = data
    @property
    def request_id(self):
        """Gets the request_id of this ListUserForwardingRulesResponse."""
        return self._request_id
    @request_id.setter
    def request_id(self, request_id):
        """Sets the request_id of this ListUserForwardingRulesResponse."""
        if request_id is None:
            raise ValueError("Invalid value for `request_id`, must not be `None`")
        self._request_id = request_id
    @property
    def took(self):
        """Gets the took of this ListUserForwardingRulesResponse."""
        return self._took
    @took.setter
    def took(self, took):
        """Sets the took of this ListUserForwardingRulesResponse."""
        if took is None:
            raise ValueError("Invalid value for `took`, must not be `None`")
        self._took = took
    @property
    def data(self):
        """Gets the data of this ListUserForwardingRulesResponse."""
        return self._data
    @data.setter
    def data(self, data):
        """Sets the data of this ListUserForwardingRulesResponse."""
        self._data = data
    def to_dict(self):
        """Returns the model properties as a dict (recursing into models)."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, ListUserForwardingRulesResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| true | true |
1c2ed9606f75c5dc16def840ff41f8e1d5d88501 | 26,633 | py | Python | train_acne.py | wenh06/yolov4_acne_torch | 8eda65ff6805ec313de39c74aea12a774657f3ff | [
"Apache-2.0"
] | null | null | null | train_acne.py | wenh06/yolov4_acne_torch | 8eda65ff6805ec313de39c74aea12a774657f3ff | [
"Apache-2.0"
] | null | null | null | train_acne.py | wenh06/yolov4_acne_torch | 8eda65ff6805ec313de39c74aea12a774657f3ff | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Train an acne detector using the enhanced ACNE04 dataset.

References:
[1] https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
'''
import time
import logging
import os, sys
import argparse
from collections import deque
import datetime
import cv2
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch import optim
from torch.nn import functional as F
from tensorboardX import SummaryWriter
from easydict import EasyDict as ED
from dataset_acne04 import ACNE04
from cfg_acne04 import Cfg
from models import Yolov4
from tool.utils_iou import (
bboxes_iou, bboxes_giou, bboxes_diou, bboxes_ciou,
)
from tool.utils import post_processing, plot_boxes_cv2
# from tool.tv_reference.utils import MetricLogger
from tool.tv_reference.utils import collate_fn as val_collate
from tool.tv_reference.coco_utils import convert_to_coco_api
from tool.tv_reference.coco_eval import CocoEvaluator
DAS = True
class Yolo_loss(nn.Module):
    def __init__(self, n_classes=1, n_anchors=3, device=None, batch=2, iou_type='iou'):
        """
        Precompute, for each of the 3 output scales (strides 8/16/32 on a
        608x608 input), the masked anchors, reference anchor boxes and the
        meshgrid/anchor tensors used when building training targets.

        Parameters
        ----------
        n_classes: int, number of object classes (1 for acne detection)
        n_anchors: int, anchors per output scale
        device: torch.device the constant tensors are placed on
        iou_type: str, IoU variant used for anchor/target matching
            ('iou', 'giou', 'diou' or 'ciou')
        """
        # NOTE(review): `batch` fixes the batch size the grid tensors are
        # tiled for; inputs with a different batch size presumably fail --
        # confirm against the training loop.
        super(Yolo_loss, self).__init__()
        self.device = device
        self.strides = [8, 16, 32]
        image_size = 608
        self.n_classes = n_classes
        self.n_anchors = n_anchors
        self.iou_type = iou_type
        # Anchor sizes in input-image pixels (YOLOv4 COCO defaults).
        self.anchors = [[12, 16], [19, 36], [40, 28], [36, 75], [76, 55], [72, 146], [142, 110], [192, 243], [459, 401]]
        # self.anchors = [[7, 7], [8, 9], [10, 8], [11, 10], [11, 12], [13, 17], [14, 11], [16, 14], [20, 21]]
        # Which anchor indices belong to each of the 3 output scales.
        self.anch_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        self.ignore_thre = 0.5
        self.masked_anchors, self.ref_anchors, self.grid_x, self.grid_y, self.anchor_w, self.anchor_h = [], [], [], [], [], []
        for i in range(3):
            # Anchor sizes expressed in units of this scale's grid cells.
            all_anchors_grid = [(w / self.strides[i], h / self.strides[i]) for w, h in self.anchors]
            masked_anchors = np.array([all_anchors_grid[j] for j in self.anch_masks[i]], dtype=np.float32)
            # Reference boxes with zero offsets (w/h only) for IoU matching.
            ref_anchors = np.zeros((len(all_anchors_grid), 4), dtype=np.float32)
            ref_anchors[:, 2:] = np.array(all_anchors_grid, dtype=np.float32)
            ref_anchors = torch.from_numpy(ref_anchors)
            # calculate pred - xywh obj cls
            fsize = image_size // self.strides[i]
            grid_x = torch.arange(fsize, dtype=torch.float).repeat(batch, 3, fsize, 1).to(device)
            grid_y = torch.arange(fsize, dtype=torch.float).repeat(batch, 3, fsize, 1).permute(0, 1, 3, 2).to(device)
            anchor_w = torch.from_numpy(masked_anchors[:, 0]).repeat(batch, fsize, fsize, 1).permute(0, 3, 1, 2).to(
                device)
            anchor_h = torch.from_numpy(masked_anchors[:, 1]).repeat(batch, fsize, fsize, 1).permute(0, 3, 1, 2).to(
                device)
            self.masked_anchors.append(masked_anchors)
            self.ref_anchors.append(ref_anchors)
            self.grid_x.append(grid_x)
            self.grid_y.append(grid_y)
            self.anchor_w.append(anchor_w)
            self.anchor_h.append(anchor_h)
def build_target(self, pred, labels, batchsize, fsize, n_ch, output_id):
# target assignment
tgt_mask = torch.zeros(batchsize, self.n_anchors, fsize, fsize, 4 + self.n_classes).to(device=self.device)
obj_mask = torch.ones(batchsize, self.n_anchors, fsize, fsize).to(device=self.device)
tgt_scale = torch.zeros(batchsize, self.n_anchors, fsize, fsize, 2).to(self.device)
target = torch.zeros(batchsize, self.n_anchors, fsize, fsize, n_ch).to(self.device)
# labels = labels.cpu().data
nlabel = (labels.sum(dim=2) > 0).sum(dim=1) # number of objects
truth_x_all = (labels[:, :, 2] + labels[:, :, 0]) / (self.strides[output_id] * 2)
truth_y_all = (labels[:, :, 3] + labels[:, :, 1]) / (self.strides[output_id] * 2)
truth_w_all = (labels[:, :, 2] - labels[:, :, 0]) / self.strides[output_id]
truth_h_all = (labels[:, :, 3] - labels[:, :, 1]) / self.strides[output_id]
truth_i_all = truth_x_all.to(torch.int16).cpu().numpy()
truth_j_all = truth_y_all.to(torch.int16).cpu().numpy()
for b in range(batchsize):
n = int(nlabel[b])
if n == 0:
continue
truth_box = torch.zeros(n, 4).to(self.device)
truth_box[:n, 2] = truth_w_all[b, :n]
truth_box[:n, 3] = truth_h_all[b, :n]
truth_i = truth_i_all[b, :n]
truth_j = truth_j_all[b, :n]
# calculate iou between truth and reference anchors
# anchor_ious_all = bboxes_iou(truth_box.cpu(), self.ref_anchors[output_id])
anchor_ious_all = bboxes_iou(
truth_box.cpu(),
self.ref_anchors[output_id],
fmt='voc',
# iou_type='iou',
iou_type=self.iou_type,
)
best_n_all = anchor_ious_all.argmax(dim=1)
best_n = best_n_all % 3
best_n_mask = ((best_n_all == self.anch_masks[output_id][0]) |
(best_n_all == self.anch_masks[output_id][1]) |
(best_n_all == self.anch_masks[output_id][2]))
if sum(best_n_mask) == 0:
continue
truth_box[:n, 0] = truth_x_all[b, :n]
truth_box[:n, 1] = truth_y_all[b, :n]
# pred_ious = bboxes_iou(pred[b].view(-1, 4), truth_box, xyxy=False)
pred_ious = bboxes_iou(
pred[b].view(-1, 4),
truth_box,
fmt='yolo',
# iou_type='iou',
iou_type=self.iou_type,
)
pred_best_iou, _ = pred_ious.max(dim=1)
pred_best_iou = (pred_best_iou > self.ignore_thre)
pred_best_iou = pred_best_iou.view(pred[b].shape[:3])
# set mask to zero (ignore) if pred matches truth
obj_mask[b] = ~ pred_best_iou
for ti in range(best_n.shape[0]):
if best_n_mask[ti] == 1:
i, j = truth_i[ti], truth_j[ti]
a = best_n[ti]
obj_mask[b, a, j, i] = 1
tgt_mask[b, a, j, i, :] = 1
target[b, a, j, i, 0] = truth_x_all[b, ti] - truth_x_all[b, ti].to(torch.int16).to(torch.float)
target[b, a, j, i, 1] = truth_y_all[b, ti] - truth_y_all[b, ti].to(torch.int16).to(torch.float)
target[b, a, j, i, 2] = torch.log(
truth_w_all[b, ti] / torch.Tensor(self.masked_anchors[output_id])[best_n[ti], 0] + 1e-16)
target[b, a, j, i, 3] = torch.log(
truth_h_all[b, ti] / torch.Tensor(self.masked_anchors[output_id])[best_n[ti], 1] + 1e-16)
target[b, a, j, i, 4] = 1
target[b, a, j, i, 5 + labels[b, ti, 4].to(torch.int16).cpu().numpy()] = 1
tgt_scale[b, a, j, i, :] = torch.sqrt(2 - truth_w_all[b, ti] * truth_h_all[b, ti] / fsize / fsize)
return obj_mask, tgt_mask, tgt_scale, target
def forward(self, xin, labels=None):
loss, loss_xy, loss_wh, loss_obj, loss_cls, loss_l2 = 0, 0, 0, 0, 0, 0
for output_id, output in enumerate(xin):
batchsize = output.shape[0]
fsize = output.shape[2]
n_ch = 5 + self.n_classes
output = output.view(batchsize, self.n_anchors, n_ch, fsize, fsize)
output = output.permute(0, 1, 3, 4, 2) # .contiguous()
# logistic activation for xy, obj, cls
output[..., np.r_[:2, 4:n_ch]] = torch.sigmoid(output[..., np.r_[:2, 4:n_ch]])
pred = output[..., :4].clone()
pred[..., 0] += self.grid_x[output_id]
pred[..., 1] += self.grid_y[output_id]
pred[..., 2] = torch.exp(pred[..., 2]) * self.anchor_w[output_id]
pred[..., 3] = torch.exp(pred[..., 3]) * self.anchor_h[output_id]
obj_mask, tgt_mask, tgt_scale, target = self.build_target(
pred, labels, batchsize, fsize, n_ch, output_id
)
# loss calculation
output[..., 4] *= obj_mask
output[..., np.r_[0:4, 5:n_ch]] *= tgt_mask
output[..., 2:4] *= tgt_scale
target[..., 4] *= obj_mask
target[..., np.r_[0:4, 5:n_ch]] *= tgt_mask
target[..., 2:4] *= tgt_scale
loss_xy += F.binary_cross_entropy(
input=output[..., :2],
target=target[..., :2],
weight=tgt_scale*tgt_scale,
size_average=False,
)
loss_wh += F.mse_loss(input=output[..., 2:4], target=target[..., 2:4], size_average=False) / 2
loss_obj += F.binary_cross_entropy(input=output[..., 4], target=target[..., 4], size_average=False)
loss_cls += F.binary_cross_entropy(input=output[..., 5:], target=target[..., 5:], size_average=False)
loss_l2 += F.mse_loss(input=output, target=target, size_average=False)
loss = loss_xy + loss_wh + loss_obj + loss_cls
return loss, loss_xy, loss_wh, loss_obj, loss_cls, loss_l2
def collate(batch):
    """Collate a list of ``(image, boxes)`` pairs into a training batch.

    Images (H, W, C) are stacked and converted to NCHW float tensors
    scaled to [0, 1]; the per-image box arrays are stacked into one
    tensor.
    """
    imgs, boxes = zip(*batch)
    img_batch = np.stack(imgs, axis=0).transpose(0, 3, 1, 2)
    img_batch = torch.from_numpy(img_batch).div(255.0)
    box_batch = torch.from_numpy(np.stack(boxes, axis=0))
    return img_batch, box_batch
def train(model, device, config, epochs=5, batch_size=1, save_ckpt=True, log_step=20, logger=None, img_scale=0.5):
    """Run the full training loop for the YOLOv4 model on ACNE04.

    Per epoch: iterates the training loader with gradient accumulation
    over ``config.subdivisions`` micro-batches, logs losses to
    TensorBoard, then evaluates a fresh inference-mode copy of the model
    on the validation set and (optionally) writes a checkpoint, keeping
    at most ``config.keep_checkpoint_max`` of them on disk.

    Parameters
    ----------
    model : nn.Module
        the Yolov4 network to train (already on ``device``).
    device : torch.device
        device used for images / targets.
    config : easydict
        training configuration (batch, subdivisions, learning_rate,
        train_label, val_label, checkpoints, ...).
    epochs : int
        number of epochs to run.
    batch_size : int
        unused — the effective micro-batch size is
        ``config.batch // config.subdivisions``.
    save_ckpt : bool
        whether to save a checkpoint after every epoch.
    log_step : int
        TensorBoard logging period, in optimizer steps.
    logger : logging.Logger, optional
        if given, progress is also written to this logger.
    img_scale : float
        unused in this implementation.
    """
    train_dataset = ACNE04(label_path=config.train_label, cfg=config, train=True)
    val_dataset = ACNE04(label_path=config.val_label, cfg=config, train=False)
    n_train = len(train_dataset)
    n_val = len(val_dataset)
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=config.batch // config.subdivisions,
        shuffle=True,
        num_workers=8,
        pin_memory=True,
        drop_last=True,  # setting False would result in error
        collate_fn=collate,
    )
    val_loader = DataLoader(
        dataset=val_dataset,
        batch_size=config.batch // config.subdivisions,
        shuffle=True,
        num_workers=8,
        pin_memory=True,
        drop_last=True,  # setting False would result in error
        collate_fn=val_collate,
    )
    writer = SummaryWriter(
        log_dir=config.TRAIN_TENSORBOARD_DIR,
        filename_suffix=f'OPT_{config.TRAIN_OPTIMIZER}_LR_{config.learning_rate}_BS_{config.batch}_Sub_{config.subdivisions}_Size_{config.width}',
        comment=f'OPT_{config.TRAIN_OPTIMIZER}_LR_{config.learning_rate}_BS_{config.batch}_Sub_{config.subdivisions}_Size_{config.width}',
    )
    # NOTE(review): max_itr is computed but never used below
    max_itr = config.TRAIN_EPOCHS * n_train
    # global_step = cfg.TRAIN_MINEPOCH * n_train
    global_step = 0  # counts micro-batches, not optimizer steps
    if logger:
        logger.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {config.batch}
        Subdivisions:    {config.subdivisions}
        Learning rate:   {config.learning_rate}
        Training size:   {n_train}
        Validation size: {n_val}
        Checkpoints:     {save_ckpt}
        Device:          {device.type}
        Images size:     {config.width}
        Optimizer:       {config.TRAIN_OPTIMIZER}
        Dataset classes: {config.classes}
        Train label path:{config.train_label}
        Pretrained:
    ''')
    # learning rate setup: warm-up (quartic) until burn_in, then two
    # step decays at config.steps[0] and config.steps[1]
    def burnin_schedule(i):
        if i < config.burn_in:
            factor = pow(i / config.burn_in, 4)
        elif i < config.steps[0]:
            factor = 1.0
        elif i < config.steps[1]:
            factor = 0.1
        else:
            factor = 0.01
        return factor
    # base LR is divided by config.batch; the logged LR multiplies it back
    if config.TRAIN_OPTIMIZER.lower() == 'adam':
        optimizer = optim.Adam(
            params=model.parameters(),
            lr=config.learning_rate / config.batch,
            betas=(0.9, 0.999),
            eps=1e-08,
        )
    elif config.TRAIN_OPTIMIZER.lower() == 'sgd':
        optimizer = optim.SGD(
            params=model.parameters(),
            lr=config.learning_rate / config.batch,
            momentum=config.momentum,
            weight_decay=config.decay,
        )
    scheduler = optim.lr_scheduler.LambdaLR(optimizer, burnin_schedule)
    criterion = Yolo_loss(
        n_classes=config.classes,
        device=device,
        batch=config.batch // config.subdivisions,
        iou_type=config.iou_type,
    )
    # scheduler = ReduceLROnPlateau(optimizer, mode='max', verbose=True, patience=6, min_lr=1e-7)
    # scheduler = CosineAnnealingWarmRestarts(optimizer, 0.001, 1e-6, 20)
    save_prefix = 'Yolov4_epoch'
    saved_models = deque()  # FIFO of checkpoint paths for rotation
    model.train()
    for epoch in range(epochs):
        model.train()
        epoch_loss = 0
        epoch_step = 0
        with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img', ncols=100) as pbar:
            for i, batch in enumerate(train_loader):
                global_step += 1
                epoch_step += 1
                images = batch[0]
                bboxes = batch[1]
                images = images.to(device=device, dtype=torch.float32)
                bboxes = bboxes.to(device=device)
                bboxes_pred = model(images)
                loss, loss_xy, loss_wh, loss_obj, loss_cls, loss_l2 = criterion(bboxes_pred, bboxes)
                # loss = loss / config.subdivisions
                loss.backward()
                epoch_loss += loss.item()
                # gradient accumulation: step/zero only every
                # `config.subdivisions` micro-batches
                if global_step % config.subdivisions == 0:
                    optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                if global_step % (log_step * config.subdivisions) == 0:
                    # NOTE(review): scheduler.get_lr() is deprecated in
                    # newer torch in favour of get_last_lr()
                    writer.add_scalar('train/Loss', loss.item(), global_step)
                    writer.add_scalar('train/loss_xy', loss_xy.item(), global_step)
                    writer.add_scalar('train/loss_wh', loss_wh.item(), global_step)
                    writer.add_scalar('train/loss_obj', loss_obj.item(), global_step)
                    writer.add_scalar('train/loss_cls', loss_cls.item(), global_step)
                    writer.add_scalar('train/loss_l2', loss_l2.item(), global_step)
                    writer.add_scalar('lr', scheduler.get_lr()[0] * config.batch, global_step)
                    pbar.set_postfix(**{
                        'loss (batch)': loss.item(),
                        'loss_xy': loss_xy.item(),
                        'loss_wh': loss_wh.item(),
                        'loss_obj': loss_obj.item(),
                        'loss_cls': loss_cls.item(),
                        'loss_l2': loss_l2.item(),
                        'lr': scheduler.get_lr()[0] * config.batch
                    })
                    if logger:
                        logger.info(f'Train step_{global_step}: loss : {loss.item()},loss xy : {loss_xy.item()}, loss wh : {loss_wh.item()}, loss obj : {loss_obj.item()}, loss cls : {loss_cls.item()}, loss l2 : {loss_l2.item()}, lr : {scheduler.get_lr()[0] * config.batch}')
                pbar.update(images.shape[0])
            # TODO: eval for each epoch using `evaluate`
            # evaluation uses a separate inference-mode copy of the
            # weights so the training model's mode/state is untouched
            eval_model = Yolov4(yolov4conv137weight=None, n_classes=config.classes, inference=True)
            eval_model.load_state_dict(model.state_dict())
            eval_model.to(device)
            evaluator = evaluate(eval_model, val_loader, config, device, logger)
            del eval_model
            # COCO summary vector: AP, AP50, AP75, AP_s/m/l, AR1/10/100, AR_s/m/l
            stats = evaluator.coco_eval['bbox'].stats
            writer.add_scalar('train/AP', stats[0], global_step)
            writer.add_scalar('train/AP50', stats[1], global_step)
            writer.add_scalar('train/AP75', stats[2], global_step)
            writer.add_scalar('train/AP_small', stats[3], global_step)
            writer.add_scalar('train/AP_medium', stats[4], global_step)
            writer.add_scalar('train/AP_large', stats[5], global_step)
            writer.add_scalar('train/AR1', stats[6], global_step)
            writer.add_scalar('train/AR10', stats[7], global_step)
            writer.add_scalar('train/AR100', stats[8], global_step)
            writer.add_scalar('train/AR_small', stats[9], global_step)
            writer.add_scalar('train/AR_medium', stats[10], global_step)
            writer.add_scalar('train/AR_large', stats[11], global_step)
            if save_ckpt:
                try:
                    os.mkdir(config.checkpoints)
                    if logger:
                        logger.info('Created checkpoint directory')
                except OSError:
                    pass
                save_path = os.path.join(config.checkpoints, f'{save_prefix}{epoch + 1}_{_get_date_str()}.pth')
                torch.save(model.state_dict(), save_path)
                if logger:
                    logger.info(f'Checkpoint {epoch + 1} saved!')
                saved_models.append(save_path)
                # remove outdated models
                if len(saved_models) > config.keep_checkpoint_max > 0:
                    model_to_remove = saved_models.popleft()
                    try:
                        os.remove(model_to_remove)
                    except:
                        logger.info(f'failed to remove {model_to_remove}')
    writer.close()
@torch.no_grad()
def evaluate(model, data_loader, cfg, device, logger=None, **kwargs):
    """Run COCO-style evaluation of ``model`` on ``data_loader``.

    Images are resized to (cfg.w, cfg.h), batched and run through the
    model; predictions (normalised [x1, y1, x2, y2] + class confidences)
    are converted to absolute COCO [x, y, w, h] boxes and fed to a
    ``CocoEvaluator``.

    Keyword args: ``debug`` (str or list of str). When it contains
    'iou', the IoU of the first image's post-processed boxes against
    ground truth is printed and the function returns early with None
    instead of the evaluator.

    Returns
    -------
    CocoEvaluator or None
        the accumulated evaluator (callers read
        ``coco_eval['bbox'].stats``), or None in the debug path.
    """
    # cpu_device = torch.device("cpu")
    model.eval()
    # header = 'Test:'
    coco = convert_to_coco_api(data_loader.dataset, bbox_fmt='coco')
    coco_evaluator = CocoEvaluator(coco, iou_types = ["bbox"], bbox_fmt='coco')
    for images, targets in data_loader:
        # NOTE(review): uses cfg.w / cfg.h — config elsewhere uses
        # width / height; confirm these attributes exist
        model_input = [[cv2.resize(img, (cfg.w, cfg.h))] for img in images]
        model_input = np.concatenate(model_input, axis=0)
        model_input = model_input.transpose(0, 3, 1, 2)
        model_input = torch.from_numpy(model_input).div(255.0)
        model_input = model_input.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        # NOTE(review): model_time / evaluator_time are measured but
        # never reported
        model_time = time.time()
        outputs = model(model_input)
        # outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time
        # outputs = outputs.cpu().detach().numpy()
        res = {}
        # for img, target, output in zip(images, targets, outputs):
        # outputs[0]: boxes per image; outputs[1]: per-class confidences
        for img, target, boxes, confs in zip(images, targets, outputs[0], outputs[1]):
            img_height, img_width = img.shape[:2]
            # boxes = output[...,:4].copy()  # output boxes in yolo format
            # assumes boxes has a singleton dim at axis 2 — TODO confirm
            # against the inference-mode Yolov4 output shape
            boxes = boxes.squeeze(2).cpu().detach().numpy()
            boxes[...,2:] = boxes[...,2:] - boxes[...,:2] # Transform [x1, y1, x2, y2] to [x1, y1, w, h]
            # scale normalised coordinates to absolute pixels
            boxes[...,0] = boxes[...,0]*img_width
            boxes[...,1] = boxes[...,1]*img_height
            boxes[...,2] = boxes[...,2]*img_width
            boxes[...,3] = boxes[...,3]*img_height
            boxes = torch.as_tensor(boxes, dtype=torch.float32)
            # confs = output[...,4:].copy()
            confs = confs.cpu().detach().numpy()
            # class label / score = argmax / max over class confidences
            labels = np.argmax(confs, axis=1).flatten()
            labels = torch.as_tensor(labels, dtype=torch.int64)
            scores = np.max(confs, axis=1).flatten()
            scores = torch.as_tensor(scores, dtype=torch.float32)
            res[target["image_id"].item()] = {
                "boxes": boxes,
                "scores": scores,
                "labels": labels,
            }
        # optional debug hook: print IoU of first image and bail out
        debug = kwargs.get("debug", [])
        if isinstance(debug, str):
            debug = [debug]
        debug = [item.lower() for item in debug]
        if 'iou' in debug:
            from tool.utils_iou_test import bboxes_iou_test
            ouput_boxes = np.array(post_processing(None, 0.5, 0.5, outputs)[0])[...,:4]
            img_height, img_width = images[0].shape[:2]
            ouput_boxes[...,0] = ouput_boxes[...,0] * img_width
            ouput_boxes[...,1] = ouput_boxes[...,1] * img_height
            ouput_boxes[...,2] = ouput_boxes[...,2] * img_width
            ouput_boxes[...,3] = ouput_boxes[...,3] * img_height
            # coco format to yolo format
            truth_boxes = targets[0]['boxes'].numpy().copy()
            truth_boxes[...,:2] = truth_boxes[...,:2] + truth_boxes[...,2:]/2
            iou = bboxes_iou_test(torch.Tensor(ouput_boxes), torch.Tensor(truth_boxes), fmt='yolo')
            print(f"iou of first image = {iou}")
        if len(debug) > 0:
            # NOTE: returns None mid-loop — debug mode only processes
            # the first batch
            return
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
    # gather the stats from all processes
    coco_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    return coco_evaluator
def get_args(**kwargs):
    """Merge command-line options over the configuration given as kwargs.

    The keyword arguments (typically the static ``Cfg`` dict) provide the
    base configuration; any recognised command-line flag overrides the
    corresponding entry. Returns the merged configuration as an EasyDict.
    """
    pretrained_detector = '/mnt/wenhao71/workspace/yolov4_acne_torch/pretrained/yolov4.pth'
    cfg = kwargs
    parser = argparse.ArgumentParser(
        description='Train the Model on images and target masks',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('-l', '--learning-rate', metavar='LR', type=float,
                        nargs='?', default=0.001, help='Learning rate',
                        dest='learning_rate')
    parser.add_argument('-f', '--load', dest='load', type=str,
                        default=pretrained_detector,
                        help='Load model from a .pth file')
    parser.add_argument('-g', '--gpu', metavar='G', type=str, default='0',
                        help='GPU', dest='gpu')
    # `dataset_dir` and `pretrained` are already set in cfg_acne04.py
    parser.add_argument('-classes', type=int, default=1,
                        help='dataset classes')
    parser.add_argument('-iou-type', type=str, default='iou',
                        help='iou type (iou, giou, diou, ciou)',
                        dest='iou_type')
    parser.add_argument('-keep-checkpoint-max', type=int, default=10,
                        help='maximum number of checkpoints to keep. If set 0, all checkpoints will be kept',
                        dest='keep_checkpoint_max')
    parser.add_argument('-optimizer', type=str, default='adam',
                        help='training optimizer',
                        dest='TRAIN_OPTIMIZER')
    cfg.update(vars(parser.parse_args()))
    return ED(cfg)
def init_logger(log_file=None, log_dir=None, mode='a', verbose=0):
    """Set up the 'Yolov4-ACNE04' logger with console and file handlers.

    Parameters
    ----------
    log_file : str, optional
        name of the log file; defaults to a timestamped name.
    log_dir : str, optional
        directory for the log file; defaults to '~/temp/log/'.
    mode : str
        open mode of the file handler ('a' append / 'w' truncate).
    verbose : int
        0 -> WARNING everywhere; 1 -> console INFO, file DEBUG;
        >= 2 -> DEBUG everywhere.

    Returns
    -------
    logging.Logger
        the configured logger. Note: handlers are added on every call,
        so calling this twice duplicates output.
    """
    if log_dir is None:
        log_dir = '~/temp/log/'
    if log_file is None:
        log_file = f'log_{_get_date_str()}.txt'
    # Expand '~' before creating the directory; previously the default
    # path silently created a literal './~/temp/log' directory (bug fix).
    log_dir = os.path.expanduser(log_dir)
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, log_file)
    print(f'log file path: {log_file}')
    logger = logging.getLogger('Yolov4-ACNE04')
    c_handler = logging.StreamHandler(sys.stdout)
    # honour the `mode` argument (it was previously accepted but ignored;
    # FileHandler's default is 'a', so default behaviour is unchanged)
    f_handler = logging.FileHandler(log_file, mode=mode)
    if verbose >= 2:
        print("levels of c_handler and f_handler are set DEBUG")
        c_handler.setLevel(logging.DEBUG)
        f_handler.setLevel(logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    elif verbose >= 1:
        print("level of c_handler is set INFO, level of f_handler is set DEBUG")
        c_handler.setLevel(logging.INFO)
        f_handler.setLevel(logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    else:
        print("levels of c_handler and f_handler are set WARNING")
        c_handler.setLevel(logging.WARNING)
        f_handler.setLevel(logging.WARNING)
        logger.setLevel(logging.WARNING)
    c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
    f_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    c_handler.setFormatter(c_format)
    f_handler.setFormatter(f_format)
    logger.addHandler(c_handler)
    logger.addHandler(f_handler)
    return logger
def _get_date_str():
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d_%H-%M')
"""
torch, torch vision, cu compatibility:
https://download.pytorch.org/whl/torch_stable.html
https://download.pytorch.org/whl/cu100/torch-1.3.1%2Bcu100-cp36-cp36m-linux_x86_64.whl
"""
if __name__ == "__main__":
cfg = get_args(**Cfg)
# os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu
if not DAS:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
device = torch.device('cuda')
log_dir = cfg.TRAIN_TENSORBOARD_DIR
logger = init_logger(log_dir=log_dir)
logger.info(f"\n{'*'*20} Start Training {'*'*20}\n")
logger.info(f'Using device {device}')
logger.info(f"Using torch of version {torch.__version__}")
logger.info(f'with configuration {cfg}')
print(f"\n{'*'*20} Start Training {'*'*20}\n")
print(f'Using device {device}')
print(f"Using torch of version {torch.__version__}")
print(f'with configuration {cfg}')
model = Yolov4(cfg.pretrained, n_classes=cfg.classes)
if not DAS and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
if not DAS:
model.to(device=device)
else:
model.cuda()
try:
train(
model=model,
config=cfg,
epochs=cfg.TRAIN_EPOCHS,
device=device,
logger=logger,
)
except KeyboardInterrupt:
torch.save(model.state_dict(), os.path.join(cfg.checkpoints, 'INTERRUPTED.pth'))
logger.info('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 40.475684 | 274 | 0.578943 |
import time
import logging
import os, sys
import argparse
from collections import deque
import datetime
import cv2
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch import optim
from torch.nn import functional as F
from tensorboardX import SummaryWriter
from easydict import EasyDict as ED
from dataset_acne04 import ACNE04
from cfg_acne04 import Cfg
from models import Yolov4
from tool.utils_iou import (
bboxes_iou, bboxes_giou, bboxes_diou, bboxes_ciou,
)
from tool.utils import post_processing, plot_boxes_cv2
from tool.tv_reference.utils import collate_fn as val_collate
from tool.tv_reference.coco_utils import convert_to_coco_api
from tool.tv_reference.coco_eval import CocoEvaluator
DAS = True
class Yolo_loss(nn.Module):
def __init__(self, n_classes=1, n_anchors=3, device=None, batch=2, iou_type='iou'):
super(Yolo_loss, self).__init__()
self.device = device
self.strides = [8, 16, 32]
image_size = 608
self.n_classes = n_classes
self.n_anchors = n_anchors
self.iou_type = iou_type
self.anchors = [[12, 16], [19, 36], [40, 28], [36, 75], [76, 55], [72, 146], [142, 110], [192, 243], [459, 401]]
self.anch_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
self.ignore_thre = 0.5
self.masked_anchors, self.ref_anchors, self.grid_x, self.grid_y, self.anchor_w, self.anchor_h = [], [], [], [], [], []
for i in range(3):
all_anchors_grid = [(w / self.strides[i], h / self.strides[i]) for w, h in self.anchors]
masked_anchors = np.array([all_anchors_grid[j] for j in self.anch_masks[i]], dtype=np.float32)
ref_anchors = np.zeros((len(all_anchors_grid), 4), dtype=np.float32)
ref_anchors[:, 2:] = np.array(all_anchors_grid, dtype=np.float32)
ref_anchors = torch.from_numpy(ref_anchors)
fsize = image_size // self.strides[i]
grid_x = torch.arange(fsize, dtype=torch.float).repeat(batch, 3, fsize, 1).to(device)
grid_y = torch.arange(fsize, dtype=torch.float).repeat(batch, 3, fsize, 1).permute(0, 1, 3, 2).to(device)
anchor_w = torch.from_numpy(masked_anchors[:, 0]).repeat(batch, fsize, fsize, 1).permute(0, 3, 1, 2).to(
device)
anchor_h = torch.from_numpy(masked_anchors[:, 1]).repeat(batch, fsize, fsize, 1).permute(0, 3, 1, 2).to(
device)
self.masked_anchors.append(masked_anchors)
self.ref_anchors.append(ref_anchors)
self.grid_x.append(grid_x)
self.grid_y.append(grid_y)
self.anchor_w.append(anchor_w)
self.anchor_h.append(anchor_h)
def build_target(self, pred, labels, batchsize, fsize, n_ch, output_id):
tgt_mask = torch.zeros(batchsize, self.n_anchors, fsize, fsize, 4 + self.n_classes).to(device=self.device)
obj_mask = torch.ones(batchsize, self.n_anchors, fsize, fsize).to(device=self.device)
tgt_scale = torch.zeros(batchsize, self.n_anchors, fsize, fsize, 2).to(self.device)
target = torch.zeros(batchsize, self.n_anchors, fsize, fsize, n_ch).to(self.device)
nlabel = (labels.sum(dim=2) > 0).sum(dim=1)
truth_x_all = (labels[:, :, 2] + labels[:, :, 0]) / (self.strides[output_id] * 2)
truth_y_all = (labels[:, :, 3] + labels[:, :, 1]) / (self.strides[output_id] * 2)
truth_w_all = (labels[:, :, 2] - labels[:, :, 0]) / self.strides[output_id]
truth_h_all = (labels[:, :, 3] - labels[:, :, 1]) / self.strides[output_id]
truth_i_all = truth_x_all.to(torch.int16).cpu().numpy()
truth_j_all = truth_y_all.to(torch.int16).cpu().numpy()
for b in range(batchsize):
n = int(nlabel[b])
if n == 0:
continue
truth_box = torch.zeros(n, 4).to(self.device)
truth_box[:n, 2] = truth_w_all[b, :n]
truth_box[:n, 3] = truth_h_all[b, :n]
truth_i = truth_i_all[b, :n]
truth_j = truth_j_all[b, :n]
anchor_ious_all = bboxes_iou(
truth_box.cpu(),
self.ref_anchors[output_id],
fmt='voc',
iou_type=self.iou_type,
)
best_n_all = anchor_ious_all.argmax(dim=1)
best_n = best_n_all % 3
best_n_mask = ((best_n_all == self.anch_masks[output_id][0]) |
(best_n_all == self.anch_masks[output_id][1]) |
(best_n_all == self.anch_masks[output_id][2]))
if sum(best_n_mask) == 0:
continue
truth_box[:n, 0] = truth_x_all[b, :n]
truth_box[:n, 1] = truth_y_all[b, :n]
pred_ious = bboxes_iou(
pred[b].view(-1, 4),
truth_box,
fmt='yolo',
iou_type=self.iou_type,
)
pred_best_iou, _ = pred_ious.max(dim=1)
pred_best_iou = (pred_best_iou > self.ignore_thre)
pred_best_iou = pred_best_iou.view(pred[b].shape[:3])
obj_mask[b] = ~ pred_best_iou
for ti in range(best_n.shape[0]):
if best_n_mask[ti] == 1:
i, j = truth_i[ti], truth_j[ti]
a = best_n[ti]
obj_mask[b, a, j, i] = 1
tgt_mask[b, a, j, i, :] = 1
target[b, a, j, i, 0] = truth_x_all[b, ti] - truth_x_all[b, ti].to(torch.int16).to(torch.float)
target[b, a, j, i, 1] = truth_y_all[b, ti] - truth_y_all[b, ti].to(torch.int16).to(torch.float)
target[b, a, j, i, 2] = torch.log(
truth_w_all[b, ti] / torch.Tensor(self.masked_anchors[output_id])[best_n[ti], 0] + 1e-16)
target[b, a, j, i, 3] = torch.log(
truth_h_all[b, ti] / torch.Tensor(self.masked_anchors[output_id])[best_n[ti], 1] + 1e-16)
target[b, a, j, i, 4] = 1
target[b, a, j, i, 5 + labels[b, ti, 4].to(torch.int16).cpu().numpy()] = 1
tgt_scale[b, a, j, i, :] = torch.sqrt(2 - truth_w_all[b, ti] * truth_h_all[b, ti] / fsize / fsize)
return obj_mask, tgt_mask, tgt_scale, target
def forward(self, xin, labels=None):
loss, loss_xy, loss_wh, loss_obj, loss_cls, loss_l2 = 0, 0, 0, 0, 0, 0
for output_id, output in enumerate(xin):
batchsize = output.shape[0]
fsize = output.shape[2]
n_ch = 5 + self.n_classes
output = output.view(batchsize, self.n_anchors, n_ch, fsize, fsize)
output = output.permute(0, 1, 3, 4, 2)
output[..., np.r_[:2, 4:n_ch]] = torch.sigmoid(output[..., np.r_[:2, 4:n_ch]])
pred = output[..., :4].clone()
pred[..., 0] += self.grid_x[output_id]
pred[..., 1] += self.grid_y[output_id]
pred[..., 2] = torch.exp(pred[..., 2]) * self.anchor_w[output_id]
pred[..., 3] = torch.exp(pred[..., 3]) * self.anchor_h[output_id]
obj_mask, tgt_mask, tgt_scale, target = self.build_target(
pred, labels, batchsize, fsize, n_ch, output_id
)
output[..., 4] *= obj_mask
output[..., np.r_[0:4, 5:n_ch]] *= tgt_mask
output[..., 2:4] *= tgt_scale
target[..., 4] *= obj_mask
target[..., np.r_[0:4, 5:n_ch]] *= tgt_mask
target[..., 2:4] *= tgt_scale
loss_xy += F.binary_cross_entropy(
input=output[..., :2],
target=target[..., :2],
weight=tgt_scale*tgt_scale,
size_average=False,
)
loss_wh += F.mse_loss(input=output[..., 2:4], target=target[..., 2:4], size_average=False) / 2
loss_obj += F.binary_cross_entropy(input=output[..., 4], target=target[..., 4], size_average=False)
loss_cls += F.binary_cross_entropy(input=output[..., 5:], target=target[..., 5:], size_average=False)
loss_l2 += F.mse_loss(input=output, target=target, size_average=False)
loss = loss_xy + loss_wh + loss_obj + loss_cls
return loss, loss_xy, loss_wh, loss_obj, loss_cls, loss_l2
def collate(batch):
images = []
bboxes = []
for img, box in batch:
images.append([img])
bboxes.append([box])
images = np.concatenate(images, axis=0)
images = images.transpose(0, 3, 1, 2)
images = torch.from_numpy(images).div(255.0)
bboxes = np.concatenate(bboxes, axis=0)
bboxes = torch.from_numpy(bboxes)
return images, bboxes
def train(model, device, config, epochs=5, batch_size=1, save_ckpt=True, log_step=20, logger=None, img_scale=0.5):
train_dataset = ACNE04(label_path=config.train_label, cfg=config, train=True)
val_dataset = ACNE04(label_path=config.val_label, cfg=config, train=False)
n_train = len(train_dataset)
n_val = len(val_dataset)
train_loader = DataLoader(
dataset=train_dataset,
batch_size=config.batch // config.subdivisions,
shuffle=True,
num_workers=8,
pin_memory=True,
drop_last=True,
collate_fn=collate,
)
val_loader = DataLoader(
dataset=val_dataset,
batch_size=config.batch // config.subdivisions,
shuffle=True,
num_workers=8,
pin_memory=True,
drop_last=True,
collate_fn=val_collate,
)
writer = SummaryWriter(
log_dir=config.TRAIN_TENSORBOARD_DIR,
filename_suffix=f'OPT_{config.TRAIN_OPTIMIZER}_LR_{config.learning_rate}_BS_{config.batch}_Sub_{config.subdivisions}_Size_{config.width}',
comment=f'OPT_{config.TRAIN_OPTIMIZER}_LR_{config.learning_rate}_BS_{config.batch}_Sub_{config.subdivisions}_Size_{config.width}',
)
max_itr = config.TRAIN_EPOCHS * n_train
global_step = 0
if logger:
logger.info(f'''Starting training:
Epochs: {epochs}
Batch size: {config.batch}
Subdivisions: {config.subdivisions}
Learning rate: {config.learning_rate}
Training size: {n_train}
Validation size: {n_val}
Checkpoints: {save_ckpt}
Device: {device.type}
Images size: {config.width}
Optimizer: {config.TRAIN_OPTIMIZER}
Dataset classes: {config.classes}
Train label path:{config.train_label}
Pretrained: {config.pretrained}
''')
def burnin_schedule(i):
if i < config.burn_in:
factor = pow(i / config.burn_in, 4)
elif i < config.steps[0]:
factor = 1.0
elif i < config.steps[1]:
factor = 0.1
else:
factor = 0.01
return factor
if config.TRAIN_OPTIMIZER.lower() == 'adam':
optimizer = optim.Adam(
params=model.parameters(),
lr=config.learning_rate / config.batch,
betas=(0.9, 0.999),
eps=1e-08,
)
elif config.TRAIN_OPTIMIZER.lower() == 'sgd':
optimizer = optim.SGD(
params=model.parameters(),
lr=config.learning_rate / config.batch,
momentum=config.momentum,
weight_decay=config.decay,
)
scheduler = optim.lr_scheduler.LambdaLR(optimizer, burnin_schedule)
criterion = Yolo_loss(
n_classes=config.classes,
device=device,
batch=config.batch // config.subdivisions,
iou_type=config.iou_type,
)
save_prefix = 'Yolov4_epoch'
saved_models = deque()
model.train()
for epoch in range(epochs):
model.train()
epoch_loss = 0
epoch_step = 0
with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img', ncols=100) as pbar:
for i, batch in enumerate(train_loader):
global_step += 1
epoch_step += 1
images = batch[0]
bboxes = batch[1]
images = images.to(device=device, dtype=torch.float32)
bboxes = bboxes.to(device=device)
bboxes_pred = model(images)
loss, loss_xy, loss_wh, loss_obj, loss_cls, loss_l2 = criterion(bboxes_pred, bboxes)
loss.backward()
epoch_loss += loss.item()
if global_step % config.subdivisions == 0:
optimizer.step()
scheduler.step()
model.zero_grad()
if global_step % (log_step * config.subdivisions) == 0:
writer.add_scalar('train/Loss', loss.item(), global_step)
writer.add_scalar('train/loss_xy', loss_xy.item(), global_step)
writer.add_scalar('train/loss_wh', loss_wh.item(), global_step)
writer.add_scalar('train/loss_obj', loss_obj.item(), global_step)
writer.add_scalar('train/loss_cls', loss_cls.item(), global_step)
writer.add_scalar('train/loss_l2', loss_l2.item(), global_step)
writer.add_scalar('lr', scheduler.get_lr()[0] * config.batch, global_step)
pbar.set_postfix(**{
'loss (batch)': loss.item(),
'loss_xy': loss_xy.item(),
'loss_wh': loss_wh.item(),
'loss_obj': loss_obj.item(),
'loss_cls': loss_cls.item(),
'loss_l2': loss_l2.item(),
'lr': scheduler.get_lr()[0] * config.batch
})
if logger:
logger.info(f'Train step_{global_step}: loss : {loss.item()},loss xy : {loss_xy.item()}, loss wh : {loss_wh.item()}, loss obj : {loss_obj.item()}, loss cls : {loss_cls.item()}, loss l2 : {loss_l2.item()}, lr : {scheduler.get_lr()[0] * config.batch}')
pbar.update(images.shape[0])
eval_model = Yolov4(yolov4conv137weight=None, n_classes=config.classes, inference=True)
eval_model.load_state_dict(model.state_dict())
eval_model.to(device)
evaluator = evaluate(eval_model, val_loader, config, device, logger)
del eval_model
stats = evaluator.coco_eval['bbox'].stats
writer.add_scalar('train/AP', stats[0], global_step)
writer.add_scalar('train/AP50', stats[1], global_step)
writer.add_scalar('train/AP75', stats[2], global_step)
writer.add_scalar('train/AP_small', stats[3], global_step)
writer.add_scalar('train/AP_medium', stats[4], global_step)
writer.add_scalar('train/AP_large', stats[5], global_step)
writer.add_scalar('train/AR1', stats[6], global_step)
writer.add_scalar('train/AR10', stats[7], global_step)
writer.add_scalar('train/AR100', stats[8], global_step)
writer.add_scalar('train/AR_small', stats[9], global_step)
writer.add_scalar('train/AR_medium', stats[10], global_step)
writer.add_scalar('train/AR_large', stats[11], global_step)
if save_ckpt:
try:
os.mkdir(config.checkpoints)
if logger:
logger.info('Created checkpoint directory')
except OSError:
pass
save_path = os.path.join(config.checkpoints, f'{save_prefix}{epoch + 1}_{_get_date_str()}.pth')
torch.save(model.state_dict(), save_path)
if logger:
logger.info(f'Checkpoint {epoch + 1} saved!')
saved_models.append(save_path)
if len(saved_models) > config.keep_checkpoint_max > 0:
model_to_remove = saved_models.popleft()
try:
os.remove(model_to_remove)
except:
logger.info(f'failed to remove {model_to_remove}')
writer.close()
@torch.no_grad()
def evaluate(model, data_loader, cfg, device, logger=None, **kwargs):
    """Run COCO-style bounding-box evaluation of ``model`` over ``data_loader``.

    Parameters
    ----------
    model : torch.nn.Module
        Detector whose forward returns ``(boxes, confs)``; boxes appear to be
        normalized [x1, y1, x2, y2] coordinates (they are rescaled by image
        width/height below) -- TODO confirm against the model head.
    data_loader : iterable of (images, targets)
        ``images`` are HWC arrays; ``targets`` are dicts of tensors holding at
        least ``image_id`` and ``boxes``.
    cfg : config with ``w``/``h``
        Network input width/height used when resizing each image.
    device : torch.device
        Device the batch is moved to before the forward pass.
    logger : optional
        Unused here; kept for interface symmetry with ``train``.
    **kwargs :
        ``debug`` may be a string or list of diagnostics (only ``'iou'`` is
        recognized); when non-empty the function returns early without
        accumulating COCO metrics.

    Returns
    -------
    CocoEvaluator
        Evaluator with accumulated and summarized bbox results, or ``None``
        when a debug mode triggered the early return.
    """
    model.eval()
    coco = convert_to_coco_api(data_loader.dataset, bbox_fmt='coco')
    coco_evaluator = CocoEvaluator(coco, iou_types = ["bbox"], bbox_fmt='coco')
    for images, targets in data_loader:
        # Resize every image to the network input size and assemble an NCHW
        # float batch scaled to [0, 1].
        model_input = [[cv2.resize(img, (cfg.w, cfg.h))] for img in images]
        model_input = np.concatenate(model_input, axis=0)
        model_input = model_input.transpose(0, 3, 1, 2)
        model_input = torch.from_numpy(model_input).div(255.0)
        model_input = model_input.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(model_input)
        model_time = time.time() - model_time
        res = {}
        for img, target, boxes, confs in zip(images, targets, outputs[0], outputs[1]):
            img_height, img_width = img.shape[:2]
            # Bug fix: this statement was truncated to 'ueeze(2)...' in the
            # file; restore the assignment that drops the singleton anchor
            # axis and moves the predictions to host memory.
            boxes = boxes.squeeze(2).cpu().detach().numpy()
            # [x1, y1, x2, y2] -> [x1, y1, w, h] (COCO box format), then scale
            # the normalized coordinates back to pixel units.
            boxes[...,2:] = boxes[...,2:] - boxes[...,:2]
            boxes[...,0] = boxes[...,0]*img_width
            boxes[...,1] = boxes[...,1]*img_height
            boxes[...,2] = boxes[...,2]*img_width
            boxes[...,3] = boxes[...,3]*img_height
            boxes = torch.as_tensor(boxes, dtype=torch.float32)
            confs = confs.cpu().detach().numpy()
            # Per box: predicted class = argmax over class confidences, score
            # = the corresponding maximum confidence.
            labels = np.argmax(confs, axis=1).flatten()
            labels = torch.as_tensor(labels, dtype=torch.int64)
            scores = np.max(confs, axis=1).flatten()
            scores = torch.as_tensor(scores, dtype=torch.float32)
            res[target["image_id"].item()] = {
                "boxes": boxes,
                "scores": scores,
                "labels": labels,
            }
        debug = kwargs.get("debug", [])
        if isinstance(debug, str):
            debug = [debug]
        debug = [item.lower() for item in debug]
        if 'iou' in debug:
            # Diagnostic: compare post-processed outputs of the first image in
            # the batch against its ground truth and print the IoU matrix.
            from tool.utils_iou_test import bboxes_iou_test
            ouput_boxes = np.array(post_processing(None, 0.5, 0.5, outputs)[0])[...,:4]
            img_height, img_width = images[0].shape[:2]
            ouput_boxes[...,0] = ouput_boxes[...,0] * img_width
            ouput_boxes[...,1] = ouput_boxes[...,1] * img_height
            ouput_boxes[...,2] = ouput_boxes[...,2] * img_width
            ouput_boxes[...,3] = ouput_boxes[...,3] * img_height
            truth_boxes = targets[0]['boxes'].numpy().copy()
            # Ground truth is stored as [x1, y1, w, h]; shift to center form.
            truth_boxes[...,:2] = truth_boxes[...,:2] + truth_boxes[...,2:]/2
            iou = bboxes_iou_test(torch.Tensor(ouput_boxes), torch.Tensor(truth_boxes), fmt='yolo')
            print(f"iou of first image = {iou}")
        if len(debug) > 0:
            # Debug runs skip metric accumulation entirely.
            return
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
    coco_evaluator.synchronize_between_processes()
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    return coco_evaluator
def get_args(**kwargs):
    """Build the run configuration from defaults plus CLI overrides.

    ``kwargs`` (typically the project ``Cfg`` dict) supplies the base
    settings; values parsed from ``sys.argv`` are merged on top and the
    combined mapping is returned as an ``easydict`` so entries can be
    accessed as attributes.
    """
    pretrained_detector = '/mnt/wenhao71/workspace/yolov4_acne_torch/pretrained/yolov4.pth'
    parser = argparse.ArgumentParser(
        description='Train the Model on images and target masks',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Table of (flags, options) pairs keeps each option on one screenful.
    option_table = [
        (('-l', '--learning-rate'),
         dict(metavar='LR', type=float, nargs='?', default=0.001,
              help='Learning rate', dest='learning_rate')),
        (('-f', '--load'),
         dict(dest='load', type=str, default=pretrained_detector,
              help='Load model from a .pth file')),
        (('-g', '--gpu'),
         dict(metavar='G', type=str, default='0', help='GPU', dest='gpu')),
        (('-classes',),
         dict(type=int, default=1, help='dataset classes')),
        (('-iou-type',),
         dict(type=str, default='iou',
              help='iou type (iou, giou, diou, ciou)', dest='iou_type')),
        (('-keep-checkpoint-max',),
         dict(type=int, default=10,
              help='maximum number of checkpoints to keep. If set 0, all checkpoints will be kept',
              dest='keep_checkpoint_max')),
        (('-optimizer',),
         dict(type=str, default='adam', help='training optimizer',
              dest='TRAIN_OPTIMIZER')),
    ]
    for flags, options in option_table:
        parser.add_argument(*flags, **options)
    # Command-line values win over the defaults supplied via **kwargs.
    kwargs.update(vars(parser.parse_args()))
    return ED(kwargs)
def init_logger(log_file=None, log_dir=None, mode='a', verbose=0):
    """Create the 'Yolov4-ACNE04' logger with console and file handlers.

    Parameters
    ----------
    log_file : str, optional
        File name for the log; defaults to ``log_<timestamp>.txt``.
    log_dir : str, optional
        Directory for the log file; defaults to ``~/temp/log/``. The
        directory is created if missing.
    mode : str, default 'a'
        Open mode for the file handler. (Fix: this argument was previously
        accepted but silently ignored; 'a' matches the old effective
        behavior, so the default is unchanged.)
    verbose : int, default 0
        0 -> WARNING everywhere; 1 -> INFO console / DEBUG file;
        >= 2 -> DEBUG everywhere.

    Returns
    -------
    logging.Logger
        The configured logger (a process-wide singleton by name; calling
        this twice adds a second pair of handlers).
    """
    if log_dir is None:
        log_dir = '~/temp/log/'
    if log_file is None:
        log_file = f'log_{_get_date_str()}.txt'
    # Fix: expand '~' so the default does not create a literal './~'
    # directory, and tolerate a pre-existing directory (avoids the
    # exists()/makedirs() race of the original).
    log_dir = os.path.expanduser(log_dir)
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, log_file)
    print(f'log file path: {log_file}')
    logger = logging.getLogger('Yolov4-ACNE04')
    c_handler = logging.StreamHandler(sys.stdout)
    # Fix: honour the 'mode' argument (it was previously unused).
    f_handler = logging.FileHandler(log_file, mode=mode)
    if verbose >= 2:
        print("levels of c_handler and f_handler are set DEBUG")
        c_level, f_level = logging.DEBUG, logging.DEBUG
    elif verbose >= 1:
        print("level of c_handler is set INFO, level of f_handler is set DEBUG")
        c_level, f_level = logging.INFO, logging.DEBUG
    else:
        print("levels of c_handler and f_handler are set WARNING")
        c_level, f_level = logging.WARNING, logging.WARNING
    c_handler.setLevel(c_level)
    f_handler.setLevel(f_level)
    # The logger itself must pass records through at the most permissive
    # (numerically smallest) of the two handler levels.
    logger.setLevel(min(c_level, f_level))
    c_handler.setFormatter(
        logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
    f_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(c_handler)
    logger.addHandler(f_handler)
    return logger
def _get_date_str():
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d_%H-%M')
if __name__ == "__main__":
    # Merge the static project Cfg dict with command-line overrides.
    cfg = get_args(**Cfg)
    # DAS looks like a flag for a specific cluster environment where CUDA is
    # assumed present -- TODO confirm what DAS stands for.
    if not DAS:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device('cuda')
    log_dir = cfg.TRAIN_TENSORBOARD_DIR
    logger = init_logger(log_dir=log_dir)
    # Banner is emitted both to the logger and to stdout.
    logger.info(f"\n{'*'*20} Start Training {'*'*20}\n")
    logger.info(f'Using device {device}')
    logger.info(f"Using torch of version {torch.__version__}")
    logger.info(f'with configuration {cfg}')
    print(f"\n{'*'*20} Start Training {'*'*20}\n")
    print(f'Using device {device}')
    print(f"Using torch of version {torch.__version__}")
    print(f'with configuration {cfg}')
    model = Yolov4(cfg.pretrained, n_classes=cfg.classes)
    # Wrap in DataParallel only when several GPUs are visible (and not on DAS).
    if not DAS and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    if not DAS:
        model.to(device=device)
    else:
        model.cuda()
    try:
        train(
            model=model,
            config=cfg,
            epochs=cfg.TRAIN_EPOCHS,
            device=device,
            logger=logger,
        )
    except KeyboardInterrupt:
        # Save a resumable checkpoint on Ctrl-C, then exit hard (os._exit
        # avoids hanging on non-daemon worker threads).
        torch.save(model.state_dict(), os.path.join(cfg.checkpoints, 'INTERRUPTED.pth'))
        logger.info('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
| true | true |
1c2ed974fbbbbb2a48514a7e2f63e0a2bfa50dd4 | 5,532 | py | Python | mayan/apps/events/apps.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/events/apps.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/events/apps.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | from django.apps import apps
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.menus import (
menu_object, menu_secondary, menu_tools, menu_topbar, menu_user
)
from mayan.apps.navigation.classes import SourceColumn
from mayan.apps.views.html_widgets import ObjectLinkWidget, TwoStateWidget
from .html_widgets import widget_event_actor_link, widget_event_type_link
from .links import (
link_current_user_events, link_current_user_events_export,
link_event_types_subscriptions_list, link_events_for_object_export,
link_events_list, link_events_list_export, link_notification_mark_read,
link_notification_mark_read_all, link_user_notifications_list
)
class EventsApp(MayanAppConfig):
    """Mayan app config for the events subsystem.

    ``ready()`` wires ACL inheritance for actstream's Action model, declares
    the list columns shown for actions / event types / notifications, and
    binds the event-related navigation links into the shared menus.
    """
    app_namespace = 'events'
    app_url = 'events'
    has_rest_api = True
    has_tests = True
    name = 'mayan.apps.events'
    verbose_name = _('Events')
    def ready(self):
        """Register columns, permissions, and menu links at app start-up."""
        super().ready()
        # Action lives in the third-party actstream app; the other two are
        # models of this app.
        Action = apps.get_model(app_label='actstream', model_name='Action')
        Notification = self.get_model(model_name='Notification')
        StoredEventType = self.get_model(model_name='StoredEventType')
        # Typecast the related field because actstream uses CharFields for
        # the object_id the action_object, actor, and target fields.
        ModelPermission.register_inheritance(
            fk_field_cast=models.CharField, model=Action,
            related='action_object'
        )
        ModelPermission.register_inheritance(
            fk_field_cast=models.CharField, model=Action, related='actor'
        )
        ModelPermission.register_inheritance(
            fk_field_cast=models.CharField, model=Action, related='target'
        )
        # Add labels to Action model, they are not marked translatable in the
        # upstream package.
        SourceColumn(
            attribute='timestamp', is_identifier=True,
            is_sortable=True, label=_('Date and time'), source=Action
        )
        SourceColumn(
            func=widget_event_actor_link, label=_('Actor'),
            include_label=True, source=Action
        )
        SourceColumn(
            func=widget_event_type_link, label=_('Event'),
            include_label=True, source=Action
        )
        SourceColumn(
            attribute='target', label=_('Target'), include_label=True,
            source=Action, widget=ObjectLinkWidget
        )
        SourceColumn(
            attribute='action_object', label=_('Action object'),
            include_label=True, source=Action, widget=ObjectLinkWidget
        )
        # Columns for the event type list views.
        SourceColumn(
            source=StoredEventType, label=_('Namespace'), attribute='namespace'
        )
        SourceColumn(
            source=StoredEventType, label=_('Label'), attribute='label'
        )
        # Notification columns mirror the Action columns but reach the action
        # through the notification's 'action' relation.
        SourceColumn(
            attribute='action__timestamp', is_identifier=True,
            is_sortable=True, label=_('Date and time'), source=Notification
        )
        SourceColumn(
            func=widget_event_actor_link, label=_('Actor'),
            include_label=True, kwargs={'attribute': 'action'},
            source=Notification
        )
        SourceColumn(
            func=widget_event_type_link, label=_('Event'),
            include_label=True, kwargs={'attribute': 'action'},
            source=Notification
        )
        SourceColumn(
            attribute='action.target', label=_('Target'), include_label=True,
            source=Notification, widget=ObjectLinkWidget
        )
        SourceColumn(
            attribute='action.action_object', label=_('Action object'),
            include_label=True, source=Notification, widget=ObjectLinkWidget
        )
        SourceColumn(
            attribute='read', include_label=True, is_sortable=True,
            label=_('Seen'), source=Notification, widget=TwoStateWidget
        )
        # Menu wiring: notifications icon in the top bar, per-object and
        # secondary actions for the various event list views.
        menu_topbar.bind_links(
            links=(link_user_notifications_list,), position=99
        )
        menu_object.bind_links(
            links=(link_notification_mark_read,), sources=(Notification,)
        )
        menu_secondary.bind_links(
            links=(link_notification_mark_read_all,),
            sources=(
                'events:notification_mark_read',
                'events:notification_mark_read_all',
                'events:user_notifications_list'
            )
        )
        menu_secondary.bind_links(
            links=(link_current_user_events_export,),
            sources=(
                'events:current_user_events',
                'events:current_user_events_export',
            )
        )
        menu_secondary.bind_links(
            links=(link_events_list_export,),
            sources=(
                'events:events_list',
                'events:events_list_export',
            )
        )
        menu_secondary.bind_links(
            links=(link_events_for_object_export,),
            sources=(
                'events:events_for_object',
                'events:events_for_object_export'
            )
        )
        menu_tools.bind_links(links=(link_events_list,))
        menu_user.bind_links(
            links=(
                link_event_types_subscriptions_list, link_current_user_events
            ), position=50
        )
| 37.378378 | 80 | 0.618764 | from django.apps import apps
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.menus import (
menu_object, menu_secondary, menu_tools, menu_topbar, menu_user
)
from mayan.apps.navigation.classes import SourceColumn
from mayan.apps.views.html_widgets import ObjectLinkWidget, TwoStateWidget
from .html_widgets import widget_event_actor_link, widget_event_type_link
from .links import (
link_current_user_events, link_current_user_events_export,
link_event_types_subscriptions_list, link_events_for_object_export,
link_events_list, link_events_list_export, link_notification_mark_read,
link_notification_mark_read_all, link_user_notifications_list
)
class EventsApp(MayanAppConfig):
app_namespace = 'events'
app_url = 'events'
has_rest_api = True
has_tests = True
name = 'mayan.apps.events'
verbose_name = _('Events')
def ready(self):
super().ready()
Action = apps.get_model(app_label='actstream', model_name='Action')
Notification = self.get_model(model_name='Notification')
StoredEventType = self.get_model(model_name='StoredEventType')
ModelPermission.register_inheritance(
fk_field_cast=models.CharField, model=Action,
related='action_object'
)
ModelPermission.register_inheritance(
fk_field_cast=models.CharField, model=Action, related='actor'
)
ModelPermission.register_inheritance(
fk_field_cast=models.CharField, model=Action, related='target'
)
SourceColumn(
attribute='timestamp', is_identifier=True,
is_sortable=True, label=_('Date and time'), source=Action
)
SourceColumn(
func=widget_event_actor_link, label=_('Actor'),
include_label=True, source=Action
)
SourceColumn(
func=widget_event_type_link, label=_('Event'),
include_label=True, source=Action
)
SourceColumn(
attribute='target', label=_('Target'), include_label=True,
source=Action, widget=ObjectLinkWidget
)
SourceColumn(
attribute='action_object', label=_('Action object'),
include_label=True, source=Action, widget=ObjectLinkWidget
)
SourceColumn(
source=StoredEventType, label=_('Namespace'), attribute='namespace'
)
SourceColumn(
source=StoredEventType, label=_('Label'), attribute='label'
)
SourceColumn(
attribute='action__timestamp', is_identifier=True,
is_sortable=True, label=_('Date and time'), source=Notification
)
SourceColumn(
func=widget_event_actor_link, label=_('Actor'),
include_label=True, kwargs={'attribute': 'action'},
source=Notification
)
SourceColumn(
func=widget_event_type_link, label=_('Event'),
include_label=True, kwargs={'attribute': 'action'},
source=Notification
)
SourceColumn(
attribute='action.target', label=_('Target'), include_label=True,
source=Notification, widget=ObjectLinkWidget
)
SourceColumn(
attribute='action.action_object', label=_('Action object'),
include_label=True, source=Notification, widget=ObjectLinkWidget
)
SourceColumn(
attribute='read', include_label=True, is_sortable=True,
label=_('Seen'), source=Notification, widget=TwoStateWidget
)
menu_topbar.bind_links(
links=(link_user_notifications_list,), position=99
)
menu_object.bind_links(
links=(link_notification_mark_read,), sources=(Notification,)
)
menu_secondary.bind_links(
links=(link_notification_mark_read_all,),
sources=(
'events:notification_mark_read',
'events:notification_mark_read_all',
'events:user_notifications_list'
)
)
menu_secondary.bind_links(
links=(link_current_user_events_export,),
sources=(
'events:current_user_events',
'events:current_user_events_export',
)
)
menu_secondary.bind_links(
links=(link_events_list_export,),
sources=(
'events:events_list',
'events:events_list_export',
)
)
menu_secondary.bind_links(
links=(link_events_for_object_export,),
sources=(
'events:events_for_object',
'events:events_for_object_export'
)
)
menu_tools.bind_links(links=(link_events_list,))
menu_user.bind_links(
links=(
link_event_types_subscriptions_list, link_current_user_events
), position=50
)
| true | true |
1c2edb084a5b7ee65534fb4e2d94be0da9da3cb1 | 3,772 | py | Python | neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py | freyes/neutron | 197c222acb0390728106a083d1663f2c06427518 | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py | freyes/neutron | 197c222acb0390728106a083d1663f2c06427518 | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py | freyes/neutron | 197c222acb0390728106a083d1663f2c06427518 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
from oslo_config import cfg
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.openvswitch.mech_driver \
import mech_openvswitch
from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
class OpenvswitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
    """Base fixture for OVS mechanism driver tests.

    Provides the expected VIF binding details plus sample agent records:
    a healthy agent (AGENTS), a dead one (AGENTS_DEAD), and a mix of dead
    and misconfigured agents (AGENTS_BAD).
    """
    VIF_TYPE = portbindings.VIF_TYPE_OVS
    # With the iptables_hybrid firewall (see setUp) the driver reports port
    # filtering and hybrid plugging as enabled.
    VIF_DETAILS = {portbindings.CAP_PORT_FILTER: True,
                   portbindings.OVS_HYBRID_PLUG: True}
    AGENT_TYPE = constants.AGENT_TYPE_OVS
    # Known-good agent configuration: physnet mapping plus tunnel types.
    GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'}
    GOOD_TUNNEL_TYPES = ['gre', 'vxlan']
    GOOD_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS,
                    'tunnel_types': GOOD_TUNNEL_TYPES}
    # Configuration that must never satisfy a binding request.
    BAD_MAPPINGS = {'wrong_physical_network': 'wrong_bridge'}
    BAD_TUNNEL_TYPES = ['bad_tunnel_type']
    BAD_CONFIGS = {'bridge_mappings': BAD_MAPPINGS,
                   'tunnel_types': BAD_TUNNEL_TYPES}
    AGENTS = [{'alive': True,
               'configurations': GOOD_CONFIGS,
               'host': 'host'}]
    AGENTS_DEAD = [{'alive': False,
                    'configurations': GOOD_CONFIGS,
                    'host': 'dead_host'}]
    AGENTS_BAD = [{'alive': False,
                   'configurations': GOOD_CONFIGS,
                   'host': 'bad_host_1'},
                  {'alive': True,
                   'configurations': BAD_CONFIGS,
                   'host': 'bad_host_2'}]
    def setUp(self):
        """Force the hybrid firewall driver and build the driver under test."""
        super(OpenvswitchMechanismBaseTestCase, self).setUp()
        cfg.CONF.set_override('firewall_driver', 'iptables_hybrid',
                              'SECURITYGROUP')
        self.driver = mech_openvswitch.OpenvswitchMechanismDriver()
        self.driver.initialize()
class OpenvswitchMechanismSGDisabledBaseTestCase(
        OpenvswitchMechanismBaseTestCase):
    """Variant of the base fixture with security groups turned off.

    With security groups disabled the driver must report both port
    filtering and hybrid plugging as False in the VIF details.
    """
    VIF_DETAILS = {portbindings.CAP_PORT_FILTER: False,
                   portbindings.OVS_HYBRID_PLUG: False}
    def setUp(self):
        # Disable security groups before the parent setUp instantiates the
        # driver so the driver picks the setting up at initialize time.
        cfg.CONF.set_override('enable_security_group',
                              False,
                              group='SECURITYGROUP')
        super(OpenvswitchMechanismSGDisabledBaseTestCase, self).setUp()
class OpenvswitchMechanismGenericTestCase(OpenvswitchMechanismBaseTestCase,
                                          base.AgentMechanismGenericTestCase):
    """Generic port-binding checks against the OVS mechanism driver."""
    pass
class OpenvswitchMechanismLocalTestCase(OpenvswitchMechanismBaseTestCase,
                                        base.AgentMechanismLocalTestCase):
    """Binding checks for 'local' network segments."""
    pass
class OpenvswitchMechanismFlatTestCase(OpenvswitchMechanismBaseTestCase,
                                       base.AgentMechanismFlatTestCase):
    """Binding checks for 'flat' network segments."""
    pass
class OpenvswitchMechanismVlanTestCase(OpenvswitchMechanismBaseTestCase,
                                       base.AgentMechanismVlanTestCase):
    """Binding checks for VLAN network segments."""
    pass
class OpenvswitchMechanismGreTestCase(OpenvswitchMechanismBaseTestCase,
                                      base.AgentMechanismGreTestCase):
    """Binding checks for GRE tunnel network segments."""
    pass
class OpenvswitchMechanismSGDisabledLocalTestCase(
    OpenvswitchMechanismSGDisabledBaseTestCase,
    base.AgentMechanismLocalTestCase):
    """'local' segment binding checks with security groups disabled."""
    pass
| 36.621359 | 78 | 0.66702 |
from neutron_lib import constants
from oslo_config import cfg
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.openvswitch.mech_driver \
import mech_openvswitch
from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
class OpenvswitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
VIF_TYPE = portbindings.VIF_TYPE_OVS
VIF_DETAILS = {portbindings.CAP_PORT_FILTER: True,
portbindings.OVS_HYBRID_PLUG: True}
AGENT_TYPE = constants.AGENT_TYPE_OVS
GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'}
GOOD_TUNNEL_TYPES = ['gre', 'vxlan']
GOOD_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS,
'tunnel_types': GOOD_TUNNEL_TYPES}
BAD_MAPPINGS = {'wrong_physical_network': 'wrong_bridge'}
BAD_TUNNEL_TYPES = ['bad_tunnel_type']
BAD_CONFIGS = {'bridge_mappings': BAD_MAPPINGS,
'tunnel_types': BAD_TUNNEL_TYPES}
AGENTS = [{'alive': True,
'configurations': GOOD_CONFIGS,
'host': 'host'}]
AGENTS_DEAD = [{'alive': False,
'configurations': GOOD_CONFIGS,
'host': 'dead_host'}]
AGENTS_BAD = [{'alive': False,
'configurations': GOOD_CONFIGS,
'host': 'bad_host_1'},
{'alive': True,
'configurations': BAD_CONFIGS,
'host': 'bad_host_2'}]
def setUp(self):
super(OpenvswitchMechanismBaseTestCase, self).setUp()
cfg.CONF.set_override('firewall_driver', 'iptables_hybrid',
'SECURITYGROUP')
self.driver = mech_openvswitch.OpenvswitchMechanismDriver()
self.driver.initialize()
class OpenvswitchMechanismSGDisabledBaseTestCase(
OpenvswitchMechanismBaseTestCase):
VIF_DETAILS = {portbindings.CAP_PORT_FILTER: False,
portbindings.OVS_HYBRID_PLUG: False}
def setUp(self):
cfg.CONF.set_override('enable_security_group',
False,
group='SECURITYGROUP')
super(OpenvswitchMechanismSGDisabledBaseTestCase, self).setUp()
class OpenvswitchMechanismGenericTestCase(OpenvswitchMechanismBaseTestCase,
base.AgentMechanismGenericTestCase):
pass
class OpenvswitchMechanismLocalTestCase(OpenvswitchMechanismBaseTestCase,
base.AgentMechanismLocalTestCase):
pass
class OpenvswitchMechanismFlatTestCase(OpenvswitchMechanismBaseTestCase,
base.AgentMechanismFlatTestCase):
pass
class OpenvswitchMechanismVlanTestCase(OpenvswitchMechanismBaseTestCase,
base.AgentMechanismVlanTestCase):
pass
class OpenvswitchMechanismGreTestCase(OpenvswitchMechanismBaseTestCase,
base.AgentMechanismGreTestCase):
pass
class OpenvswitchMechanismSGDisabledLocalTestCase(
OpenvswitchMechanismSGDisabledBaseTestCase,
base.AgentMechanismLocalTestCase):
pass
| true | true |
1c2edb8e78463062d4857c48a4c77384850b37df | 660 | py | Python | myblog/migrations/0001_initial.py | dahn510/MySite | 5c2c0ccaf84e5f1e121742cd18a953cfb86f282d | [
"Apache-2.0"
] | null | null | null | myblog/migrations/0001_initial.py | dahn510/MySite | 5c2c0ccaf84e5f1e121742cd18a953cfb86f282d | [
"Apache-2.0"
] | null | null | null | myblog/migrations/0001_initial.py | dahn510/MySite | 5c2c0ccaf84e5f1e121742cd18a953cfb86f282d | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-30 07:08
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema: create the ``BlogPost`` table.

    Auto-generated by Django 3.2.5; do not edit the operations by hand.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='BlogPost',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('content', models.TextField()),
                # Defaults to the creation time; stored, not auto-updated.
                ('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
    ]
| 26.4 | 117 | 0.592424 |
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BlogPost',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
],
),
]
| true | true |
1c2edf1fee0d85ae37227aad5c672476c5e53e16 | 1,807 | py | Python | _project/pullup/migrations/0005_auto_20181017_0710.py | SucheG/cayman-pullup.cz | a03bb58d5ff3ef3dba431bd4e900e6b3649c48f8 | [
"CC0-1.0"
] | null | null | null | _project/pullup/migrations/0005_auto_20181017_0710.py | SucheG/cayman-pullup.cz | a03bb58d5ff3ef3dba431bd4e900e6b3649c48f8 | [
"CC0-1.0"
] | null | null | null | _project/pullup/migrations/0005_auto_20181017_0710.py | SucheG/cayman-pullup.cz | a03bb58d5ff3ef3dba431bd4e900e6b3649c48f8 | [
"CC0-1.0"
] | 1 | 2018-10-08T16:56:40.000Z | 2018-10-08T16:56:40.000Z | # Generated by Django 2.1 on 2018-10-17 05:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax field constraints in the pullup app.

    Makes several description (``popis``) fields and many-to-many relations
    optional (``blank=True``); ``popis`` fields are capped at 500 chars.
    Auto-generated by Django; do not edit the operations by hand.
    """
    dependencies = [
        ('pullup', '0004_auto_20181013_2129'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cvik',
            name='popis',
            field=models.CharField(blank=True, max_length=500),
        ),
        migrations.AlterField(
            model_name='cvik',
            name='telo',
            field=models.ManyToManyField(blank=True, to='pullup.Telo'),
        ),
        migrations.AlterField(
            model_name='cvik',
            name='varianty',
            field=models.ManyToManyField(blank=True, through='pullup.Varianta', to='pullup.Cvik'),
        ),
        migrations.AlterField(
            model_name='cvik',
            name='vybaveni',
            field=models.ManyToManyField(blank=True, through='pullup.Potrebuje', to='pullup.Vybaveni'),
        ),
        migrations.AlterField(
            model_name='media',
            name='popis',
            field=models.CharField(blank=True, max_length=500),
        ),
        migrations.AlterField(
            model_name='misto',
            name='popis',
            field=models.CharField(blank=True, max_length=500),
        ),
        migrations.AlterField(
            model_name='misto',
            name='vybaveni',
            field=models.ManyToManyField(blank=True, to='pullup.Vybaveni'),
        ),
        migrations.AlterField(
            model_name='telo',
            name='popis',
            field=models.CharField(blank=True, max_length=500),
        ),
        migrations.AlterField(
            model_name='vybaveni',
            name='popis',
            field=models.CharField(blank=True, max_length=500),
        ),
    ]
| 30.627119 | 103 | 0.55451 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pullup', '0004_auto_20181013_2129'),
]
operations = [
migrations.AlterField(
model_name='cvik',
name='popis',
field=models.CharField(blank=True, max_length=500),
),
migrations.AlterField(
model_name='cvik',
name='telo',
field=models.ManyToManyField(blank=True, to='pullup.Telo'),
),
migrations.AlterField(
model_name='cvik',
name='varianty',
field=models.ManyToManyField(blank=True, through='pullup.Varianta', to='pullup.Cvik'),
),
migrations.AlterField(
model_name='cvik',
name='vybaveni',
field=models.ManyToManyField(blank=True, through='pullup.Potrebuje', to='pullup.Vybaveni'),
),
migrations.AlterField(
model_name='media',
name='popis',
field=models.CharField(blank=True, max_length=500),
),
migrations.AlterField(
model_name='misto',
name='popis',
field=models.CharField(blank=True, max_length=500),
),
migrations.AlterField(
model_name='misto',
name='vybaveni',
field=models.ManyToManyField(blank=True, to='pullup.Vybaveni'),
),
migrations.AlterField(
model_name='telo',
name='popis',
field=models.CharField(blank=True, max_length=500),
),
migrations.AlterField(
model_name='vybaveni',
name='popis',
field=models.CharField(blank=True, max_length=500),
),
]
| true | true |
1c2edf4d52e16f95da9865d8966cfbe46406c474 | 1,469 | py | Python | openstack_dashboard/test/integration_tests/tests/test_keypair.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/integration_tests/tests/test_keypair.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/integration_tests/tests/test_keypair.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Hewlett-Packard Development Company, L.P
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from openstack_dashboard.test.integration_tests import helpers
class TestKeypair(helpers.TestCase):
    """Checks that the user is able to create/delete keypair."""
    # Random suffix avoids collisions when several runs share one cloud.
    KEYPAIR_NAME = 'horizonkeypair' + str(random.randint(0, 1000))
    def test_keypair(self):
        """Create a keypair, verify it is listed, delete it, verify it is gone."""
        accesssecurity_page = self.home_pg.go_to_accesssecurity_page()
        keypair_page = accesssecurity_page.go_to_keypair_page()
        keypair_page.create_keypair(self.KEYPAIR_NAME)
        # Re-navigate so the list reflects the newly created keypair.
        accesssecurity_page = self.home_pg.go_to_accesssecurity_page()
        keypair_page = accesssecurity_page.go_to_keypair_page()
        self.assertTrue(keypair_page.get_keypair_status(self.KEYPAIR_NAME))
        keypair_page.delete_keypair(self.KEYPAIR_NAME)
        self.assertFalse(keypair_page.get_keypair_status(self.KEYPAIR_NAME))
| 40.805556 | 78 | 0.750851 |
import random
from openstack_dashboard.test.integration_tests import helpers
class TestKeypair(helpers.TestCase):
KEYPAIR_NAME = 'horizonkeypair' + str(random.randint(0, 1000))
def test_keypair(self):
accesssecurity_page = self.home_pg.go_to_accesssecurity_page()
keypair_page = accesssecurity_page.go_to_keypair_page()
keypair_page.create_keypair(self.KEYPAIR_NAME)
accesssecurity_page = self.home_pg.go_to_accesssecurity_page()
keypair_page = accesssecurity_page.go_to_keypair_page()
self.assertTrue(keypair_page.get_keypair_status(self.KEYPAIR_NAME))
keypair_page.delete_keypair(self.KEYPAIR_NAME)
self.assertFalse(keypair_page.get_keypair_status(self.KEYPAIR_NAME))
| true | true |
1c2ee0a267c36a7162841471d07e86b2c2ec3724 | 890 | py | Python | alumno/migrations/0027_auto_20180822_2035.py | saulmestanza/Solicitudes | 080f396025f75f21065251bd2af3f696d293ba3a | [
"Apache-2.0"
] | 2 | 2018-08-17T20:32:20.000Z | 2019-05-24T15:38:43.000Z | alumno/migrations/0027_auto_20180822_2035.py | saulmestanza/Solicitudes | 080f396025f75f21065251bd2af3f696d293ba3a | [
"Apache-2.0"
] | null | null | null | alumno/migrations/0027_auto_20180822_2035.py | saulmestanza/Solicitudes | 080f396025f75f21065251bd2af3f696d293ba3a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-08-22 20:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alumno', '0026_auto_20180822_2028'),
]
operations = [
migrations.AddField(
model_name='procesoalumno',
name='is_ok',
field=models.BooleanField(default=False, verbose_name='Estudiante de acuerdo con su nota'),
),
migrations.AlterField(
model_name='procesoalumno',
name='status',
field=models.CharField(choices=[(b'IN', 'Ingresado'), (b'ER', 'En Revisi\xf3n'), (b'ET', 'En Tr\xe1nsito'), (b'CN', 'Cancelado'), (b'DR', 'Evaluaci\xf3n por Docentes Recalificadores'), (b'FN', 'Finalizado')], default='IN', max_length=2, verbose_name='Estado'),
),
]
| 34.230769 | 272 | 0.619101 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alumno', '0026_auto_20180822_2028'),
]
operations = [
migrations.AddField(
model_name='procesoalumno',
name='is_ok',
field=models.BooleanField(default=False, verbose_name='Estudiante de acuerdo con su nota'),
),
migrations.AlterField(
model_name='procesoalumno',
name='status',
field=models.CharField(choices=[(b'IN', 'Ingresado'), (b'ER', 'En Revisi\xf3n'), (b'ET', 'En Tr\xe1nsito'), (b'CN', 'Cancelado'), (b'DR', 'Evaluaci\xf3n por Docentes Recalificadores'), (b'FN', 'Finalizado')], default='IN', max_length=2, verbose_name='Estado'),
),
]
| true | true |
1c2ee25f38f6e94291b0701ca56f2e66b052b814 | 760 | py | Python | pari/article/rich_text_filter.py | theju/pari | 318a4ffba08362e78253ded100a63f5b5c6eadf9 | [
"BSD-3-Clause"
] | null | null | null | pari/article/rich_text_filter.py | theju/pari | 318a4ffba08362e78253ded100a63f5b5c6eadf9 | [
"BSD-3-Clause"
] | null | null | null | pari/article/rich_text_filter.py | theju/pari | 318a4ffba08362e78253ded100a63f5b5c6eadf9 | [
"BSD-3-Clause"
] | null | null | null | import os
from lxml import html
from mezzanine.conf import settings
from pari.article.templatetags import article_tags
def article_content_filter(content):
    """Rewrite local <img> sources in article HTML to sized thumbnails.

    For every image whose src is site-relative (starts with '/') and that
    carries a width and/or height attribute, the src is replaced with the
    URL of a thumbnail generated at those dimensions; other images are left
    untouched. Returns the serialized HTML.
    """
    html_content = html.fromstring(content)
    images = html_content.cssselect('img')
    for image in images:
        image_source = image.attrib['src']
        if image_source.startswith("/"):
            image_width = image.attrib.get('width')
            image_height = image.attrib.get('height')
            if image_width or image_height:
                image_thumbnail_source = os.path.join(settings.MEDIA_URL, article_tags.thumbnail(image_source, image_width, image_height))
                image.attrib['src'] = image_thumbnail_source
    # NOTE(review): lxml's html.tostring() returns bytes on Python 3, so
    # .encode('utf8') would raise AttributeError there; this looks like
    # Python 2-era code -- confirm the runtime before porting.
    return html.tostring(html_content).encode('utf8')
| 36.190476 | 138 | 0.701316 | import os
from lxml import html
from mezzanine.conf import settings
from pari.article.templatetags import article_tags
def article_content_filter(content):
html_content = html.fromstring(content)
images = html_content.cssselect('img')
for image in images:
image_source = image.attrib['src']
if image_source.startswith("/"):
image_width = image.attrib.get('width')
image_height = image.attrib.get('height')
if image_width or image_height:
image_thumbnail_source = os.path.join(settings.MEDIA_URL, article_tags.thumbnail(image_source, image_width, image_height))
image.attrib['src'] = image_thumbnail_source
return html.tostring(html_content).encode('utf8')
| true | true |
1c2ee300834393447e84078d49feb8c57b8a59bd | 830 | py | Python | app/analysis/validation_cluster.py | ayushmaskey/log_analysis | c777f48117ec8e14845aa8d2deccc7f974ca232a | [
"MIT"
] | null | null | null | app/analysis/validation_cluster.py | ayushmaskey/log_analysis | c777f48117ec8e14845aa8d2deccc7f974ca232a | [
"MIT"
] | null | null | null | app/analysis/validation_cluster.py | ayushmaskey/log_analysis | c777f48117ec8e14845aa8d2deccc7f974ca232a | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
from csv_to_pandas import csv_into_dict_of_data
from wavelet_transformation import csv_into_wavelet_transformed_dict_of_dataframe
from training_cluster import reorganize_data
def get_validation_dataset(df):
    """Return the validation slice of ``df``: columns dated 2019-03-01
    or later, reorganized via ``reorganize_data``."""
    validation_cols = [col for col in df.columns if col >= '2019-03-01']
    return reorganize_data(df, validation_cols)
def vaidation():
    """Load every CSV as a DataFrame and trim each one to its validation
    window (2019-03-01 onward).

    NOTE: the name keeps the original misspelling of 'validation' because
    other code in this module calls it by this name.
    """
    data = csv_into_dict_of_data()
    for name in list(data.keys()):
        data[name] = get_validation_dataset(data[name])
    return data
def test_new_df_dict():
    """Smoke check: print the dataset keys and each frame's column list."""
    frames = vaidation()
    names = list(frames.keys())
    print(names)
    for name in names:
        print(name, list(frames[name].columns))
if __name__ == "__main__":
test_new_df_dict() | 21.842105 | 81 | 0.773494 | import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
from csv_to_pandas import csv_into_dict_of_data
from wavelet_transformation import csv_into_wavelet_transformed_dict_of_dataframe
from training_cluster import reorganize_data
def get_validation_dataset(df):
    """Return the validation slice of ``df``: columns dated 2019-03-01 or later.

    Column labels are compared as ISO-8601 strings, so the lexicographic
    ``>=`` is a correct date comparison.
    """
    column_list = [col for col in df.columns if col >= '2019-03-01']
    # reorganize_data comes from training_cluster; presumably it restricts /
    # reshapes df to the selected columns -- confirm in that module.
    df = reorganize_data(df, column_list)
    return df
def vaidation():
    """Build {dataset name: validation DataFrame} for every dataset.

    NOTE(review): the name carries a typo ("vaidation"); it is kept because
    test_new_df_dict() calls it by this exact name.
    """
    df_dict = csv_into_dict_of_data()
    key_list = list(df_dict.keys())
    for key in key_list:
        df = get_validation_dataset(df_dict[key])
        df_dict[key] = df  # replace the full dataset with its validation slice
    return df_dict
def test_new_df_dict():
    """Manual smoke test: print every dataset name and its remaining columns."""
    df_dict = vaidation()
    key_list = list(df_dict.keys())
    print(key_list)
    for key in key_list:
        print(key, list(df_dict[key].columns))
# Manual smoke-test entry point (not a pytest test despite the name).
if __name__ == "__main__":
    test_new_df_dict()
1c2ee3d2606b49529f7b9264442bbf83e899ef4b | 649 | py | Python | detection/configs/lit/retinanet_lit_ti_fpn_1x_coco.py | MonashAI/LIT | ec0f1f5aad2cb95b1cdaff33fa13927650214e3d | [
"Apache-2.0"
] | 22 | 2021-06-07T06:50:52.000Z | 2021-08-17T06:43:08.000Z | detection/configs/lit/retinanet_lit_ti_fpn_1x_coco.py | zip-group/LIT | f076db8ab1aa15026ec2b2c018836c9b7aca8f63 | [
"Apache-2.0"
] | 3 | 2022-01-05T03:38:45.000Z | 2022-03-10T08:30:04.000Z | detection/configs/lit/retinanet_lit_ti_fpn_1x_coco.py | zip-group/LIT | f076db8ab1aa15026ec2b2c018836c9b7aca8f63 | [
"Apache-2.0"
] | 5 | 2021-06-10T01:05:32.000Z | 2021-08-07T10:07:40.000Z | _base_ = [
    '../_base_/models/retinanet_fpn_lit_ti.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/default_runtime.py'
]
# Only the deltas from the _base_ configs above are declared here: load the
# LIT-Ti backbone checkpoint and rebuild the FPN neck to match it.
model = dict(
    pretrained='pretrained/lit_ti.pth',  # LIT-Ti backbone weights
    neck=dict(
        type='FPN',
        # Presumably the channel widths of the four backbone stages --
        # confirm against the backbone definition in the _base_ model config.
        in_channels=[64, 128, 320, 512],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5))
# optimizer
optimizer = dict(type='AdamW', lr=0.0001, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)  # gradient clipping disabled
# learning policy: 500-iteration linear warmup, then step decay at epochs 8 and 11
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
total_epochs = 12  # the standard "1x" schedule
| 24.961538 | 62 | 0.640986 | _base_ = [
    '../_base_/models/retinanet_fpn_lit_ti.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/default_runtime.py'
]
# Deltas from the _base_ configs: backbone checkpoint plus a matching FPN neck.
model = dict(
    pretrained='pretrained/lit_ti.pth',
    neck=dict(
        type='FPN',
        # Presumably matches the four LIT-Ti stage widths -- verify against
        # the backbone config referenced in _base_.
        in_channels=[64, 128, 320, 512],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5))
# optimizer settings
optimizer = dict(type='AdamW', lr=0.0001, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# step LR schedule with a short linear warmup
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
total_epochs = 12
| true | true |
1c2ee3d7b80f2d1f8891c87acc7d7425cc975d13 | 2,902 | py | Python | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/QueryDeviceEventDataRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/QueryDeviceEventDataRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/QueryDeviceEventDataRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class QueryDeviceEventDataRequest(RpcRequest):
    """POST RPC request for IoT ``QueryDeviceEventData`` (API version 2018-01-20).

    Generated-SDK style wrapper: each API query parameter is exposed through a
    get_/set_ accessor pair that reads/writes the request's query-parameter dict.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Iot', '2018-01-20', 'QueryDeviceEventData','iot')
        self.set_method('POST')
        # Endpoint tables exist only on newer aliyunsdkcore versions; the
        # hasattr guards keep this request compatible with older cores.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # StartTime: lower bound of the query window (units per API docs -- TODO confirm).
    def get_StartTime(self):
        return self.get_query_params().get('StartTime')

    def set_StartTime(self,StartTime):
        self.add_query_param('StartTime',StartTime)

    # IotId: device identifier.
    def get_IotId(self):
        return self.get_query_params().get('IotId')

    def set_IotId(self,IotId):
        self.add_query_param('IotId',IotId)

    # IotInstanceId: target IoT instance.
    def get_IotInstanceId(self):
        return self.get_query_params().get('IotInstanceId')

    def set_IotInstanceId(self,IotInstanceId):
        self.add_query_param('IotInstanceId',IotInstanceId)

    # PageSize: number of records per result page.
    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self,PageSize):
        self.add_query_param('PageSize',PageSize)

    # Identifier: identifier of the event definition being queried.
    def get_Identifier(self):
        return self.get_query_params().get('Identifier')

    def set_Identifier(self,Identifier):
        self.add_query_param('Identifier',Identifier)

    # EndTime: upper bound of the query window.
    def get_EndTime(self):
        return self.get_query_params().get('EndTime')

    def set_EndTime(self,EndTime):
        self.add_query_param('EndTime',EndTime)

    # ProductKey: product the device belongs to.
    def get_ProductKey(self):
        return self.get_query_params().get('ProductKey')

    def set_ProductKey(self,ProductKey):
        self.add_query_param('ProductKey',ProductKey)

    # Asc: sort-order flag (encoding per API docs -- TODO confirm).
    def get_Asc(self):
        return self.get_query_params().get('Asc')

    def set_Asc(self,Asc):
        self.add_query_param('Asc',Asc)

    # DeviceName: device name within the product.
    def get_DeviceName(self):
        return self.get_query_params().get('DeviceName')

    def set_DeviceName(self,DeviceName):
        self.add_query_param('DeviceName',DeviceName)

    # EventType: event category filter.
    def get_EventType(self):
        return self.get_query_params().get('EventType')

    def set_EventType(self,EventType):
self.add_query_param('EventType',EventType) | 31.543478 | 79 | 0.754652 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class QueryDeviceEventDataRequest(RpcRequest):
    """RPC wrapper for the IoT ``QueryDeviceEventData`` API (version 2018-01-20).

    Every query parameter is exposed through a get_/set_ accessor pair backed
    by the request's query-parameter dictionary.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Iot', '2018-01-20', 'QueryDeviceEventData', 'iot')
        self.set_method('POST')
        # Newer core SDKs pre-declare these attributes; populate them only
        # when present so older SDK versions keep working.
        for attr_name, factory in (
            ("endpoint_map", endpoint_data.getEndpointMap),
            ("endpoint_regional", endpoint_data.getEndpointRegional),
        ):
            if hasattr(self, attr_name):
                setattr(self, attr_name, factory())

    def get_StartTime(self):
        params = self.get_query_params()
        return params.get('StartTime')

    def set_StartTime(self, StartTime):
        self.add_query_param('StartTime', StartTime)

    def get_IotId(self):
        params = self.get_query_params()
        return params.get('IotId')

    def set_IotId(self, IotId):
        self.add_query_param('IotId', IotId)

    def get_IotInstanceId(self):
        params = self.get_query_params()
        return params.get('IotInstanceId')

    def set_IotInstanceId(self, IotInstanceId):
        self.add_query_param('IotInstanceId', IotInstanceId)

    def get_PageSize(self):
        params = self.get_query_params()
        return params.get('PageSize')

    def set_PageSize(self, PageSize):
        self.add_query_param('PageSize', PageSize)

    def get_Identifier(self):
        params = self.get_query_params()
        return params.get('Identifier')

    def set_Identifier(self, Identifier):
        self.add_query_param('Identifier', Identifier)

    def get_EndTime(self):
        params = self.get_query_params()
        return params.get('EndTime')

    def set_EndTime(self, EndTime):
        self.add_query_param('EndTime', EndTime)

    def get_ProductKey(self):
        params = self.get_query_params()
        return params.get('ProductKey')

    def set_ProductKey(self, ProductKey):
        self.add_query_param('ProductKey', ProductKey)

    def get_Asc(self):
        params = self.get_query_params()
        return params.get('Asc')

    def set_Asc(self, Asc):
        self.add_query_param('Asc', Asc)

    def get_DeviceName(self):
        params = self.get_query_params()
        return params.get('DeviceName')

    def set_DeviceName(self, DeviceName):
        self.add_query_param('DeviceName', DeviceName)

    def get_EventType(self):
        params = self.get_query_params()
        return params.get('EventType')

    def set_EventType(self, EventType):
        self.add_query_param('EventType', EventType)
1c2ee45118f224be9f3980dfcd157435795988ae | 11,863 | py | Python | pcdet/datasets/augmentor/data_augmentor.py | xiangruhuang/OpenPCDet | d82d9594a0629ffed0c457aedc304e0805e93221 | [
"Apache-2.0"
] | null | null | null | pcdet/datasets/augmentor/data_augmentor.py | xiangruhuang/OpenPCDet | d82d9594a0629ffed0c457aedc304e0805e93221 | [
"Apache-2.0"
] | null | null | null | pcdet/datasets/augmentor/data_augmentor.py | xiangruhuang/OpenPCDet | d82d9594a0629ffed0c457aedc304e0805e93221 | [
"Apache-2.0"
] | null | null | null | from functools import partial
import numpy as np
from ...utils import common_utils
from . import augmentor_utils, database_sampler, semantic_sampler, semantic_seg_sampler
class DataAugmentor(object):
    """Applies a configurable sequence of point-cloud / GT-box augmentations.

    Each entry of the augmentor config names a method on this class; that
    method is invoked once at construction time with only ``config`` and
    returns a curried callable (via ``functools.partial``). :meth:`forward`
    later applies the queued callables, in order, to each frame's
    ``data_dict``.

    ``data_dict`` layout used here:
        data_dict['object_wise']['gt_box_attr']     -- GT boxes (heading in col 6)
        data_dict['point_wise']['points']           -- point cloud
        data_dict['scene_wise']['top_lidar_origin'] -- used by world-level ops
    """

    def __init__(self, root_path, augmentor_configs, class_names, logger=None):
        """Build the augmentation queue from ``augmentor_configs``.

        Args:
            root_path: dataset root, forwarded to the sampler augmentors.
            augmentor_configs: either a plain list of configs, or an object
                with ``AUG_CONFIG_LIST`` and ``DISABLE_AUG_LIST`` attributes.
            class_names: detection class names, forwarded to samplers.
            logger: optional logger (dropped on pickling, see __getstate__).
        """
        self.root_path = root_path
        self.class_names = class_names
        self.logger = logger

        self.data_augmentor_queue = []
        aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
            else augmentor_configs.AUG_CONFIG_LIST

        for cur_cfg in aug_config_list:
            if not isinstance(augmentor_configs, list):
                # Skip augmentations explicitly disabled in the config object.
                if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
                    continue
            cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
            self.data_augmentor_queue.append(cur_augmentor)

    # ------------------------------------------------------------------
    # Shared accessors for the nested data_dict layout. Factoring these out
    # removes the dictionary plumbing that was copy-pasted in every
    # augmentation method below.
    # ------------------------------------------------------------------
    @staticmethod
    def _get_boxes_points(data_dict):
        """Return ``(gt_boxes, points)`` from the nested ``data_dict``."""
        return (data_dict['object_wise']['gt_box_attr'],
                data_dict['point_wise']['points'])

    @staticmethod
    def _set_boxes_points(data_dict, gt_boxes, points):
        """Write ``gt_boxes`` / ``points`` back into ``data_dict``."""
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points

    def semantic_sampling(self, config=None):
        """Augmentor factory: semantic-aware sampler."""
        seg_sampler = semantic_sampler.SemanticSampler(
            root_path=self.root_path,
            sampler_cfg=config,
            class_names=self.class_names,
            logger=self.logger
        )
        return seg_sampler

    def gt_sampling(self, config=None):
        """Augmentor factory: ground-truth database sampler."""
        db_sampler = database_sampler.DataBaseSampler(
            root_path=self.root_path,
            sampler_cfg=config,
            class_names=self.class_names,
            logger=self.logger
        )
        return db_sampler

    def semantic_seg_sampling(self, config=None):
        """Augmentor factory: semantic-segmentation database sampler."""
        db_sampler = semantic_seg_sampler.SemanticSegDataBaseSampler(
            root_path=self.root_path,
            sampler_cfg=config,
            logger=self.logger
        )
        return db_sampler

    def __getstate__(self):
        # Logger handles are not picklable; drop them when serializing
        # (e.g. when dataloader workers fork/spawn).
        d = dict(self.__dict__)
        del d['logger']
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)

    def random_world_flip(self, data_dict=None, config=None):
        """Randomly mirror the whole scene along the configured axes."""
        if data_dict is None:
            return partial(self.random_world_flip, config=config)
        gt_boxes, points = self._get_boxes_points(data_dict)
        origin = data_dict['scene_wise']['top_lidar_origin']
        for cur_axis in config['ALONG_AXIS_LIST']:
            assert cur_axis in ['x', 'y']
            gt_boxes, points, origin = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
                gt_boxes, points, origin=origin
            )
        self._set_boxes_points(data_dict, gt_boxes, points)
        data_dict['scene_wise']['top_lidar_origin'] = origin
        return data_dict

    def random_world_rotation(self, data_dict=None, config=None):
        """Randomly rotate the whole scene within WORLD_ROT_ANGLE."""
        if data_dict is None:
            return partial(self.random_world_rotation, config=config)
        rot_range = config['WORLD_ROT_ANGLE']
        if not isinstance(rot_range, list):
            # A scalar means a symmetric range [-angle, angle].
            rot_range = [-rot_range, rot_range]
        gt_boxes, points = self._get_boxes_points(data_dict)
        origin = data_dict['scene_wise']['top_lidar_origin']
        gt_boxes, points, origin = augmentor_utils.global_rotation(
            gt_boxes, points, rot_range=rot_range, origin=origin
        )
        self._set_boxes_points(data_dict, gt_boxes, points)
        data_dict['scene_wise']['top_lidar_origin'] = origin
        return data_dict

    def random_world_scaling(self, data_dict=None, config=None):
        """Randomly scale the whole scene within WORLD_SCALE_RANGE."""
        if data_dict is None:
            return partial(self.random_world_scaling, config=config)
        gt_boxes, points = self._get_boxes_points(data_dict)
        origin = data_dict['scene_wise']['top_lidar_origin']
        gt_boxes, points, origin = augmentor_utils.global_scaling(
            gt_boxes, points, config['WORLD_SCALE_RANGE'], origin=origin
        )
        self._set_boxes_points(data_dict, gt_boxes, points)
        data_dict['scene_wise']['top_lidar_origin'] = origin
        return data_dict

    def random_world_translation(self, data_dict=None, config=None):
        """Randomly translate the whole scene along the configured axes."""
        if data_dict is None:
            return partial(self.random_world_translation, config=config)
        noise_translate_std = config['NOISE_TRANSLATE_STD']
        if noise_translate_std == 0:
            return data_dict
        gt_boxes, points = self._get_boxes_points(data_dict)
        for cur_axis in config['ALONG_AXIS_LIST']:
            assert cur_axis in ['x', 'y', 'z']
            gt_boxes, points = getattr(augmentor_utils, 'random_translation_along_%s' % cur_axis)(
                gt_boxes, points, noise_translate_std,
            )
        self._set_boxes_points(data_dict, gt_boxes, points)
        return data_dict

    def random_local_translation(self, data_dict=None, config=None):
        """Per-object random translation.

        Please check the correctness of it before using.
        """
        if data_dict is None:
            return partial(self.random_local_translation, config=config)
        offset_range = config['LOCAL_TRANSLATION_RANGE']
        gt_boxes, points = self._get_boxes_points(data_dict)
        for cur_axis in config['ALONG_AXIS_LIST']:
            assert cur_axis in ['x', 'y', 'z']
            gt_boxes, points = getattr(augmentor_utils, 'random_local_translation_along_%s' % cur_axis)(
                gt_boxes, points, offset_range,
            )
        self._set_boxes_points(data_dict, gt_boxes, points)
        return data_dict

    def random_local_rotation(self, data_dict=None, config=None):
        """Per-object random rotation.

        Please check the correctness of it before using.
        """
        if data_dict is None:
            return partial(self.random_local_rotation, config=config)
        rot_range = config['LOCAL_ROT_ANGLE']
        if not isinstance(rot_range, list):
            rot_range = [-rot_range, rot_range]
        gt_boxes, points = self._get_boxes_points(data_dict)
        gt_boxes, points = augmentor_utils.local_rotation(
            gt_boxes, points, rot_range=rot_range
        )
        self._set_boxes_points(data_dict, gt_boxes, points)
        return data_dict

    def random_local_scaling(self, data_dict=None, config=None):
        """Per-object random scaling.

        Please check the correctness of it before using.
        """
        if data_dict is None:
            return partial(self.random_local_scaling, config=config)
        gt_boxes, points = self._get_boxes_points(data_dict)
        gt_boxes, points = augmentor_utils.local_scaling(
            gt_boxes, points, config['LOCAL_SCALE_RANGE']
        )
        self._set_boxes_points(data_dict, gt_boxes, points)
        return data_dict

    def random_world_frustum_dropout(self, data_dict=None, config=None):
        """Scene-level frustum point dropout.

        Please check the correctness of it before using.
        """
        if data_dict is None:
            return partial(self.random_world_frustum_dropout, config=config)
        intensity_range = config['INTENSITY_RANGE']
        gt_boxes, points = self._get_boxes_points(data_dict)
        for direction in config['DIRECTION']:
            assert direction in ['top', 'bottom', 'left', 'right']
            gt_boxes, points = getattr(augmentor_utils, 'global_frustum_dropout_%s' % direction)(
                gt_boxes, points, intensity_range,
            )
        self._set_boxes_points(data_dict, gt_boxes, points)
        return data_dict

    def random_local_frustum_dropout(self, data_dict=None, config=None):
        """Per-object frustum point dropout.

        Please check the correctness of it before using.
        """
        if data_dict is None:
            return partial(self.random_local_frustum_dropout, config=config)
        intensity_range = config['INTENSITY_RANGE']
        gt_boxes, points = self._get_boxes_points(data_dict)
        for direction in config['DIRECTION']:
            assert direction in ['top', 'bottom', 'left', 'right']
            gt_boxes, points = getattr(augmentor_utils, 'local_frustum_dropout_%s' % direction)(
                gt_boxes, points, intensity_range,
            )
        self._set_boxes_points(data_dict, gt_boxes, points)
        return data_dict

    def random_local_pyramid_aug(self, data_dict=None, config=None):
        """Pyramid-based dropout / sparsify / swap augmentation.

        Refer to the paper:
        SE-SSD: Self-Ensembling Single-Stage Object Detector From Point Cloud
        """
        if data_dict is None:
            return partial(self.random_local_pyramid_aug, config=config)
        gt_boxes, points = self._get_boxes_points(data_dict)
        gt_boxes, points, pyramids = augmentor_utils.local_pyramid_dropout(gt_boxes, points, config['DROP_PROB'])
        gt_boxes, points, pyramids = augmentor_utils.local_pyramid_sparsify(gt_boxes, points,
                                                                            config['SPARSIFY_PROB'],
                                                                            config['SPARSIFY_MAX_NUM'],
                                                                            pyramids)
        gt_boxes, points = augmentor_utils.local_pyramid_swap(gt_boxes, points,
                                                              config['SWAP_PROB'],
                                                              config['SWAP_MAX_NUM'],
                                                              pyramids)
        self._set_boxes_points(data_dict, gt_boxes, points)
        return data_dict

    def forward(self, data_dict):
        """Apply every queued augmentor in order, then canonicalize headings.

        Args:
            data_dict:
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]
                gt_names: optional, (N), string
                ...

        Returns:
            The augmented data_dict; heading (box column 6) is wrapped into a
            single 2*pi period by common_utils.limit_period.
        """
        for cur_augmentor in self.data_augmentor_queue:
            data_dict = cur_augmentor(data_dict=data_dict)

        data_dict['object_wise']['gt_box_attr'][:, 6] = common_utils.limit_period(
            data_dict['object_wise']['gt_box_attr'][:, 6], offset=0.5, period=2 * np.pi
        )
        return data_dict
| 41.479021 | 113 | 0.601956 | from functools import partial
import numpy as np
from ...utils import common_utils
from . import augmentor_utils, database_sampler, semantic_sampler, semantic_seg_sampler
class DataAugmentor(object):
    """Applies a configured sequence of point-cloud / GT-box augmentations.

    Every config entry names a method of this class; calling that method with
    only ``config`` returns a curried callable (via ``functools.partial``)
    which :meth:`forward` later applies to each frame's ``data_dict``.

    ``data_dict`` layout used below:
        data_dict['object_wise']['gt_box_attr']     -- GT boxes (heading in col 6)
        data_dict['point_wise']['points']           -- point cloud
        data_dict['scene_wise']['top_lidar_origin'] -- used by world-level ops
    """

    def __init__(self, root_path, augmentor_configs, class_names, logger=None):
        """Build the augmentor queue.

        ``augmentor_configs`` is either a plain list of configs or an object
        exposing ``AUG_CONFIG_LIST`` / ``DISABLE_AUG_LIST``.
        """
        self.root_path = root_path
        self.class_names = class_names
        self.logger = logger
        self.data_augmentor_queue = []
        aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
            else augmentor_configs.AUG_CONFIG_LIST
        for cur_cfg in aug_config_list:
            if not isinstance(augmentor_configs, list):
                # Skip augmentations listed as disabled in the config object.
                if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
                    continue
            cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
            self.data_augmentor_queue.append(cur_augmentor)

    def semantic_sampling(self, config=None):
        """Augmentor factory: semantic-aware sampler."""
        seg_sampler = semantic_sampler.SemanticSampler(
            root_path=self.root_path,
            sampler_cfg=config,
            class_names=self.class_names,
            logger=self.logger
        )
        return seg_sampler

    def gt_sampling(self, config=None):
        """Augmentor factory: ground-truth database sampler."""
        db_sampler = database_sampler.DataBaseSampler(
            root_path=self.root_path,
            sampler_cfg=config,
            class_names=self.class_names,
            logger=self.logger
        )
        return db_sampler

    def semantic_seg_sampling(self, config=None):
        """Augmentor factory: semantic-segmentation database sampler."""
        db_sampler = semantic_seg_sampler.SemanticSegDataBaseSampler(
            root_path=self.root_path,
            sampler_cfg=config,
            logger=self.logger
        )
        return db_sampler

    def __getstate__(self):
        # Logger handles are not picklable; drop them when serializing.
        d = dict(self.__dict__)
        del d['logger']
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)

    def random_world_flip(self, data_dict=None, config=None):
        """Randomly mirror the whole scene along the configured axes.

        Called with data_dict=None at build time to return a curried callable.
        """
        if data_dict is None:
            return partial(self.random_world_flip, config=config)
        gt_boxes = data_dict['object_wise']['gt_box_attr']
        points = data_dict['point_wise']['points']
        origin = data_dict['scene_wise']['top_lidar_origin']
        for cur_axis in config['ALONG_AXIS_LIST']:
            assert cur_axis in ['x', 'y']
            gt_boxes, points, origin = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
                gt_boxes, points, origin=origin
            )
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points
        data_dict['scene_wise']['top_lidar_origin'] = origin
        return data_dict

    def random_world_rotation(self, data_dict=None, config=None):
        """Randomly rotate the whole scene within WORLD_ROT_ANGLE."""
        if data_dict is None:
            return partial(self.random_world_rotation, config=config)
        rot_range = config['WORLD_ROT_ANGLE']
        if not isinstance(rot_range, list):
            # A scalar means a symmetric range [-angle, angle].
            rot_range = [-rot_range, rot_range]
        gt_boxes = data_dict['object_wise']['gt_box_attr']
        points = data_dict['point_wise']['points']
        origin = data_dict['scene_wise']['top_lidar_origin']
        gt_boxes, points, origin = augmentor_utils.global_rotation(
            gt_boxes, points, rot_range=rot_range, origin=origin
        )
        data_dict['scene_wise']['top_lidar_origin'] = origin
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points
        return data_dict

    def random_world_scaling(self, data_dict=None, config=None):
        """Randomly scale the whole scene within WORLD_SCALE_RANGE."""
        if data_dict is None:
            return partial(self.random_world_scaling, config=config)
        gt_boxes = data_dict['object_wise']['gt_box_attr']
        points = data_dict['point_wise']['points']
        origin = data_dict['scene_wise']['top_lidar_origin']
        gt_boxes, points, origin = augmentor_utils.global_scaling(
            gt_boxes, points, config['WORLD_SCALE_RANGE'], origin=origin
        )
        data_dict['scene_wise']['top_lidar_origin'] = origin
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points
        return data_dict

    def random_world_translation(self, data_dict=None, config=None):
        """Randomly translate the whole scene along the configured axes."""
        if data_dict is None:
            return partial(self.random_world_translation, config=config)
        noise_translate_std = config['NOISE_TRANSLATE_STD']
        if noise_translate_std == 0:
            # Zero noise is a no-op.
            return data_dict
        gt_boxes = data_dict['object_wise']['gt_box_attr']
        points = data_dict['point_wise']['points']
        for cur_axis in config['ALONG_AXIS_LIST']:
            assert cur_axis in ['x', 'y', 'z']
            gt_boxes, points = getattr(augmentor_utils, 'random_translation_along_%s' % cur_axis)(
                gt_boxes, points, noise_translate_std,
            )
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points
        return data_dict

    def random_local_translation(self, data_dict=None, config=None):
        """Per-object random translation (upstream marks this as unverified)."""
        if data_dict is None:
            return partial(self.random_local_translation, config=config)
        offset_range = config['LOCAL_TRANSLATION_RANGE']
        gt_boxes = data_dict['object_wise']['gt_box_attr']
        points = data_dict['point_wise']['points']
        for cur_axis in config['ALONG_AXIS_LIST']:
            assert cur_axis in ['x', 'y', 'z']
            gt_boxes, points = getattr(augmentor_utils, 'random_local_translation_along_%s' % cur_axis)(
                gt_boxes, points, offset_range,
            )
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points
        return data_dict

    def random_local_rotation(self, data_dict=None, config=None):
        """Per-object random rotation (upstream marks this as unverified)."""
        if data_dict is None:
            return partial(self.random_local_rotation, config=config)
        rot_range = config['LOCAL_ROT_ANGLE']
        if not isinstance(rot_range, list):
            rot_range = [-rot_range, rot_range]
        gt_boxes = data_dict['object_wise']['gt_box_attr']
        points = data_dict['point_wise']['points']
        gt_boxes, points = augmentor_utils.local_rotation(
            gt_boxes, points, rot_range=rot_range
        )
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points
        return data_dict

    def random_local_scaling(self, data_dict=None, config=None):
        """Per-object random scaling (upstream marks this as unverified)."""
        if data_dict is None:
            return partial(self.random_local_scaling, config=config)
        gt_boxes = data_dict['object_wise']['gt_box_attr']
        points = data_dict['point_wise']['points']
        gt_boxes, points = augmentor_utils.local_scaling(
            gt_boxes, points, config['LOCAL_SCALE_RANGE']
        )
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points
        return data_dict

    def random_world_frustum_dropout(self, data_dict=None, config=None):
        """Scene-level frustum dropout (upstream marks this as unverified)."""
        if data_dict is None:
            return partial(self.random_world_frustum_dropout, config=config)
        intensity_range = config['INTENSITY_RANGE']
        gt_boxes = data_dict['object_wise']['gt_box_attr']
        points = data_dict['point_wise']['points']
        for direction in config['DIRECTION']:
            assert direction in ['top', 'bottom', 'left', 'right']
            gt_boxes, points = getattr(augmentor_utils, 'global_frustum_dropout_%s' % direction)(
                gt_boxes, points, intensity_range,
            )
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points
        return data_dict

    def random_local_frustum_dropout(self, data_dict=None, config=None):
        """Per-object frustum dropout (upstream marks this as unverified)."""
        if data_dict is None:
            return partial(self.random_local_frustum_dropout, config=config)
        intensity_range = config['INTENSITY_RANGE']
        gt_boxes = data_dict['object_wise']['gt_box_attr']
        points = data_dict['point_wise']['points']
        for direction in config['DIRECTION']:
            assert direction in ['top', 'bottom', 'left', 'right']
            gt_boxes, points = getattr(augmentor_utils, 'local_frustum_dropout_%s' % direction)(
                gt_boxes, points, intensity_range,
            )
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points
        return data_dict

    def random_local_pyramid_aug(self, data_dict=None, config=None):
        """Pyramid dropout / sparsify / swap augmentation.

        Refer to the SE-SSD paper (Self-Ensembling Single-Stage Object
        Detector From Point Cloud).
        """
        if data_dict is None:
            return partial(self.random_local_pyramid_aug, config=config)
        gt_boxes = data_dict['object_wise']['gt_box_attr']
        points = data_dict['point_wise']['points']
        gt_boxes, points, pyramids = augmentor_utils.local_pyramid_dropout(gt_boxes, points, config['DROP_PROB'])
        gt_boxes, points, pyramids = augmentor_utils.local_pyramid_sparsify(gt_boxes, points,
                                                                            config['SPARSIFY_PROB'],
                                                                            config['SPARSIFY_MAX_NUM'],
                                                                            pyramids)
        gt_boxes, points = augmentor_utils.local_pyramid_swap(gt_boxes, points,
                                                              config['SWAP_PROB'],
                                                              config['SWAP_MAX_NUM'],
                                                              pyramids)
        data_dict['object_wise']['gt_box_attr'] = gt_boxes
        data_dict['point_wise']['points'] = points
        return data_dict

    def forward(self, data_dict):
        """Apply every queued augmentor in order, then canonicalize headings.

        Heading (box column 6) is wrapped into a single 2*pi period via
        common_utils.limit_period.
        """
        for cur_augmentor in self.data_augmentor_queue:
            data_dict = cur_augmentor(data_dict=data_dict)
        data_dict['object_wise']['gt_box_attr'][:, 6] = common_utils.limit_period(
            data_dict['object_wise']['gt_box_attr'][:, 6], offset=0.5, period=2 * np.pi
        )
        return data_dict
| true | true |
1c2ee4726c1c1699a6bb0c73e2dd9b9878fac668 | 7,562 | py | Python | lib/python3.8/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_udld.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_udld.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_udld.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: nxos_udld
extends_documentation_fragment:
- cisco.nxos.nxos
short_description: Manages UDLD global configuration params.
description:
- Manages UDLD global configuration params.
version_added: 1.0.0
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Module will fail if the udld feature has not been previously enabled.
options:
aggressive:
description:
- Toggles aggressive mode.
choices:
- enabled
- disabled
type: str
msg_time:
description:
- Message time in seconds for UDLD packets or keyword 'default'.
type: str
reset:
description:
- Ability to reset all ports shut down by UDLD. 'state' parameter cannot be 'absent'
when this is present.
type: bool
default: no
state:
description:
- Manage the state of the resource. When set to 'absent', aggressive and msg_time
are set to their default values.
default: present
choices:
- present
- absent
type: str
"""
EXAMPLES = """
# ensure udld aggressive mode is globally disabled and set the global message interval to 20
- cisco.nxos.nxos_udld:
aggressive: disabled
msg_time: 20
host: '{{ inventory_hostname }}'
username: '{{ un }}'
password: '{{ pwd }}'
# Ensure agg mode is globally enabled and msg time is 15
- cisco.nxos.nxos_udld:
aggressive: enabled
msg_time: 15
host: '{{ inventory_hostname }}'
username: '{{ un }}'
password: '{{ pwd }}'
"""
RETURN = """
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
existing:
description:
- k/v pairs of existing udld configuration
returned: always
type: dict
sample: {"aggressive": "disabled", "msg_time": "15"}
end_state:
description: k/v pairs of udld configuration after module execution
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["udld message-time 40", "udld aggressive"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
"""
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import (
load_config,
run_commands,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import (
nxos_argument_spec,
)
from ansible.module_utils.basic import AnsibleModule
PARAM_TO_DEFAULT_KEYMAP = {"msg_time": "15"}
def flatten_list(command_lists):
    """Flatten a mixed sequence of commands and command lists by one level."""
    flat = []
    for item in command_lists:
        flat.extend(item if isinstance(item, list) else [item])
    return flat
def apply_key_map(key_map, table):
    """Rename the keys of ``table`` according to ``key_map``.

    Only entries whose key maps to a truthy new name are kept. Truthy values
    are stringified (device output is normalized to strings); falsy values
    (0, None, "", ...) are passed through unchanged.

    Args:
        key_map: dict mapping source key -> destination key.
        table: dict whose entries should be renamed.

    Returns:
        A new dict with the renamed keys.
    """
    new_dict = {}
    for key, value in table.items():
        new_key = key_map.get(key)
        if new_key:
            # Fix: the original redundantly re-fetched table.get(key) here;
            # `value` already holds that entry.
            new_dict[new_key] = str(value) if value else value
    return new_dict
def get_commands_config_udld_global(delta, reset, existing):
    """Build NX-OS commands for the changed UDLD global params.

    Args:
        delta: dict of parameter -> desired value (only the changed ones).
        reset: when truthy, append 'udld reset' at the end.
        existing: current device state, used to avoid a redundant
            'no udld message-time' when the default is already active.

    Returns:
        List of CLI command strings.
    """
    commands = []
    for param, value in delta.items():
        if param == "aggressive":
            if value == "enabled":
                commands.append("udld aggressive")
            else:
                commands.append("no udld aggressive")
        elif param == "msg_time":
            if value == "default":
                # Only revert if the device is not already at the default.
                if existing.get("msg_time") != PARAM_TO_DEFAULT_KEYMAP.get("msg_time"):
                    commands.append("no udld message-time")
            else:
                commands.append("udld message-time " + value)
    if reset:
        commands.append("udld reset")
    return commands
def get_commands_remove_udld_global(existing):
    """Build the commands that restore UDLD global defaults.

    Reverts aggressive mode (when enabled) and the message time (when it
    differs from the documented default).
    """
    commands = []
    if existing.get("aggressive") == "enabled":
        commands.append("no udld aggressive")
    if existing.get("msg_time") != PARAM_TO_DEFAULT_KEYMAP.get("msg_time"):
        commands.append("no udld message-time")
    return commands
def get_udld_global(module):
    """Query the device and normalize the `show udld global` JSON output.

    Returns:
        dict with 'msg_time' (string seconds) and 'aggressive'
        ('enabled'/'disabled').
    """
    udld_table = run_commands(module, ["show udld global | json"])[0]
    mode = str(udld_table.get("udld-global-mode", None))
    aggressive = "enabled" if mode == "enabled-aggressive" else "disabled"
    interval = str(udld_table.get("message-interval", None))
    return dict(msg_time=interval, aggressive=aggressive)
def main():
    """Ansible entry point: reconcile the device's UDLD global configuration."""
    # Module argument schema; merged with the shared nxos argument spec.
    argument_spec = dict(
        aggressive=dict(required=False, choices=["enabled", "disabled"]),
        msg_time=dict(required=False, type="str"),
        reset=dict(required=False, type="bool"),
        state=dict(choices=["absent", "present"], default="present"),
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(
        argument_spec=argument_spec, supports_check_mode=True
    )

    warnings = list()

    aggressive = module.params["aggressive"]
    msg_time = module.params["msg_time"]
    reset = module.params["reset"]
    state = module.params["state"]

    # 'udld reset' is a config action, so it is incompatible with absent.
    if reset and state == "absent":
        module.fail_json(msg="state must be present when using reset flag.")

    # Only the parameters the user actually supplied take part in the diff.
    args = dict(aggressive=aggressive, msg_time=msg_time, reset=reset)
    proposed = dict((k, v) for k, v in args.items() if v is not None)

    existing = get_udld_global(module)
    end_state = existing

    # Key/value pairs that differ between desired and current state.
    delta = set(proposed.items()).difference(existing.items())
    changed = False

    commands = []
    if state == "present":
        if delta:
            command = get_commands_config_udld_global(
                dict(delta), reset, existing
            )
            commands.append(command)

    elif state == "absent":
        # Revert aggressive mode and message time to their defaults.
        command = get_commands_remove_udld_global(existing)
        if command:
            commands.append(command)

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report what would change without touching the device.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            end_state = get_udld_global(module)
            # Drop the leading 'configure' terminal command from the report.
            if "configure" in cmds:
                cmds.pop(0)

    results = {}
    results["proposed"] = proposed
    results["existing"] = existing
    results["end_state"] = end_state
    results["updates"] = cmds
    results["changed"] = changed
    results["warnings"] = warnings
    module.exit_json(**results)
if __name__ == "__main__":
main()
| 28.216418 | 88 | 0.648109 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: nxos_udld
extends_documentation_fragment:
- cisco.nxos.nxos
short_description: Manages UDLD global configuration params.
description:
- Manages UDLD global configuration params.
version_added: 1.0.0
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Module will fail if the udld feature has not been previously enabled.
options:
aggressive:
description:
- Toggles aggressive mode.
choices:
- enabled
- disabled
type: str
msg_time:
description:
- Message time in seconds for UDLD packets or keyword 'default'.
type: str
reset:
description:
- Ability to reset all ports shut down by UDLD. 'state' parameter cannot be 'absent'
when this is present.
type: bool
default: no
state:
description:
- Manage the state of the resource. When set to 'absent', aggressive and msg_time
are set to their default values.
default: present
choices:
- present
- absent
type: str
"""
EXAMPLES = """
# ensure udld aggressive mode is globally disabled and se global message interval is 20
- cisco.nxos.nxos_udld:
aggressive: disabled
msg_time: 20
host: '{{ inventory_hostname }}'
username: '{{ un }}'
password: '{{ pwd }}'
# Ensure agg mode is globally enabled and msg time is 15
- cisco.nxos.nxos_udld:
aggressive: enabled
msg_time: 15
host: '{{ inventory_hostname }}'
username: '{{ un }}'
password: '{{ pwd }}'
"""
RETURN = """
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
existing:
description:
- k/v pairs of existing udld configuration
returned: always
type: dict
sample: {"aggressive": "disabled", "msg_time": "15"}
end_state:
description: k/v pairs of udld configuration after module execution
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["udld message-time 40", "udld aggressive"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
"""
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import (
load_config,
run_commands,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import (
nxos_argument_spec,
)
from ansible.module_utils.basic import AnsibleModule
PARAM_TO_DEFAULT_KEYMAP = {"msg_time": "15"}
def flatten_list(command_lists):
    """Flatten one level of nesting: inner lists in *command_lists* are expanded in place."""
    flattened = []
    for item in command_lists:
        if isinstance(item, list):
            flattened += item
        else:
            flattened += [item]
    return flattened
def apply_key_map(key_map, table):
    """Rename keys of *table* using *key_map*.

    Keys with no (truthy) mapping are dropped; truthy values are stringified,
    falsy values are kept unchanged.
    """
    renamed = {}
    for old_key, value in table.items():
        mapped = key_map.get(old_key)
        if not mapped:
            continue
        renamed[mapped] = str(value) if value else value
    return renamed
def get_commands_config_udld_global(delta, reset, existing):
    """Build the CLI commands that realize *delta* (plus an optional reset).

    'msg_time' == 'default' only emits a 'no' command when the device is not
    already at the default interval.
    """
    commands = []
    for param, value in delta.items():
        if param == "aggressive":
            prefix = "" if value == "enabled" else "no "
            commands.append(prefix + "udld aggressive")
        elif param == "msg_time":
            if value != "default":
                commands.append("udld message-time " + value)
            elif existing.get("msg_time") != PARAM_TO_DEFAULT_KEYMAP.get("msg_time"):
                commands.append("no udld message-time")
    if reset:
        commands.append("udld reset")
    return commands
def get_commands_remove_udld_global(existing):
    """Commands that return the global UDLD configuration to its defaults."""
    commands = []
    if existing.get("aggressive") == "enabled":
        commands.append("no udld aggressive")
    if existing.get("msg_time") != PARAM_TO_DEFAULT_KEYMAP.get("msg_time"):
        commands.append("no udld message-time")
    return commands
def get_udld_global(module):
    """Read the device's current global UDLD state.

    Returns a dict with 'msg_time' and 'aggressive' keys.
    """
    output = run_commands(module, ["show udld global | json"])[0]
    mode = str(output.get("udld-global-mode", None))
    # Only the aggressive mode maps to 'enabled'; every other mode reports 'disabled'.
    aggressive = "enabled" if mode == "enabled-aggressive" else "disabled"
    return dict(
        msg_time=str(output.get("message-interval", None)),
        aggressive=aggressive,
    )
def main():
    """Entry point for the nxos_udld Ansible module.

    Computes the difference between the desired (proposed) and current
    (existing) global UDLD configuration, pushes the resulting CLI
    commands unless running in check mode, and reports the outcome.
    """
    argument_spec = dict(
        aggressive=dict(required=False, choices=["enabled", "disabled"]),
        msg_time=dict(required=False, type="str"),
        reset=dict(required=False, type="bool"),
        state=dict(choices=["absent", "present"], default="present"),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(
        argument_spec=argument_spec, supports_check_mode=True
    )
    warnings = list()
    aggressive = module.params["aggressive"]
    msg_time = module.params["msg_time"]
    reset = module.params["reset"]
    state = module.params["state"]
    # 'udld reset' re-enables ports, which only makes sense while UDLD stays configured.
    if reset and state == "absent":
        module.fail_json(msg="state must be present when using reset flag.")
    args = dict(aggressive=aggressive, msg_time=msg_time, reset=reset)
    # Drop parameters the user did not supply.
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    existing = get_udld_global(module)
    end_state = existing
    # Requested key/value pairs that differ from the current device state.
    delta = set(proposed.items()).difference(existing.items())
    changed = False
    commands = []
    if state == "present":
        if delta:
            command = get_commands_config_udld_global(
                dict(delta), reset, existing
            )
            commands.append(command)
    elif state == "absent":
        command = get_commands_remove_udld_global(existing)
        if command:
            commands.append(command)
    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report what would change without touching the device.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            # Re-read so end_state reflects the device after the change.
            end_state = get_udld_global(module)
            if "configure" in cmds:
                cmds.pop(0)
    results = {}
    results["proposed"] = proposed
    results["existing"] = existing
    results["end_state"] = end_state
    results["updates"] = cmds
    results["changed"] = changed
    results["warnings"] = warnings
    module.exit_json(**results)
if __name__ == "__main__":
main()
| true | true |
1c2ee5d85db0b66a1aff18c7ef35bafdbba2899c | 5,309 | py | Python | transformer.py | maremun/mix-attend-break-sticks | db8221447fb993194641ba781e85005180f55421 | [
"MIT"
] | null | null | null | transformer.py | maremun/mix-attend-break-sticks | db8221447fb993194641ba781e85005180f55421 | [
"MIT"
] | null | null | null | transformer.py | maremun/mix-attend-break-sticks | db8221447fb993194641ba781e85005180f55421 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from modules import Linear, PosEncoding
from layers import DecoderLayer
import const
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def proj_prob_simplex(inputs):
    """Project *inputs* onto the probability simplex (non-negative entries summing to 1).

    Algorithm from https://arxiv.org/pdf/1101.6081.pdf
    """
    flat, _ = torch.sort(inputs.view(-1), descending=True)
    n = len(flat)
    for i in range(n - 1, -1, -1):
        t = (flat[:i + 1].sum() - 1) / (i + 1)
        if flat[i] > t:
            break
    return torch.clamp(inputs - t, min=0.0)
def get_attn_pad_mask(seq_q, seq_k):
    """Boolean mask of shape (b_size, len_q, len_k), True where the key token is padding.

    The padding id used by this model is -1 (const.PAD is not used here).
    """
    assert seq_q.dim() == 2 and seq_k.dim() == 2
    len_q = seq_q.size(1)
    b_size, len_k = seq_k.size()
    key_is_pad = seq_k.eq(-1).unsqueeze(1)          # b_size x 1 x len_k
    return key_is_pad.expand(b_size, len_q, len_k)  # b_size x len_q x len_k
def get_attn_subsequent_mask(seq):
    """Strictly upper-triangular byte mask (len x len) hiding future positions (causal attention)."""
    assert seq.dim() == 2
    size = seq.size(1)
    # `device` is the module-level torch device chosen at import time.
    ones = torch.ones([size, size], device=device)
    return torch.triu(ones, diagonal=1).byte()
class Decoder(nn.Module):
    """Stack of transformer decoder layers with learned token embeddings
    and positional encodings; used standalone as a language model here."""
    def __init__(self, n_layers, d_k, d_v, d_model, d_ff, n_heads,
                 max_seq_len, tgt_vocab_size, dropout=0.1, weighted=False):
        super(Decoder, self).__init__()
        self.d_model = d_model
        self.tgt_emb = nn.Embedding(tgt_vocab_size, d_model) #, padding_idx=const.PAD)
        self.pos_emb = PosEncoding(max_seq_len * 10, d_model) # TODO: *10 fix
        self.dropout_emb = nn.Dropout(dropout)
        # NOTE(review): WeightedDecoderLayer is not imported in this module —
        # constructing with weighted=True would raise NameError; confirm intent.
        self.layer_type = DecoderLayer if not weighted else WeightedDecoderLayer
        self.layers = nn.ModuleList(
            [self.layer_type(d_k, d_v, d_model, d_ff, n_heads, dropout) for _ in range(n_layers)])
    def forward(self, dec_inputs, dec_inputs_len, enc_inputs, enc_outputs, return_attn=False):
        """Run the decoder stack.

        Returns (dec_outputs, dec_self_attns, dec_enc_attns); the attention
        lists stay empty unless return_attn is True.
        """
        dec_outputs = self.tgt_emb(dec_inputs)
        dec_outputs += self.pos_emb(dec_inputs_len)
        dec_outputs = self.dropout_emb(dec_outputs)
        # Self-attention mask combines padding positions with the causal mask.
        dec_self_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs)
        dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_inputs)
        dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0)
        if enc_inputs is not None:
            dec_enc_attn_pad_mask = get_attn_pad_mask(dec_inputs, enc_inputs)
        else:
            # Pure language-model use: no encoder side, so no cross-attention mask.
            dec_enc_attn_pad_mask = None
        dec_self_attns, dec_enc_attns = [], []
        for layer in self.layers:
            dec_outputs, dec_self_attn, dec_enc_attn = layer(dec_outputs, enc_outputs,
                                                             self_attn_mask=dec_self_attn_mask,
                                                             enc_attn_mask=dec_enc_attn_pad_mask)
            if return_attn:
                dec_self_attns.append(dec_self_attn)
                dec_enc_attns.append(dec_enc_attn)
        return dec_outputs, dec_self_attns, dec_enc_attns
class LMTransformer(nn.Module):
    """Decoder-only transformer language model with a vocabulary projection head.

    Optionally ties the projection weights to the target embedding, and
    supports a 'weighted' layer variant whose mixture weights are kept on
    the probability simplex via proj_grad().
    """
    def __init__(self, n_layers, d_k, d_v, d_model, d_ff, n_heads,
                 max_tgt_seq_len, tgt_vocab_size, dropout,
                 weighted_model, share_proj_weight):
        super(LMTransformer, self).__init__()
        self.decoder = Decoder(n_layers, d_k, d_v, d_model, d_ff, n_heads,
                               max_tgt_seq_len, tgt_vocab_size, dropout, weighted_model)
        self.tgt_proj = Linear(d_model, tgt_vocab_size, bias=False)
        self.weighted_model = weighted_model
        if share_proj_weight:
            print('Sharing target embedding and projection..')
            self.tgt_proj.weight = self.decoder.tgt_emb.weight
    def trainable_params(self):
        """Return optimizer parameter groups: 'base' weights and the mixture
        weights ('w_kp'/'w_a') of the weighted model, separated so the latter
        can be projected after each step."""
        # Avoid updating the position encoding
        params = filter(lambda p: p[1].requires_grad, self.named_parameters())
        # Add a separate parameter group for the weighted_model
        param_groups = []
        base_params = {'params': [], 'type': 'base'}
        weighted_params = {'params': [], 'type': 'weighted'}
        for name, param in params:
            if 'w_kp' in name or 'w_a' in name:
                weighted_params['params'].append(param)
            else:
                base_params['params'].append(param)
        param_groups.append(base_params)
        param_groups.append(weighted_params)
        return param_groups
    def decode(self, dec_inputs, dec_inputs_len, enc_inputs, enc_outputs, return_attn=False):
        """Expose the underlying decoder (with encoder inputs) directly."""
        return self.decoder(dec_inputs, dec_inputs_len, enc_inputs, enc_outputs, return_attn)
    def forward(self, dec_inputs, dec_inputs_len, return_attn=False):
        """Language-model forward pass: returns (flattened logits, self-attentions)."""
        dec_outputs, dec_self_attns, _ = \
            self.decoder(dec_inputs, dec_inputs_len, None, None, return_attn)
        dec_logits = self.tgt_proj(dec_outputs)
        return dec_logits.view(-1, dec_logits.size(-1)), dec_self_attns
    def proj_grad(self):
        """Project the weighted-model mixture parameters back onto the simplex."""
        if self.weighted_model:
            for name, param in self.named_parameters():
                if 'w_kp' in name or 'w_a' in name:
                    param.data = proj_prob_simplex(param.data)
        else:
            pass
| 40.838462 | 98 | 0.650782 | import torch
import torch.nn as nn
from modules import Linear, PosEncoding
from layers import DecoderLayer
import const
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def proj_prob_simplex(inputs):
    """Project *inputs* onto the probability simplex (non-negative entries summing to 1)."""
    flat, _ = torch.sort(inputs.view(-1), descending=True)
    n = len(flat)
    for i in range(n - 1, -1, -1):
        t = (flat[:i + 1].sum() - 1) / (i + 1)
        if flat[i] > t:
            break
    return torch.clamp(inputs - t, min=0.0)
def get_attn_pad_mask(seq_q, seq_k):
    """Boolean mask of shape (b_size, len_q, len_k), True where the key token is padding (-1).

    Bug fix: the result of seq_k.eq(-1).unsqueeze(1) was computed but never
    assigned, so the subsequent use of pad_attn_mask raised NameError.
    """
    assert seq_q.dim() == 2 and seq_k.dim() == 2
    b_size, len_q = seq_q.size()
    b_size, len_k = seq_k.size()
    pad_attn_mask = seq_k.eq(-1).unsqueeze(1)  # b_size x 1 x len_k
    pad_attn_mask = pad_attn_mask.expand(b_size, len_q, len_k)  # b_size x len_q x len_k
    return pad_attn_mask
def get_attn_subsequent_mask(seq):
    """Strictly upper-triangular byte mask (len x len) hiding future positions (causal attention)."""
    assert seq.dim() == 2
    size = seq.size(1)
    # `device` is the module-level torch device chosen at import time.
    ones = torch.ones([size, size], device=device)
    return torch.triu(ones, diagonal=1).byte()
class Decoder(nn.Module):
    """Stack of transformer decoder layers with learned token embeddings
    and positional encodings; used standalone as a language model here."""
    def __init__(self, n_layers, d_k, d_v, d_model, d_ff, n_heads,
                 max_seq_len, tgt_vocab_size, dropout=0.1, weighted=False):
        super(Decoder, self).__init__()
        self.d_model = d_model
        self.tgt_emb = nn.Embedding(tgt_vocab_size, d_model)
        self.pos_emb = PosEncoding(max_seq_len * 10, d_model)
        self.dropout_emb = nn.Dropout(dropout)
        # NOTE(review): WeightedDecoderLayer is not imported in this module —
        # constructing with weighted=True would raise NameError; confirm intent.
        self.layer_type = DecoderLayer if not weighted else WeightedDecoderLayer
        self.layers = nn.ModuleList(
            [self.layer_type(d_k, d_v, d_model, d_ff, n_heads, dropout) for _ in range(n_layers)])
    def forward(self, dec_inputs, dec_inputs_len, enc_inputs, enc_outputs, return_attn=False):
        """Run the decoder stack.

        Returns (dec_outputs, dec_self_attns, dec_enc_attns); the attention
        lists stay empty unless return_attn is True.
        """
        dec_outputs = self.tgt_emb(dec_inputs)
        dec_outputs += self.pos_emb(dec_inputs_len)
        dec_outputs = self.dropout_emb(dec_outputs)
        # Self-attention mask combines padding positions with the causal mask.
        dec_self_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs)
        dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_inputs)
        dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0)
        if enc_inputs is not None:
            dec_enc_attn_pad_mask = get_attn_pad_mask(dec_inputs, enc_inputs)
        else:
            # Pure language-model use: no encoder side, so no cross-attention mask.
            dec_enc_attn_pad_mask = None
        dec_self_attns, dec_enc_attns = [], []
        for layer in self.layers:
            dec_outputs, dec_self_attn, dec_enc_attn = layer(dec_outputs, enc_outputs,
                                                             self_attn_mask=dec_self_attn_mask,
                                                             enc_attn_mask=dec_enc_attn_pad_mask)
            if return_attn:
                dec_self_attns.append(dec_self_attn)
                dec_enc_attns.append(dec_enc_attn)
        return dec_outputs, dec_self_attns, dec_enc_attns
class LMTransformer(nn.Module):
    """Decoder-only transformer language model with a vocabulary projection head.

    Optionally ties the projection weights to the target embedding, and
    supports a 'weighted' layer variant whose mixture weights are kept on
    the probability simplex via proj_grad().
    """
    def __init__(self, n_layers, d_k, d_v, d_model, d_ff, n_heads,
                 max_tgt_seq_len, tgt_vocab_size, dropout,
                 weighted_model, share_proj_weight):
        super(LMTransformer, self).__init__()
        self.decoder = Decoder(n_layers, d_k, d_v, d_model, d_ff, n_heads,
                               max_tgt_seq_len, tgt_vocab_size, dropout, weighted_model)
        self.tgt_proj = Linear(d_model, tgt_vocab_size, bias=False)
        self.weighted_model = weighted_model
        if share_proj_weight:
            print('Sharing target embedding and projection..')
            self.tgt_proj.weight = self.decoder.tgt_emb.weight
    def trainable_params(self):
        """Return optimizer parameter groups: 'base' weights and the mixture
        weights ('w_kp'/'w_a') of the weighted model, separated so the latter
        can be projected after each step."""
        params = filter(lambda p: p[1].requires_grad, self.named_parameters())
        param_groups = []
        base_params = {'params': [], 'type': 'base'}
        weighted_params = {'params': [], 'type': 'weighted'}
        for name, param in params:
            if 'w_kp' in name or 'w_a' in name:
                weighted_params['params'].append(param)
            else:
                base_params['params'].append(param)
        param_groups.append(base_params)
        param_groups.append(weighted_params)
        return param_groups
    def decode(self, dec_inputs, dec_inputs_len, enc_inputs, enc_outputs, return_attn=False):
        """Expose the underlying decoder (with encoder inputs) directly."""
        return self.decoder(dec_inputs, dec_inputs_len, enc_inputs, enc_outputs, return_attn)
    def forward(self, dec_inputs, dec_inputs_len, return_attn=False):
        """Language-model forward pass: returns (flattened logits, self-attentions)."""
        dec_outputs, dec_self_attns, _ = \
            self.decoder(dec_inputs, dec_inputs_len, None, None, return_attn)
        dec_logits = self.tgt_proj(dec_outputs)
        return dec_logits.view(-1, dec_logits.size(-1)), dec_self_attns
    def proj_grad(self):
        """Project the weighted-model mixture parameters back onto the simplex."""
        if self.weighted_model:
            for name, param in self.named_parameters():
                if 'w_kp' in name or 'w_a' in name:
                    param.data = proj_prob_simplex(param.data)
        else:
            pass
| true | true |
1c2ee79c50e5332807a24a1c5c70089c0090c76c | 91 | py | Python | loadCSVdata.py | christostsekouronas/academyposttestanalysis | 913a0c13ad0482927a323b2fb3a97a8e2ca26517 | [
"MIT"
] | null | null | null | loadCSVdata.py | christostsekouronas/academyposttestanalysis | 913a0c13ad0482927a323b2fb3a97a8e2ca26517 | [
"MIT"
] | null | null | null | loadCSVdata.py | christostsekouronas/academyposttestanalysis | 913a0c13ad0482927a323b2fb3a97a8e2ca26517 | [
"MIT"
] | null | null | null | import pandas as pd
def loadTest(filepath):
    """Read the CSV file at *filepath* into a pandas DataFrame."""
    return pd.read_csv(filepath)
def loadTest(filepath):
    """Read the CSV file at *filepath* into a pandas DataFrame."""
    return pd.read_csv(filepath)
1c2ee820ca300e6484368fbe2c84cdbd5011a1de | 16,352 | py | Python | socrata/sources.py | sbuss/socrata-py | cc909cf988cb027c75c948261ef622c1e7d93f89 | [
"Apache-2.0"
] | null | null | null | socrata/sources.py | sbuss/socrata-py | cc909cf988cb027c75c948261ef622c1e7d93f89 | [
"Apache-2.0"
] | null | null | null | socrata/sources.py | sbuss/socrata-py | cc909cf988cb027c75c948261ef622c1e7d93f89 | [
"Apache-2.0"
] | null | null | null | import json
import io
import webbrowser
import types
from time import sleep
from socrata.http import post, put, patch, get, noop, UnexpectedResponseException
from socrata.resource import Resource, Collection, ChildResourceSpec
from socrata.input_schema import InputSchema
from socrata.builders.parse_options import ParseOptionBuilder
from socrata.lazy_pool import LazyThreadPoolExecutor
from threading import Lock
from urllib3.exceptions import NewConnectionError
class Sources(Collection):
    """Collection endpoint for revision sources on a Socrata domain."""
    def path(self):
        """Absolute URL of the source collection endpoint."""
        return 'https://{domain}/api/publishing/v1/source'.format(
            domain = self.auth.domain
        )
    def lookup(self, source_id):
        """
        Lookup a source

        Args:
        ```
            source_id (int): The id
        ```

        Returns:
        ```
            Source: the looked-up Source resource
        ```
        """
        uri = self.path() + '/' + str(source_id)
        response = get(uri, auth = self.auth)
        return self._subresource(Source, response)
    def create_upload(self, filename):
        """
        Create a new upload-type source for a file named ``filename``.

        Returns:
        ```
            Source: the newly created Source
        ```

        Examples:
        ```python
        upload = revision.create_upload('foo.csv')
        ```
        """
        payload = {
            'source_type': {
                'type': 'upload',
                'filename': filename
            }
        }
        response = post(
            self.path(),
            auth = self.auth,
            data = json.dumps(payload)
        )
        return self._subresource(Source, response)
class ChunkIterator(object):
    """Thread-safe iterator over a file-like object.

    Yields tuples (seq_num, start_offset, end_offset, data) of fixed-size
    chunks; safe to consume from multiple worker threads concurrently.
    """
    def __init__(self, filelike, chunk_size):
        self._filelike = filelike
        self._chunk_size = chunk_size
        self.lock = Lock()
        self.seq_num = 0
        self.byte_offset = 0
    def __iter__(self):
        return self
    def __next__(self):
        with self.lock:
            data = self._filelike.read(self._chunk_size)
            if not data:
                raise StopIteration
            seq, start = self.seq_num, self.byte_offset
            self.seq_num += 1
            self.byte_offset += len(data)
            return (seq, start, self.byte_offset, data)
    def next(self):
        # Python 2 style alias.
        return self.__next__()
class FileLikeGenerator(object):
    """Adapts a generator of byte chunks to a minimal read()-based file interface."""
    def __init__(self, gen):
        self.gen = gen
        self.done = False
    def read(self, how_much):
        # Once exhausted, return None (falsy) — ChunkIterator treats it as EOF.
        if self.done:
            return None
        pieces, total = [], 0
        while total < how_much:
            try:
                piece = next(self.gen)
            except StopIteration:
                self.done = True
                break
            pieces.append(piece)
            total += len(piece)
        return b''.join(pieces)
class Source(Resource, ParseOptionBuilder):
    """A single ingress source (e.g. a file upload) attached to a revision.

    Handles chunked, parallel, retrying uploads of raw bytes, plus typed
    convenience wrappers (csv, xls, shapefile, ...) that set the correct
    content type for the server-side parser.
    """
    def initiate(self, uri, content_type):
        # Start a chunked upload; the response carries the server's preferred
        # chunk size and upload parallelism.
        return post(
            self.path(uri),
            auth = self.auth,
            data = json.dumps({ 'content_type': content_type })
        )
    def chunk(self, uri, seq_num, byte_offset, bytes):
        # Send one raw chunk at the given sequence number / byte offset.
        return post(
            self.path(uri).format(seq_num=seq_num, byte_offset=byte_offset),
            auth = self.auth,
            data = bytes,
            headers = { 'content-type': 'application/octet-stream' }
        )
    def commit(self, uri, seq_num, byte_offset):
        # Finalize the upload after the last chunk has been sent.
        return post(
            self.path(uri).format(seq_num=seq_num, byte_offset=byte_offset),
            auth = self.auth
        )
    def _chunked_bytes(self, file_or_string_or_bytes_or_generator, content_type, **kwargs):
        """Upload arbitrary content in parallel chunks, retrying transient failures.

        Accepts a str, bytes, generator of byte chunks, or file-like object.
        Optional kwargs: max_retries (default 5), backoff_seconds (default 2).
        Returns the refreshed Source after committing the upload.
        """
        if type(file_or_string_or_bytes_or_generator) is str:
            file_handle = io.StringIO(file_or_string_or_bytes_or_generator)
        elif type(file_or_string_or_bytes_or_generator) is bytes:
            file_handle = io.BytesIO(file_or_string_or_bytes_or_generator)
        elif isinstance(file_or_string_or_bytes_or_generator, types.GeneratorType):
            file_handle = FileLikeGenerator(file_or_string_or_bytes_or_generator)
        elif hasattr(file_or_string_or_bytes_or_generator, 'read'):
            file_handle = file_or_string_or_bytes_or_generator
        else:
            raise ValueError("The thing to upload must be a file, string, bytes, or generator which yields bytes")
        init = self.initiate(content_type)
        chunk_size = init['preferred_chunk_size']
        parallelism = init['preferred_upload_parallelism']
        max_retries = kwargs.get('max_retries', 5)
        backoff_seconds = kwargs.get('backoff_seconds', 2)
        def sendit(chunk, attempts = 0):
            # Upload a single chunk, retrying on connection errors and 500/502s.
            (seq_num, byte_offset, end_byte_offset, bytes) = chunk
            try:
                self.chunk(seq_num, byte_offset, bytes)
            except NewConnectionError as e:
                return retry(chunk, e, attempts)
            except UnexpectedResponseException as e:
                if e.status in [500, 502]:
                    return retry(chunk, e, attempts)
                else:
                    raise e
            return (seq_num, byte_offset, end_byte_offset)
        def retry(chunk, e, attempts):
            # Quadratic backoff; re-raise once max_retries is exhausted.
            if attempts < max_retries:
                attempts = attempts + 1
                sleep(attempts * attempts * backoff_seconds)
                return sendit(chunk, attempts)
            else:
                raise e
        pool = LazyThreadPoolExecutor(parallelism)
        results = [r for r in pool.map(sendit, ChunkIterator(file_handle, chunk_size))]
        # Commit with the sequence number / end offset of the final chunk.
        (seq_num, byte_offset, end_byte_offset) = sorted(results, key=lambda x: x[0])[-1]
        self.commit(seq_num, end_byte_offset)
        return self.show()
    """
    Uploads bytes into the source. Requires content_type argument
    be set correctly for the file handle. It's advised you don't
    use this method directly, instead use one of the csv, xls, xlsx,
    or tsv methods which will correctly set the content_type for you.
    """
    def bytes(self, uri, file_handle, content_type, **kwargs):
        # This is just for backwards compat
        self._chunked_bytes(file_handle, content_type, **kwargs)
    def load(self, uri = None):
        """
        Forces the source to load, if it's a view source.

        Returns:
        ```
            Source: Returns the new Source
        ```
        """
        return self._mutate(put(
            self.path(uri or (self.links['show'] + "/load")),
            auth = self.auth,
            data = {},
            headers = {
                'content-type': 'application/json'
            }
        ))
    def child_specs(self):
        # Declares that input schemas are child resources of this source.
        return [
            ChildResourceSpec(
                self,
                'input_schemas',
                'input_schema_links',
                'schemas',
                InputSchema,
                'input_schema_id'
            )
        ]
    def blob(self, file_handle, **kwargs):
        """
        Uploads a Blob dataset. A blob is a file that will not be parsed as a data file,
        ie: an image, video, etc.


        Returns:
        ```
            Source: Returns the new Source
        ```

        Examples:
        ```python
        with open('my-blob.jpg', 'rb') as f:
            upload = upload.blob(f)
        ```
        """
        source = self
        # Blobs must not be parsed server-side; disable parse_source first if set.
        if self.attributes['parse_options']['parse_source']:
            source = self.change_parse_option('parse_source').to(False).run()
        return source._chunked_bytes(file_handle, "application/octet-stream", **kwargs)
    def csv(self, file_handle, **kwargs):
        """
        Upload a CSV, returns the new input schema.

        Args:
        ```
            file_handle: The file handle, as returned by the python function `open()`
            max_retries (integer): Optional retry limit per chunk in the upload. Defaults to 5.
            backoff_seconds (integer): Optional amount of time to backoff upon a chunk upload failure. Defaults to 2.
        ```

        Returns:
        ```
            Source: Returns the new Source
        ```

        Examples:
        ```python
        with open('my-file.csv', 'rb') as f:
            upload = upload.csv(f)
        ```
        """
        return self._chunked_bytes(file_handle, "text/csv", **kwargs)
    def xls(self, file_handle, **kwargs):
        """
        Upload an XLS, returns the new input schema

        Args:
        ```
            file_handle: The file handle, as returned by the python function `open()`
            max_retries (integer): Optional retry limit per chunk in the upload. Defaults to 5.
            backoff_seconds (integer): Optional amount of time to backoff upon a chunk upload failure. Defaults to 2.
        ```

        Returns:
        ```
            Source: Returns the new Source
        ```

        Examples:
        ```python
        with open('my-file.xls', 'rb') as f:
            upload = upload.xls(f)
        ```
        """
        return self._chunked_bytes(file_handle, "application/vnd.ms-excel", **kwargs)
    def xlsx(self, file_handle, **kwargs):
        """
        Upload an XLSX, returns the new input schema.

        Args:
        ```
            file_handle: The file handle, as returned by the python function `open()`
            max_retries (integer): Optional retry limit per chunk in the upload. Defaults to 5.
            backoff_seconds (integer): Optional amount of time to backoff upon a chunk upload failure. Defaults to 2.
        ```

        Returns:
        ```
            Source: Returns the new Source
        ```

        Examples:
        ```python
        with open('my-file.xlsx', 'rb') as f:
            upload = upload.xlsx(f)
        ```
        """
        return self._chunked_bytes(file_handle, "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", **kwargs)
    def tsv(self, file_handle, **kwargs):
        """
        Upload a TSV, returns the new input schema.

        Args:
        ```
            file_handle: The file handle, as returned by the python function `open()`
            max_retries (integer): Optional retry limit per chunk in the upload. Defaults to 5.
            backoff_seconds (integer): Optional amount of time to backoff upon a chunk upload failure. Defaults to 2.
        ```

        Returns:
        ```
            Source: Returns the new Source
        ```

        Examples:
        ```python
        with open('my-file.tsv', 'rb') as f:
            upload = upload.tsv(f)
        ```
        """
        return self._chunked_bytes(file_handle, "text/tab-separated-values", **kwargs)
    def shapefile(self, file_handle, **kwargs):
        """
        Upload a Shapefile, returns the new input schema.

        Args:
        ```
            file_handle: The file handle, as returned by the python function `open()`
            max_retries (integer): Optional retry limit per chunk in the upload. Defaults to 5.
            backoff_seconds (integer): Optional amount of time to backoff upon a chunk upload failure. Defaults to 2.
        ```

        Returns:
        ```
            Source: Returns the new Source
        ```

        Examples:
        ```python
        with open('my-shapefile-archive.zip', 'rb') as f:
            upload = upload.shapefile(f)
        ```
        """
        return self._chunked_bytes(file_handle, "application/zip", **kwargs)
    def kml(self, file_handle, **kwargs):
        """
        Upload a KML file, returns the new input schema.

        Args:
        ```
            file_handle: The file handle, as returned by the python function `open()`
            max_retries (integer): Optional retry limit per chunk in the upload. Defaults to 5.
            backoff_seconds (integer): Optional amount of time to backoff upon a chunk upload failure. Defaults to 2.
        ```

        Returns:
        ```
            Source: Returns the new Source
        ```

        Examples:
        ```python
        with open('my-kml-file.kml', 'rb') as f:
            upload = upload.kml(f)
        ```
        """
        return self._chunked_bytes(file_handle, "application/vnd.google-earth.kml+xml", **kwargs)
    def geojson(self, file_handle, **kwargs):
        """
        Upload a geojson file, returns the new input schema.

        Args:
        ```
            file_handle: The file handle, as returned by the python function `open()`
            max_retries (integer): Optional retry limit per chunk in the upload. Defaults to 5.
            backoff_seconds (integer): Optional amount of time to backoff upon a chunk upload failure. Defaults to 2.
        ```

        Returns:
        ```
            Source: Returns the new Source
        ```

        Examples:
        ```python
        with open('my-geojson-file.geojson', 'rb') as f:
            upload = upload.geojson(f)
        ```
        """
        return self._chunked_bytes(file_handle, "application/vnd.geo+json", **kwargs)
    def df(self, dataframe, **kwargs):
        """
        Upload a pandas DataFrame, returns the new source.

        Args:
        ```
            file_handle: The file handle, as returned by the python function `open()`
            max_retries (integer): Optional retry limit per chunk in the upload. Defaults to 5.
            backoff_seconds (integer): Optional amount of time to backoff upon a chunk upload failure. Defaults to 2.
        ```

        Returns:
        ```
            Source: Returns the new Source
        ```

        Examples:
        ```python
        import pandas
        df = pandas.read_csv('test/fixtures/simple.csv')
        upload = upload.df(df)
        ```
        """
        # Serialize the frame to CSV in memory and upload it like any other CSV.
        s = io.StringIO()
        dataframe.to_csv(s, index=False)
        return self._chunked_bytes(bytes(s.getvalue().encode()),"text/csv", **kwargs)
    def add_to_revision(self, uri, revision):
        """
        Associate this Source with the given revision.
        """
        return self._clone(patch(
            self.path(uri),
            auth = self.auth,
            data = json.dumps({
                'revision': {
                    'fourfour': revision.attributes['fourfour'],
                    'revision_seq': revision.attributes['revision_seq']
                }
            })
        ))
    def update(self, uri, body):
        # POST an arbitrary update body to this source and return the new state.
        return self._clone(post(
            self.path(uri),
            auth = self.auth,
            data = json.dumps(body)
        ))
    def show_input_schema(self, uri, input_schema_id):
        # Fetch one input schema of this source by id.
        res = get(
            self.path(uri.format(input_schema_id = input_schema_id)),
            auth = self.auth
        )
        return self._subresource(InputSchema, res)
    def get_latest_input_schema(self):
        # The input schema with the highest id is the most recently created one.
        return max(self.input_schemas, key = lambda s: s.attributes['id'])
    def wait_for_finish(self, progress = noop, timeout = None, sleeptime = 1):
        """
        Wait for this dataset to finish transforming and validating. Accepts a progress function
        and a timeout.
        """
        return self._wait_for_finish(
            is_finished = lambda m: m.attributes['finished_at'],
            is_failed = lambda m: m.attributes['failed_at'],
            progress = progress,
            timeout = timeout,
            sleeptime = sleeptime
        )
    def ui_url(self):
        """
        This is the URL to the landing page in the UI for the sources

        Returns:
        ```
            url (str): URL you can paste into a browser to view the source UI
        ```
        """
        if not self.parent:
            raise NotImplementedError("UI for revisionless sources is not implemented (yet). Sorry!")
        revision = self.parent
        return revision.ui_url() + '/sources/{source_id}/preview'.format(
            source_id = self.attributes['id']
        )
    def open_in_browser(self):
        """
        Open this source in your browser, this will open a window
        """
        webbrowser.open(self.ui_url(), new = 2)
| 30.621723 | 126 | 0.561705 | import json
import io
import webbrowser
import types
from time import sleep
from socrata.http import post, put, patch, get, noop, UnexpectedResponseException
from socrata.resource import Resource, Collection, ChildResourceSpec
from socrata.input_schema import InputSchema
from socrata.builders.parse_options import ParseOptionBuilder
from socrata.lazy_pool import LazyThreadPoolExecutor
from threading import Lock
from urllib3.exceptions import NewConnectionError
class Sources(Collection):
    """Collection endpoint for revision sources on a Socrata domain."""
    def path(self):
        """Absolute URL of the source collection endpoint."""
        return 'https://{domain}/api/publishing/v1/source'.format(
            domain = self.auth.domain
        )
    def lookup(self, source_id):
        """Fetch an existing source by its id, returning a Source resource."""
        uri = self.path() + '/' + str(source_id)
        response = get(uri, auth = self.auth)
        return self._subresource(Source, response)
    def create_upload(self, filename):
        """Create a new upload-type source for a file named ``filename``."""
        payload = {
            'source_type': {
                'type': 'upload',
                'filename': filename
            }
        }
        response = post(
            self.path(),
            auth = self.auth,
            data = json.dumps(payload)
        )
        return self._subresource(Source, response)
class ChunkIterator(object):
    """Thread-safe iterator over a file-like object.

    Yields tuples (seq_num, start_offset, end_offset, data) of fixed-size
    chunks; safe to consume from multiple worker threads concurrently.
    """
    def __init__(self, filelike, chunk_size):
        self._filelike = filelike
        self._chunk_size = chunk_size
        self.lock = Lock()
        self.seq_num = 0
        self.byte_offset = 0
    def __iter__(self):
        return self
    def __next__(self):
        with self.lock:
            data = self._filelike.read(self._chunk_size)
            if not data:
                raise StopIteration
            seq, start = self.seq_num, self.byte_offset
            self.seq_num += 1
            self.byte_offset += len(data)
            return (seq, start, self.byte_offset, data)
    def next(self):
        # Python 2 style alias.
        return self.__next__()
return self.__next__()
class FileLikeGenerator(object):
    """Adapts a generator of byte chunks to a minimal read()-based file interface."""
    def __init__(self, gen):
        self.gen = gen
        self.done = False
    def read(self, how_much):
        # Once exhausted, return None (falsy) — ChunkIterator treats it as EOF.
        if self.done:
            return None
        pieces, total = [], 0
        while total < how_much:
            try:
                piece = next(self.gen)
            except StopIteration:
                self.done = True
                break
            pieces.append(piece)
            total += len(piece)
        return b''.join(pieces)
class Source(Resource, ParseOptionBuilder):
    """A single ingress source (an upload, blob, etc.) in the publishing API.

    NOTE(review): several methods declare a ``uri`` parameter that callers in
    this file never pass explicitly (e.g. ``self.initiate(content_type)``);
    presumably the ``Resource`` base class injects the uri from the
    resource's links — confirm against the framework before refactoring.
    """
    def initiate(self, uri, content_type):
        # Start a chunked-upload session, declaring the payload's MIME type.
        return post(
            self.path(uri),
            auth = self.auth,
            data = json.dumps({ 'content_type': content_type })
        )
    def chunk(self, uri, seq_num, byte_offset, bytes):
        # Send one raw chunk at the given sequence number / byte offset.
        return post(
            self.path(uri).format(seq_num=seq_num, byte_offset=byte_offset),
            auth = self.auth,
            data = bytes,
            headers = { 'content-type': 'application/octet-stream' }
        )
    def commit(self, uri, seq_num, byte_offset):
        # Finalize the upload once every chunk has been accepted.
        return post(
            self.path(uri).format(seq_num=seq_num, byte_offset=byte_offset),
            auth = self.auth
        )
    def _chunked_bytes(self, file_or_string_or_bytes_or_generator, content_type, **kwargs):
        """Upload arbitrary bytes in parallel, server-sized chunks.

        Accepts a file-like object, ``str``, ``bytes``, or a generator that
        yields bytes. Recognized kwargs: ``max_retries`` (default 5) and
        ``backoff_seconds`` (default 2) controlling retries on connection
        errors and HTTP 500/502 responses. Returns the refreshed source
        (``self.show()``) after commit.
        """
        # Normalize the many accepted input types to a file-like object.
        if type(file_or_string_or_bytes_or_generator) is str:
            file_handle = io.StringIO(file_or_string_or_bytes_or_generator)
        elif type(file_or_string_or_bytes_or_generator) is bytes:
            file_handle = io.BytesIO(file_or_string_or_bytes_or_generator)
        elif isinstance(file_or_string_or_bytes_or_generator, types.GeneratorType):
            file_handle = FileLikeGenerator(file_or_string_or_bytes_or_generator)
        elif hasattr(file_or_string_or_bytes_or_generator, 'read'):
            file_handle = file_or_string_or_bytes_or_generator
        else:
            raise ValueError("The thing to upload must be a file, string, bytes, or generator which yields bytes")
        # The server dictates the chunk size and the upload parallelism.
        init = self.initiate(content_type)
        chunk_size = init['preferred_chunk_size']
        parallelism = init['preferred_upload_parallelism']
        max_retries = kwargs.get('max_retries', 5)
        backoff_seconds = kwargs.get('backoff_seconds', 2)
        def sendit(chunk, attempts = 0):
            # Upload one chunk; transient failures are retried via retry().
            (seq_num, byte_offset, end_byte_offset, bytes) = chunk
            try:
                self.chunk(seq_num, byte_offset, bytes)
            except NewConnectionError as e:
                return retry(chunk, e, attempts)
            except UnexpectedResponseException as e:
                if e.status in [500, 502]:
                    return retry(chunk, e, attempts)
                else:
                    raise e
            return (seq_num, byte_offset, end_byte_offset)
        def retry(chunk, e, attempts):
            # Quadratic backoff; re-raise the original error after
            # max_retries attempts.
            if attempts < max_retries:
                attempts = attempts + 1
                sleep(attempts * attempts * backoff_seconds)
                return sendit(chunk, attempts)
            else:
                raise e
        pool = LazyThreadPoolExecutor(parallelism)
        results = [r for r in pool.map(sendit, ChunkIterator(file_handle, chunk_size))]
        # Commit using the offsets of the highest-numbered chunk.
        (seq_num, byte_offset, end_byte_offset) = sorted(results, key=lambda x: x[0])[-1]
        self.commit(seq_num, end_byte_offset)
        return self.show()
    def bytes(self, uri, file_handle, content_type, **kwargs):
        # NOTE(review): unlike csv/xls/etc. the result of _chunked_bytes is
        # discarded here — confirm whether this should `return` it.
        self._chunked_bytes(file_handle, content_type, **kwargs)
    def load(self, uri = None):
        # Ask the server to (re)load this source, e.g. after changing
        # parse options.
        return self._mutate(put(
            self.path(uri or (self.links['show'] + "/load")),
            auth = self.auth,
            data = {},
            headers = {
                'content-type': 'application/json'
            }
        ))
    def child_specs(self):
        # Declares that this resource's children are InputSchemas.
        return [
            ChildResourceSpec(
                self,
                'input_schemas',
                'input_schema_links',
                'schemas',
                InputSchema,
                'input_schema_id'
            )
        ]
    def blob(self, file_handle, **kwargs):
        """Upload as an unparsed blob, disabling source parsing first
        if it is currently enabled."""
        source = self
        if self.attributes['parse_options']['parse_source']:
            source = self.change_parse_option('parse_source').to(False).run()
        return source._chunked_bytes(file_handle, "application/octet-stream", **kwargs)
    def csv(self, file_handle, **kwargs):
        # Convenience wrappers: upload with a specific content type.
        return self._chunked_bytes(file_handle, "text/csv", **kwargs)
    def xls(self, file_handle, **kwargs):
        return self._chunked_bytes(file_handle, "application/vnd.ms-excel", **kwargs)
    def xlsx(self, file_handle, **kwargs):
        return self._chunked_bytes(file_handle, "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", **kwargs)
    def tsv(self, file_handle, **kwargs):
        return self._chunked_bytes(file_handle, "text/tab-separated-values", **kwargs)
    def shapefile(self, file_handle, **kwargs):
        return self._chunked_bytes(file_handle, "application/zip", **kwargs)
    def kml(self, file_handle, **kwargs):
        return self._chunked_bytes(file_handle, "application/vnd.google-earth.kml+xml", **kwargs)
    def geojson(self, file_handle, **kwargs):
        return self._chunked_bytes(file_handle, "application/vnd.geo+json", **kwargs)
    def df(self, dataframe, **kwargs):
        # Serialize a pandas DataFrame to CSV in memory and upload it.
        s = io.StringIO()
        dataframe.to_csv(s, index=False)
        return self._chunked_bytes(bytes(s.getvalue().encode()),"text/csv", **kwargs)
    def add_to_revision(self, uri, revision):
        """Associate this source with an existing revision."""
        return self._clone(patch(
            self.path(uri),
            auth = self.auth,
            data = json.dumps({
                'revision': {
                    'fourfour': revision.attributes['fourfour'],
                    'revision_seq': revision.attributes['revision_seq']
                }
            })
        ))
    def update(self, uri, body):
        # POST an arbitrary update body (e.g. new parse options).
        return self._clone(post(
            self.path(uri),
            auth = self.auth,
            data = json.dumps(body)
        ))
    def show_input_schema(self, uri, input_schema_id):
        """Fetch one input schema of this source by id."""
        res = get(
            self.path(uri.format(input_schema_id = input_schema_id)),
            auth = self.auth
        )
        return self._subresource(InputSchema, res)
    def get_latest_input_schema(self):
        # Input schemas have monotonically increasing ids; the max is newest.
        return max(self.input_schemas, key = lambda s: s.attributes['id'])
    def wait_for_finish(self, progress = noop, timeout = None, sleeptime = 1):
        """Poll until the source finishes processing (or fails / times out)."""
        return self._wait_for_finish(
            is_finished = lambda m: m.attributes['finished_at'],
            is_failed = lambda m: m.attributes['failed_at'],
            progress = progress,
            timeout = timeout,
            sleeptime = sleeptime
        )
    def ui_url(self):
        """Web UI preview URL for this source; requires a parent revision."""
        if not self.parent:
            raise NotImplementedError("UI for revisionless sources is not implemented (yet). Sorry!")
        revision = self.parent
        return revision.ui_url() + '/sources/{source_id}/preview'.format(
            source_id = self.attributes['id']
        )
    def open_in_browser(self):
        # new=2 asks the browser to open a new tab.
        webbrowser.open(self.ui_url(), new = 2)
| true | true |
1c2ee82a4987c9c1f21543e89621500bb82375ac | 1,686 | py | Python | pycom_wpa2enterprise/pycom_eduroam/main.py | AidanTek/Fab-Cre8_IoT | 3d358a484aea2e2a50d6dbef443e9a2757ef9ab8 | [
"MIT"
] | null | null | null | pycom_wpa2enterprise/pycom_eduroam/main.py | AidanTek/Fab-Cre8_IoT | 3d358a484aea2e2a50d6dbef443e9a2757ef9ab8 | [
"MIT"
] | null | null | null | pycom_wpa2enterprise/pycom_eduroam/main.py | AidanTek/Fab-Cre8_IoT | 3d358a484aea2e2a50d6dbef443e9a2757ef9ab8 | [
"MIT"
] | null | null | null | import machine
import ubinascii
from network import WLAN
from time import sleep
import socket
# import urllib.urequest
# Network
# SECURITY NOTE(review): WPA2-Enterprise credentials are hard-coded in
# source; move them to an untracked config file before sharing this code.
SSID = 'eduroam' # Network Name
User = 'fablab@cardiffmet.ac.uk'
Password = 'Fa6La6!' # Network password
deviceID = 'LoPy4Test' # identity presented during 802.1X authentication
certPath = '/flash/cert/pfencehaca.cer' # CA certificate on the device flash
# WiFi init:
station = WLAN(mode=WLAN.STA)
def WiFiConnect():
    """Join the WPA2-Enterprise network and block until associated.

    Uses the module-level ``station`` WLAN object plus the SSID/User/
    Password/deviceID/certPath constants; polls every 5 seconds.
    """
    # Connect
    station.connect(ssid=SSID, auth=(WLAN.WPA2_ENT, User, Password), identity=deviceID, certfile=certPath)
    # Wait for connection
    print("connecting...")
    while not station.isconnected():
        print("...")
        sleep(5)
    print("Connected!\n")
WiFiConnect()
def http_get(url):
    """Issue a minimal HTTP/1.0 GET and stream the response to stdout."""
    _, _, host, path = url.split('/', 3)
    # First resolved address for the host on port 80.
    addr = socket.getaddrinfo(host, 80)[0][-1]
    sock = socket.socket()
    sock.connect(addr)
    request = 'GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host)
    sock.send(bytes(request, 'utf8'))
    # Print the response as it arrives, 100 bytes at a time.
    while True:
        data = sock.recv(100)
        if not data:
            break
        print(str(data, 'utf8'), end='')
    sock.close()
def socketTest():
    """Connect to the telnet Star Wars demo host and print one chunk."""
    addr_info = socket.getaddrinfo("towel.blinkenlights.nl", 23)
    target = addr_info[0][-1]
    conn = socket.socket()
    conn.connect(target)
    payload = conn.recv(500)
    print(str(payload, 'utf8'), end='')
# Main loop: report network details, then poll a MicroPython test page,
# repeating roughly every 13 seconds forever.
while True:
    print('ip address, netmask, gateway, DNS:')
    print(station.ifconfig()) # reveal the devices ip address
    print('')
    print('Device MAC = ', ubinascii.hexlify(machine.unique_id(),':').decode())
    print('')
    sleep(3)
    #try:
    #socketTest()
    http_get('http://micropython.org/ks/test.html')
    #contents = urllib.urequest.urlopen("http://google.com")
    #contents.close()
    sleep(10)
| 24.085714 | 106 | 0.619217 | import machine
import ubinascii
from network import WLAN
from time import sleep
import socket
SSID = 'eduroam'
User = 'fablab@cardiffmet.ac.uk'
Password = 'Fa6La6!'
deviceID = 'LoPy4Test'
certPath = '/flash/cert/pfencehaca.cer'
station = WLAN(mode=WLAN.STA)
def WiFiConnect():
station.connect(ssid=SSID, auth=(WLAN.WPA2_ENT, User, Password), identity=deviceID, certfile=certPath)
print("connecting...")
while not station.isconnected():
print("...")
sleep(5)
print("Connected!\n")
WiFiConnect()
def http_get(url):
_, _, host, path = url.split('/', 3)
addr = socket.getaddrinfo(host, 80)[0][-1]
s = socket.socket()
s.connect(addr)
s.send(bytes('GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host), 'utf8'))
while True:
data = s.recv(100)
if data:
print(str(data, 'utf8'), end='')
else:
break
s.close()
def socketTest():
addr_info = socket.getaddrinfo("towel.blinkenlights.nl", 23)
addr = addr_info[0][-1]
s = socket.socket()
s.connect(addr)
data = s.recv(500)
print(str(data, 'utf8'), end='')
while True:
print('ip address, netmask, gateway, DNS:')
print(station.ifconfig())
print('')
print('Device MAC = ', ubinascii.hexlify(machine.unique_id(),':').decode())
print('')
sleep(3)
http_get('http://micropython.org/ks/test.html')
sleep(10)
| true | true |
1c2ee9c5427419198923611955b780899e8542f4 | 657 | py | Python | appengine_config.py | dhermes/google-auth-on-gae | c1679def9f045761364c878259b6f269f361db21 | [
"Apache-2.0"
] | null | null | null | appengine_config.py | dhermes/google-auth-on-gae | c1679def9f045761364c878259b6f269f361db21 | [
"Apache-2.0"
] | null | null | null | appengine_config.py | dhermes/google-auth-on-gae | c1679def9f045761364c878259b6f269f361db21 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Make vendored third-party packages in the bundled `lib/` directory
# importable on App Engine standard environment.
from google.appengine.ext import vendor
vendor.add('lib')
| 34.578947 | 74 | 0.762557 |
from google.appengine.ext import vendor
vendor.add('lib')
| true | true |
1c2eeb7cba3beb1dfcbc46aa03ae33a079d25fe3 | 26,780 | py | Python | atomate/vasp/workflows/tests/test_vasp_workflows.py | rwoodsrobinson/atomate | 231566fd16e0b89637efc60ad2bf35417f03164a | [
"BSD-3-Clause-LBNL"
] | 3 | 2021-08-02T09:19:20.000Z | 2022-03-28T17:37:47.000Z | atomate/vasp/workflows/tests/test_vasp_workflows.py | rwoodsrobinson/atomate | 231566fd16e0b89637efc60ad2bf35417f03164a | [
"BSD-3-Clause-LBNL"
] | null | null | null | atomate/vasp/workflows/tests/test_vasp_workflows.py | rwoodsrobinson/atomate | 231566fd16e0b89637efc60ad2bf35417f03164a | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-07-27T06:12:56.000Z | 2021-07-27T06:12:56.000Z | # coding: utf-8
import json
import os
import unittest
import zlib
import gridfs
from pymongo import DESCENDING
from fireworks import FWorker
from fireworks.core.rocket_launcher import rapidfire
from atomate.vasp.powerups import use_custodian, add_namefile, use_fake_vasp, add_trackers, add_bandgap_check, use_potcar_spec
from atomate.vasp.workflows.base.core import get_wf
from atomate.utils.testing import AtomateTest
from atomate.vasp.firetasks.parse_outputs import VaspDrone
from atomate.vasp.database import VaspCalcDb
from pymatgen.io.vasp import Incar
from pymatgen.io.vasp.sets import MPRelaxSet, MPStaticSet, MPScanRelaxSet
from pymatgen.util.testing import PymatgenTest
from pymatgen.core import Structure
__author__ = 'Anubhav Jain, Kiran Mathew'
__email__ = 'ajain@lbl.gov, kmathew@lbl.gov'
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
db_dir = os.path.join(module_dir, "..", "..", "..", "common", "test_files")
reference_dir = os.path.join(module_dir, "..", "..", "test_files")
ref_dirs_si = {"structure optimization": os.path.join(reference_dir, "Si_structure_optimization"),
"static": os.path.join(reference_dir, "Si_static"),
"nscf uniform": os.path.join(reference_dir, "Si_nscf_uniform"),
"nscf line": os.path.join(reference_dir, "Si_nscf_line")}
_fworker = FWorker(env={"db_file": os.path.join(db_dir, "db.json")})
DEBUG_MODE = False # If true, retains the database and output dirs at the end of the test
VASP_CMD = None # If None, runs a "fake" VASP. Otherwise, runs VASP with this command...
class TestVaspWorkflows(AtomateTest):
    """Integration tests for the basic Si VASP workflows (optimize, static,
    band structure, trackers, CHGCAR storage).

    Fixes relative to the previous version:
    * the ``"ncsf uniform"`` branch in ``_check_run`` was a typo for
      ``"nscf uniform"`` and therefore never executed;
    * three duplicated DOS assertions were removed;
    * a leftover debug ``print`` was removed from
      ``test_chgcar_db_read_write``.
    """

    def setUp(self):
        super(TestVaspWorkflows, self).setUp()
        self.struct_si = PymatgenTest.get_structure("Si")

    def _check_run(self, d, mode):
        """Validate a task document produced by one step of the Si workflow."""
        if mode not in ["structure optimization", "static", "nscf uniform",
                        "nscf line", "additional field"]:
            raise ValueError("Invalid mode!")

        self.assertEqual(d["formula_pretty"], "Si")
        self.assertEqual(d["formula_anonymous"], "A")
        self.assertEqual(d["nelements"], 1)
        self.assertEqual(d["state"], "successful")
        self.assertAlmostEqual(d["calcs_reversed"][0]["output"]["structure"]["lattice"]["a"], 3.867, 2)
        self.assertEqual(d["output"]["is_gap_direct"], False)

        if mode in ["structure optimization", "static"]:
            self.assertAlmostEqual(d["output"]["energy"], -10.850, 2)
            self.assertAlmostEqual(d["output"]["energy_per_atom"], -5.425, 2)

        if mode == "additional field":
            self.assertAlmostEqual(d["test_additional_field"]["lattice"]["a"], 3.8401979337)
        # BUGFIX: this branch was previously spelled "ncsf uniform" (a typo)
        # and so never ran for the real "nscf uniform" mode.
        elif mode in ["nscf uniform"]:
            self.assertAlmostEqual(d["output"]["energy"], -10.828, 2)
            self.assertAlmostEqual(d["output"]["energy_per_atom"], -5.414, 2)
            self.assertAlmostEqual(d["output"]["bandgap"], 0.65, 1)

        if "nscf" in mode:
            self.assertEqual(d["calcs_reversed"][0]["output"]["outcar"]["total_magnetization"], None)
        else:
            self.assertAlmostEqual(d["calcs_reversed"][0]["output"]["outcar"]["total_magnetization"], 0, 3)

        self.assertLess(d["run_stats"]["overall"]["Elapsed time (sec)"], 180)  # run should take under 3 minutes

        # check the DOS and band structure
        if mode == "nscf uniform" or mode == "nscf line":
            fs = gridfs.GridFS(self.get_task_database(), 'bandstructure_fs')

            # check the band structure
            bs_fs_id = d["calcs_reversed"][0]["bandstructure_fs_id"]
            bs_json = zlib.decompress(fs.get(bs_fs_id).read())
            bs = json.loads(bs_json.decode())

            self.assertEqual(bs["is_spin_polarized"], False)
            self.assertEqual(bs["band_gap"]["direct"], False)
            self.assertAlmostEqual(bs["band_gap"]["energy"], 0.65, 1)
            self.assertEqual(bs["is_metal"], False)

            if mode == "nscf uniform":
                for k in ["is_spin_polarized", "band_gap", "structure",
                          "kpoints", "is_metal", "vbm", "cbm", "labels_dict",
                          "projections", "lattice_rec", "bands"]:
                    self.assertTrue(k in bs)
                    self.assertIsNotNone(bs[k])
                self.assertEqual(bs["@class"], "BandStructure")
            else:
                for k in ["is_spin_polarized", "band_gap", "structure",
                          "kpoints", "is_metal", "vbm", "cbm", "labels_dict",
                          "projections", "lattice_rec", "bands", "branches"]:
                    self.assertTrue(k in bs)
                    self.assertIsNotNone(bs[k])
                self.assertEqual(bs["@class"], "BandStructureSymmLine")

        # check the DOS
        if mode == "nscf uniform":
            fs = gridfs.GridFS(self.get_task_database(), 'dos_fs')
            dos_fs_id = d["calcs_reversed"][0]["dos_fs_id"]
            dos_json = zlib.decompress(fs.get(dos_fs_id).read())
            dos = json.loads(dos_json.decode())
            for k in ["densities", "energies", "pdos", "spd_dos", "atom_dos", "structure"]:
                self.assertTrue(k in dos)
                self.assertIsNotNone(dos[k])
            # (previously these three assertions were duplicated verbatim)
            self.assertAlmostEqual(dos["spd_dos"]["p"]["efermi"], 5.625, 1)
            self.assertAlmostEqual(dos["atom_dos"]["Si"]["efermi"], 5.625, 1)
            self.assertAlmostEqual(dos["structure"]["lattice"]["a"], 3.867, 2)

    def test_single_Vasp(self):
        # add the workflow
        structure = self.struct_si
        my_wf = get_wf(structure, "optimize_only.yaml", vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)
        self.lp.add_wf(my_wf)

        # run the workflow
        rapidfire(self.lp, fworker=_fworker)

        d = self.get_task_collection().find_one({"task_label": "structure optimization"})
        self._check_run(d, mode="structure optimization")

        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))

    def test_single_Vasp_dbinsertion(self):
        # add the workflow
        structure = self.struct_si
        # instructs to use db_file set by FWorker, see env_chk
        my_wf = get_wf(structure, "optimize_only.yaml", vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)
        # add an msonable object to additional fields
        my_wf.fws[0].tasks[-1]['additional_fields'].update(
            {"test_additional_field": self.struct_si})
        self.lp.add_wf(my_wf)

        # run the workflow
        rapidfire(self.lp, fworker=_fworker)

        d = self.get_task_collection().find_one()
        self._check_run(d, mode="structure optimization")
        self._check_run(d, mode="additional field")

        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))

    def test_bandstructure_Vasp(self):
        # add the workflow
        structure = self.struct_si
        # instructs to use db_file set by FWorker, see env_chk
        my_wf = get_wf(structure, "bandstructure.yaml",
                       vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)
        my_wf = add_namefile(my_wf)  # add a slug of fw-name to output files

        self.lp.add_wf(my_wf)

        # run the workflow
        # set the db_file variable
        rapidfire(self.lp, fworker=_fworker)

        # make sure the structure relaxation ran OK
        d = self.get_task_collection().find_one({"task_label": "structure optimization"},
                                                sort=[("_id", DESCENDING)])
        self._check_run(d, mode="structure optimization")

        # make sure the static run ran OK
        d = self.get_task_collection().find_one({"task_label": "static"}, sort=[("_id", DESCENDING)])
        self._check_run(d, mode="static")

        # make sure the uniform run ran OK
        d = self.get_task_collection().find_one({"task_label": "nscf uniform"}, sort=[("_id", DESCENDING)])
        self._check_run(d, mode="nscf uniform")

        # make sure the uniform run ran OK
        d = self.get_task_collection().find_one({"task_label": "nscf line"}, sort=[("_id", DESCENDING)])
        self._check_run(d, mode="nscf line")

        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))

    def test_bandgap_check_Vasp(self):
        # add the workflow
        structure = self.struct_si
        # instructs to use db_file set by FWorker, see env_chk
        my_wf = get_wf(structure, "bandstructure.yaml",
                       vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)
        my_wf = add_namefile(my_wf)  # add a slug of fw-name to output files
        my_wf = add_bandgap_check(my_wf, check_bandgap_params={"max_gap": 0.1}, fw_name_constraint="structure optimization")
        self.lp.add_wf(my_wf)

        # run the workflow
        # set the db_file variable
        rapidfire(self.lp, fworker=_fworker)

        # structure optimization should be completed
        self.assertEqual(self.lp.fireworks.find_one(
            {"name": "Si-structure optimization"}, {"state": 1})["state"],
            "COMPLETED")

        # the static FW should be defused because Si's gap exceeds max_gap
        self.assertEqual(self.lp.fireworks.find_one(
            {"name": "Si-static"}, {"state": 1})["state"],
            "DEFUSED")

    def test_trackers(self):
        # add the workflow
        structure = self.struct_si
        my_wf = get_wf(structure, "optimize_only.yaml",
                       vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD})

        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)

        my_wf = add_trackers(my_wf)
        self.lp.add_wf(my_wf)

        # run the workflow
        rapidfire(self.lp, fworker=_fworker)

        for x in self.lp.get_tracker_data(1):
            for t in x["trackers"]:
                self.assertGreater(len(t.content.split("\n")), 20)

        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))

    def test_chgcar_db_read_write(self):
        # generate a doc from the test folder
        drone = VaspDrone(parse_chgcar=True, parse_aeccar=True)
        doc = drone.assimilate(ref_dirs_si['static'] + '/outputs')
        # insert the doc and make sure the charge densities are extracted
        cc = doc['calcs_reversed'][0]['chgcar']
        self.assertAlmostEqual(cc.data['total'].sum()/cc.ngridpts, 8.0, 4)
        cc = doc['calcs_reversed'][0]['aeccar0']
        self.assertAlmostEqual(cc.data['total'].sum()/cc.ngridpts, 23.253588293583313, 4)
        cc = doc['calcs_reversed'][0]['aeccar2']
        self.assertAlmostEqual(cc.data['total'].sum()/cc.ngridpts, 8.01314480789829, 4)
        mmdb = VaspCalcDb.from_db_file(os.path.join(db_dir, "db.json"))
        t_id = mmdb.insert_task(doc, use_gridfs=True)
        # space is freed up after uploading the document
        self.assertRaises(KeyError, lambda: doc['calcs_reversed'][0]['chgcar'])
        self.assertRaises(KeyError, lambda: doc['calcs_reversed'][0]['aeccar0'])
        self.assertRaises(KeyError, lambda: doc['calcs_reversed'][0]['aeccar2'])
        cc = mmdb.get_chgcar(task_id=t_id)
        self.assertAlmostEqual(cc.data['total'].sum()/cc.ngridpts, 8.0, 4)
        dcc = mmdb.get_aeccar(task_id=t_id)
        self.assertAlmostEqual(dcc['aeccar0'].data['total'].sum()/cc.ngridpts, 23.253588293583313, 4)
        self.assertAlmostEqual(dcc['aeccar2'].data['total'].sum()/cc.ngridpts, 8.01314480789829, 4)

        # check the retrieve_task function for the same fake calculation
        ret_task = mmdb.retrieve_task(t_id)
        ret_chgcar = ret_task['calcs_reversed'][0]['chgcar']
        ret_aeccar0 = ret_task['calcs_reversed'][0]['aeccar0']
        ret_aeccar2 = ret_task['calcs_reversed'][0]['aeccar2']
        ret_aeccar = ret_aeccar0 + ret_aeccar2
        self.assertAlmostEqual(ret_chgcar.data['total'].sum()/ret_chgcar.ngridpts, 8.0, 4)
        self.assertAlmostEqual(ret_aeccar.data['total'].sum()/ret_aeccar.ngridpts, 31.2667331015, 4)

    def test_chgcar_db_read(self):
        # add the workflow
        structure = self.struct_si
        # instructs to use db_file set by FWorker, see env_chk
        my_wf = get_wf(structure, "static_only.yaml", vis=MPStaticSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)

        # set the flags for storing charge densties
        my_wf.fws[0].tasks[-1]["parse_chgcar"] = True
        my_wf.fws[0].tasks[-1]["parse_aeccar"] = True
        self.lp.add_wf(my_wf)

        # run the workflow
        # set the db_file variable
        rapidfire(self.lp, fworker=_fworker)

        d = self.get_task_collection().find_one()
        self._check_run(d, mode="static")

        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))

        chgcar_fs_id = d["calcs_reversed"][0]["chgcar_fs_id"]
        accar0_fs_id = d["calcs_reversed"][0]["aeccar0_fs_id"]
        accar2_fs_id = d["calcs_reversed"][0]["aeccar2_fs_id"]

        self.assertTrue(bool(chgcar_fs_id))
        self.assertTrue(bool(accar0_fs_id))
        self.assertTrue(bool(accar2_fs_id))
class TestScanOptimizeWorkflow(AtomateTest):
    """Integration tests for the SCAN metaGGA structure-optimization workflow.

    The tests verify (a) how KSPACING in the final relaxation step is chosen
    from the material's bandgap, and (b) that user INCAR settings and VdW
    options propagate correctly through the three relaxation stages
    (GGA pre-relax, static, SCAN relax).
    """

    def setUp(self):
        super(TestScanOptimizeWorkflow, self).setUp()

    def _run_scan_relax(self, wf, dir_name):
        # Run the workflow; when no real VASP binary is configured,
        # substitute a pre-computed reference calculation directory.
        if not VASP_CMD:
            wf = use_fake_vasp(wf,
                               {"SCAN structure optimization": os.path.join(
                                   reference_dir, dir_name)},
                               check_kpoints=False,
                               check_potcar=False,
                               clear_inputs=False,
                               check_incar=False
                               )
        else:
            wf = use_custodian(wf)

        wf = use_potcar_spec(wf)
        self.lp.add_wf(wf)

        # run the workflow
        rapidfire(self.lp, fworker=_fworker)

    def _get_launch_dir(self):
        # retrieve the launcher directory of the most recent SCAN task
        d = list(self.get_task_collection().find({"task_label": "SCAN structure optimization"}))[-1]
        launch_dir = d["dir_name"].split(":")[1]
        return launch_dir

    def test_SCAN_no_bandgap(self):
        # A structure with bandgap = 0 (default) should have KSPACING equal to 0.22
        structure = Structure.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_Al/inputs", "POSCAR"))
        my_wf = get_wf(structure, "SCAN_optimization.yaml", vis=MPScanRelaxSet(structure),
                       common_params={"vasp_cmd": VASP_CMD})
        self._run_scan_relax(my_wf, "SCAN_structure_optimization_Al")

        # Check INCAR.orig
        incar_orig = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.orig.gz"))
        ref_incar = Incar.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_Al/inputs", "INCAR.orig"))
        for p in incar_orig.keys():
            if p == "MAGMOM":  # Ignore MAGMOM b/c structure initialized from POSCAR cannot have a MAGMOM
                pass
            else:
                self.assertEqual(incar_orig[p], ref_incar[p])

        # Check INCAR.relax1 (GGA pre-relaxation: metaGGA off, no WAVECAR)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax1.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["LWAVE"], False)

        # Check INCAR.relax2 (static run seeding the SCAN step)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax2.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["LWAVE"], True)
        self.assertEqual(incar["NSW"], 0)
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["ICHARG"], 1)
        self.assertEqual(incar["ISTART"], 0)

        # Check INCAR.relax3 (the actual SCAN relaxation)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax3.gz"))
        for p in incar.keys():
            if p == "KSPACING":
                self.assertEqual(incar[p], 0.22)
            elif p == "ICHARG" or p == "ISTART":
                self.assertEqual(incar[p], 1)
            else:
                self.assertEqual(incar_orig[p], incar[p])

    def test_SCAN_small_bandgap(self):
        # A structure with a small bandgap (LiH) should result in a KSPACING
        # value of 0.351275
        structure = Structure.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiH/inputs", "POSCAR"))
        my_wf = get_wf(structure, "SCAN_optimization.yaml", vis=MPScanRelaxSet(structure),
                       common_params={"vasp_cmd": VASP_CMD})
        self._run_scan_relax(my_wf, "SCAN_structure_optimization_LiH")

        # Check INCAR.orig
        incar_orig = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.orig.gz"))
        ref_incar = Incar.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiH/inputs", "INCAR.orig"))
        for p in incar_orig.keys():
            if p == "MAGMOM":  # Ignore MAGMOM b/c structure initialized from POSCAR cannot have a MAGMOM
                pass
            else:
                self.assertEqual(incar_orig[p], ref_incar[p])

        # Check INCAR.relax1
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax1.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["LWAVE"], False)

        # Check INCAR.relax2
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax2.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["LWAVE"], True)
        self.assertEqual(incar["NSW"], 0)
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["ICHARG"], 1)
        self.assertEqual(incar["ISTART"], 0)

        # Check INCAR.relax3
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax3.gz"))
        for p in incar.keys():
            if p == "KSPACING":
                self.assertAlmostEqual(incar[p], 0.351275, 4)
            elif p == "ICHARG" or p == "ISTART":
                self.assertEqual(incar[p], 1)
            elif p == "ISMEAR":
                self.assertEqual(incar[p], -5)
            elif p == "SIGMA":
                self.assertEqual(incar[p], 0.05)
            else:
                self.assertEqual(incar_orig[p], incar[p])

    def test_SCAN_large_bandgap(self):
        # A structure with a large bandgap (LiF) should result in KSPACING
        # hitting the maximum allowed value of 0.44
        structure = Structure.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiF/inputs", "POSCAR"))
        my_wf = get_wf(structure, "SCAN_optimization.yaml", vis=MPScanRelaxSet(structure),
                       common_params={"vasp_cmd": VASP_CMD})
        self._run_scan_relax(my_wf, "SCAN_structure_optimization_LiF")

        # Check INCAR.orig generated by the InputSet
        incar_orig = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.orig.gz"))
        ref_incar = Incar.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiF/inputs", "INCAR.orig"))
        for p in incar_orig.keys():
            if p == "MAGMOM":  # Ignore MAGMOM b/c structure initialized from POSCAR cannot have a MAGMOM
                pass
            else:
                self.assertEqual(incar_orig[p], ref_incar[p])

        # Check INCAR.relax1 generated by the Workflow
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax1.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["LWAVE"], False)

        # Check INCAR.relax2
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax2.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["LWAVE"], True)
        self.assertEqual(incar["NSW"], 0)
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["ICHARG"], 1)
        self.assertEqual(incar["ISTART"], 0)

        # Check INCAR.relax3 for the correct kspacing
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax3.gz"))
        for p in incar.keys():
            if p == "KSPACING":
                self.assertEqual(incar[p], 0.44)
            elif p == "ICHARG" or p == "ISTART":
                self.assertEqual(incar[p], 1)
            elif p == "ISMEAR":
                self.assertEqual(incar[p], -5)
            elif p == "SIGMA":
                self.assertEqual(incar[p], 0.05)
            else:
                self.assertEqual(incar_orig[p], incar[p])

    def test_SCAN_with_vdw(self):
        # Verify appropriate changes to the INCAR when VdW is enabled
        # VdW should be off for relax1 (GGA) and re-enabled for relax2 (SCAN)
        structure = Structure.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiF_vdw/inputs", "POSCAR"))
        my_wf = get_wf(structure, "SCAN_optimization.yaml", vis=MPScanRelaxSet(structure, vdw="rvv10"),
                       common_params={"vasp_cmd": VASP_CMD, "vdw_kernel_dir": os.path.join(reference_dir, "SCAN_structure_optimization_LiF_vdw/inputs")})
        self._run_scan_relax(my_wf, "SCAN_structure_optimization_LiF_vdw")

        # Check INCAR.orig
        incar_orig = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.orig.gz"))
        ref_incar = Incar.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiF_vdw/inputs", "INCAR.orig"))
        for p in incar_orig.keys():
            if p == "MAGMOM":  # Ignore MAGMOM b/c structure initialized from POSCAR cannot have a MAGMOM
                pass
            else:
                self.assertEqual(incar_orig[p], ref_incar[p])

        # Check INCAR.relax1 (VdW flags must be stripped for the GGA stage)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax1.gz"))
        self.assertIsNone(incar.get("LUSE_VDW", None))
        self.assertIsNone(incar.get("BPARAM", None))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["LWAVE"], False)

        # Check INCAR.relax2
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax2.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["LWAVE"], True)
        self.assertEqual(incar["NSW"], 0)
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["ICHARG"], 1)
        self.assertEqual(incar["ISTART"], 0)

        # Check INCAR.relax3
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax3.gz"))
        for p in incar.keys():
            if p == "KSPACING":
                self.assertEqual(incar[p], 0.44)
            elif p == "ICHARG" or p == "ISTART":
                self.assertEqual(incar[p], 1)
            elif p == "ISMEAR":
                self.assertEqual(incar[p], -5)
            elif p == "SIGMA":
                self.assertEqual(incar[p], 0.05)
            elif p == "MAGMOM":  # Ignore MAGMOM b/c structure initialized from POSCAR cannot have a MAGMOM
                pass
            else:
                self.assertEqual(incar_orig[p], incar[p])

    def test_SCAN_incar_override(self):
        # user incar settings should be passed all the way through the workflow
        structure = Structure.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiH/inputs", "POSCAR"))
        my_wf = get_wf(structure, "SCAN_optimization.yaml",
                       vis=MPScanRelaxSet(structure,
                                          user_potcar_functional="PBE_52",
                                          user_incar_settings={"NSW": 10, "SYMPREC": 1e-6, "SIGMA": 0.1}
                                          ),
                       common_params={"vasp_cmd": VASP_CMD})
        self._run_scan_relax(my_wf, "SCAN_structure_optimization_LiH")

        # Check INCAR.orig
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.orig.gz"))
        self.assertEqual(incar["NSW"], 10)
        self.assertEqual(incar["SYMPREC"], 1e-6)
        self.assertEqual(incar["SIGMA"], 0.1)

        # Check INCAR.relax1
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax1.gz"))
        self.assertEqual(incar["NSW"], 10)
        self.assertEqual(incar["SYMPREC"], 1e-6)
        self.assertEqual(incar["SIGMA"], 0.1)

        # Check INCAR.relax2 (NSW forced to 0 for the static stage)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax2.gz"))
        self.assertEqual(incar["NSW"], 0)
        self.assertEqual(incar["SYMPREC"], 1e-6)
        self.assertEqual(incar["SIGMA"], 0.1)

        # Check INCAR.relax3
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax3.gz"))
        self.assertEqual(incar["NSW"], 10)
        self.assertEqual(incar["SYMPREC"], 1e-6)
        self.assertEqual(incar["SIGMA"], 0.1)
# Allow running this module's tests directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 43.973727 | 153 | 0.606983 |
import json
import os
import unittest
import zlib
import gridfs
from pymongo import DESCENDING
from fireworks import FWorker
from fireworks.core.rocket_launcher import rapidfire
from atomate.vasp.powerups import use_custodian, add_namefile, use_fake_vasp, add_trackers, add_bandgap_check, use_potcar_spec
from atomate.vasp.workflows.base.core import get_wf
from atomate.utils.testing import AtomateTest
from atomate.vasp.firetasks.parse_outputs import VaspDrone
from atomate.vasp.database import VaspCalcDb
from pymatgen.io.vasp import Incar
from pymatgen.io.vasp.sets import MPRelaxSet, MPStaticSet, MPScanRelaxSet
from pymatgen.util.testing import PymatgenTest
from pymatgen.core import Structure
__author__ = 'Anubhav Jain, Kiran Mathew'
__email__ = 'ajain@lbl.gov, kmathew@lbl.gov'
# Paths to this module and the bundled reference-calculation directories.
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
db_dir = os.path.join(module_dir, "..", "..", "..", "common", "test_files")
reference_dir = os.path.join(module_dir, "..", "..", "test_files")
# Reference VASP output directories for each step of the Si workflows;
# consumed by use_fake_vasp() so the tests run without a real VASP binary.
ref_dirs_si = {"structure optimization": os.path.join(reference_dir, "Si_structure_optimization"),
               "static": os.path.join(reference_dir, "Si_static"),
               "nscf uniform": os.path.join(reference_dir, "Si_nscf_uniform"),
               "nscf line": os.path.join(reference_dir, "Si_nscf_line")}
# FWorker wired to the test database; VASP_CMD=None selects fake-VASP mode.
_fworker = FWorker(env={"db_file": os.path.join(db_dir, "db.json")})
DEBUG_MODE = False
VASP_CMD = None
class TestVaspWorkflows(AtomateTest):
    """Integration tests for the basic VASP workflows (relax, static, band structure)."""
    def setUp(self):
        """Create the Si structure used as input by every test."""
        super(TestVaspWorkflows, self).setUp()
        self.struct_si = PymatgenTest.get_structure("Si")
    def _check_run(self, d, mode):
        """Assert that task document *d* matches the reference results for *mode*."""
        if mode not in ["structure optimization", "static", "nscf uniform",
                        "nscf line", "additional field"]:
            raise ValueError("Invalid mode!")
        # Common checks for all modes.
        self.assertEqual(d["formula_pretty"], "Si")
        self.assertEqual(d["formula_anonymous"], "A")
        self.assertEqual(d["nelements"], 1)
        self.assertEqual(d["state"], "successful")
        self.assertAlmostEqual(d["calcs_reversed"][0]["output"]["structure"]["lattice"]["a"], 3.867, 2)
        self.assertEqual(d["output"]["is_gap_direct"], False)
        if mode in ["structure optimization", "static"]:
            self.assertAlmostEqual(d["output"]["energy"], -10.850, 2)
            self.assertAlmostEqual(d["output"]["energy_per_atom"], -5.425, 2)
        if mode == "additional field":
            self.assertAlmostEqual(d["test_additional_field"]["lattice"]["a"], 3.8401979337)
        # NOTE(review): "ncsf uniform" looks like a typo for "nscf uniform"; the
        # whitelist above never allows it, so this elif branch is unreachable.
        elif mode in ["ncsf uniform"]:
            self.assertAlmostEqual(d["output"]["energy"], -10.828, 2)
            self.assertAlmostEqual(d["output"]["energy_per_atom"], -5.414, 2)
            self.assertAlmostEqual(d["output"]["bandgap"], 0.65, 1)
        if "nscf" in mode:
            self.assertEqual(d["calcs_reversed"][0]["output"]["outcar"]["total_magnetization"], None)
        else:
            self.assertAlmostEqual(d["calcs_reversed"][0]["output"]["outcar"]["total_magnetization"], 0, 3)
        self.assertLess(d["run_stats"]["overall"]["Elapsed time (sec)"], 180)
        # Band structure is stored in GridFS; decompress and validate its JSON.
        if mode == "nscf uniform" or mode == "nscf line":
            fs = gridfs.GridFS(self.get_task_database(), 'bandstructure_fs')
            bs_fs_id = d["calcs_reversed"][0]["bandstructure_fs_id"]
            bs_json = zlib.decompress(fs.get(bs_fs_id).read())
            bs = json.loads(bs_json.decode())
            self.assertEqual(bs["is_spin_polarized"], False)
            self.assertEqual(bs["band_gap"]["direct"], False)
            self.assertAlmostEqual(bs["band_gap"]["energy"], 0.65, 1)
            self.assertEqual(bs["is_metal"], False)
            if mode == "nscf uniform":
                for k in ["is_spin_polarized", "band_gap", "structure",
                          "kpoints", "is_metal", "vbm", "cbm", "labels_dict",
                          "projections", "lattice_rec", "bands"]:
                    self.assertTrue(k in bs)
                    self.assertIsNotNone(bs[k])
                self.assertEqual(bs["@class"], "BandStructure")
            else:
                # The line-mode band structure additionally stores "branches".
                for k in ["is_spin_polarized", "band_gap", "structure",
                          "kpoints", "is_metal", "vbm", "cbm", "labels_dict",
                          "projections", "lattice_rec", "bands", "branches"]:
                    self.assertTrue(k in bs)
                    self.assertIsNotNone(bs[k])
                self.assertEqual(bs["@class"], "BandStructureSymmLine")
        # Density of states is also stored compressed in GridFS.
        if mode == "nscf uniform":
            fs = gridfs.GridFS(self.get_task_database(), 'dos_fs')
            dos_fs_id = d["calcs_reversed"][0]["dos_fs_id"]
            dos_json = zlib.decompress(fs.get(dos_fs_id).read())
            dos = json.loads(dos_json.decode())
            for k in ["densities", "energies", "pdos", "spd_dos", "atom_dos", "structure"]:
                self.assertTrue(k in dos)
                self.assertIsNotNone(dos[k])
            self.assertAlmostEqual(dos["spd_dos"]["p"]["efermi"], 5.625, 1)
            self.assertAlmostEqual(dos["atom_dos"]["Si"]["efermi"], 5.625, 1)
            self.assertAlmostEqual(dos["structure"]["lattice"]["a"], 3.867, 2)
            # NOTE(review): the three assertions below duplicate the three above.
            self.assertAlmostEqual(dos["spd_dos"]["p"]["efermi"], 5.625, 1)
            self.assertAlmostEqual(dos["atom_dos"]["Si"]["efermi"], 5.625, 1)
            self.assertAlmostEqual(dos["structure"]["lattice"]["a"], 3.867, 2)
    def test_single_Vasp(self):
        """Single structure optimization using fake VASP (or custodian if VASP_CMD set)."""
        structure = self.struct_si
        my_wf = get_wf(structure, "optimize_only.yaml", vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)
        self.lp.add_wf(my_wf)
        rapidfire(self.lp, fworker=_fworker)
        d = self.get_task_collection().find_one({"task_label": "structure optimization"})
        self._check_run(d, mode="structure optimization")
        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))
    def test_single_Vasp_dbinsertion(self):
        """Structure optimization with DB insertion plus a custom additional field."""
        structure = self.struct_si
        my_wf = get_wf(structure, "optimize_only.yaml", vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)
        # Inject an extra field into the VaspToDb task to verify it round-trips.
        my_wf.fws[0].tasks[-1]['additional_fields'].update(
            {"test_additional_field": self.struct_si})
        self.lp.add_wf(my_wf)
        rapidfire(self.lp, fworker=_fworker)
        d = self.get_task_collection().find_one()
        self._check_run(d, mode="structure optimization")
        self._check_run(d, mode="additional field")
        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))
    def test_bandstructure_Vasp(self):
        """Full band-structure workflow: relax -> static -> NSCF uniform -> NSCF line."""
        structure = self.struct_si
        my_wf = get_wf(structure, "bandstructure.yaml",
                       vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)
        my_wf = add_namefile(my_wf)
        self.lp.add_wf(my_wf)
        rapidfire(self.lp, fworker=_fworker)
        # Validate each step's task document (newest first per _id sort).
        d = self.get_task_collection().find_one({"task_label": "structure optimization"},
                                                sort=[("_id", DESCENDING)])
        self._check_run(d, mode="structure optimization")
        d = self.get_task_collection().find_one({"task_label": "static"}, sort=[("_id", DESCENDING)])
        self._check_run(d, mode="static")
        d = self.get_task_collection().find_one({"task_label": "nscf uniform"}, sort=[("_id", DESCENDING)])
        self._check_run(d, mode="nscf uniform")
        d = self.get_task_collection().find_one({"task_label": "nscf line"}, sort=[("_id", DESCENDING)])
        self._check_run(d, mode="nscf line")
        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))
    def test_bandgap_check_Vasp(self):
        """Bandgap check should defuse children when the gap exceeds max_gap."""
        structure = self.struct_si
        my_wf = get_wf(structure, "bandstructure.yaml",
                       vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)
        my_wf = add_namefile(my_wf)
        my_wf = add_bandgap_check(my_wf, check_bandgap_params={"max_gap": 0.1}, fw_name_constraint="structure optimization")
        self.lp.add_wf(my_wf)
        rapidfire(self.lp, fworker=_fworker)
        # Si gap (~0.65 eV) > 0.1 eV, so downstream static FW must be DEFUSED.
        self.assertEqual(self.lp.fireworks.find_one(
            {"name": "Si-structure optimization"}, {"state": 1})["state"],
            "COMPLETED")
        self.assertEqual(self.lp.fireworks.find_one(
            {"name": "Si-static"}, {"state": 1})["state"],
            "DEFUSED")
    def test_trackers(self):
        """Trackers should capture tail output of the monitored run files."""
        structure = self.struct_si
        my_wf = get_wf(structure, "optimize_only.yaml",
                       vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)
        my_wf = add_trackers(my_wf)
        self.lp.add_wf(my_wf)
        rapidfire(self.lp, fworker=_fworker)
        for x in self.lp.get_tracker_data(1):
            for t in x["trackers"]:
                self.assertGreater(len(t.content.split("\n")), 20)
        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))
    def test_chgcar_db_read_write(self):
        """Round-trip CHGCAR/AECCAR data through GridFS via VaspCalcDb."""
        # Assimilate reference static outputs including charge densities.
        drone = VaspDrone(parse_chgcar=True, parse_aeccar=True)
        print(ref_dirs_si['static'])
        doc = drone.assimilate(ref_dirs_si['static']+'/outputs')
        cc = doc['calcs_reversed'][0]['chgcar']
        self.assertAlmostEqual(cc.data['total'].sum()/cc.ngridpts, 8.0, 4)
        cc = doc['calcs_reversed'][0]['aeccar0']
        self.assertAlmostEqual(cc.data['total'].sum()/cc.ngridpts, 23.253588293583313, 4)
        cc = doc['calcs_reversed'][0]['aeccar2']
        self.assertAlmostEqual(cc.data['total'].sum()/cc.ngridpts, 8.01314480789829, 4)
        mmdb = VaspCalcDb.from_db_file(os.path.join(db_dir, "db.json"))
        t_id = mmdb.insert_task(doc, use_gridfs=True)
        # insert_task pops the bulky grids from the in-memory document.
        self.assertRaises(KeyError, lambda: doc['calcs_reversed'][0]['chgcar'])
        self.assertRaises(KeyError, lambda: doc['calcs_reversed'][0]['aeccar0'])
        self.assertRaises(KeyError, lambda: doc['calcs_reversed'][0]['aeccar2'])
        # Read the grids back from GridFS and re-check the integrated charge.
        cc = mmdb.get_chgcar(task_id=t_id)
        self.assertAlmostEqual(cc.data['total'].sum()/cc.ngridpts, 8.0, 4)
        dcc = mmdb.get_aeccar(task_id=t_id)
        self.assertAlmostEqual(dcc['aeccar0'].data['total'].sum()/cc.ngridpts, 23.253588293583313, 4)
        self.assertAlmostEqual(dcc['aeccar2'].data['total'].sum()/cc.ngridpts, 8.01314480789829, 4)
        # retrieve_task should reconstruct the document with the grids inline.
        ret_task = mmdb.retrieve_task(t_id)
        ret_chgcar = ret_task['calcs_reversed'][0]['chgcar']
        ret_aeccar0 = ret_task['calcs_reversed'][0]['aeccar0']
        ret_aeccar2 = ret_task['calcs_reversed'][0]['aeccar2']
        ret_aeccar = ret_aeccar0 + ret_aeccar2
        self.assertAlmostEqual(ret_chgcar.data['total'].sum()/ret_chgcar.ngridpts, 8.0, 4)
        self.assertAlmostEqual(ret_aeccar.data['total'].sum()/ret_aeccar.ngridpts, 31.2667331015, 4)
    def test_chgcar_db_read(self):
        """Static workflow with parse_chgcar/parse_aeccar must store fs_ids in the task doc."""
        structure = self.struct_si
        my_wf = get_wf(structure, "static_only.yaml", vis=MPStaticSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)
        # Turn on charge-density parsing in the VaspToDb task.
        my_wf.fws[0].tasks[-1]["parse_chgcar"] = True
        my_wf.fws[0].tasks[-1]["parse_aeccar"] = True
        self.lp.add_wf(my_wf)
        rapidfire(self.lp, fworker=_fworker)
        d = self.get_task_collection().find_one()
        self._check_run(d, mode="static")
        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))
        chgcar_fs_id = d["calcs_reversed"][0]["chgcar_fs_id"]
        accar0_fs_id = d["calcs_reversed"][0]["aeccar0_fs_id"]
        accar2_fs_id = d["calcs_reversed"][0]["aeccar2_fs_id"]
        self.assertTrue(bool(chgcar_fs_id))
        self.assertTrue(bool(accar0_fs_id))
        self.assertTrue(bool(accar2_fs_id))
class TestScanOptimizeWorkflow(AtomateTest):
    """Integration tests for the multi-stage SCAN structure-optimization workflow."""
    def setUp(self):
        super(TestScanOptimizeWorkflow, self).setUp()
    def _run_scan_relax(self, wf, dir_name):
        """Run *wf* against the reference outputs in *dir_name* (fake VASP unless VASP_CMD)."""
        if not VASP_CMD:
            wf = use_fake_vasp(wf,
                               {"SCAN structure optimization": os.path.join(
                                   reference_dir, dir_name)},
                               check_kpoints=False,
                               check_potcar=False,
                               clear_inputs=False,
                               check_incar=False
                               )
        else:
            wf = use_custodian(wf)
        wf = use_potcar_spec(wf)
        self.lp.add_wf(wf)
        rapidfire(self.lp, fworker=_fworker)
    def _get_launch_dir(self):
        """Return the launch directory of the most recent SCAN optimization task."""
        d = list(self.get_task_collection().find({"task_label": "SCAN structure optimization"}))[-1]
        # dir_name is "<host>:<path>"; keep only the path part.
        launch_dir = d["dir_name"].split(":")[1]
        return launch_dir
    def test_SCAN_no_bandgap(self):
        """Metallic Al: relax3 should keep the default (dense) KSPACING of 0.22."""
        structure = Structure.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_Al/inputs", "POSCAR"))
        my_wf = get_wf(structure, "SCAN_optimization.yaml", vis=MPScanRelaxSet(structure),
                       common_params={"vasp_cmd": VASP_CMD})
        self._run_scan_relax(my_wf, "SCAN_structure_optimization_Al")
        # INCAR.orig should match the reference input (MAGMOM ordering may differ).
        incar_orig = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.orig.gz"))
        ref_incar = Incar.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_Al/inputs", "INCAR.orig"))
        for p in incar_orig.keys():
            if p == "MAGMOM":
                pass
            else:
                self.assertEqual(incar_orig[p], ref_incar[p])
        # relax1: GGA pre-relaxation (METAGGA disabled, looser EDIFFG, no WAVECAR).
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax1.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["LWAVE"], False)
        # relax2: GGA static restart that writes the WAVECAR for the SCAN stage.
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax2.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["LWAVE"], True)
        self.assertEqual(incar["NSW"], 0)
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["ICHARG"], 1)
        self.assertEqual(incar["ISTART"], 0)
        # relax3: the actual SCAN relaxation, restarted from relax2.
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax3.gz"))
        for p in incar.keys():
            if p == "KSPACING":
                self.assertEqual(incar[p], 0.22)
            elif p == "ICHARG" or p == "ISTART":
                self.assertEqual(incar[p], 1)
            else:
                self.assertEqual(incar_orig[p], incar[p])
    def test_SCAN_small_bandgap(self):
        """LiH (small gap): relax3 gets a bandgap-scaled KSPACING and tetrahedron smearing."""
        structure = Structure.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiH/inputs", "POSCAR"))
        my_wf = get_wf(structure, "SCAN_optimization.yaml", vis=MPScanRelaxSet(structure),
                       common_params={"vasp_cmd": VASP_CMD})
        self._run_scan_relax(my_wf, "SCAN_structure_optimization_LiH")
        incar_orig = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.orig.gz"))
        ref_incar = Incar.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiH/inputs", "INCAR.orig"))
        for p in incar_orig.keys():
            if p == "MAGMOM":
                pass
            else:
                self.assertEqual(incar_orig[p], ref_incar[p])
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax1.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["LWAVE"], False)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax2.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["LWAVE"], True)
        self.assertEqual(incar["NSW"], 0)
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["ICHARG"], 1)
        self.assertEqual(incar["ISTART"], 0)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax3.gz"))
        for p in incar.keys():
            if p == "KSPACING":
                self.assertAlmostEqual(incar[p], 0.351275, 4)
            elif p == "ICHARG" or p == "ISTART":
                self.assertEqual(incar[p], 1)
            elif p == "ISMEAR":
                self.assertEqual(incar[p], -5)
            elif p == "SIGMA":
                self.assertEqual(incar[p], 0.05)
            else:
                self.assertEqual(incar_orig[p], incar[p])
    def test_SCAN_large_bandgap(self):
        """LiF (wide gap): KSPACING should be capped at the maximum of 0.44."""
        structure = Structure.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiF/inputs", "POSCAR"))
        my_wf = get_wf(structure, "SCAN_optimization.yaml", vis=MPScanRelaxSet(structure),
                       common_params={"vasp_cmd": VASP_CMD})
        self._run_scan_relax(my_wf, "SCAN_structure_optimization_LiF")
        incar_orig = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.orig.gz"))
        ref_incar = Incar.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiF/inputs", "INCAR.orig"))
        for p in incar_orig.keys():
            if p == "MAGMOM":
                pass
            else:
                self.assertEqual(incar_orig[p], ref_incar[p])
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax1.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["LWAVE"], False)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax2.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["LWAVE"], True)
        self.assertEqual(incar["NSW"], 0)
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["ICHARG"], 1)
        self.assertEqual(incar["ISTART"], 0)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax3.gz"))
        for p in incar.keys():
            if p == "KSPACING":
                self.assertEqual(incar[p], 0.44)
            elif p == "ICHARG" or p == "ISTART":
                self.assertEqual(incar[p], 1)
            elif p == "ISMEAR":
                self.assertEqual(incar[p], -5)
            elif p == "SIGMA":
                self.assertEqual(incar[p], 0.05)
            else:
                self.assertEqual(incar_orig[p], incar[p])
    def test_SCAN_with_vdw(self):
        """rVV10 vdW variant: vdW flags must be absent from the GGA pre-relax stages."""
        structure = Structure.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiF_vdw/inputs", "POSCAR"))
        my_wf = get_wf(structure, "SCAN_optimization.yaml", vis=MPScanRelaxSet(structure, vdw="rvv10"),
                       common_params={"vasp_cmd": VASP_CMD, "vdw_kernel_dir": os.path.join(reference_dir, "SCAN_structure_optimization_LiF_vdw/inputs")})
        self._run_scan_relax(my_wf, "SCAN_structure_optimization_LiF_vdw")
        incar_orig = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.orig.gz"))
        ref_incar = Incar.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiF_vdw/inputs", "INCAR.orig"))
        for p in incar_orig.keys():
            if p == "MAGMOM":
                pass
            else:
                self.assertEqual(incar_orig[p], ref_incar[p])
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax1.gz"))
        # vdW parameters only apply to the SCAN stage, not the GGA pre-relax.
        self.assertIsNone(incar.get("LUSE_VDW", None))
        self.assertIsNone(incar.get("BPARAM", None))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["LWAVE"], False)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax2.gz"))
        self.assertEqual(incar["METAGGA"], "None")
        self.assertEqual(incar["LWAVE"], True)
        self.assertEqual(incar["NSW"], 0)
        self.assertEqual(incar["EDIFFG"], -0.05)
        self.assertEqual(incar["ICHARG"], 1)
        self.assertEqual(incar["ISTART"], 0)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax3.gz"))
        for p in incar.keys():
            if p == "KSPACING":
                self.assertEqual(incar[p], 0.44)
            elif p == "ICHARG" or p == "ISTART":
                self.assertEqual(incar[p], 1)
            elif p == "ISMEAR":
                self.assertEqual(incar[p], -5)
            elif p == "SIGMA":
                self.assertEqual(incar[p], 0.05)
            elif p == "MAGMOM":
                pass
            else:
                self.assertEqual(incar_orig[p], incar[p])
    def test_SCAN_incar_override(self):
        """user_incar_settings must propagate through every stage of the workflow."""
        structure = Structure.from_file(os.path.join(reference_dir, "SCAN_structure_optimization_LiH/inputs", "POSCAR"))
        my_wf = get_wf(structure, "SCAN_optimization.yaml",
                       vis=MPScanRelaxSet(structure,
                                          user_potcar_functional="PBE_52",
                                          user_incar_settings={"NSW": 10, "SYMPREC": 1e-6, "SIGMA": 0.1}
                                          ),
                       common_params={"vasp_cmd": VASP_CMD})
        self._run_scan_relax(my_wf, "SCAN_structure_optimization_LiH")
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.orig.gz"))
        self.assertEqual(incar["NSW"], 10)
        self.assertEqual(incar["SYMPREC"], 1e-6)
        self.assertEqual(incar["SIGMA"], 0.1)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax1.gz"))
        self.assertEqual(incar["NSW"], 10)
        self.assertEqual(incar["SYMPREC"], 1e-6)
        self.assertEqual(incar["SIGMA"], 0.1)
        # relax2 is a static restart, so NSW is forced to 0 regardless of override.
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax2.gz"))
        self.assertEqual(incar["NSW"], 0)
        self.assertEqual(incar["SYMPREC"], 1e-6)
        self.assertEqual(incar["SIGMA"], 0.1)
        incar = Incar.from_file(os.path.join(self._get_launch_dir(), "INCAR.relax3.gz"))
        self.assertEqual(incar["NSW"], 10)
        self.assertEqual(incar["SYMPREC"], 1e-6)
        self.assertEqual(incar["SIGMA"], 0.1)
if __name__ == "__main__":
unittest.main()
| true | true |
1c2eebecf3748d6fc1074e03462bcc4d87ea5db7 | 6,599 | py | Python | crawling/crawling_py/jungangilbo.py | ossteam8/oss8_proj | 341ba45ed47d633665f9a8337cd8df7227cb16c2 | [
"MIT"
] | 3 | 2021-06-08T08:38:13.000Z | 2021-06-08T08:38:58.000Z | crawling/crawling_py/jungangilbo.py | ossteam8/K-news_keyword | 341ba45ed47d633665f9a8337cd8df7227cb16c2 | [
"MIT"
] | 15 | 2021-06-04T16:33:34.000Z | 2021-06-06T10:05:17.000Z | crawling/crawling_py/jungangilbo.py | ossteam8/oss8_proj | 341ba45ed47d633665f9a8337cd8df7227cb16c2 | [
"MIT"
] | null | null | null | import re
from goose3 import Goose
from goose3.text import StopWordsKorean
from bs4 import BeautifulSoup
from datetime import datetime
from dateutil.relativedelta import relativedelta
from urllib.request import Request, urlopen
class Jungang_crawling:
    """Crawler for Joongang Ilbo (news.joins.com) category news pages.

    Typical usage: call ``category_crawling(n)`` (1=politics, 2=economy,
    anything else=society) to collect article URLs for a single day, then
    ``get_news()`` to fetch each article's title and body text.
    """
    def __init__(self):
        # Category display names; indexed by (choose_category - 1) in get_news().
        self.categories = ['정치', '경제', '사회']
        self.article_url = ""    # current listing-page URL being paged through
        self.urls = []           # collected article URLs
        self.choose_category = 0
        self.articles = []       # reserved for per-article info dicts
        self.check_valid = True  # False when a search yields no results
    def get_date(self, now):
        """Convert a datetime (or 'YYYY-MM-DD ...' string) to 'YYYYMMDD'."""
        now = str(now)
        year = now[:4]
        month = now[5:7]
        day = now[8:10]
        return year + month + day
    def crawling(self):
        """Page through the current category listing, collecting article URLs.

        Only articles dated exactly one day before now are kept: newer ones
        are skipped, and the walk stops at the first older one (the listing
        is assumed newest-first).
        """
        News_end = False
        now = datetime.now()
        target_date = now - relativedelta(days=1)  # day whose articles we want
        target_date = self.get_date(target_date)
        while not News_end:
            try:
                req = Request(self.article_url, headers={'User-Agent': 'Mozilla/5.0'})
                with urlopen(req) as response:
                    html = response.read()
                soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
                try:
                    article_list = soup.find("div", {"class": "list_basic"})
                    article_list = article_list.find("ul")
                    article_list = article_list.find_all("li")
                    for article in article_list:
                        article_time = article.find("span", {"class": "byline"}).string
                        article_time = self.get_date(article_time)
                        if int(article_time) > int(target_date):
                            continue   # newer than the target day; keep scanning
                        if int(article_time) < int(target_date):
                            return     # listing is newest-first: done collecting
                        link = article.find("a")
                        self.urls.append(link['href'])
                except Exception:
                    print("error1")
                    return
                # Move to the next listing page, if any.
                next_url = ""
                try:
                    pages = soup.find("div", {"class": "paging_comm"})
                    current_page = pages.find("em")
                    current_page = current_page.string  # current page number
                    next_button = pages.find_all("span", {"class": "icon"})
                    next_button = next_button[1]
                    pages = pages.find_all("a", {"class": "link_page"})
                    for page in pages:
                        if int(current_page) <= int(page.string):
                            next_url = page['href']
                            break
                    if next_url != "":
                        pass
                    elif next_button.string == "다음페이지":
                        next_url = next_button.parent['href']
                    elif next_button.string == "다음페이지 없음":
                        News_end = True
                    if not News_end:
                        self.article_url = "https://news.joins.com" + next_url
                except Exception:
                    print('페이징 실패')
                    return
            except Exception:
                print('사이트 접속 오류')
                return
    def category_crawling(self, choose_category):
        """Select a category (1=politics, 2=economy, other=society) and crawl it."""
        if choose_category == 1:
            self.article_url = "https://news.joins.com/politics?cloc=joongang-home-gnb2"
            self.choose_category = 1
        elif choose_category == 2:
            self.article_url = "https://news.joins.com/money?cloc=joongang-home-gnb3"
            self.choose_category = 2
        else:
            self.article_url = "https://news.joins.com/society?cloc=joongang-home-gnb4"
            self.choose_category = 3
        self.crawling()
    def read_article_contents(self, url):
        """Fetch *url* and return the article body text ("" on any failure)."""
        try:
            req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
            with urlopen(req) as response:
                html = response.read()
            soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
            article_contents = soup.find("div", {"id": "article_body"})
            text = ""
            try:
                text = text + ' ' + article_contents.get_text(' ', strip=True)
            except Exception:
                print("error", url)
            return text
        except Exception:
            return ""
    def get_news(self):
        """Extract title/body for every collected URL.

        Returns a list of dicts with keys: title, contents, url, category.
        """
        print('기사 추출 시작')
        articles = []
        for url in self.urls:
            article_info = {"title": "", "contents": "", "url": "", "category": ""}
            category = self.categories[self.choose_category - 1]
            try:
                g = Goose({'stopwords_class': StopWordsKorean})
                article = g.extract(url=url)
                title = article.title
            except Exception:
                continue
            if title == "":
                continue
            contents = self.read_article_contents(url)
            if contents == "":
                continue
            # Trim everything from the first reporter e-mail address onward.
            # The dot in the domain is escaped here (the original pattern's bare
            # '.' matched any character); the original loop also only ever
            # truncated at the first match, because later match offsets fell
            # beyond the already-shortened string.
            email_match = re.search(r'[a-zA-Z0-9_-]+@[a-z]+\.[a-z]+', contents)
            if email_match:
                contents = contents[:email_match.start()]
            article_info["category"] = category
            article_info["contents"] = contents
            article_info["title"] = title
            article_info["url"] = url
            articles.append(article_info)
        return articles
if __name__ == "__main__":
# 단순 카테고리만 할시에는 jungang_crawling(1)이것으로 초기화를하고,
# category_crawling( 카테고리 번호 )에서 카테고리 번호를 넣어준다(외부에서 받아올 예정)
# 그리고 그 번호를 get_news에다가도 넣어준다
A = Jungang_crawling()
A.category_crawling(2)
ll = A.get_news()
with open("aaaaaaaaa.txt","w",encoding='utf-8') as f:
for i in ll:
f.write(i['contents'])
f.write('\n\n\n')
# print(ll)
# A = jungang_crawling(2)
# 반대로 단순 검색시에는 2번으로 초기화를 하고
# 안에는 검색어를 넣는다
# 얜 검색결과가 없을떄를 대비해 check_valid를 넣어서 확인을 한다
# A.searching_category("이명박")
# for i in A.urls:
# print(i)
# if(A.check_valid):
# ll = A.get_news()
# else:
# print("검색결과가 없습니다")
# print(len(ll))
# 마지막으로 검색과 카테고리 검색 두개다 할때는,
# 메소드는 검색어랑 같은 메소드를 쓴다
# 다만 jungang_crawling(3)이 번호가 3이고, 입력받은 카테고리 번호를
# get_news에다가 매개변수로 넣어주면된다
| 36.458564 | 99 | 0.499167 | import re
from goose3 import Goose
from goose3.text import StopWordsKorean
from bs4 import BeautifulSoup
from datetime import datetime
from dateutil.relativedelta import relativedelta
from urllib.request import Request, urlopen
class Jungang_crawling:
    """Crawler for Joongang Ilbo (news.joins.com) category news pages."""
    def __init__(self):
        # Category display names; indexed by (choose_category - 1) in get_news().
        self.categories = ['정치','경제','사회']
        self.article_url = ""
        self.urls = []
        self.choose_category=0
        self.articles = []
        self.check_valid = True
    def get_date(self, now):
        """Convert a datetime (or 'YYYY-MM-DD ...' string) to 'YYYYMMDD'."""
        now = str(now)
        year = now[:4]
        month = now[5:7]
        day = now[8:10]
        return year+month+day
    def crawling(self):
        """Page through the current category listing, collecting article URLs
        dated exactly one day before now (the listing is newest-first)."""
        News_end = False
        now = datetime.now()
        # NOTE(review): named "before_one_week" but uses days=1 (one day back).
        before_one_week = now-relativedelta(days=1)
        before_one_week = self.get_date(before_one_week)
        while(not News_end):
            try:
                req = Request(self.article_url,headers={'User-Agent': 'Mozilla/5.0'})
                with urlopen(req) as response:
                    html = response.read()
                soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
                try:
                    article_list = soup.find("div",{"class":"list_basic"})
                    article_list = article_list.find("ul")
                    article_list = article_list.find_all("li")
                    for article in article_list:
                        article_time = article.find("span",{"class":"byline"}).string
                        article_time = self.get_date(article_time)
                        if(int(article_time)>int(before_one_week)):
                            continue  # newer than the target day; keep scanning
                        if(int(article_time)<int(before_one_week)):
                            return    # older than the target day; done
                        link = article.find("a")
                        self.urls.append(link['href'])
                except:
                    print("error1")
                    return
                # Move to the next listing page, if any.
                next_url = ""
                try:
                    pages = soup.find("div",{"class":"paging_comm"})
                    current_page = pages.find("em")
                    current_page = current_page.string
                    next_button = pages.find_all("span",{"class":"icon"})
                    next_button = next_button[1]
                    pages = pages.find_all("a",{"class":"link_page"})
                    for page in pages:
                        if(int(current_page)<=int(page.string)):
                            next_url = page['href']
                            break
                    if(next_url!=""):
                        pass
                    elif next_button.string=="다음페이지":  # "next page"
                        next_url = next_button.parent['href']
                    elif next_button.string == "다음페이지 없음":  # "no next page"
                        News_end = True
                    if(not News_end):
                        self.article_url = "https://news.joins.com" + next_url
                except:
                    print('페이징 실패')
                    return
            except:
                print('사이트 접속 오류')
                return
    def category_crawling(self, choose_category):
        """Select a category (1=politics, 2=economy, other=society) and crawl it."""
        if choose_category==1:
            self.article_url = "https://news.joins.com/politics?cloc=joongang-home-gnb2"
            self.choose_category = 1
        elif choose_category==2:
            self.article_url="https://news.joins.com/money?cloc=joongang-home-gnb3"
            self.choose_category = 2
        else:
            self.article_url = "https://news.joins.com/society?cloc=joongang-home-gnb4"
            self.choose_category = 3
        self.crawling()
    def read_article_contents(self,url):
        """Fetch *url* and return the article body text ("" on any failure)."""
        try:
            req = Request(url,headers={'User-Agent': 'Mozilla/5.0'})
            with urlopen(req) as response:
                html = response.read()
            soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
            article_contents = soup.find("div",{"id":"article_body"})
            text = ""
            try:
                text = text + ' '+ article_contents.get_text(' ', strip=True)
            except:
                print("error" , url)
            return text
        except:
            return ""
    def get_news(self):
        """Extract title/body for every collected URL; returns a list of dicts
        with keys: title, contents, url, category."""
        print('기사 추출 시작')
        articles = []
        for url in self.urls:
            article_info = {"title":"","contents":"","url":"","category":""}
            checkc = True  # NOTE(review): unused variable
            category = self.categories[self.choose_category-1]
            try:
                g = Goose({'stopwords_class':StopWordsKorean})
                article = g.extract(url=url)
                title = article.title
            except:
                continue
            if title=="":
                continue
            contents = self.read_article_contents(url)
            if(contents==""):
                continue
            # Trim from the first reporter e-mail onward.  NOTE(review): the
            # '.' before the TLD is unescaped (matches any char), and later
            # iterations use stale offsets so only the first match truncates.
            find_email = re.compile('[a-zA-Z0-9_-]+@[a-z]+.[a-z]+').finditer(contents)
            for email in find_email:
                contents = contents[:email.start()]
            article_info["category"] = category
            article_info["contents"] = contents
            article_info["title"] = title
            article_info["url"] = url
            articles.append(article_info)
        return articles
if __name__ == "__main__":
A = Jungang_crawling()
A.category_crawling(2)
ll = A.get_news()
with open("aaaaaaaaa.txt","w",encoding='utf-8') as f:
for i in ll:
f.write(i['contents'])
f.write('\n\n\n')
| true | true |
1c2eec932eef4602e4643852719061196d05fb90 | 3,993 | py | Python | meshgraphnets/cloth_model.py | juliandwain/deepmind-research | eca5fe66ad770027f4dd758d3a659cd8261bace5 | [
"Apache-2.0"
] | null | null | null | meshgraphnets/cloth_model.py | juliandwain/deepmind-research | eca5fe66ad770027f4dd758d3a659cd8261bace5 | [
"Apache-2.0"
] | null | null | null | meshgraphnets/cloth_model.py | juliandwain/deepmind-research | eca5fe66ad770027f4dd758d3a659cd8261bace5 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model for FlagSimple."""
import sonnet as snt
import tensorflow.compat.v1 as tf
import common
import core_model
import normalization
class Model(snt.AbstractModule):
  """Model for static cloth simulation."""
  def __init__(self, learned_model, name='Model'):
    """Wraps *learned_model* (a graph network) with input/output normalizers."""
    super(Model, self).__init__(name=name)
    with self._enter_variable_scope():
      self._learned_model = learned_model
      # Normalizer for the 3-D acceleration targets / network outputs.
      self._output_normalizer = normalization.Normalizer(
          size=3, name='output_normalizer')
      # Node features: 3-D velocity concatenated with a one-hot node type.
      self._node_normalizer = normalization.Normalizer(
          size=3+common.NodeType.SIZE, name='node_normalizer')
      self._edge_normalizer = normalization.Normalizer(
          size=7, name='edge_normalizer') # 2D coord + 3D coord + 2*length = 7
  def _build_graph(self, inputs, is_training):
    """Builds input graph."""
    # construct graph nodes: finite-difference velocity + one-hot node type
    # (assumes per-node position tensors of shape [num_nodes, 3] -- TODO confirm)
    velocity = inputs['world_pos'] - inputs['prev|world_pos']
    node_type = tf.one_hot(inputs['node_type'][:, 0], common.NodeType.SIZE)
    node_features = tf.concat([velocity, node_type], axis=-1)
    # construct graph edges from the triangle mesh cells
    senders, receivers = common.triangles_to_edges(inputs['cells'])
    relative_world_pos = (tf.gather(inputs['world_pos'], senders) -
                          tf.gather(inputs['world_pos'], receivers))
    relative_mesh_pos = (tf.gather(inputs['mesh_pos'], senders) -
                         tf.gather(inputs['mesh_pos'], receivers))
    # Edge features: relative world/mesh displacements plus their norms.
    edge_features = tf.concat([
        relative_world_pos,
        tf.norm(relative_world_pos, axis=-1, keepdims=True),
        relative_mesh_pos,
        tf.norm(relative_mesh_pos, axis=-1, keepdims=True)], axis=-1)
    mesh_edges = core_model.EdgeSet(
        name='mesh_edges',
        features=self._edge_normalizer(edge_features, is_training),
        receivers=receivers,
        senders=senders)
    return core_model.MultiGraph(
        node_features=self._node_normalizer(node_features, is_training),
        edge_sets=[mesh_edges])
  def _build(self, inputs):
    """Inference step: predict normalized acceleration, integrate to positions."""
    graph = self._build_graph(inputs, is_training=False)
    per_node_network_output = self._learned_model(graph)
    return self._update(inputs, per_node_network_output)
  @snt.reuse_variables
  def loss(self, inputs):
    """L2 loss on position."""
    graph = self._build_graph(inputs, is_training=True)
    network_output = self._learned_model(graph)
    # build target acceleration: second-order finite difference of positions
    cur_position = inputs['world_pos']
    prev_position = inputs['prev|world_pos']
    target_position = inputs['target|world_pos']
    target_acceleration = target_position - 2*cur_position + prev_position
    target_normalized = self._output_normalizer(target_acceleration)
    # build loss: only NORMAL (free) nodes contribute
    loss_mask = tf.equal(inputs['node_type'][:, 0], common.NodeType.NORMAL)
    error = tf.reduce_sum((target_normalized - network_output)**2, axis=1)
    loss = tf.reduce_mean(error[loss_mask])
    return loss
  def _update(self, inputs, per_node_network_output):
    """Integrate model outputs."""
    acceleration = self._output_normalizer.inverse(per_node_network_output)
    # integrate forward with a second-order (Verlet-style) position update
    cur_position = inputs['world_pos']
    prev_position = inputs['prev|world_pos']
    position = 2*cur_position + acceleration - prev_position
    return position
| 39.93 | 79 | 0.699725 |
import sonnet as snt
import tensorflow.compat.v1 as tf
import common
import core_model
import normalization
class Model(snt.AbstractModule):
  """GNN model for cloth simulation: predicts per-node acceleration.

  This is the comment-stripped duplicate of the model above; documentation
  restored for readability.
  """

  def __init__(self, learned_model, name='Model'):
    super(Model, self).__init__(name=name)
    with self._enter_variable_scope():
      self._learned_model = learned_model
      # Online normalizers keep network inputs/targets near unit scale.
      # output: 3D acceleration
      self._output_normalizer = normalization.Normalizer(
          size=3, name='output_normalizer')
      # node features: 3D velocity + one-hot node type
      self._node_normalizer = normalization.Normalizer(
          size=3+common.NodeType.SIZE, name='node_normalizer')
      # edge features: world offset (3) + its norm (1) + mesh offset + its
      # norm; total size 7 (so mesh positions are presumably 2D — confirm)
      self._edge_normalizer = normalization.Normalizer(
          size=7, name='edge_normalizer')

  def _build_graph(self, inputs, is_training):
    """Builds the input MultiGraph from the raw feature dict."""
    # node features: finite-difference velocity plus one-hot node type
    velocity = inputs['world_pos'] - inputs['prev|world_pos']
    node_type = tf.one_hot(inputs['node_type'][:, 0], common.NodeType.SIZE)
    node_features = tf.concat([velocity, node_type], axis=-1)
    # edges are derived from the mesh triangles
    senders, receivers = common.triangles_to_edges(inputs['cells'])
    relative_world_pos = (tf.gather(inputs['world_pos'], senders) -
                          tf.gather(inputs['world_pos'], receivers))
    relative_mesh_pos = (tf.gather(inputs['mesh_pos'], senders) -
                         tf.gather(inputs['mesh_pos'], receivers))
    edge_features = tf.concat([
        relative_world_pos,
        tf.norm(relative_world_pos, axis=-1, keepdims=True),
        relative_mesh_pos,
        tf.norm(relative_mesh_pos, axis=-1, keepdims=True)], axis=-1)
    mesh_edges = core_model.EdgeSet(
        name='mesh_edges',
        features=self._edge_normalizer(edge_features, is_training),
        receivers=receivers,
        senders=senders)
    return core_model.MultiGraph(
        node_features=self._node_normalizer(node_features, is_training),
        edge_sets=[mesh_edges])

  def _build(self, inputs):
    """Forward pass: predict the next world positions."""
    graph = self._build_graph(inputs, is_training=False)
    per_node_network_output = self._learned_model(graph)
    return self._update(inputs, per_node_network_output)

  @snt.reuse_variables
  def loss(self, inputs):
    """L2 loss on position (via finite-difference acceleration target)."""
    graph = self._build_graph(inputs, is_training=True)
    network_output = self._learned_model(graph)
    # target acceleration: a_t = x_{t+1} - 2*x_t + x_{t-1}
    cur_position = inputs['world_pos']
    prev_position = inputs['prev|world_pos']
    target_position = inputs['target|world_pos']
    target_acceleration = target_position - 2*cur_position + prev_position
    target_normalized = self._output_normalizer(target_acceleration)
    # only NORMAL nodes contribute to the loss
    loss_mask = tf.equal(inputs['node_type'][:, 0], common.NodeType.NORMAL)
    error = tf.reduce_sum((target_normalized - network_output)**2, axis=1)
    loss = tf.reduce_mean(error[loss_mask])
    return loss

  def _update(self, inputs, per_node_network_output):
    """Integrate predicted acceleration: x_{t+1} = 2*x_t + a - x_{t-1}."""
    acceleration = self._output_normalizer.inverse(per_node_network_output)
    cur_position = inputs['world_pos']
    prev_position = inputs['prev|world_pos']
    position = 2*cur_position + acceleration - prev_position
    return position
| true | true |
1c2eecb12f118abb6490f29e8567d6fc8180197e | 1,622 | py | Python | 36. tkinter basics 1 - Intro.py | JatinR05/Python-3-basics-series | e4b3d8056e2074602c9ed0cd201676484dd0d179 | [
"MIT"
] | 41 | 2015-05-12T12:49:35.000Z | 2021-07-13T11:07:09.000Z | 36. tkinter basics 1 - Intro.py | JatinR05/Python-3-basics-series | e4b3d8056e2074602c9ed0cd201676484dd0d179 | [
"MIT"
] | null | null | null | 36. tkinter basics 1 - Intro.py | JatinR05/Python-3-basics-series | e4b3d8056e2074602c9ed0cd201676484dd0d179 | [
"MIT"
] | 37 | 2016-10-13T04:02:09.000Z | 2021-12-16T18:28:27.000Z | '''
Hello and welcome to a basic intro to TKinter, which is the Python
binding to TK, which is a toolkit that works around the Tcl language.
The tkinter module's purpose is to generate GUIs, like windows. Python is not very
popularly used for this purpose, but it is more than capable of being used
'''
# Simple enough, just import everything from tkinter.
from tkinter import *
# Here, we are creating our class, Window, and inheriting from the Frame
# class. Frame is a class from the tkinter module. (see Lib/tkinter/__init__)
class Window(Frame):
    """Main application window; inherits all Frame behavior from tkinter."""

    # Define settings upon initialization. Here you can specify
    def __init__(self, master=None):
        # parameters that you want to send through the Frame class.

        # self, and this is the parent widget
        # if you are wondering what self is... it is the object
        # created from the class. You can actually call it anything
        # you want... people just use "self"
        Frame.__init__(self, master)

        # reference to the master widget, which is the tk window
        self.master = master

        # with that, we want to then run init_window, which doesn't yet exist
        #self.init_window()
#Creation of init_window
#def init_window(self):
# changing the title of our master widget
# self.master.title("GUI")
# root window created. Here, that would be the only window, but
# you can later have windows within windows.
root = Tk()

# Optional: set initial size and position as "widthxheight+x+y".
#///root.geometry("250x150+300+300")

# creation of an instance of our Window frame inside the root window
app = Window(root)

# mainloop: hand control to tkinter's event loop until the window closes
root.mainloop()
| 27.033333 | 80 | 0.673243 |
from tkinter import *
class Window(Frame):
    """Main application window; a thin wrapper around tkinter's Frame."""

    def __init__(self, master=None):
        # Initialize the underlying Frame with the given parent widget.
        Frame.__init__(self, master)
        # Keep a reference to the parent (the Tk root window).
        self.master = master
        #self.init_window()
#Creation of init_window
#def init_window(self):
# changing the title of our master widget
# self.master.title("GUI")
# root window created. Here, that would be the only window, but
# you can later have windows within windows.
# Create the root Tk window; it must exist before any widgets.
root = Tk()
#///root.geometry("250x150+300+300")
# creation of an instance of our Window frame inside the root window
app = Window(root)
# mainloop: run the tkinter event loop until the window is closed
root.mainloop()
| true | true |
1c2eecf1ee9a695c0785654a2597cefd3865d1b2 | 36,206 | py | Python | maraboupy/MarabouNetworkONNX.py | noyahoch/Marabou | 03eb551498287e5372d462e3c2ad4fcc3210a5fa | [
"BSD-3-Clause"
] | null | null | null | maraboupy/MarabouNetworkONNX.py | noyahoch/Marabou | 03eb551498287e5372d462e3c2ad4fcc3210a5fa | [
"BSD-3-Clause"
] | null | null | null | maraboupy/MarabouNetworkONNX.py | noyahoch/Marabou | 03eb551498287e5372d462e3c2ad4fcc3210a5fa | [
"BSD-3-Clause"
] | null | null | null | '''
/* ******************* */
/*! \file MarabouNetworkONNX.py
** \verbatim
** Top contributors (to current version):
** Kyle Julian
** This file is part of the Marabou project.
** Copyright (c) 2017-2019 by the authors listed in the file AUTHORS
** in the top-level source directory) and their institutional affiliations.
** All rights reserved. See the file COPYING in the top-level source
** directory for licensing information.\endverbatim
**
** \brief Parse ONNX network files into Marabou network representations
**
** Translates supported ONNX graph operations (Gemm, MatMul, Conv, MaxPool,
** Add, Relu, and shape ops) into Marabou variables, equations, and constraints.
**/
'''
import numpy as np
import onnx
import onnxruntime
from onnx import numpy_helper
from onnx.helper import get_attribute_value
from maraboupy import MarabouUtils
from maraboupy import MarabouNetwork
from onnx import TensorProto
class MarabouNetworkONNX(MarabouNetwork.MarabouNetwork):
def __init__(self, filename, inputNames=None, outputName=None):
"""
Constructs a MarabouNetworkONNX object from an ONNX file
Args:
filename: (string) Path to the ONNX file
inputNames: (list of strings) optional, list of node names corresponding to inputs.
outputName: (string) optional, name of node corresponding to output.
Returns:
marabouNetworkONNX: (MarabouNetworkONNX) representing network
"""
super().__init__()
self.readONNX(filename, inputNames, outputName)
def clear(self):
"""
Reset values to represent empty network
"""
super().clear()
self.madeGraphEquations = []
self.varMap = dict()
self.constantMap = dict()
self.shapeMap = dict()
self.inputNames = None
self.outputName = None
self.graph = None
    def readONNX(self, filename, inputNames, outputName):
        """
        Constructs a MarabouNetworkONNX object from an ONNX file

        Args:
            filename: (string) Path to the ONNX file
            inputNames: (list of strings) optional, list of names corresponding to inputs.
            outputName: (string) optional, name of node corresponding to output.
        Returns:
            marabouNetworkONNX: (MarabouNetworkONNX) representing network
        """
        self.filename = filename
        self.graph = onnx.load(filename).graph
        # Get default inputs/output if no names are provided
        # (initializers appear in graph.input for some exporters, so exclude them)
        if not inputNames:
            assert len(self.graph.input) >= 1
            initNames = [node.name for node in self.graph.initializer]
            inputNames = [inp.name for inp in self.graph.input if inp.name not in initNames]
        if not outputName:
            assert len(self.graph.output) == 1
            outputName = self.graph.output[0].name
        # Check that input/outputs are in the graph
        for name in inputNames:
            if not len([nde for nde in self.graph.node if name in nde.input]):
                print("Input %s not found in graph!" % name)
                raise RuntimeError
        if not len([nde for nde in self.graph.node if outputName in nde.output]):
            print("Output %s not found in graph!" % outputName)
            raise RuntimeError
        self.inputNames = inputNames
        self.outputName = outputName
        # Process the shapes and values of the graph while making Marabou equations and constraints
        self.foundnInputFlags = 0
        self.processGraph()
        # Every requested input must have been encountered during the walk
        assert self.foundnInputFlags == len(self.inputNames)
        # If the given inputNames/outputName specify only a portion of the network, then we will have
        # shape information saved not relevant to the portion of the network. Remove extra shapes.
        self.cleanShapes()
        # Other Marabou input parsers assign output variables immediately after input variables and before any
        # intermediate variables. This function reassigns variable numbering to match other parsers.
        # If this is skipped, the output variables will be the last variables defined.
        self.reassignOutputVariables()
    def processGraph(self):
        """
        Processes the ONNX graph to produce Marabou equations
        """
        # Add shapes for the graph's inputs
        for node in self.graph.input:
            # A dim_value of 0 marks a dynamic dimension; treat it as 1
            self.shapeMap[node.name] = list([dim.dim_value if dim.dim_value > 0 else 1 for dim in node.type.tensor_type.shape.dim])
            self.madeGraphEquations += [node.name]
            # If we find one of the specified inputs, create new variables
            if node.name in self.inputNames:
                self.foundnInputFlags += 1
                self.makeNewVariables(node.name)
                self.inputVars += [np.array(self.varMap[node.name])]
        # Add shapes for constants (weights/biases stored as initializers)
        for node in self.graph.initializer:
            self.shapeMap[node.name] = list(node.dims)
            self.madeGraphEquations += [node.name]
        # Recursively create remaining shapes and equations as needed,
        # walking backwards from the output node
        self.makeGraphEquations(self.outputName, True)
    def makeGraphEquations(self, nodeName, makeEquations):
        """
        Recursively populates self.shapeMap, self.varMap, and self.constantMap while creating Marabou equations
        and constraints as needed

        Arguments:
            nodeName: (str) name of node for making the shape
            makeEquations: (bool) create Marabou equations for this node
        """
        # Memoized: each node is processed exactly once
        if nodeName in self.madeGraphEquations:
            return
        if nodeName in self.inputNames:
            self.foundnInputFlags += 1
            # If an inputName is an intermediate layer of the network, we don't need to create Marabou
            # equations for its inputs. However, we still need to call makeMarabouEquations in order to
            # compute shapes. We just need to set the makeEquations flag to false
            makeEquations = False
        self.madeGraphEquations += [nodeName]
        # Recursively call makeGraphEquations, then call makeMarabouEquations
        # This ensures that shapes and values of a node's inputs have been computed first
        for inNodeName in self.getInputNodes(nodeName):
            self.makeGraphEquations(inNodeName, makeEquations)
        # Compute node's shape and create Marabou equations as needed
        self.makeMarabouEquations(nodeName, makeEquations)
        # Create new variables when we find one of the inputs
        if nodeName in self.inputNames:
            self.makeNewVariables(nodeName)
            self.inputVars += [np.array(self.varMap[nodeName])]
def makeMarabouEquations(self, nodeName, makeEquations):
"""
Compute the shape and values of a node assuming the input shapes and values have been computed already.
Arguments:
nodeName: (str) name of node for which we want to compute the output shape
makeEquations: (bool) create Marabou equations for this node
"""
node = self.getNode(nodeName)
if node.op_type == 'Identity':
self.identity(node)
elif node.op_type == 'Cast':
self.cast(node)
elif node.op_type == 'Reshape':
self.reshape(node)
elif node.op_type == "Transpose":
self.transpose(node)
elif node.op_type == "MaxPool":
self.maxpoolEquations(node, makeEquations)
elif node.op_type == "Conv":
self.convEquations(node, makeEquations)
elif node.op_type == 'Gemm':
self.gemmEquations(node, makeEquations)
elif node.op_type == 'MatMul':
self.matMulEquations(node, makeEquations)
elif node.op_type == 'Add':
self.addEquations(node, makeEquations)
elif node.op_type == 'Relu':
self.reluEquations(node, makeEquations)
else:
print("Operation %s not implemented" % (node.op_type))
raise NotImplementedError
def getNode(self, nodeName):
"""
Find the node in the graph corresponding to the given name
Arguments:
nodeName: (str) name of node to find in graph
Returns:
ONNX node named nodeName
"""
node = [node for node in self.graph.node if nodeName in node.output]
if len(node) > 0:
return node[0]
return None
    def makeNewVariables(self, nodeName):
        """
        Assuming the node's shape is known, return a set of new variables in the same shape

        Arguments:
            nodeName: (str) name of node
        Returns:
            v: (np.array) array of variable numbers
        """
        # Each node gets variables at most once
        assert nodeName not in self.varMap
        shape = self.shapeMap[nodeName]
        size = np.prod(shape)
        # Allocate one fresh Marabou variable per tensor element
        v = np.array([self.getNewVariable() for _ in range(size)]).reshape(shape)
        self.varMap[nodeName] = v
        assert all([np.equal(np.mod(i, 1), 0) for i in v.reshape(-1)]) # check if integers
        return v
    def getInputNodes(self, nodeName):
        """
        Get names of nodes that are inputs to the given node

        As a side effect, any input that is a graph initializer (a constant)
        is saved into self.constantMap rather than returned.

        Arguments:
            nodeName: (str) name of node
        Returns:
            inNodes: (list of str) names of nodes that are inputs to the given node
        """
        node = self.getNode(nodeName)
        inNodes = []
        for inp in node.input:
            if len([nde for nde in self.graph.node if inp in nde.output]):
                inNodes += [inp]
            elif len([nde for nde in self.graph.initializer if nde.name == inp]):
                # Constant input: record its value instead of returning it as a node
                self.constantMap[inp] = [numpy_helper.to_array(init) for init in self.graph.initializer if init.name == inp][0]
        return inNodes
def identity(self, node):
"""
Function representing identity
Arguments:
node: (node) representing identity operation
"""
nodeName = node.output[0]
inputName = node.input[0]
self.shapeMap[nodeName] = self.shapeMap[inputName]
if inputName in self.varMap:
self.varMap[nodeName] = self.varMap[inputName]
elif inputName in self.constantMap:
self.constantMap[nodeName] = self.constantMap[inputName]
def cast(self, node):
"""
Function representing cast
Arguments:
node: (node) representing cast operation
"""
nodeName = node.output[0]
inputName = node.input[0]
self.shapeMap[nodeName] = self.shapeMap[inputName]
# Try to find type to cast to. If not found, raise error
to = None
for attr in node.attribute:
if attr.name == "to":
to = get_attribute_value(attr)
if to is None:
print("Casting type not specified with attribute 'to'")
raise RuntimeError
# Cast input array to correct type, and throw error if type is unknown
if inputName in self.constantMap:
if to == TensorProto.FLOAT16:
self.constantMap[nodeName] = self.constantMap[inputName].astype('float16')
elif to == TensorProto.FLOAT:
self.constantMap[nodeName] = self.constantMap[inputName].astype('float32')
elif to == TensorProto.DOUBLE:
self.constantMap[nodeName] = self.constantMap[inputName].astype('double')
elif to == TensorProto.UINT8:
self.constantMap[nodeName] = self.constantMap[inputName].astype('uint8')
elif to == TensorProto.UINT16:
self.constantMap[nodeName] = self.constantMap[inputName].astype('uint16')
elif to == TensorProto.UINT32:
self.constantMap[nodeName] = self.constantMap[inputName].astype('uint32')
elif to == TensorProto.UINT64:
self.constantMap[nodeName] = self.constantMap[inputName].astype('uint64')
elif to == TensorProto.INT8:
self.constantMap[nodeName] = self.constantMap[inputName].astype('int8')
elif to == TensorProto.INT16:
self.constantMap[nodeName] = self.constantMap[inputName].astype('int16')
elif to == TensorProto.INT32:
self.constantMap[nodeName] = self.constantMap[inputName].astype('int32')
elif to == TensorProto.INT64:
self.constantMap[nodeName] = self.constantMap[inputName].astype('int64')
else:
print("Unknown type for casting: %d" % to)
print("Check here for ONNX TensorProto: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto")
raise NotImplementedError
# We shouldn't be casting variables to different types, since Marabou assumes variables have double precision
elif inputName in self.varMap:
print("Casting variables not allowed with Marabou")
raise NotImplementedError
    def reshape(self, node):
        """
        Function representing reshape

        Arguments:
            node: (node) representing reshape operation
        """
        nodeName = node.output[0]
        inputName1, inputName2 = node.input
        # Assume first input is array to be reshaped, second input is the new shape array
        reshapeVals = self.constantMap[inputName2]
        # Reshape a dummy zeros array so numpy resolves any -1 in the target shape
        self.shapeMap[nodeName] = list(np.zeros(self.shapeMap[inputName1]).reshape(reshapeVals).shape)
        if inputName1 in self.varMap:
            self.varMap[nodeName] = self.varMap[inputName1].reshape(reshapeVals)
        elif inputName1 in self.constantMap:
            self.constantMap[nodeName] = self.constantMap[inputName1].reshape(reshapeVals)
def transpose(self, node):
"""
Function representing transpose
Arguments:
node: (node) representing transpose operation
"""
nodeName = node.output[0]
inputName = node.input[0]
# Get attributes
perm = None
for attr in node.attribute:
if attr.name == "perm":
perm = get_attribute_value(attr)
if perm is None:
print("Permutation indices not specified by attibute 'perm'")
raise RuntimeError
self.shapeMap[nodeName] = [self.shapeMap[inputName][p] for p in perm]
if inputName in self.varMap:
self.varMap[nodeName] = np.transpose(self.varMap[node.input[0]], perm)
elif inputName in self.constantMap:
self.constantMap[nodeName] = np.transpose(self.constant[inputName], perm)
    def maxpoolEquations(self, node, makeEquations):
        """
        Function to generate maxpooling equations

        Arguments:
            node: (node) representing maxpool operation
            makeEquations: (bool) True if we need to create new variables and maxpool constraints
        """
        nodeName = node.output[0]

        ### Get variables and constants of inputs ###
        inVars = self.varMap[node.input[0]]
        inputShape = self.shapeMap[node.input[0]]
        kernel_shape = [1, 1]
        strides = [1, 1]
        for attr in node.attribute:
            if attr.name == 'kernel_shape':
                kernel_shape = get_attribute_value(attr)
            elif attr.name == 'strides':
                strides = get_attribute_value(attr)

        # Output spatial dims; NOTE(review): the 'pads' attribute is never
        # read, so this assumes unpadded pooling — confirm for padded models.
        outputShape = [dim for dim in inputShape]
        outputShape[2] = int(np.ceil((inputShape[2] - ((kernel_shape[0] - 1) + 1) + 1) / strides[0]))
        outputShape[3] = int(np.ceil((inputShape[3] - ((kernel_shape[1] - 1) + 1) + 1) / strides[1]))
        self.shapeMap[nodeName] = outputShape

        if makeEquations:
            outVars = self.makeNewVariables(nodeName)
            # One max constraint per output element, over its pooling window
            # (windows are clipped at the input border)
            for i in range(outputShape[2]):
                for j in range(outputShape[3]):
                    for k in range(outputShape[1]):
                        maxVars = set()
                        for di in range(strides[0]*i, strides[0]*i + kernel_shape[0]):
                            for dj in range(strides[1]*j, strides[1]*j + kernel_shape[1]):
                                if di < inputShape[2] and dj < inputShape[3]:
                                    maxVars.add(inVars[0][k][di][dj])
                        self.addMaxConstraint(maxVars, outVars[0][k][i][j])
    def convEquations(self, node, makeEquations):
        """
        Function to generate 2D convolution equations
        (docstring previously said "maxpooling" — copy-paste error)

        Arguments:
            node: (node) representing the 2D Convolution operation
            makeEquations: (bool) True if we need to create new variables and write Marabou equations
        """
        nodeName = node.output[0]

        # Extract information about convolution
        strides = [1, 1]
        pads = [0, 0, 0, 0]
        for attr in node.attribute:
            if attr.name == 'strides':
                strides = get_attribute_value(attr)
            elif attr.name == 'pads':
                pads = get_attribute_value(attr)
        pad_left, pad_bottom, pad_right, pad_top = pads

        # Get input shape information
        # First input should be variable tensor, the second a weight matrix defining filters
        # NOTE(review): a third (bias) input, if present, is never read here — confirm
        # models fold biases into a following Add node.
        shape0 = self.shapeMap[node.input[0]]
        shape1 = self.shapeMap[node.input[1]]
        input_channels = shape0[1]
        input_width = shape0[2]
        input_height = shape0[3]
        num_filters = shape1[0]
        filter_channels = shape1[1]
        filter_width = shape1[2]
        filter_height = shape1[3]

        # The number of channels should match between input variable and filters
        assert input_channels == filter_channels

        # Compute output shape
        out_width = (input_width - filter_width + pad_left + pad_right) // strides[0] + 1
        out_height = (input_height - filter_height + pad_bottom + pad_top) // strides[1] + 1
        out_channels = num_filters
        self.shapeMap[nodeName] = [shape0[0], out_channels, out_width, out_height]

        if makeEquations:
            inVars = self.varMap[node.input[0]]
            weights = self.constantMap[node.input[1]]
            outVars = self.makeNewVariables(nodeName)

            ### Generate actual equations ###
            # There is one equation for every output variable
            for i in range(out_width):
                for j in range(out_height):
                    for k in range(out_channels): # Out_channel corresponds to filter number
                        e = MarabouUtils.Equation()

                        # The equation convolves the filter with the specified input region
                        # Iterate over the filter
                        for di in range(filter_width):
                            for dj in range(filter_height):
                                for dk in range(filter_channels):
                                    w_ind = int(strides[0]*i+di - pad_left)
                                    h_ind = int(strides[1]*j+dj - pad_top)
                                    # Skip taps that fall into the (implicit zero) padding
                                    if h_ind < input_height and h_ind >= 0 and w_ind < input_width and w_ind >= 0:
                                        var = inVars[0][dk][w_ind][h_ind]
                                        c = weights[k][dk][di][dj]
                                        e.addAddend(c, var)

                        # Add output variable
                        e.addAddend(-1, outVars[0][k][i][j])
                        e.setScalar(0.0)
                        self.addEquation(e)
    def gemmEquations(self, node, makeEquations):
        """
        Function to generate equations corresponding to Gemm (general matrix multiplication)

        NOTE(review): the Gemm attributes alpha, beta, transA, transB are never
        read, so this effectively assumes alpha=beta=1 and no transposition —
        confirm for the models being parsed.

        Arguments:
            node: (node) representing the Gemm operation
            makeEquations: (bool) True if we need to create new variables and write Marabou equations
        """
        nodeName = node.output[0]

        # Get inputs: Y = X*W + B with X variables, W and B constants
        inputName1, inputName2, inputName3 = node.input
        shape1 = self.shapeMap[inputName1]
        shape2 = self.shapeMap[inputName2]
        shape3 = self.shapeMap[inputName3]
        input1 = self.varMap[inputName1]
        input2 = self.constantMap[inputName2]
        input3 = self.constantMap[inputName3]
        self.shapeMap[nodeName] = self.shapeMap[inputName3]
        if makeEquations:
            # Pad shape if needed: promote vectors to 1xN row matrices
            if len(shape1) == 1:
                shape1 = [1] + shape1
                input1 = input1.reshape(shape1)
            elif shape1[1] == 1:
                shape1 = shape1[::-1]
                input1 = input1.reshape(shape1)
            if len(shape3) == 1:
                shape3 = [1] + shape3
                input3 = input3.reshape(shape3)
            if shape1[0] != shape3[0]:
                shape3 = shape3[::-1]
                input3 = input3.reshape(shape3)

            # Assume that first input is variables, second is Matrix for MatMul, and third is bias addition
            assert shape1[-1] == shape2[0]
            assert shape1[0] == shape3[0]
            assert shape2[1] == shape3[1]

            # Create new variables
            self.shapeMap[nodeName] = self.shapeMap[node.input[2]]
            outputVariables = self.makeNewVariables(nodeName)
            outputVariables = outputVariables.reshape(shape3)
            # Generate equations: sum_k W[k][j]*X[i][k] - Y[i][j] = -B[i][j]
            for i in range(shape1[0]):
                for j in range(shape2[1]):
                    e = MarabouUtils.Equation()
                    for k in range(shape1[1]):
                        e.addAddend(input2[k][j], input1[i][k])

                    # Put output variable as the last addend last
                    e.addAddend(-1, outputVariables[i][j])
                    e.setScalar(-input3[i][j])
                    self.addEquation(e)
    def matMulEquations(self, node, makeEquations):
        """
        Function to generate equations corresponding to matrix multiplication

        NOTE(review): the indexing below (shape1[0], shape1[1]) assumes the
        first input is at most 2D — confirm for higher-rank inputs.

        Arguments:
            node: (node) representing the MatMul operation
            makeEquations: (bool) True if we need to create new variables and write Marabou equations
        """
        nodeName = node.output[0]

        # Get inputs and determine which inputs are constants and which are variables
        inputName1, inputName2 = node.input
        shape1 = self.shapeMap[inputName1]
        shape2 = self.shapeMap[inputName2]
        assert shape1[-1] == shape2[0]
        self.shapeMap[nodeName] = shape1[:-1] + shape2[1:]
        firstInputConstant = False; secondInputConstant = False
        if inputName1 in self.constantMap:
            input1 = self.constantMap[inputName1]
            firstInputConstant = True
        else:
            input1 = self.varMap[inputName1]
        if inputName2 in self.constantMap:
            input2 = self.constantMap[inputName2]
            secondInputConstant = True
        else:
            input2 = self.varMap[inputName2]

        # Assume that at least one input is a constant (We cannot represent variable products with linear equations)
        assert firstInputConstant or secondInputConstant

        # If both inputs are constant, than the output is constant as well, and we don't need new variables or equations
        if firstInputConstant and secondInputConstant:
            self.constantMap[nodeName] = np.matmul(input1,input2)
            return

        if makeEquations:
            # Create new variables
            outputVariables = self.makeNewVariables(nodeName)

            # Generate equations
            for i in range(shape1[0]):
                # Differentiate between matrix-vector multiplication and matrix-matrix multiplication
                if len(shape2)>1:
                    for j in range(shape2[1]):
                        e = MarabouUtils.Equation()
                        for k in range(shape1[1]):
                            # The constant factor is the coefficient; the variable is the addend
                            if firstInputConstant:
                                e.addAddend(input1[i][k], input2[k][j])
                            else:
                                e.addAddend(input2[k][j], input1[i][k])

                        # Put output variable as the last addend last
                        e.addAddend(-1, outputVariables[i][j])
                        e.setScalar(0.0)
                        self.addEquation(e)
                else:
                    e = MarabouUtils.Equation()
                    for k in range(shape1[1]):
                        if firstInputConstant:
                            e.addAddend(input1[i][k], input2[k])
                        else:
                            e.addAddend(input2[k], input1[i][k])

                    # Put output variable as the last addend last
                    e.addAddend(-1, outputVariables[i])
                    e.setScalar(0.0)
                    self.addEquation(e)
    def addEquations(self, node, makeEquations):
        """
        Function to generate equations corresponding to addition

        Arguments:
            node: (node) representing the Add operation
            makeEquations: (bool) True if we need to create new variables and write Marabou equations
        """
        nodeName = node.output[0]

        # Get the inputs
        inputName1, inputName2 = node.input
        shape1 = self.shapeMap[inputName1]
        shape2 = self.shapeMap[inputName2]
        self.shapeMap[nodeName] = shape1

        # Decide which inputs are variables and which are constants
        firstInputConstant = False; secondInputConstant = False
        if inputName1 in self.constantMap:
            # Broadcast the constant input1 to the same shape as input2
            input1 = np.copy(self.constantMap[inputName1]) + np.zeros(shape2)
            firstInputConstant = True
        else:
            input1 = self.varMap[inputName1]
        if inputName2 in self.constantMap:
            # Broadcast the constant input2 to the same shape as input1
            input2 = np.copy(self.constantMap[inputName2]) + np.zeros(shape1)
            secondInputConstant = True
        else:
            input2 = self.varMap[inputName2]

        # The shape after broadcasting must match
        assert input1.shape == input2.shape
        self.shapeMap[nodeName] = shape1

        # If both inputs to add are constant, then the output is constant too
        # No new variables are needed, we just need to store the output in constantMap
        if firstInputConstant and secondInputConstant:
            self.constantMap[nodeName] = input1 + input2

        # If both inputs are variables, then we need a new variable to represent
        # the sum of the two variables
        elif makeEquations and not firstInputConstant and not secondInputConstant:
            outputVariables = self.makeNewVariables(nodeName)
            input1 = input1.reshape(-1)
            input2 = input2.reshape(-1)
            outputVariables = outputVariables.reshape(-1)
            # elementwise: v1 + v2 - out = 0
            for i in range(len(input1)):
                e = MarabouUtils.Equation()
                e.addAddend(1, input1[i])
                e.addAddend(1, input2[i])
                e.addAddend(-1, outputVariables[i])
                e.setScalar(0.0)
                self.addEquation(e)

        # Otherwise, we are adding constants to variables.
        # We don't need new equations or new variables if the input variable is the output of a linear equation.
        # Instead, we can just edit the scalar term of the existing linear equation.
        # However, if the input variables are not outputs of linear equations (input variables or outputs of
        # activation functions) then we will need new equations.
        elif makeEquations:
            if firstInputConstant:
                constInput = input1
                varInput = input2
            else:
                constInput = input2
                varInput = input1
            constInput = constInput.reshape(-1)
            varInput = varInput.reshape(-1)

            # Adjust equations to incorporate the constant addition.
            # This relies on the convention that every equation's LAST addend
            # is (-1, outputVariable) — see the other *Equations methods.
            numEquationsChanged = 0
            for equ in self.equList:
                (c,var) = equ.addendList[-1]
                assert c == -1
                if var in varInput:
                    ind = np.where(var == varInput)[0][0]

                    # Adjust the equation
                    equ.setScalar(equ.scalar-constInput[ind])
                    numEquationsChanged += 1

            # If we changed one equation for every input variable, then
            # we don't need any new equations
            if numEquationsChanged == len(varInput):
                self.varMap[nodeName] = varInput
            else:
                # Otherwise, assert no equations were changed, and we need to create new equations
                assert numEquationsChanged == 0
                outputVariables = self.makeNewVariables(nodeName).reshape(-1)
                for i in range(len(outputVariables)):
                    e = MarabouUtils.Equation()
                    e.addAddend(1, varInput[i])
                    e.addAddend(-1, outputVariables[i])
                    e.setScalar(-constInput[i])
                    self.addEquation(e)
def reluEquations(self, node, makeEquations):
"""
Function to generate equations corresponding to pointwise Relu
Arguments:
node: (node) representing the Relu operation
makeEquations: (bool) True if we need to create new variables and add new Relus
"""
nodeName = node.output[0]
inputName = node.input[0]
self.shapeMap[nodeName] = self.shapeMap[inputName]
if makeEquations:
# Get variables
inputVars = self.varMap[inputName].reshape(-1)
outputVars = self.makeNewVariables(nodeName).reshape(-1)
assert len(inputVars) == len(outputVars)
# Generate equations
for i in range(len(inputVars)):
self.addRelu(inputVars[i], outputVars[i])
for f in outputVars:
self.setLowerBound(f, 0.0)
def cleanShapes(self):
"""
After constructing equations, remove shapes from self.shapeMap that are part of the graph but not
relevant for this input query. This is only cosmetic and does not impact Marabou
"""
for nodeName in [name for name in self.shapeMap]:
if nodeName not in self.varMap and nodeName not in self.constantMap:
self.shapeMap.pop(nodeName)
def reassignVariable(self, var, numInVars, outVars, newOutVars):
"""
This function computes what the given variable should be when the output variables are
moved to come after the input variables
Arguments:
var: (int) Original variable number
numInVars: (int) Number of input variables
outVars: (array of int) Original output variables
newOutVars: (array of int) New output variables
Returns:
(int) New variable assignment
"""
if var < numInVars:
return var
if var in outVars:
ind = np.where(var == outVars)[0][0]
return newOutVars[ind]
return var + len(outVars)
    def reassignOutputVariables(self):
        """
        Other input parsers assign output variables after input variables and before any intermediate variables.
        This function reassigns the numbers for the output variables and shifts all other variables up to make space.
        """
        outVars = self.varMap[self.outputName].reshape(-1)
        numInVars = np.sum([np.prod(self.shapeMap[inputName]) for inputName in self.inputNames])
        numOutVars = len(outVars)
        newOutVars = np.array(range(numInVars,numInVars+numOutVars))

        # Adjust equation variables
        for eq in self.equList:
            for i, (c,var) in enumerate(eq.addendList):
                eq.addendList[i] = (c, self.reassignVariable(var, numInVars, outVars, newOutVars))

        # Adjust relu list (each entry is a (backward, forward) variable pair)
        for i, variables in enumerate(self.reluList):
            self.reluList[i] = tuple([self.reassignVariable(var, numInVars, outVars, newOutVars) for var in variables])

        # Adjust max pool list
        for i, (elements, outVar) in enumerate(self.maxList):
            newOutVar = self.reassignVariable(outVar, numInVars, outVars, newOutVars)
            newElements = set()
            for var in elements:
                newElements.add(self.reassignVariable(var, numInVars, outVars, newOutVars))
            self.maxList[i] = (newElements, newOutVar)

        # Adjust upper/lower bounds (keys are variable numbers, so rebuild the dicts)
        newLowerBounds = dict()
        newUpperBounds = dict()
        for var in self.lowerBounds:
            newLowerBounds[self.reassignVariable(var, numInVars, outVars, newOutVars)] = self.lowerBounds[var]
        for var in self.upperBounds:
            newUpperBounds[self.reassignVariable(var, numInVars, outVars, newOutVars)] = self.upperBounds[var]
        self.lowerBounds = newLowerBounds
        self.upperBounds = newUpperBounds

        # Adjust constraint variables list
        newVarsParticipatingInConstraints = set()
        for var in self.varsParticipatingInConstraints:
            newVarsParticipatingInConstraints.add(self.reassignVariable(var, numInVars, outVars, newOutVars))
        self.varsParticipatingInConstraints = newVarsParticipatingInConstraints

        # Assign output variables to the new array
        self.varMap[self.outputName] = newOutVars.reshape(self.shapeMap[self.outputName])
        self.outputVars = self.varMap[self.outputName]
def evaluateWithoutMarabou(self, inputValues):
"""
Try to evaluate the network with the given inputs
Arguments:
inputValues: (list of np.arrays) input values representing input to network
Returns:
Output values of neural network
"""
# Check that all input variables are designated as inputs in the graph
# Unlike Tensorflow, ONNX only allows assignment of values to input/output nodes
onnxInputNames = [node.name for node in self.graph.input]
for inName in self.inputNames:
if inName not in onnxInputNames:
print("ONNX does not allow intermediate layers to be set as inputs!")
raise NotImplementedError
# Check that the output variable is designated as an output in the graph
# Unlike Tensorflow, ONNX only allows assignment of values to input/output nodes
onnxOutputNames = [node.name for node in self.graph.output]
if self.outputName not in onnxOutputNames:
print("ONNX does not allow intermediate layers to be set as the output!")
raise NotImplementedError
# Use onnxruntime session to evaluate the point
sess = onnxruntime.InferenceSession(self.filename)
input_dict = dict()
for i, inputName in enumerate(self.inputNames):
# Try to cast input to correct type
onnxType = sess.get_inputs()[i].type
if 'float' in onnxType:
inputType = 'float32'
elif 'int' in onnxType:
inputType = 'int64'
else:
printf("Not sure how to cast input to graph input of type %s" % onnxType)
raise NotImplementedError
input_dict[inputName] = inputValues[i].reshape(self.inputVars[i].shape).astype(inputType)
return sess.run([self.outputName],input_dict)[0] | 44.42454 | 131 | 0.587996 |
import numpy as np
import onnx
import onnxruntime
from onnx import numpy_helper
from onnx.helper import get_attribute_value
from maraboupy import MarabouUtils
from maraboupy import MarabouNetwork
from onnx import TensorProto
class MarabouNetworkONNX(MarabouNetwork.MarabouNetwork):
def __init__(self, filename, inputNames=None, outputName=None):
super().__init__()
self.readONNX(filename, inputNames, outputName)
def clear(self):
super().clear()
self.madeGraphEquations = []
self.varMap = dict()
self.constantMap = dict()
self.shapeMap = dict()
self.inputNames = None
self.outputName = None
self.graph = None
def readONNX(self, filename, inputNames, outputName):
self.filename = filename
self.graph = onnx.load(filename).graph
if not inputNames:
assert len(self.graph.input) >= 1
initNames = [node.name for node in self.graph.initializer]
inputNames = [inp.name for inp in self.graph.input if inp.name not in initNames]
if not outputName:
assert len(self.graph.output) == 1
outputName = self.graph.output[0].name
for name in inputNames:
if not len([nde for nde in self.graph.node if name in nde.input]):
print("Input %s not found in graph!" % name)
raise RuntimeError
if not len([nde for nde in self.graph.node if outputName in nde.output]):
print("Output %s not found in graph!" % outputName)
raise RuntimeError
self.inputNames = inputNames
self.outputName = outputName
self.foundnInputFlags = 0
self.processGraph()
assert self.foundnInputFlags == len(self.inputNames)
self.cleanShapes()
self.reassignOutputVariables()
def processGraph(self):
for node in self.graph.input:
self.shapeMap[node.name] = list([dim.dim_value if dim.dim_value > 0 else 1 for dim in node.type.tensor_type.shape.dim])
self.madeGraphEquations += [node.name]
# If we find one of the specified inputs, create new variables
if node.name in self.inputNames:
self.foundnInputFlags += 1
self.makeNewVariables(node.name)
self.inputVars += [np.array(self.varMap[node.name])]
# Add shapes for constants
for node in self.graph.initializer:
self.shapeMap[node.name] = list(node.dims)
self.madeGraphEquations += [node.name]
# Recursively create remaining shapes and equations as needed
self.makeGraphEquations(self.outputName, True)
def makeGraphEquations(self, nodeName, makeEquations):
if nodeName in self.madeGraphEquations:
return
if nodeName in self.inputNames:
self.foundnInputFlags += 1
# If an inputName is an intermediate layer of the network, we don't need to create Marabou
makeEquations = False
self.madeGraphEquations += [nodeName]
for inNodeName in self.getInputNodes(nodeName):
self.makeGraphEquations(inNodeName, makeEquations)
# Compute node's shape and create Marabou equations as needed
self.makeMarabouEquations(nodeName, makeEquations)
if nodeName in self.inputNames:
self.makeNewVariables(nodeName)
self.inputVars += [np.array(self.varMap[nodeName])]
def makeMarabouEquations(self, nodeName, makeEquations):
node = self.getNode(nodeName)
if node.op_type == 'Identity':
self.identity(node)
elif node.op_type == 'Cast':
self.cast(node)
elif node.op_type == 'Reshape':
self.reshape(node)
elif node.op_type == "Transpose":
self.transpose(node)
elif node.op_type == "MaxPool":
self.maxpoolEquations(node, makeEquations)
elif node.op_type == "Conv":
self.convEquations(node, makeEquations)
elif node.op_type == 'Gemm':
self.gemmEquations(node, makeEquations)
elif node.op_type == 'MatMul':
self.matMulEquations(node, makeEquations)
elif node.op_type == 'Add':
self.addEquations(node, makeEquations)
elif node.op_type == 'Relu':
self.reluEquations(node, makeEquations)
else:
print("Operation %s not implemented" % (node.op_type))
raise NotImplementedError
def getNode(self, nodeName):
node = [node for node in self.graph.node if nodeName in node.output]
if len(node) > 0:
return node[0]
return None
def makeNewVariables(self, nodeName):
assert nodeName not in self.varMap
shape = self.shapeMap[nodeName]
size = np.prod(shape)
v = np.array([self.getNewVariable() for _ in range(size)]).reshape(shape)
self.varMap[nodeName] = v
assert all([np.equal(np.mod(i, 1), 0) for i in v.reshape(-1)])
return v
def getInputNodes(self, nodeName):
node = self.getNode(nodeName)
inNodes = []
for inp in node.input:
if len([nde for nde in self.graph.node if inp in nde.output]):
inNodes += [inp]
elif len([nde for nde in self.graph.initializer if nde.name == inp]):
self.constantMap[inp] = [numpy_helper.to_array(init) for init in self.graph.initializer if init.name == inp][0]
return inNodes
def identity(self, node):
nodeName = node.output[0]
inputName = node.input[0]
self.shapeMap[nodeName] = self.shapeMap[inputName]
if inputName in self.varMap:
self.varMap[nodeName] = self.varMap[inputName]
elif inputName in self.constantMap:
self.constantMap[nodeName] = self.constantMap[inputName]
def cast(self, node):
nodeName = node.output[0]
inputName = node.input[0]
self.shapeMap[nodeName] = self.shapeMap[inputName]
to = None
for attr in node.attribute:
if attr.name == "to":
to = get_attribute_value(attr)
if to is None:
print("Casting type not specified with attribute 'to'")
raise RuntimeError
if inputName in self.constantMap:
if to == TensorProto.FLOAT16:
self.constantMap[nodeName] = self.constantMap[inputName].astype('float16')
elif to == TensorProto.FLOAT:
self.constantMap[nodeName] = self.constantMap[inputName].astype('float32')
elif to == TensorProto.DOUBLE:
self.constantMap[nodeName] = self.constantMap[inputName].astype('double')
elif to == TensorProto.UINT8:
self.constantMap[nodeName] = self.constantMap[inputName].astype('uint8')
elif to == TensorProto.UINT16:
self.constantMap[nodeName] = self.constantMap[inputName].astype('uint16')
elif to == TensorProto.UINT32:
self.constantMap[nodeName] = self.constantMap[inputName].astype('uint32')
elif to == TensorProto.UINT64:
self.constantMap[nodeName] = self.constantMap[inputName].astype('uint64')
elif to == TensorProto.INT8:
self.constantMap[nodeName] = self.constantMap[inputName].astype('int8')
elif to == TensorProto.INT16:
self.constantMap[nodeName] = self.constantMap[inputName].astype('int16')
elif to == TensorProto.INT32:
self.constantMap[nodeName] = self.constantMap[inputName].astype('int32')
elif to == TensorProto.INT64:
self.constantMap[nodeName] = self.constantMap[inputName].astype('int64')
else:
print("Unknown type for casting: %d" % to)
print("Check here for ONNX TensorProto: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto")
raise NotImplementedError
elif inputName in self.varMap:
print("Casting variables not allowed with Marabou")
raise NotImplementedError
def reshape(self, node):
nodeName = node.output[0]
inputName1, inputName2 = node.input
# Assume first input is array to be reshaped, second input is the new shape array
reshapeVals = self.constantMap[inputName2]
self.shapeMap[nodeName] = list(np.zeros(self.shapeMap[inputName1]).reshape(reshapeVals).shape)
if inputName1 in self.varMap:
self.varMap[nodeName] = self.varMap[inputName1].reshape(reshapeVals)
elif inputName1 in self.constantMap:
self.constantMap[nodeName] = self.constantMap[inputName1].reshape(reshapeVals)
def transpose(self, node):
nodeName = node.output[0]
inputName = node.input[0]
# Get attributes
perm = None
for attr in node.attribute:
if attr.name == "perm":
perm = get_attribute_value(attr)
if perm is None:
print("Permutation indices not specified by attibute 'perm'")
raise RuntimeError
self.shapeMap[nodeName] = [self.shapeMap[inputName][p] for p in perm]
if inputName in self.varMap:
self.varMap[nodeName] = np.transpose(self.varMap[node.input[0]], perm)
elif inputName in self.constantMap:
self.constantMap[nodeName] = np.transpose(self.constant[inputName], perm)
def maxpoolEquations(self, node, makeEquations):
nodeName = node.output[0]
### Get variables and constants of inputs ###
inVars = self.varMap[node.input[0]]
inputShape = self.shapeMap[node.input[0]]
kernel_shape = [1, 1]
strides = [1, 1]
for attr in node.attribute:
if attr.name == 'kernel_shape':
kernel_shape = get_attribute_value(attr)
elif attr.name == 'strides':
strides = get_attribute_value(attr)
outputShape = [dim for dim in inputShape]
outputShape[2] = int(np.ceil((inputShape[2] - ((kernel_shape[0] - 1) + 1) + 1) / strides[0]))
outputShape[3] = int(np.ceil((inputShape[3] - ((kernel_shape[1] - 1) + 1) + 1) / strides[1]))
self.shapeMap[nodeName] = outputShape
if makeEquations:
outVars = self.makeNewVariables(nodeName)
for i in range(outputShape[2]):
for j in range(outputShape[3]):
for k in range(outputShape[1]):
maxVars = set()
for di in range(strides[0]*i, strides[0]*i + kernel_shape[0]):
for dj in range(strides[1]*j, strides[1]*j + kernel_shape[1]):
if di < inputShape[2] and dj < inputShape[3]:
maxVars.add(inVars[0][k][di][dj])
self.addMaxConstraint(maxVars, outVars[0][k][i][j])
def convEquations(self, node, makeEquations):
nodeName = node.output[0]
# Extract information about convolution
strides = [1, 1]
pads = [0, 0, 0, 0]
for attr in node.attribute:
if attr.name == 'strides':
strides = get_attribute_value(attr)
elif attr.name == 'pads':
pads = get_attribute_value(attr)
pad_left, pad_bottom, pad_right, pad_top = pads
# Get input shape information
# First input should be variable tensor, the second a weight matrix defining filters
shape0 = self.shapeMap[node.input[0]]
shape1 = self.shapeMap[node.input[1]]
input_channels = shape0[1]
input_width = shape0[2]
input_height = shape0[3]
num_filters = shape1[0]
filter_channels = shape1[1]
filter_width = shape1[2]
filter_height = shape1[3]
# The number of channels should match between input variable and filters
assert input_channels == filter_channels
# Compute output shape
out_width = (input_width - filter_width + pad_left + pad_right) // strides[0] + 1
out_height = (input_height - filter_height + pad_bottom + pad_top) // strides[1] + 1
out_channels = num_filters
self.shapeMap[nodeName] = [shape0[0], out_channels, out_width, out_height]
if makeEquations:
inVars = self.varMap[node.input[0]]
weights = self.constantMap[node.input[1]]
outVars = self.makeNewVariables(nodeName)
### Generate actual equations ###
# There is one equation for every output variable
for i in range(out_width):
for j in range(out_height):
for k in range(out_channels): # Out_channel corresponds to filter number
e = MarabouUtils.Equation()
# The equation convolves the filter with the specified input region
# Iterate over the filter
for di in range(filter_width):
for dj in range(filter_height):
for dk in range(filter_channels):
w_ind = int(strides[0]*i+di - pad_left)
h_ind = int(strides[1]*j+dj - pad_top)
if h_ind < input_height and h_ind >= 0 and w_ind < input_width and w_ind >= 0:
var = inVars[0][dk][w_ind][h_ind]
c = weights[k][dk][di][dj]
e.addAddend(c, var)
# Add output variable
e.addAddend(-1, outVars[0][k][i][j])
e.setScalar(0.0)
self.addEquation(e)
def gemmEquations(self, node, makeEquations):
nodeName = node.output[0]
# Get inputs
inputName1, inputName2, inputName3 = node.input
shape1 = self.shapeMap[inputName1]
shape2 = self.shapeMap[inputName2]
shape3 = self.shapeMap[inputName3]
input1 = self.varMap[inputName1]
input2 = self.constantMap[inputName2]
input3 = self.constantMap[inputName3]
self.shapeMap[nodeName] = self.shapeMap[inputName3]
if makeEquations:
# Pad shape if needed
if len(shape1) == 1:
shape1 = [1] + shape1
input1 = input1.reshape(shape1)
elif shape1[1] == 1:
shape1 = shape1[::-1]
input1 = input1.reshape(shape1)
if len(shape3) == 1:
shape3 = [1] + shape3
input3 = input3.reshape(shape3)
if shape1[0] != shape3[0]:
shape3 = shape3[::-1]
input3 = input3.reshape(shape3)
# Assume that first input is variables, second is Matrix for MatMul, and third is bias addition
assert shape1[-1] == shape2[0]
assert shape1[0] == shape3[0]
assert shape2[1] == shape3[1]
# Create new variables
self.shapeMap[nodeName] = self.shapeMap[node.input[2]]
outputVariables = self.makeNewVariables(nodeName)
outputVariables = outputVariables.reshape(shape3)
# Generate equations
for i in range(shape1[0]):
for j in range(shape2[1]):
e = MarabouUtils.Equation()
for k in range(shape1[1]):
e.addAddend(input2[k][j], input1[i][k])
# Put output variable as the last addend last
e.addAddend(-1, outputVariables[i][j])
e.setScalar(-input3[i][j])
self.addEquation(e)
def matMulEquations(self, node, makeEquations):
nodeName = node.output[0]
# Get inputs and determine which inputs are constants and which are variables
inputName1, inputName2 = node.input
shape1 = self.shapeMap[inputName1]
shape2 = self.shapeMap[inputName2]
assert shape1[-1] == shape2[0]
self.shapeMap[nodeName] = shape1[:-1] + shape2[1:]
firstInputConstant = False; secondInputConstant = False
if inputName1 in self.constantMap:
input1 = self.constantMap[inputName1]
firstInputConstant = True
else:
input1 = self.varMap[inputName1]
if inputName2 in self.constantMap:
input2 = self.constantMap[inputName2]
secondInputConstant = True
else:
input2 = self.varMap[inputName2]
# Assume that at least one input is a constant (We cannot represent variable products with linear equations)
assert firstInputConstant or secondInputConstant
# If both inputs are constant, than the output is constant as well, and we don't need new variables or equations
if firstInputConstant and secondInputConstant:
self.constantMap[nodeName] = np.matmul(input1,input2)
return
if makeEquations:
outputVariables = self.makeNewVariables(nodeName)
for i in range(shape1[0]):
if len(shape2)>1:
for j in range(shape2[1]):
e = MarabouUtils.Equation()
for k in range(shape1[1]):
if firstInputConstant:
e.addAddend(input1[i][k], input2[k][j])
else:
e.addAddend(input2[k][j], input1[i][k])
e.addAddend(-1, outputVariables[i][j])
e.setScalar(0.0)
self.addEquation(e)
else:
e = MarabouUtils.Equation()
for k in range(shape1[1]):
if firstInputConstant:
e.addAddend(input1[i][k], input2[k])
else:
e.addAddend(input2[k], input1[i][k])
e.addAddend(-1, outputVariables[i])
e.setScalar(0.0)
self.addEquation(e)
def addEquations(self, node, makeEquations):
nodeName = node.output[0]
inputName1, inputName2 = node.input
shape1 = self.shapeMap[inputName1]
shape2 = self.shapeMap[inputName2]
self.shapeMap[nodeName] = shape1
firstInputConstant = False; secondInputConstant = False
if inputName1 in self.constantMap:
input1 = np.copy(self.constantMap[inputName1]) + np.zeros(shape2)
firstInputConstant = True
else:
input1 = self.varMap[inputName1]
if inputName2 in self.constantMap:
input2 = np.copy(self.constantMap[inputName2]) + np.zeros(shape1)
secondInputConstant = True
else:
input2 = self.varMap[inputName2]
assert input1.shape == input2.shape
self.shapeMap[nodeName] = shape1
if firstInputConstant and secondInputConstant:
self.constantMap[nodeName] = input1 + input2
elif makeEquations and not firstInputConstant and not secondInputConstant:
outputVariables = self.makeNewVariables(nodeName)
input1 = input1.reshape(-1)
input2 = input2.reshape(-1)
outputVariables = outputVariables.reshape(-1)
for i in range(len(input1)):
e = MarabouUtils.Equation()
e.addAddend(1, input1[i])
e.addAddend(1, input2[i])
e.addAddend(-1, outputVariables[i])
e.setScalar(0.0)
self.addEquation(e)
# Instead, we can just edit the scalar term of the existing linear equation.
# However, if the input variables are not outputs of linear equations (input variables or outputs of
# activation functions) then we will need new equations.
elif makeEquations:
if firstInputConstant:
constInput = input1
varInput = input2
else:
constInput = input2
varInput = input1
constInput = constInput.reshape(-1)
varInput = varInput.reshape(-1)
# Adjust equations to incorporate the constant addition
numEquationsChanged = 0
for equ in self.equList:
(c,var) = equ.addendList[-1]
assert c == -1
if var in varInput:
ind = np.where(var == varInput)[0][0]
# Adjust the equation
equ.setScalar(equ.scalar-constInput[ind])
numEquationsChanged += 1
# If we changed one equation for every input variable, then
# we don't need any new equations
if numEquationsChanged == len(varInput):
self.varMap[nodeName] = varInput
else:
assert numEquationsChanged == 0
outputVariables = self.makeNewVariables(nodeName).reshape(-1)
for i in range(len(outputVariables)):
e = MarabouUtils.Equation()
e.addAddend(1, varInput[i])
e.addAddend(-1, outputVariables[i])
e.setScalar(-constInput[i])
self.addEquation(e)
def reluEquations(self, node, makeEquations):
nodeName = node.output[0]
inputName = node.input[0]
self.shapeMap[nodeName] = self.shapeMap[inputName]
if makeEquations:
inputVars = self.varMap[inputName].reshape(-1)
outputVars = self.makeNewVariables(nodeName).reshape(-1)
assert len(inputVars) == len(outputVars)
for i in range(len(inputVars)):
self.addRelu(inputVars[i], outputVars[i])
for f in outputVars:
self.setLowerBound(f, 0.0)
def cleanShapes(self):
for nodeName in [name for name in self.shapeMap]:
if nodeName not in self.varMap and nodeName not in self.constantMap:
self.shapeMap.pop(nodeName)
def reassignVariable(self, var, numInVars, outVars, newOutVars):
if var < numInVars:
return var
if var in outVars:
ind = np.where(var == outVars)[0][0]
return newOutVars[ind]
return var + len(outVars)
def reassignOutputVariables(self):
outVars = self.varMap[self.outputName].reshape(-1)
numInVars = np.sum([np.prod(self.shapeMap[inputName]) for inputName in self.inputNames])
numOutVars = len(outVars)
newOutVars = np.array(range(numInVars,numInVars+numOutVars))
for eq in self.equList:
for i, (c,var) in enumerate(eq.addendList):
eq.addendList[i] = (c, self.reassignVariable(var, numInVars, outVars, newOutVars))
for i, variables in enumerate(self.reluList):
self.reluList[i] = tuple([self.reassignVariable(var, numInVars, outVars, newOutVars) for var in variables])
for i, (elements, outVar) in enumerate(self.maxList):
newOutVar = self.reassignVariable(outVar, numInVars, outVars, newOutVars)
newElements = set()
for var in elements:
newElements.add(self.reassignVariable(var, numInVars, outVars, newOutVars))
self.maxList[i] = (newElements, newOutVar)
newLowerBounds = dict()
newUpperBounds = dict()
for var in self.lowerBounds:
newLowerBounds[self.reassignVariable(var, numInVars, outVars, newOutVars)] = self.lowerBounds[var]
for var in self.upperBounds:
newUpperBounds[self.reassignVariable(var, numInVars, outVars, newOutVars)] = self.upperBounds[var]
self.lowerBounds = newLowerBounds
self.upperBounds = newUpperBounds
newVarsParticipatingInConstraints = set()
for var in self.varsParticipatingInConstraints:
newVarsParticipatingInConstraints.add(self.reassignVariable(var, numInVars, outVars, newOutVars))
self.varsParticipatingInConstraints = newVarsParticipatingInConstraints
self.varMap[self.outputName] = newOutVars.reshape(self.shapeMap[self.outputName])
self.outputVars = self.varMap[self.outputName]
def evaluateWithoutMarabou(self, inputValues):
onnxInputNames = [node.name for node in self.graph.input]
for inName in self.inputNames:
if inName not in onnxInputNames:
print("ONNX does not allow intermediate layers to be set as inputs!")
raise NotImplementedError
onnxOutputNames = [node.name for node in self.graph.output]
if self.outputName not in onnxOutputNames:
print("ONNX does not allow intermediate layers to be set as the output!")
raise NotImplementedError
sess = onnxruntime.InferenceSession(self.filename)
input_dict = dict()
for i, inputName in enumerate(self.inputNames):
onnxType = sess.get_inputs()[i].type
if 'float' in onnxType:
inputType = 'float32'
elif 'int' in onnxType:
inputType = 'int64'
else:
printf("Not sure how to cast input to graph input of type %s" % onnxType)
raise NotImplementedError
input_dict[inputName] = inputValues[i].reshape(self.inputVars[i].shape).astype(inputType)
return sess.run([self.outputName],input_dict)[0] | true | true |
1c2eed504e370d1e47ed2225ee6c0c10dfed73e2 | 7,601 | py | Python | app/pre_demultiplexing_data_api.py | imperial-genomics-facility/IGFPortal | 0a61ecbfc1ac71775ad12d7cf13d09512ad71380 | [
"Apache-2.0"
] | null | null | null | app/pre_demultiplexing_data_api.py | imperial-genomics-facility/IGFPortal | 0a61ecbfc1ac71775ad12d7cf13d09512ad71380 | [
"Apache-2.0"
] | null | null | null | app/pre_demultiplexing_data_api.py | imperial-genomics-facility/IGFPortal | 0a61ecbfc1ac71775ad12d7cf13d09512ad71380 | [
"Apache-2.0"
] | null | null | null | import json, logging
from flask_appbuilder import ModelRestApi
from flask import request
from flask_appbuilder.api import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import protect
from . import db
from .models import PreDeMultiplexingData
"""
Pre-demultiplexing data Api
"""
def search_predemultiplexing_data(run_name, samplesheet_tag):
    """Fetch the PreDeMultiplexingData row for a run / samplesheet pair.

    :param run_name: Sequencing run name to match
    :param samplesheet_tag: Samplesheet tag to match
    :returns: The matching PreDeMultiplexingData instance, or None when
        no row matches
    :raises ValueError: If the database lookup fails
    """
    try:
        # Build the filtered query step by step, then resolve it to at
        # most one row (one_or_none raises if several rows match).
        query = (
            db.session
            .query(PreDeMultiplexingData)
            .filter(PreDeMultiplexingData.run_name == run_name)
            .filter(PreDeMultiplexingData.samplesheet_tag == samplesheet_tag))
        return query.one_or_none()
    except Exception as e:
        raise ValueError(
            "Failed to search pre demultiplexing data, error: {0}".format(e))
def add_predemultiplexing_data(data):
try:
if isinstance(data, bytes):
data = json.loads(data.decode())
if isinstance(data, str):
data = json.loads(data)
flowcell_cluster_plot = data.get("flowcell_cluster_plot")
if isinstance(flowcell_cluster_plot, dict):
flowcell_cluster_plot = json.dumps(flowcell_cluster_plot)
project_summary_table = data.get("project_summary_table")
if isinstance(project_summary_table, dict):
project_summary_table = json.dumps(project_summary_table)
project_summary_plot = data.get("project_summary_plot")
if isinstance(project_summary_plot, dict):
project_summary_plot = json.dumps(project_summary_plot)
sample_table = data.get("sample_table")
if isinstance(sample_table, dict):
sample_table = json.dumps(sample_table)
sample_plot = data.get("sample_plot")
if isinstance(sample_plot, dict):
sample_plot = json.dumps(sample_plot)
undetermined_table = data.get("undetermined_table")
if isinstance(undetermined_table, dict):
undetermined_table = json.dumps(undetermined_table)
undetermined_plot = data.get("undetermined_plot")
if isinstance(undetermined_plot, dict):
undetermined_plot = json.dumps(undetermined_plot)
predemult_data = \
PreDeMultiplexingData(
run_name=data.get("run_name"),
samplesheet_tag=data.get("samplesheet_tag"),
flowcell_cluster_plot=flowcell_cluster_plot,
project_summary_table=project_summary_table,
project_summary_plot=project_summary_plot,
sample_table=sample_table,
sample_plot=sample_plot,
undetermined_table=undetermined_table,
undetermined_plot=undetermined_plot)
try:
db.session.add(predemult_data)
db.session.flush()
db.session.commit()
except:
db.session.rollback()
raise
except Exception as e:
raise ValueError(
"Failed to add de-multiplex data, error: {0}".\
format(e))
def edit_predemultiplexing_data(data):
    """Update an existing PreDeMultiplexingData row.

    ``data`` may be a dict, a JSON string, or JSON-encoded bytes and must
    contain ``run_name`` and ``samplesheet_tag`` to identify the row.
    Dict values for the plot/table columns are serialised to JSON
    strings before the update.

    :param data: Report payload used both to locate and to update the row
    :raises ValueError: If required keys are missing or the update fails
    """
    try:
        if isinstance(data, bytes):
            data = json.loads(data.decode())
        if isinstance(data, str):
            data = json.loads(data)
        if "run_name" not in data:
            raise ValueError("Missing run name")
        if "samplesheet_tag" not in data:
            # fixed typo: was "sampleshheet"
            raise ValueError("Missing samplesheet tag")
        # Serialise any dict-valued plot/table columns to JSON strings;
        # non-dict values (already-serialised strings, None) are left as-is.
        json_columns = (
            "flowcell_cluster_plot",
            "project_summary_table",
            "project_summary_plot",
            "sample_table",
            "sample_plot",
            "undetermined_table",
            "undetermined_plot")
        for column in json_columns:
            value = data.get(column)
            if isinstance(value, dict):
                data.update({column: json.dumps(value)})
        try:
            db.session.\
                query(PreDeMultiplexingData).\
                filter(PreDeMultiplexingData.run_name==data.get("run_name")).\
                filter(PreDeMultiplexingData.samplesheet_tag==data.get("samplesheet_tag")).\
                update(data)
            db.session.commit()
        except BaseException:
            # Roll back on any failure, then re-raise for the outer handler
            db.session.rollback()
            raise
    except Exception as e:
        raise ValueError(
            "Failed to update de-multiplex data, error: {0}".format(e))
def add_or_edit_predemultiplexing_data(data):
    """Insert the report when no matching row exists, otherwise update it.

    ``data`` may be a dict, a JSON string, or JSON-encoded bytes and must
    contain ``run_name`` and ``samplesheet_tag``.

    :param data: Report payload
    :raises ValueError: If required keys are missing or the upsert fails
    """
    try:
        if isinstance(data, bytes):
            data = json.loads(data.decode())
        if isinstance(data, str):
            data = json.loads(data)
        if "run_name" not in data:
            raise ValueError("Missing run name")
        if "samplesheet_tag" not in data:
            # fixed typo: was "sampleshheet"
            raise ValueError("Missing samplesheet tag")
        result = \
            search_predemultiplexing_data(
                run_name=data.get("run_name"),
                samplesheet_tag=data.get("samplesheet_tag"))
        if result is None:
            add_predemultiplexing_data(data=data)
        else:
            edit_predemultiplexing_data(data=data)
    except Exception as e:
        raise ValueError(
            "Failed to add or update de-multiplex data, error: {0}".format(e))
class PreDeMultiplexingDataApi(ModelRestApi):
    """REST API exposing PreDeMultiplexingData records via Flask-AppBuilder."""
    resource_name = "predemultiplexing_data"
    datamodel = SQLAInterface(PreDeMultiplexingData)

    @expose('/add_or_edit_report', methods=['POST'])
    @protect()
    def add_or_edit_demult_report(self):
        """Create or update a de-multiplexing report from an uploaded file.

        Expects a multipart POST with a JSON report under the ``file``
        field; only the first uploaded file is used.
        """
        try:
            if not request.files:
                return self.response_400('No files')
            file_objs = request.files.getlist('file')
            if not file_objs:
                # request carried files, but none under the expected field
                return self.response_400('No file')
            file_obj = file_objs[0]
            # Rewind in case the stream was already read upstream
            file_obj.seek(0)
            json_data = file_obj.read()
            add_or_edit_predemultiplexing_data(data=json_data)
            return self.response(200, message='successfully added or updated demult data')
        except Exception as e:
            logging.error(e)
            # Previously fell through returning None, which made Flask fail
            # with "view function did not return a response"
            return self.response_500('failed to add or update demult data')
| 41.535519 | 92 | 0.63689 | import json, logging
from flask_appbuilder import ModelRestApi
from flask import request
from flask_appbuilder.api import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import protect
from . import db
from .models import PreDeMultiplexingData
def search_predemultiplexing_data(run_name, samplesheet_tag):
    """Fetch the PreDeMultiplexingData row for a run / samplesheet pair.

    :param run_name: Sequencing run name to match
    :param samplesheet_tag: Samplesheet tag to match
    :returns: The matching PreDeMultiplexingData instance, or None when
        no row matches
    :raises ValueError: If the database lookup fails
    """
    try:
        # one_or_none() returns None for no match and raises for >1 match
        result = \
            db.session.\
                query(PreDeMultiplexingData).\
                filter(PreDeMultiplexingData.run_name==run_name).\
                filter(PreDeMultiplexingData.samplesheet_tag==samplesheet_tag).\
                one_or_none()
        return result
    except Exception as e:
        raise ValueError(
                "Failed to search pre demultiplexing data, error: {0}".\
                format(e))
def add_predemultiplexing_data(data):
    """Insert a new PreDeMultiplexingData row.

    ``data`` may be a dict, a JSON string, or JSON-encoded bytes; dict
    values for the plot/table columns are serialised to JSON strings
    before being stored.

    :param data: Report payload; expected keys include ``run_name``,
        ``samplesheet_tag`` and the plot/table columns
    :raises ValueError: If decoding or the database insert fails
    """
    try:
        # Normalise the payload to a dict
        if isinstance(data, bytes):
            data = json.loads(data.decode())
        if isinstance(data, str):
            data = json.loads(data)
        # Serialise dict-valued columns to JSON text, one column at a time
        flowcell_cluster_plot = data.get("flowcell_cluster_plot")
        if isinstance(flowcell_cluster_plot, dict):
            flowcell_cluster_plot = json.dumps(flowcell_cluster_plot)
        project_summary_table = data.get("project_summary_table")
        if isinstance(project_summary_table, dict):
            project_summary_table = json.dumps(project_summary_table)
        project_summary_plot = data.get("project_summary_plot")
        if isinstance(project_summary_plot, dict):
            project_summary_plot = json.dumps(project_summary_plot)
        sample_table = data.get("sample_table")
        if isinstance(sample_table, dict):
            sample_table = json.dumps(sample_table)
        sample_plot = data.get("sample_plot")
        if isinstance(sample_plot, dict):
            sample_plot = json.dumps(sample_plot)
        undetermined_table = data.get("undetermined_table")
        if isinstance(undetermined_table, dict):
            undetermined_table = json.dumps(undetermined_table)
        undetermined_plot = data.get("undetermined_plot")
        if isinstance(undetermined_plot, dict):
            undetermined_plot = json.dumps(undetermined_plot)
        predemult_data = \
            PreDeMultiplexingData(
                run_name=data.get("run_name"),
                samplesheet_tag=data.get("samplesheet_tag"),
                flowcell_cluster_plot=flowcell_cluster_plot,
                project_summary_table=project_summary_table,
                project_summary_plot=project_summary_plot,
                sample_table=sample_table,
                sample_plot=sample_plot,
                undetermined_table=undetermined_table,
                undetermined_plot=undetermined_plot)
        try:
            db.session.add(predemult_data)
            db.session.flush()
            db.session.commit()
        except:
            # Roll back on any failure, then re-raise for the outer handler
            db.session.rollback()
            raise
    except Exception as e:
        raise ValueError(
            "Failed to add de-multiplex data, error: {0}".\
            format(e))
def edit_predemultiplexing_data(data):
    """Update an existing PreDeMultiplexingData row.

    ``data`` may be a dict, a JSON string, or JSON-encoded bytes and must
    contain ``run_name`` and ``samplesheet_tag`` to identify the row.
    Dict values for the plot/table columns are serialised to JSON
    strings before the update.

    :param data: Report payload used both to locate and to update the row
    :raises ValueError: If required keys are missing or the update fails
    """
    try:
        # Normalise the payload to a dict
        if isinstance(data, bytes):
            data = json.loads(data.decode())
        if isinstance(data, str):
            data = json.loads(data)
        if "run_name" not in data:
            raise ValueError("Missing run name")
        if "samplesheet_tag" not in data:
            raise ValueError("Missing sampleshheet tag")
        # For each plot/table column: serialise dict values to JSON text
        # and write the serialised value back into the payload
        flowcell_cluster_plot = data.get("flowcell_cluster_plot")
        if flowcell_cluster_plot is not None and \
           isinstance(flowcell_cluster_plot, dict):
            flowcell_cluster_plot = json.dumps(flowcell_cluster_plot)
            data.update({"flowcell_cluster_plot": flowcell_cluster_plot})
        project_summary_table = data.get("project_summary_table")
        if project_summary_table is not None and \
           isinstance(project_summary_table, dict):
            project_summary_table = json.dumps(project_summary_table)
            data.update({"project_summary_table": project_summary_table})
        project_summary_plot = data.get("project_summary_plot")
        if project_summary_plot is not None and \
           isinstance(project_summary_plot, dict):
            project_summary_plot = json.dumps(project_summary_plot)
            data.update({"project_summary_plot": project_summary_plot})
        sample_table = data.get("sample_table")
        if sample_table is not None and \
           isinstance(sample_table, dict):
            sample_table = json.dumps(sample_table)
            data.update({"sample_table": sample_table})
        sample_plot = data.get("sample_plot")
        if sample_plot is not None and \
           isinstance(sample_plot, dict):
            sample_plot = json.dumps(sample_plot)
            data.update({"sample_plot": sample_plot})
        undetermined_table = data.get("undetermined_table")
        if undetermined_table is not None and \
           isinstance(undetermined_table, dict):
            undetermined_table = json.dumps(undetermined_table)
            data.update({"undetermined_table": undetermined_table})
        undetermined_plot = data.get("undetermined_plot")
        if undetermined_plot is not None and \
           isinstance(undetermined_plot, dict):
            undetermined_plot = json.dumps(undetermined_plot)
            data.update({"undetermined_plot": undetermined_plot})
        try:
            db.session.\
                query(PreDeMultiplexingData).\
                filter(PreDeMultiplexingData.run_name==data.get("run_name")).\
                filter(PreDeMultiplexingData.samplesheet_tag==data.get("samplesheet_tag")).\
                update(data)
            db.session.commit()
        except:
            # Roll back on any failure, then re-raise for the outer handler
            db.session.rollback()
            raise
    except Exception as e:
        raise ValueError(
            "Failed to update de-multiplex data, error: {0}".\
            format(e))
def add_or_edit_predemultiplexing_data(data):
    """Insert the report when no matching row exists, otherwise update it.

    ``data`` may be a dict, a JSON string, or JSON-encoded bytes and must
    contain ``run_name`` and ``samplesheet_tag``.

    :param data: Report payload
    :raises ValueError: If required keys are missing or the upsert fails
    """
    try:
        # Normalise the payload to a dict
        if isinstance(data, bytes):
            data = json.loads(data.decode())
        if isinstance(data, str):
            data = json.loads(data)
        if "run_name" not in data:
            raise ValueError("Missing run name")
        if "samplesheet_tag" not in data:
            raise ValueError("Missing sampleshheet tag")
        # Decide between insert and update based on an existing row
        result = \
            search_predemultiplexing_data(
                run_name=data.get("run_name"),
                samplesheet_tag=data.get("samplesheet_tag"))
        if result is None:
            add_predemultiplexing_data(data=data)
        else:
            edit_predemultiplexing_data(data=data)
    except Exception as e:
        raise ValueError(
            "Failed to add or update de-multiplex data, error: {0}".\
            format(e))
class PreDeMultiplexingDataApi(ModelRestApi):
    """REST API exposing PreDeMultiplexingData records via Flask-AppBuilder."""
    resource_name = "predemultiplexing_data"
    datamodel = SQLAInterface(PreDeMultiplexingData)
    @expose('/add_or_edit_report', methods=['POST'])
    @protect()
    def add_or_edit_demult_report(self):
        """Create or update a de-multiplexing report from an uploaded file.

        Expects a multipart POST with a JSON report under the ``file``
        field; only the first uploaded file is used.
        """
        try:
            if not request.files:
                return self.response_400('No files')
            file_objs = request.files.getlist('file')
            file_obj = file_objs[0]
            # Rewind in case the stream was already read upstream
            file_obj.seek(0)
            json_data = file_obj.read()
            add_or_edit_predemultiplexing_data(data=json_data)
            return self.response(200, message='successfully added or updated demult data')
        except Exception as e:
            # NOTE(review): on failure this logs and falls through, so the
            # view returns None — confirm an error response was intended
            logging.error(e)
| true | true |
1c2eef133d6fc75b858afe9960a2cac36a14f3f5 | 101 | py | Python | networking_mrv/__init__.py | iljatab/ml2-mech | 336873605f41769213d2895441cc7a1bd78fe6c0 | [
"Apache-1.1"
] | null | null | null | networking_mrv/__init__.py | iljatab/ml2-mech | 336873605f41769213d2895441cc7a1bd78fe6c0 | [
"Apache-1.1"
] | null | null | null | networking_mrv/__init__.py | iljatab/ml2-mech | 336873605f41769213d2895441cc7a1bd78fe6c0 | [
"Apache-1.1"
] | null | null | null |
# Derive the package version string from git tags / PKG-INFO metadata
# via pbr (OpenStack's Python Build Reasonableness helper).
import pbr.version

__version__ = pbr.version.VersionInfo(
    'networking_mrv').version_string()
| 12.625 | 38 | 0.752475 |
import pbr.version
__version__ = pbr.version.VersionInfo(
'networking_mrv').version_string()
| true | true |
1c2ef06b5cec4cd98c295ed3006362984d2b223c | 12,565 | py | Python | caproto/_status.py | mrakitin/caproto | ad49ffbe1a69ddc63cac9ec7f1a3468a4965e465 | [
"BSD-3-Clause"
] | 12 | 2019-05-25T14:26:25.000Z | 2022-01-24T09:10:18.000Z | caproto/_status.py | mrakitin/caproto | ad49ffbe1a69ddc63cac9ec7f1a3468a4965e465 | [
"BSD-3-Clause"
] | 333 | 2017-06-22T03:10:15.000Z | 2019-05-07T16:37:20.000Z | caproto/_status.py | mrakitin/caproto | ad49ffbe1a69ddc63cac9ec7f1a3468a4965e465 | [
"BSD-3-Clause"
] | 17 | 2019-07-03T18:17:22.000Z | 2022-03-22T00:24:20.000Z | # Represent each CA Status Code as a namedtuple encapulating associated numeric
# codes and human-readable attributes.
# The CAStatus Enum maps each name (like 'ECA_NORMAL') to a CAStatusCode
# instance.
from enum import IntEnum, Enum
from collections import namedtuple
__all__ = ('CAStatus', 'CASeverity')
# Immutable record describing a single CA status constant: its symbolic
# name, numeric codes, severity, success flag, and human-readable text.
CAStatusCode = namedtuple(
    'CAStatusCode',
    ['name', 'code', 'code_with_severity', 'severity', 'success',
     'defunct', 'description'],
)
class CASeverity(IntEnum):
    """Severity carried in the low three bits of a CA status code.

    The lowest bit doubles as the success flag, which is why the
    odd-valued members (SUCCESS, INFO) count as successful outcomes.
    """
    INFO = 3  # successful
    ERROR = 2  # failed; continue
    SUCCESS = 1  # successful
    WARNING = 0  # unsuccessful
    SEVERE = 4  # failed; quit
    FATAL = (ERROR | SEVERE)

    def __str__(self):
        # Render as the bare severity name, e.g. "WARNING"
        return self.name
def _ca_status(name, severity: CASeverity, code, desc, *, defunct=False):
    '''Factory function for making a CAStatusCode

    Parameters
    ----------
    name : str
        Status code string name
    severity : CASeverity
        Severity level
    code : int
        Base code number (0 to 60, as of time of writing)
    desc : str
        User-friendlyish description
    defunct : bool, optional
        Indicates that current release servers and client library will not
        return this error code, but servers on earlier releases that
        communicate with current clients might still generate exceptions
        with these error constants.
    '''
    message_mask = 0xFFF8
    severity_mask = 0x0007
    success_mask = 0x0001
    message_shift = 0x03

    # Low three bits hold the severity; the rest hold the message number.
    severity_bits = severity & severity_mask
    # The severity must fit entirely inside its three-bit field.
    assert severity_bits == severity
    full_code = ((code << message_shift) & message_mask) | severity_bits
    # The lowest severity bit is, by protocol definition, the success flag.
    success = severity & success_mask
    return CAStatusCode(name=name, code=code,
                        code_with_severity=full_code,
                        severity=severity, success=success,
                        description=desc,
                        defunct=defunct)
class CAStatus(Enum):
    """All Channel Access status codes, keyed by their ECA_* names.

    Each member's value is a :class:`CAStatusCode` built by
    :func:`_ca_status`; ``defunct=True`` marks codes kept only for
    compatibility with peers running older releases.
    """
    ECA_NORMAL = _ca_status(
        'ECA_NORMAL', severity=CASeverity.SUCCESS, code=0,
        desc="Normal successful completion")
    ECA_MAXIOC = _ca_status(
        'ECA_MAXIOC', severity=CASeverity.ERROR, code=1,
        desc="Maximum simultaneous IOC connections exceeded",
        defunct=True)
    ECA_UKNHOST = _ca_status(
        'ECA_UKNHOST', severity=CASeverity.ERROR, code=2,
        desc="Unknown internet host",
        defunct=True)
    ECA_UKNSERV = _ca_status(
        'ECA_UKNSERV', severity=CASeverity.ERROR, code=3,
        desc="Unknown internet service",
        defunct=True)
    ECA_SOCK = _ca_status(
        'ECA_SOCK', severity=CASeverity.ERROR, code=4,
        desc="Unable to allocate a new socket",
        defunct=True)
    ECA_CONN = _ca_status(
        'ECA_CONN', severity=CASeverity.WARNING, code=5,
        desc="Unable to connect to internet host or service",
        defunct=True)
    ECA_ALLOCMEM = _ca_status(
        'ECA_ALLOCMEM', severity=CASeverity.WARNING, code=6,
        desc="Unable to allocate additional dynamic memory")
    ECA_UKNCHAN = _ca_status(
        'ECA_UKNCHAN', severity=CASeverity.WARNING, code=7,
        desc="Unknown IO channel",
        defunct=True)
    ECA_UKNFIELD = _ca_status(
        'ECA_UKNFIELD', severity=CASeverity.WARNING, code=8,
        desc="Record field specified inappropriate for channel specified",
        defunct=True)
    ECA_TOLARGE = _ca_status(
        'ECA_TOLARGE', severity=CASeverity.WARNING, code=9,
        desc=("The requested data transfer is greater than available memory "
              "or EPICS_CA_MAX_ARRAY_BYTES"))
    ECA_TIMEOUT = _ca_status(
        'ECA_TIMEOUT', severity=CASeverity.WARNING, code=10,
        desc="User specified timeout on IO operation expired")
    ECA_NOSUPPORT = _ca_status(
        'ECA_NOSUPPORT', severity=CASeverity.WARNING, code=11,
        desc="Sorry, that feature is planned but not supported at this time",
        defunct=True)
    ECA_STRTOBIG = _ca_status(
        'ECA_STRTOBIG', severity=CASeverity.WARNING, code=12,
        desc="The supplied string is unusually large",
        defunct=True)
    ECA_DISCONNCHID = _ca_status(
        'ECA_DISCONNCHID', severity=CASeverity.ERROR, code=13,
        desc=("The request was ignored because the specified channel is "
              "disconnected"),
        defunct=True)
    ECA_BADTYPE = _ca_status(
        'ECA_BADTYPE', severity=CASeverity.ERROR, code=14,
        desc="The data type specifed is invalid")
    ECA_CHIDNOTFND = _ca_status(
        'ECA_CHIDNOTFND', severity=CASeverity.INFO, code=15,
        desc="Remote Channel not found",
        defunct=True)
    ECA_CHIDRETRY = _ca_status(
        'ECA_CHIDRETRY', severity=CASeverity.INFO, code=16,
        desc="Unable to locate all user specified channels",
        defunct=True)
    ECA_INTERNAL = _ca_status(
        'ECA_INTERNAL', severity=CASeverity.FATAL, code=17,
        desc="Channel Access Internal Failure")
    ECA_DBLCLFAIL = _ca_status(
        'ECA_DBLCLFAIL', severity=CASeverity.WARNING, code=18,
        desc="The requested local DB operation failed",
        defunct=True)
    ECA_GETFAIL = _ca_status(
        'ECA_GETFAIL', severity=CASeverity.WARNING, code=19,
        desc="Channel read request failed")
    ECA_PUTFAIL = _ca_status(
        'ECA_PUTFAIL', severity=CASeverity.WARNING, code=20,
        desc="Channel write request failed")
    ECA_ADDFAIL = _ca_status(
        'ECA_ADDFAIL', severity=CASeverity.WARNING, code=21,
        desc="Channel subscription request failed",
        defunct=True)
    ECA_BADCOUNT = _ca_status(
        'ECA_BADCOUNT', severity=CASeverity.WARNING, code=22,
        desc="Invalid element count requested")
    ECA_BADSTR = _ca_status(
        'ECA_BADSTR', severity=CASeverity.ERROR, code=23,
        desc="Invalid string")
    ECA_DISCONN = _ca_status(
        'ECA_DISCONN', severity=CASeverity.WARNING, code=24,
        desc="Virtual circuit disconnect")
    ECA_DBLCHNL = _ca_status(
        'ECA_DBLCHNL', severity=CASeverity.WARNING, code=25,
        desc="Identical process variable name on multiple servers")
    ECA_EVDISALLOW = _ca_status(
        'ECA_EVDISALLOW', severity=CASeverity.ERROR, code=26,
        desc=("Request inappropriate within subscription (monitor) update "
              "callback"))
    ECA_BUILDGET = _ca_status(
        'ECA_BUILDGET', severity=CASeverity.WARNING, code=27,
        desc=("Database value get for that channel failed during channel "
              "search"),
        defunct=True)
    ECA_NEEDSFP = _ca_status(
        'ECA_NEEDSFP', severity=CASeverity.WARNING, code=28,
        desc=("Unable to initialize without the vxWorks VX_FP_TASKtask "
              "option set"),
        defunct=True)
    ECA_OVEVFAIL = _ca_status(
        'ECA_OVEVFAIL', severity=CASeverity.WARNING, code=29,
        desc=("Event queue overflow has prevented first pass event after "
              "event add"),
        defunct=True)
    ECA_BADMONID = _ca_status(
        'ECA_BADMONID', severity=CASeverity.ERROR, code=30,
        desc="Bad event subscription (monitor) identifier")
    ECA_NEWADDR = _ca_status(
        'ECA_NEWADDR', severity=CASeverity.WARNING, code=31,
        desc="Remote channel has new network address",
        defunct=True)
    ECA_NEWCONN = _ca_status(
        'ECA_NEWCONN', severity=CASeverity.INFO, code=32,
        desc="New or resumed network connection",
        defunct=True)
    ECA_NOCACTX = _ca_status(
        'ECA_NOCACTX', severity=CASeverity.WARNING, code=33,
        desc="Specified task isnt a member of a CA context",
        defunct=True)
    ECA_DEFUNCT = _ca_status(
        'ECA_DEFUNCT', severity=CASeverity.FATAL, code=34,
        desc="Attempt to use defunct CA feature failed",
        defunct=True)
    ECA_EMPTYSTR = _ca_status(
        'ECA_EMPTYSTR', severity=CASeverity.WARNING, code=35,
        desc="The supplied string is empty",
        defunct=True)
    ECA_NOREPEATER = _ca_status(
        'ECA_NOREPEATER', severity=CASeverity.WARNING, code=36,
        desc=("Unable to spawn the CA repeater thread; auto reconnect will "
              "fail"),
        defunct=True)
    ECA_NOCHANMSG = _ca_status(
        'ECA_NOCHANMSG', severity=CASeverity.WARNING, code=37,
        desc="No channel id match for search reply; search reply ignored",
        defunct=True)
    ECA_DLCKREST = _ca_status(
        'ECA_DLCKREST', severity=CASeverity.WARNING, code=38,
        desc="Reseting dead connection; will try to reconnect",
        defunct=True)
    ECA_SERVBEHIND = _ca_status(
        'ECA_SERVBEHIND', severity=CASeverity.WARNING, code=39,
        desc=("Server (IOC) has fallen behind or is not responding; still "
              "waiting"),
        defunct=True)
    ECA_NOCAST = _ca_status(
        'ECA_NOCAST', severity=CASeverity.WARNING, code=40,
        desc="No internet interface with broadcast available",
        defunct=True)
    ECA_BADMASK = _ca_status(
        'ECA_BADMASK', severity=CASeverity.ERROR, code=41,
        desc="Invalid event selection mask")
    ECA_IODONE = _ca_status(
        'ECA_IODONE', severity=CASeverity.INFO, code=42,
        desc="IO operations have completed")
    ECA_IOINPROGRESS = _ca_status(
        'ECA_IOINPROGRESS', severity=CASeverity.INFO, code=43,
        desc="IO operations are in progress")
    ECA_BADSYNCGRP = _ca_status(
        'ECA_BADSYNCGRP', severity=CASeverity.ERROR, code=44,
        desc="Invalid synchronous group identifier")
    ECA_PUTCBINPROG = _ca_status(
        'ECA_PUTCBINPROG', severity=CASeverity.ERROR, code=45,
        desc="Put callback timed out")
    ECA_NORDACCESS = _ca_status(
        'ECA_NORDACCESS', severity=CASeverity.WARNING, code=46,
        desc="Read access denied")
    ECA_NOWTACCESS = _ca_status(
        'ECA_NOWTACCESS', severity=CASeverity.WARNING, code=47,
        desc="Write access denied")
    ECA_ANACHRONISM = _ca_status(
        'ECA_ANACHRONISM', severity=CASeverity.ERROR, code=48,
        desc="Requested feature is no longer supported")
    ECA_NOSEARCHADDR = _ca_status(
        'ECA_NOSEARCHADDR', severity=CASeverity.WARNING, code=49,
        desc="Empty PV search address list")
    ECA_NOCONVERT = _ca_status(
        'ECA_NOCONVERT', severity=CASeverity.WARNING, code=50,
        desc="No reasonable data conversion between client and server types")
    ECA_BADCHID = _ca_status(
        'ECA_BADCHID', severity=CASeverity.ERROR, code=51,
        desc="Invalid channel identifier")
    ECA_BADFUNCPTR = _ca_status(
        'ECA_BADFUNCPTR', severity=CASeverity.ERROR, code=52,
        desc="Invalid function pointer")
    ECA_ISATTACHED = _ca_status(
        'ECA_ISATTACHED', severity=CASeverity.WARNING, code=53,
        desc="Thread is already attached to a client context")
    ECA_UNAVAILINSERV = _ca_status(
        'ECA_UNAVAILINSERV', severity=CASeverity.WARNING, code=54,
        desc="Not supported by attached service")
    ECA_CHANDESTROY = _ca_status(
        'ECA_CHANDESTROY', severity=CASeverity.WARNING, code=55,
        desc="User destroyed channel")
    ECA_BADPRIORITY = _ca_status(
        'ECA_BADPRIORITY', severity=CASeverity.ERROR, code=56,
        desc="Invalid channel priority")
    ECA_NOTTHREADED = _ca_status(
        'ECA_NOTTHREADED', severity=CASeverity.ERROR, code=57,
        desc=("Preemptive callback not enabled - additional threads may not "
              "join context"))
    ECA_16KARRAYCLIENT = _ca_status(
        'ECA_16KARRAYCLIENT', severity=CASeverity.WARNING, code=58,
        desc=("Client’s protocol revision does not support transfers "
              "exceeding 16k bytes"))
    ECA_CONNSEQTMO = _ca_status(
        'ECA_CONNSEQTMO', severity=CASeverity.WARNING, code=59,
        desc="Virtual circuit connection sequence aborted")
    ECA_UNRESPTMO = _ca_status(
        'ECA_UNRESPTMO', severity=CASeverity.WARNING, code=60,
        desc="Virtual circuit unresponsive")
# Lookup table from the full status integer (message number plus
# severity bits) back to its CAStatusCode record.
eca_value_to_status = {
    member.value.code_with_severity: member.value for member in CAStatus
}
def ensure_eca_value(status):
    """Normalize a status into its integer ``code_with_severity`` form.

    Accepts a raw integer (returned as-is), a :class:`CAStatusCode`
    record, or a :class:`CAStatus` member.

    Raises
    ------
    TypeError
        If *status* is none of the accepted types.  (Previously the
        function silently fell through and returned ``None``, letting
        bad values propagate into messages.)
    """
    if isinstance(status, int):
        return status
    if isinstance(status, CAStatusCode):
        return status.code_with_severity
    if isinstance(status, CAStatus):
        return status.value.code_with_severity
    raise TypeError(
        "Expected an int, CAStatusCode, or CAStatus; got {!r}".format(status))
| 41.196721 | 79 | 0.669001 |
from enum import IntEnum, Enum
from collections import namedtuple
__all__ = ('CAStatus', 'CASeverity')
CAStatusCode = namedtuple('CAStatusCode',
'name code code_with_severity severity success '
'defunct description')
class CASeverity(IntEnum):
INFO = 3
ERROR = 2
SUCCESS = 1
WARNING = 0
SEVERE = 4
FATAL = (ERROR | SEVERE)
def __str__(self):
return self.name
def _ca_status(name, severity: CASeverity, code, desc, *, defunct=False):
mask_msg = 0xFFF8
mask_severity = 0x0007
mask_success = 0x0001
shift_message = 0x03
shift_severity = 0x00
shift_success = 0x00
code_with_severity = (code << shift_message) & mask_msg
code_with_severity |= (severity << shift_severity) & mask_severity
success = (severity & mask_success) >> shift_success
assert ((severity & mask_severity) >> shift_severity) == severity
return CAStatusCode(name=name, code=code,
code_with_severity=code_with_severity,
severity=severity, success=success, description=desc,
defunct=defunct)
class CAStatus(Enum):
ECA_NORMAL = _ca_status(
'ECA_NORMAL', severity=CASeverity.SUCCESS, code=0,
desc="Normal successful completion")
ECA_MAXIOC = _ca_status(
'ECA_MAXIOC', severity=CASeverity.ERROR, code=1,
desc="Maximum simultaneous IOC connections exceeded",
defunct=True)
ECA_UKNHOST = _ca_status(
'ECA_UKNHOST', severity=CASeverity.ERROR, code=2,
desc="Unknown internet host",
defunct=True)
ECA_UKNSERV = _ca_status(
'ECA_UKNSERV', severity=CASeverity.ERROR, code=3,
desc="Unknown internet service",
defunct=True)
ECA_SOCK = _ca_status(
'ECA_SOCK', severity=CASeverity.ERROR, code=4,
desc="Unable to allocate a new socket",
defunct=True)
ECA_CONN = _ca_status(
'ECA_CONN', severity=CASeverity.WARNING, code=5,
desc="Unable to connect to internet host or service",
defunct=True)
ECA_ALLOCMEM = _ca_status(
'ECA_ALLOCMEM', severity=CASeverity.WARNING, code=6,
desc="Unable to allocate additional dynamic memory")
ECA_UKNCHAN = _ca_status(
'ECA_UKNCHAN', severity=CASeverity.WARNING, code=7,
desc="Unknown IO channel",
defunct=True)
ECA_UKNFIELD = _ca_status(
'ECA_UKNFIELD', severity=CASeverity.WARNING, code=8,
desc="Record field specified inappropriate for channel specified",
defunct=True)
ECA_TOLARGE = _ca_status(
'ECA_TOLARGE', severity=CASeverity.WARNING, code=9,
desc=("The requested data transfer is greater than available memory "
"or EPICS_CA_MAX_ARRAY_BYTES"))
ECA_TIMEOUT = _ca_status(
'ECA_TIMEOUT', severity=CASeverity.WARNING, code=10,
desc="User specified timeout on IO operation expired")
ECA_NOSUPPORT = _ca_status(
'ECA_NOSUPPORT', severity=CASeverity.WARNING, code=11,
desc="Sorry, that feature is planned but not supported at this time",
defunct=True)
ECA_STRTOBIG = _ca_status(
'ECA_STRTOBIG', severity=CASeverity.WARNING, code=12,
desc="The supplied string is unusually large",
defunct=True)
ECA_DISCONNCHID = _ca_status(
'ECA_DISCONNCHID', severity=CASeverity.ERROR, code=13,
desc=("The request was ignored because the specified channel is "
"disconnected"),
defunct=True)
ECA_BADTYPE = _ca_status(
'ECA_BADTYPE', severity=CASeverity.ERROR, code=14,
desc="The data type specifed is invalid")
ECA_CHIDNOTFND = _ca_status(
'ECA_CHIDNOTFND', severity=CASeverity.INFO, code=15,
desc="Remote Channel not found",
defunct=True)
ECA_CHIDRETRY = _ca_status(
'ECA_CHIDRETRY', severity=CASeverity.INFO, code=16,
desc="Unable to locate all user specified channels",
defunct=True)
ECA_INTERNAL = _ca_status(
'ECA_INTERNAL', severity=CASeverity.FATAL, code=17,
desc="Channel Access Internal Failure")
ECA_DBLCLFAIL = _ca_status(
'ECA_DBLCLFAIL', severity=CASeverity.WARNING, code=18,
desc="The requested local DB operation failed",
defunct=True)
ECA_GETFAIL = _ca_status(
'ECA_GETFAIL', severity=CASeverity.WARNING, code=19,
desc="Channel read request failed")
ECA_PUTFAIL = _ca_status(
'ECA_PUTFAIL', severity=CASeverity.WARNING, code=20,
desc="Channel write request failed")
ECA_ADDFAIL = _ca_status(
'ECA_ADDFAIL', severity=CASeverity.WARNING, code=21,
desc="Channel subscription request failed",
defunct=True)
ECA_BADCOUNT = _ca_status(
'ECA_BADCOUNT', severity=CASeverity.WARNING, code=22,
desc="Invalid element count requested")
ECA_BADSTR = _ca_status(
'ECA_BADSTR', severity=CASeverity.ERROR, code=23,
desc="Invalid string")
ECA_DISCONN = _ca_status(
'ECA_DISCONN', severity=CASeverity.WARNING, code=24,
desc="Virtual circuit disconnect")
ECA_DBLCHNL = _ca_status(
'ECA_DBLCHNL', severity=CASeverity.WARNING, code=25,
desc="Identical process variable name on multiple servers")
ECA_EVDISALLOW = _ca_status(
'ECA_EVDISALLOW', severity=CASeverity.ERROR, code=26,
desc=("Request inappropriate within subscription (monitor) update "
"callback"))
ECA_BUILDGET = _ca_status(
'ECA_BUILDGET', severity=CASeverity.WARNING, code=27,
desc=("Database value get for that channel failed during channel "
"search"),
defunct=True)
ECA_NEEDSFP = _ca_status(
'ECA_NEEDSFP', severity=CASeverity.WARNING, code=28,
desc=("Unable to initialize without the vxWorks VX_FP_TASKtask "
"option set"),
defunct=True)
ECA_OVEVFAIL = _ca_status(
'ECA_OVEVFAIL', severity=CASeverity.WARNING, code=29,
desc=("Event queue overflow has prevented first pass event after "
"event add"),
defunct=True)
ECA_BADMONID = _ca_status(
'ECA_BADMONID', severity=CASeverity.ERROR, code=30,
desc="Bad event subscription (monitor) identifier")
ECA_NEWADDR = _ca_status(
'ECA_NEWADDR', severity=CASeverity.WARNING, code=31,
desc="Remote channel has new network address",
defunct=True)
ECA_NEWCONN = _ca_status(
'ECA_NEWCONN', severity=CASeverity.INFO, code=32,
desc="New or resumed network connection",
defunct=True)
ECA_NOCACTX = _ca_status(
'ECA_NOCACTX', severity=CASeverity.WARNING, code=33,
desc="Specified task isnt a member of a CA context",
defunct=True)
ECA_DEFUNCT = _ca_status(
'ECA_DEFUNCT', severity=CASeverity.FATAL, code=34,
desc="Attempt to use defunct CA feature failed",
defunct=True)
ECA_EMPTYSTR = _ca_status(
'ECA_EMPTYSTR', severity=CASeverity.WARNING, code=35,
desc="The supplied string is empty",
defunct=True)
ECA_NOREPEATER = _ca_status(
'ECA_NOREPEATER', severity=CASeverity.WARNING, code=36,
desc=("Unable to spawn the CA repeater thread; auto reconnect will "
"fail"),
defunct=True)
ECA_NOCHANMSG = _ca_status(
'ECA_NOCHANMSG', severity=CASeverity.WARNING, code=37,
desc="No channel id match for search reply; search reply ignored",
defunct=True)
ECA_DLCKREST = _ca_status(
'ECA_DLCKREST', severity=CASeverity.WARNING, code=38,
desc="Reseting dead connection; will try to reconnect",
defunct=True)
ECA_SERVBEHIND = _ca_status(
'ECA_SERVBEHIND', severity=CASeverity.WARNING, code=39,
desc=("Server (IOC) has fallen behind or is not responding; still "
"waiting"),
defunct=True)
ECA_NOCAST = _ca_status(
'ECA_NOCAST', severity=CASeverity.WARNING, code=40,
desc="No internet interface with broadcast available",
defunct=True)
ECA_BADMASK = _ca_status(
'ECA_BADMASK', severity=CASeverity.ERROR, code=41,
desc="Invalid event selection mask")
ECA_IODONE = _ca_status(
'ECA_IODONE', severity=CASeverity.INFO, code=42,
desc="IO operations have completed")
ECA_IOINPROGRESS = _ca_status(
'ECA_IOINPROGRESS', severity=CASeverity.INFO, code=43,
desc="IO operations are in progress")
ECA_BADSYNCGRP = _ca_status(
'ECA_BADSYNCGRP', severity=CASeverity.ERROR, code=44,
desc="Invalid synchronous group identifier")
ECA_PUTCBINPROG = _ca_status(
'ECA_PUTCBINPROG', severity=CASeverity.ERROR, code=45,
desc="Put callback timed out")
ECA_NORDACCESS = _ca_status(
'ECA_NORDACCESS', severity=CASeverity.WARNING, code=46,
desc="Read access denied")
ECA_NOWTACCESS = _ca_status(
'ECA_NOWTACCESS', severity=CASeverity.WARNING, code=47,
desc="Write access denied")
ECA_ANACHRONISM = _ca_status(
'ECA_ANACHRONISM', severity=CASeverity.ERROR, code=48,
desc="Requested feature is no longer supported")
ECA_NOSEARCHADDR = _ca_status(
'ECA_NOSEARCHADDR', severity=CASeverity.WARNING, code=49,
desc="Empty PV search address list")
ECA_NOCONVERT = _ca_status(
'ECA_NOCONVERT', severity=CASeverity.WARNING, code=50,
desc="No reasonable data conversion between client and server types")
ECA_BADCHID = _ca_status(
'ECA_BADCHID', severity=CASeverity.ERROR, code=51,
desc="Invalid channel identifier")
ECA_BADFUNCPTR = _ca_status(
'ECA_BADFUNCPTR', severity=CASeverity.ERROR, code=52,
desc="Invalid function pointer")
ECA_ISATTACHED = _ca_status(
'ECA_ISATTACHED', severity=CASeverity.WARNING, code=53,
desc="Thread is already attached to a client context")
ECA_UNAVAILINSERV = _ca_status(
'ECA_UNAVAILINSERV', severity=CASeverity.WARNING, code=54,
desc="Not supported by attached service")
ECA_CHANDESTROY = _ca_status(
'ECA_CHANDESTROY', severity=CASeverity.WARNING, code=55,
desc="User destroyed channel")
ECA_BADPRIORITY = _ca_status(
'ECA_BADPRIORITY', severity=CASeverity.ERROR, code=56,
desc="Invalid channel priority")
ECA_NOTTHREADED = _ca_status(
'ECA_NOTTHREADED', severity=CASeverity.ERROR, code=57,
desc=("Preemptive callback not enabled - additional threads may not "
"join context"))
ECA_16KARRAYCLIENT = _ca_status(
'ECA_16KARRAYCLIENT', severity=CASeverity.WARNING, code=58,
desc=("Client’s protocol revision does not support transfers "
"exceeding 16k bytes"))
ECA_CONNSEQTMO = _ca_status(
'ECA_CONNSEQTMO', severity=CASeverity.WARNING, code=59,
desc="Virtual circuit connection sequence aborted")
ECA_UNRESPTMO = _ca_status(
'ECA_UNRESPTMO', severity=CASeverity.WARNING, code=60,
desc="Virtual circuit unresponsive")
member.value
for member in CAStatus.__members__.values()}
def ensure_eca_value(status):
if isinstance(status, int):
return status
if isinstance(status, CAStatusCode):
return status.code_with_severity
if isinstance(status, CAStatus):
return status.value.code_with_severity
| true | true |
1c2ef098e30d95886546c9711570c82a2094e371 | 3,318 | py | Python | todo_drf/settings.py | stifferdoroskevich/DRF_TODO_APP | 7c2747970f30765edc730d96aad43ef6e2a9abbb | [
"MIT"
] | null | null | null | todo_drf/settings.py | stifferdoroskevich/DRF_TODO_APP | 7c2747970f30765edc730d96aad43ef6e2a9abbb | [
"MIT"
] | null | null | null | todo_drf/settings.py | stifferdoroskevich/DRF_TODO_APP | 7c2747970f30765edc730d96aad43ef6e2a9abbb | [
"MIT"
] | null | null | null | """
Django settings for todo_drf project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is read from a hard-coded absolute path that only
# exists on the original author's machine; consider an environment
# variable (e.g. DJANGO_SECRET_KEY) instead — TODO confirm deployment.
with open('/home/stiffer/projects/KEYS/django_secret_key.txt') as f:
    SECRET_KEY = f.read().strip()

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is only acceptable while DEBUG is True; production needs
# the served hostnames listed here.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'api.apps.ApiConfig',
    'rest_framework',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'todo_drf.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'todo_drf.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# SQLite file lives alongside manage.py (development default).

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.72093 | 91 | 0.699216 |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
with open('/home/stiffer/projects/KEYS/django_secret_key.txt') as f:
SECRET_KEY = f.read().strip()
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api.apps.ApiConfig',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo_drf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo_drf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true | true |
1c2ef1d5f17d3fdb3154cd74cc4dcbb56149efa8 | 316 | py | Python | python/JournalTimeStamp.Description.py | BIMpraxis/Journalysis | af0c042b28d01ba5e44dafc2bbe9556434e897b8 | [
"MIT"
] | 26 | 2017-11-23T19:30:03.000Z | 2022-02-09T10:35:10.000Z | python/JournalTimeStamp.Description.py | BIMpraxis/Journalysis | af0c042b28d01ba5e44dafc2bbe9556434e897b8 | [
"MIT"
] | 51 | 2017-11-16T15:02:32.000Z | 2022-03-01T13:51:58.000Z | python/JournalTimeStamp.Description.py | BIMpraxis/Journalysis | af0c042b28d01ba5e44dafc2bbe9556434e897b8 | [
"MIT"
] | 9 | 2017-11-20T09:20:01.000Z | 2021-09-15T13:08:30.000Z | import clr
def process_input(func, input):
    """Apply *func* to a single value, or map it over a list of values."""
    if not isinstance(input, list):
        return func(input)
    return [func(item) for item in input]
def journalTimeStampDescription(input):
    """Return the Description of a JournalTimeStamp node, else None.

    The node type is identified by its repr string, matching the
    convention used by the other Journalysis node wrappers.
    """
    if input.__repr__() != 'JournalTimeStamp':
        return None
    return input.Description
OUT = process_input(journalTimeStampDescription,IN[0]) | 28.727273 | 68 | 0.772152 | import clr
def process_input(func, input):
if isinstance(input, list): return [func(x) for x in input]
else: return func(input)
def journalTimeStampDescription(input):
if input.__repr__() == 'JournalTimeStamp': return input.Description
else: return None
OUT = process_input(journalTimeStampDescription,IN[0]) | true | true |
1c2ef1f58935801850349dc48593c76787ad4780 | 5,370 | py | Python | pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/schemas/fts_configurations/tests/test_fts_configuration_get_dictionaries.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/schemas/fts_configurations/tests/test_fts_configuration_get_dictionaries.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/schemas/fts_configurations/tests/test_fts_configuration_get_dictionaries.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
from unittest.mock import patch
from pgadmin.browser.server_groups.servers.databases.schemas \
.fts_configurations.tests import utils as fts_config_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils import server_utils as server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression import trigger_funcs_utils as fts_config_funcs_utils
from regression.python_test_utils import test_utils as utils
from . import utils as fts_configurations_utils
class FTSConfigurationDependencyDependentTestCase(BaseTestGenerator):
    """ This class will get the dependency and dependents FTS configuration
    under test schema. """
    # One test scenario per entry in the JSON-driven test-case table.
    scenarios = utils.generate_scenarios(
        'get_fts_configuration_get_dictionaries',
        fts_configurations_utils.test_cases
    )

    def setUp(self):
        """Connect to the test server and create the FTS configuration
        fixture (plus a trigger function) used by the scenarios."""
        # Reuse the most recently created schema/database fixtures.
        self.schema_data = parent_node_dict['schema'][-1]
        self.server_id = self.schema_data['server_id']
        self.db_id = self.schema_data['db_id']
        self.schema_name = self.schema_data['schema_name']
        self.schema_id = self.schema_data['schema_id']
        self.extension_name = "postgres_fdw"
        self.db_name = parent_node_dict["database"][-1]["db_name"]
        self.db_user = self.server["username"]
        # Randomized names avoid collisions across test runs.
        self.func_name = "fts_configuration_func_%s" % str(uuid.uuid4())[1:8]
        self.fts_configuration_name = "fts_configuration_delete_%s" % (
            str(uuid.uuid4())[1:8])
        server_con = server_utils.connect_server(self, self.server_id)
        if not server_con["info"] == "Server connected.":
            raise Exception("Could not connect to server to add resource "
                            "groups.")
        # NOTE(review): server_version stays 0 even when the connection
        # reports a real version; only the skip check below uses the
        # reported version — confirm create_trigger_function tolerates 0.
        server_version = 0
        if "type" in server_con["data"]:
            if server_con["data"]["version"] < 90500:
                message = "FTS Configuration are not supported by PG9.4 " \
                          "and PPAS9.4 and below."
                self.skipTest(message)
        self.function_info = fts_config_funcs_utils.create_trigger_function(
            self.server, self.db_name, self.schema_name, self.func_name,
            server_version)
        self.fts_configuration_id = fts_configurations_utils. \
            create_fts_configuration(
                self.server, self.db_name, self.schema_name,
                self.fts_configuration_name)

    def get_fts_configuration_dictionaries(self):
        """
        This functions returns the fts configuration dictionaries
        :return: fts configuration dictionaries
        """
        return self.tester.get(
            self.url + str(utils.SERVER_GROUP) + '/' +
            str(self.server_id) + '/' +
            str(self.db_id) + '/' + str(self.schema_id) + '/',
            content_type='html/json')

    def runTest(self):
        """ This function will add new FTS configuration under test schema. """
        db_con = database_utils.connect_database(self,
                                                 utils.SERVER_GROUP,
                                                 self.server_id,
                                                 self.db_id)
        if not db_con["info"] == "Database connected.":
            raise Exception("Could not connect to database.")
        schema_response = schema_utils.verify_schemas(self.server,
                                                      self.db_name,
                                                      self.schema_name)
        if not schema_response:
            raise Exception("Could not find the schema.")
        fts_conf_response = fts_configurations_utils.verify_fts_configuration(
            self.server, self.db_name, self.fts_configuration_name
        )
        if not fts_conf_response:
            raise Exception("Could not find the FTS Configuration.")

        if self.is_positive_test:
            response = self.get_fts_configuration_dictionaries()
        else:
            # Negative scenarios patch an internal call so the endpoint's
            # error path is exercised.
            if hasattr(self, "error_fetching_fts_configuration"):
                with patch(self.mock_data["function_name"],
                           return_value=eval(self.mock_data["return_value"])):
                    response = self.get_fts_configuration_dictionaries()

        actual_response_code = response.status_code
        expected_response_code = self.expected_data['status_code']
        self.assertEqual(actual_response_code, expected_response_code)

    def tearDown(self):
        """This function delete the fts_config and disconnect the test
        database."""
        fts_config_utils.delete_fts_configurations(self.server, self.db_name,
                                                   self.schema_name,
                                                   self.fts_configuration_name)
        database_utils.disconnect_database(self, self.server_id,
                                           self.db_id)
| 44.016393 | 79 | 0.613035 | true | true | |
1c2ef31514c7d509ea3f4e1e79f2a7291f35bc8b | 4,174 | py | Python | nuitka/utils/Download.py | pkulev/Nuitka | e7b246ad0dcdef16398cecf1013cb7a03a6fe721 | [
"Apache-2.0"
] | 1 | 2021-05-25T12:48:28.000Z | 2021-05-25T12:48:28.000Z | nuitka/utils/Download.py | pkulev/Nuitka | e7b246ad0dcdef16398cecf1013cb7a03a6fe721 | [
"Apache-2.0"
] | null | null | null | nuitka/utils/Download.py | pkulev/Nuitka | e7b246ad0dcdef16398cecf1013cb7a03a6fe721 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Download utilities and extract locally when allowed.
Mostly used on Windows, for dependency walker and ccache binaries.
"""
import os
from nuitka import Tracing
from nuitka.__past__ import ( # pylint: disable=I0021,redefined-builtin
raw_input,
urlretrieve,
)
from nuitka.utils import Utils
from .AppDirs import getAppDir
from .FileOperations import addFileExecutablePermission, deleteFile, makePath
def getCachedDownload(
    url,
    binary,
    flatten,
    is_arch_specific,
    specifity,
    message,
    reject,
    assume_yes_for_downloads,
):
    """Download ``url`` into Nuitka's per-tool cache and return the binary path.

    The archive is fetched once (after asking the user, unless
    ``assume_yes_for_downloads`` is set), extracted next to itself, and the
    resulting executable is made executable.

    :param url: Download URL; its basename becomes the local archive name.
    :param binary: Filename of the executable expected after extraction.
    :param flatten: If True, strip directory components while extracting.
    :param is_arch_specific: If True, add the machine architecture to the
        cache path so different architectures do not clash.
    :param specifity: Extra cache sub-directory component (e.g. a version).
    :param message: Prompt text shown before asking to download.
    :param reject: Error message used to abort when the download is refused
        or the binary is missing, or None to return None instead.
    :param assume_yes_for_downloads: Skip the interactive prompt.
    :return: Path of the cached executable, or None when declined and
        ``reject`` is None.
    """
    # Many branches to deal with, pylint: disable=too-many-branches
    nuitka_app_dir = getAppDir()
    nuitka_app_dir = os.path.join(
        nuitka_app_dir, os.path.basename(binary).replace(".exe", "")
    )
    if is_arch_specific:
        nuitka_app_dir = os.path.join(nuitka_app_dir, Utils.getArchitecture())
    if specifity:
        nuitka_app_dir = os.path.join(nuitka_app_dir, specifity)
    download_path = os.path.join(nuitka_app_dir, os.path.basename(url))
    exe_path = os.path.join(nuitka_app_dir, binary)
    makePath(nuitka_app_dir)
    # Only ask/download when neither the archive nor the binary is cached.
    if not os.path.isfile(download_path) and not os.path.isfile(exe_path):
        if assume_yes_for_downloads:
            reply = "y"
        else:
            Tracing.printLine(
                """\
%s
Is it OK to download and put it in "%s".
No installer needed, cached, one time question.
Proceed and download? [Yes]/No """
                % (message, nuitka_app_dir)
            )
            Tracing.flushStandardOutputs()
            try:
                reply = raw_input()
            except EOFError:
                # Non-interactive stdin counts as a refusal.
                reply = "no"
        if reply.lower() in ("no", "n"):
            if reject is not None:
                Tracing.general.sysexit(reject)
        else:
            Tracing.general.info("Downloading '%s'." % url)
            try:
                urlretrieve(url, download_path)
            except Exception:  # Any kind of error, pylint: disable=broad-except
                Tracing.general.sysexit(
                    "Failed to download '%s'. Contents should manually be extracted to '%s'."
                    % (url, download_path)
                )
    # Extract the archive if the binary is not there yet.
    if not os.path.isfile(exe_path) and os.path.isfile(download_path):
        Tracing.general.info("Extracting to '%s'" % exe_path)
        import zipfile
        try:
            zip_file = zipfile.ZipFile(download_path)
            for zip_info in zip_file.infolist():
                # Skip directory entries.
                if zip_info.filename[-1] == "/":
                    continue
                if flatten:
                    zip_info.filename = os.path.basename(zip_info.filename)
                zip_file.extract(zip_info, nuitka_app_dir)
        except Exception:  # Catching anything zip throws, pylint: disable=broad-except
            Tracing.general.info("Problem with the downloaded zip file, deleting it.")
            deleteFile(binary, must_exist=False)
            deleteFile(download_path, must_exist=True)
            Tracing.general.sysexit(
                "Error, need %r as extracted from %r." % (binary, url)
            )
    # Check success here, and make sure it's executable.
    if os.path.isfile(exe_path):
        addFileExecutablePermission(exe_path)
    else:
        if reject:
            Tracing.general.sysexit(reject)
        exe_path = None
    return exe_path
| 30.246377 | 93 | 0.629612 |
import os
from nuitka import Tracing
from nuitka.__past__ import (
raw_input,
urlretrieve,
)
from nuitka.utils import Utils
from .AppDirs import getAppDir
from .FileOperations import addFileExecutablePermission, deleteFile, makePath
def getCachedDownload(
    url,
    binary,
    flatten,
    is_arch_specific,
    specifity,
    message,
    reject,
    assume_yes_for_downloads,
):
    """Download ``url`` into a per-tool cache directory and return the binary path.

    The archive is fetched once (after prompting, unless
    ``assume_yes_for_downloads`` is set), extracted in place, and the
    resulting executable made executable.

    :param url: Download URL; its basename becomes the local archive name.
    :param binary: Filename of the executable expected after extraction.
    :param flatten: If True, strip directory components while extracting.
    :param is_arch_specific: If True, add the machine architecture to the
        cache path.
    :param specifity: Extra cache sub-directory component (e.g. a version).
    :param message: Prompt text shown before asking to download.
    :param reject: Error message used to abort when the download is refused
        or the binary is missing, or None to return None instead.
    :param assume_yes_for_downloads: Skip the interactive prompt.
    :return: Path of the cached executable, or None when declined and
        ``reject`` is None.
    """
    nuitka_app_dir = getAppDir()
    nuitka_app_dir = os.path.join(
        nuitka_app_dir, os.path.basename(binary).replace(".exe", "")
    )
    if is_arch_specific:
        nuitka_app_dir = os.path.join(nuitka_app_dir, Utils.getArchitecture())
    if specifity:
        nuitka_app_dir = os.path.join(nuitka_app_dir, specifity)
    download_path = os.path.join(nuitka_app_dir, os.path.basename(url))
    exe_path = os.path.join(nuitka_app_dir, binary)
    makePath(nuitka_app_dir)
    # Only ask/download when neither the archive nor the binary is cached.
    if not os.path.isfile(download_path) and not os.path.isfile(exe_path):
        if assume_yes_for_downloads:
            reply = "y"
        else:
            Tracing.printLine(
                """\
%s
Is it OK to download and put it in "%s".
No installer needed, cached, one time question.
Proceed and download? [Yes]/No """
                % (message, nuitka_app_dir)
            )
            Tracing.flushStandardOutputs()
            try:
                reply = raw_input()
            except EOFError:
                # Non-interactive stdin counts as a refusal.
                reply = "no"
        if reply.lower() in ("no", "n"):
            if reject is not None:
                Tracing.general.sysexit(reject)
        else:
            Tracing.general.info("Downloading '%s'." % url)
            try:
                urlretrieve(url, download_path)
            except Exception:  # broad: any download failure aborts with a hint
                Tracing.general.sysexit(
                    "Failed to download '%s'. Contents should manually be extracted to '%s'."
                    % (url, download_path)
                )
    # Extract the archive if the binary is not there yet.
    if not os.path.isfile(exe_path) and os.path.isfile(download_path):
        Tracing.general.info("Extracting to '%s'" % exe_path)
        import zipfile
        try:
            zip_file = zipfile.ZipFile(download_path)
            for zip_info in zip_file.infolist():
                # Skip directory entries.
                if zip_info.filename[-1] == "/":
                    continue
                if flatten:
                    zip_info.filename = os.path.basename(zip_info.filename)
                zip_file.extract(zip_info, nuitka_app_dir)
        except Exception:  # broad: any zip problem invalidates the cache
            Tracing.general.info("Problem with the downloaded zip file, deleting it.")
            deleteFile(binary, must_exist=False)
            deleteFile(download_path, must_exist=True)
            Tracing.general.sysexit(
                "Error, need %r as extracted from %r." % (binary, url)
            )
    # Verify success and make the binary executable.
    if os.path.isfile(exe_path):
        addFileExecutablePermission(exe_path)
    else:
        if reject:
            Tracing.general.sysexit(reject)
        exe_path = None
    return exe_path
| true | true |
1c2ef394b50092493cbc4ba243d5fe0be602a15b | 790 | py | Python | applications/StructuralMechanicsApplication/python_scripts/auxiliar_methods_solvers.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | applications/StructuralMechanicsApplication/python_scripts/auxiliar_methods_solvers.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | applications/StructuralMechanicsApplication/python_scripts/auxiliar_methods_solvers.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | # Importing the Kratos Library
import KratosMultiphysics as KM
def GetBDFIntegrationOrder(scheme_type):
    """Derive the BDF integration order from a scheme-type string.

    "backward_euler" maps to order 1; a string such as "bdf2" yields the
    trailing integer. A bare "bdf" is rejected because the order is missing.
    Orders above 2 trigger a warning, since they need a constant time step.
    """
    if scheme_type == "backward_euler":
        return 1
    if scheme_type == "bdf":
        raise Exception('Wrong input for scheme type: "bdf"! Please append the order to the bdf-scheme, e.g. "bdf2"')
    # Strip the "bdf" prefix; the remainder is the requested order.
    bdf_order = int(scheme_type.replace("bdf", ""))
    if bdf_order > 2:
        KM.Logger.PrintWarning("BDF", "Order {}; constant time step must be considered".format(bdf_order))
    return bdf_order
| 41.578947 | 252 | 0.682278 |
import KratosMultiphysics as KM
def GetBDFIntegrationOrder(scheme_type):
    """Return the BDF integration order implied by ``scheme_type``.

    "backward_euler" counts as order 1; otherwise the digits after the
    "bdf" prefix (e.g. "bdf2") give the order. A bare "bdf" is rejected.
    """
    if scheme_type == "backward_euler":
        order = 1
    else:
        if scheme_type == "bdf":
            raise Exception('Wrong input for scheme type: "bdf"! Please append the order to the bdf-scheme, e.g. "bdf2"')
        # Strip the "bdf" prefix; the remainder is the requested order.
        order = int(scheme_type.replace("bdf", ""))
        # Orders above 2 require a constant time step, hence the warning.
        if (order > 2):
            KM.Logger.PrintWarning("BDF", "Order {}; constant time step must be considered".format(order))
    return order
| true | true |
1c2ef4f00c256d8050804727ce8a2fb3e0de9f5d | 16,989 | py | Python | src/utsc/core/_vendor/bluecat_libraries/address_manager/api/models.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | src/utsc/core/_vendor/bluecat_libraries/address_manager/api/models.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | src/utsc/core/_vendor/bluecat_libraries/address_manager/api/models.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | # Copyright 2021 BlueCat Networks (USA) Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
"""Models representing all Address Manager object types supported in the API."""
import copy
import json
from .serialization import (
deserialize_joined_key_value_pairs,
serialize_joined_key_value_pairs,
serialize_joined_values,
serialize_possible_list,
deserialize_possible_list,
)
class APIEntity(dict):
    """Model for the BAM API object type APIEntity.

    Recognized keys:

    * ``id`` -- the entity's ID (int).
    * ``name`` -- the entity's name (str or None).
    * ``type`` -- a valid BAM object type.
    * ``properties`` -- optional additional properties (dict[str, str]).
    """

    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Convert an APIEntity (or equivalent dict) to the raw wire form.

        :param data: APIEntity object or dict equivalent.
        :return: Dict that, once JSON-serialized, can be passed to BAM endpoints.
        """
        raw = copy.deepcopy(data)
        raw["properties"] = serialize_joined_key_value_pairs(raw.get("properties"))
        return raw

    @staticmethod
    def from_raw_model(data: dict) -> "Optional[APIEntity]":
        """Build an APIEntity from a JSON-deserialized BAM response.

        :param data: Raw APIEntity dict as returned by a BAM endpoint.
        :return: Entity object, or None when BAM signals "no entity" via id 0.
        """
        raw = copy.deepcopy(data)
        if raw["id"] == 0:
            return None
        joined = raw.get("properties")
        raw["properties"] = deserialize_joined_key_value_pairs(joined) if joined else {}
        return APIEntity(raw)
class APIAccessRight(dict):
    """Model for the BAM API object type APIAccessRight.

    Recognized keys: ``entityId`` (int > 0, the object the right applies to),
    ``userId`` (int > 0, the right's owner), ``value`` (one of "HIDE",
    "VIEW", "ADD", "CHANGE", "FULL"), optional ``overrides`` (dict mapping
    object types of descendants to access right values) and optional
    ``properties`` (dict[str, str]).
    """

    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Serialize the dict-valued fields into BAM's joined string form.

        :param data: APIAccessRight object or dict equivalent.
        :return: Dict that, once JSON-serialized, can be passed to BAM endpoints.
        """
        raw = copy.deepcopy(data)
        for key in ("overrides", "properties"):
            raw[key] = serialize_joined_key_value_pairs(raw.get(key))
        return raw

    @staticmethod
    def from_raw_model(data: dict) -> "APIAccessRight":
        """Build an APIAccessRight from a JSON-deserialized BAM response.

        :param data: Raw APIAccessRight dict as returned by a BAM endpoint.
        :return: Access right object.
        """
        raw = copy.deepcopy(data)
        for key in ("overrides", "properties"):
            raw[key] = (
                deserialize_joined_key_value_pairs(raw[key]) if raw.get(key) else {}
            )
        return APIAccessRight(raw)
class APIDeploymentRole(dict):
    """Model for the BAM API object type APIDeploymentRole.

    Recognized keys: ``id`` (int), ``type`` (one of "NONE", "MASTER",
    "MASTER_HIDDEN", "SLAVE", "SLAVE_STEALTH", "FORWARDER", "STUB",
    "RECURSION", "PEER", "AD MASTER"), ``service`` ("DNS", "DHCP" or
    "TFTP"), ``entityId`` (int > 0, the deployed entity),
    ``serviceInterfaceId`` (int > 0, target server interface) and optional
    ``properties`` (dict[str, str]).
    """

    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Serialize ``properties`` into BAM's joined string form.

        :param data: APIDeploymentRole object or dict equivalent.
        :return: Dict that, once JSON-serialized, can be passed to BAM endpoints.
        """
        raw = copy.deepcopy(data)
        raw["properties"] = serialize_joined_key_value_pairs(raw.get("properties"))
        return raw

    @staticmethod
    def from_raw_model(data: dict) -> "Optional[APIDeploymentRole]":
        """Build a deployment role from a raw BAM dict; id 0 yields None.

        :param data: Raw APIDeploymentRole dict as returned by a BAM endpoint.
        :return: Deployment role object, or None when ``id`` is 0.
        """
        raw = copy.deepcopy(data)
        if raw.get("id") == 0:
            return None
        joined = raw.get("properties")
        raw["properties"] = deserialize_joined_key_value_pairs(joined) if joined else {}
        return APIDeploymentRole(raw)
class APIDeploymentOption(dict):
    """Model for the BAM API object type APIDeploymentOption.

    Recognized keys: ``id`` (int), ``type`` (a valid BAM option type),
    ``name`` (str), ``value`` (list[str] of option field values) and
    optional ``properties`` (dict[str, str]).
    """

    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Serialize ``value`` and ``properties`` into BAM's string forms.

        :param data: APIDeploymentOption object or dict equivalent.
        :return: Dict that, once JSON-serialized, can be passed to BAM endpoints.
        """
        raw = copy.deepcopy(data)
        raw["value"] = serialize_possible_list(raw.get("value", ""))
        raw["properties"] = serialize_joined_key_value_pairs(raw.get("properties"))
        return raw

    @staticmethod
    def from_raw_model(data: dict) -> "Optional[APIDeploymentOption]":
        """Build a deployment option from a raw BAM dict; id 0 yields None.

        :param data: Raw APIDeploymentOption dict as returned by a BAM endpoint.
        :return: Deployment option object, or None when ``id`` is 0.
        """
        raw = copy.deepcopy(data)
        if raw.get("id") == 0:
            return None
        raw["value"] = deserialize_possible_list(raw.get("value", ""))
        joined = raw.get("properties")
        raw["properties"] = deserialize_joined_key_value_pairs(joined) if joined else {}
        return APIDeploymentOption(raw)
class APIUserDefinedField(dict):
    """Model for the BAM API object type APIUserDefinedField.

    Recognized keys: ``name`` (unique str), ``displayName`` (str), ``type``
    (a valid BAM UDF type), ``defaultValue`` (str), ``required`` (bool),
    ``hideFromSearch`` (bool), optional ``validatorProperties``
    (dict[str, str]), optional ``predefinedValues`` (list[str]) and optional
    ``properties`` (dict[str, str]).
    """

    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Serialize list/dict-valued fields into BAM's joined string forms.

        :param data: APIUserDefinedField object or dict equivalent.
        :return: Dict that, once JSON-serialized, can be passed to BAM endpoints.
        """
        raw = copy.deepcopy(data)
        raw["predefinedValues"] = serialize_joined_values(
            raw.get("predefinedValues"), item_sep="|"
        )
        # Multi-property validator settings are comma-separated on the wire.
        raw["validatorProperties"] = serialize_joined_key_value_pairs(
            raw.get("validatorProperties"), item_sep=","
        )
        raw["properties"] = serialize_joined_key_value_pairs(raw.get("properties"))
        return raw

    @staticmethod
    def from_raw_model(data: dict) -> "APIUserDefinedField":
        """Build a UDF object from a JSON-deserialized BAM response.

        :param data: Raw APIUserDefinedField dict as returned by a BAM endpoint.
        :return: UDF object.
        """
        raw = copy.deepcopy(data)
        predefined = raw.get("predefinedValues")
        raw["predefinedValues"] = (
            [item for item in predefined.split("|") if item] if predefined else []
        )
        validator_props = raw.get("validatorProperties")
        raw["validatorProperties"] = (
            deserialize_joined_key_value_pairs(validator_props, item_sep=",")
            if validator_props
            else {}
        )
        joined = raw.get("properties")
        raw["properties"] = deserialize_joined_key_value_pairs(joined) if joined else {}
        return APIUserDefinedField(raw)
class UDLDefinition(dict):
    """Model describing a User-Defined Link definition in the BAM API.

    Recognized keys: ``linkType`` (unique str; must not be a reserved link
    type name and must not start with the "BCN" + underscore prefix),
    ``displayName`` (str), ``sourceEntityTypes`` (list[str]) and
    ``destinationEntityTypes`` (list[str]).
    """

    @staticmethod
    def to_raw_model(data: dict) -> str:
        """Return the JSON string representation accepted by BAM endpoints.

        :param data: UDLDefinition object or dict equivalent.
        :return: JSON-encoded string.
        """
        return json.dumps(data)

    @staticmethod
    def from_raw_model(data: dict) -> "UDLDefinition":
        """Wrap an already JSON-deserialized UDL dict from a BAM endpoint.

        :param data: Dict obtained by JSON-deserializing a UDL response.
        :return: UDL definition object.
        """
        return UDLDefinition(data)
class UDLRelationship(dict):
    """Model describing a User-Defined Link relationship in the BAM API.

    Recognized keys: ``linkType`` (str), ``sourceEntityId`` (int) and,
    optionally, ``destinationEntityId`` (int).
    """

    @staticmethod
    def to_raw_model(data: dict) -> str:
        """Return the JSON string representation accepted by BAM endpoints.

        :param data: UDLRelationship object or dict equivalent.
        :return: JSON-encoded string.
        """
        return json.dumps(data)

    @staticmethod
    def from_raw_model(data: dict) -> "UDLRelationship":
        """Wrap an already JSON-deserialized relationship dict.

        :param data: Dict produced by JSON-deserializing ``to_raw_model`` output.
        :return: UDL relationship object.
        """
        return UDLRelationship(data)
class RetentionSettings(dict):
    """Model for BAM history retention settings.

    Optional keys, each the number of days of history to keep in the
    database: ``admin``, ``sessionEvent``, ``ddns``, ``dhcp`` (all int).

    Per the BAM API: ``sessionEvent`` must be >= each of the other values;
    retention periods must be >= 1; -1 means "retain indefinitely"; 0 for
    DDNS/DHCP means "do not retain" (those records are no longer written, so
    an enabled audit data export receives nothing for them).
    """

    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Expand the settings into BAM's value-plus-update-flag form.

        For every retention field an ``update*`` boolean is emitted that is
        True exactly when the field was supplied (i.e. is not None).

        :param data: RetentionSettings object or dict equivalent.
        :return: Dict that, once JSON-serialized, can be passed to BAM endpoints.
        """
        raw = copy.deepcopy(data)
        result = {}
        for field, flag in (
            ("admin", "updateAdmin"),
            ("sessionEvent", "updateSessionEvent"),
            ("ddns", "updateDdns"),
            ("dhcp", "updateDhcp"),
        ):
            value = raw.get(field)
            result[field] = value
            result[flag] = value is not None
        return result

    @staticmethod
    def from_raw_model(data: str) -> "RetentionSettings":
        """Parse the string returned by BAM method "updateRetentionSettings".

        :param data: Comma-separated values, in order: admin, sessionEvent,
            ddns, dhcp.
        :return: Retention settings object.
        """
        admin_days, session_days, ddns_days, dhcp_days = (
            int(part) for part in data.split(",")
        )
        return RetentionSettings(
            admin=admin_days,
            sessionEvent=session_days,
            ddns=ddns_days,
            dhcp=dhcp_days,
        )
class ResponsePolicySearchResult(dict):
    """Model for the BAM API object type ResponsePolicySearchResult.

    Recognized keys: ``configId`` (int, parent configuration of the policy
    item), ``parentIds`` (list[int] -- Response Policy object IDs, or RP
    Zone object IDs for BlueCat Security feed data), ``name`` (str),
    ``category`` (str or None, associated BlueCat security feed category)
    and ``policyType`` (str).
    """

    @staticmethod
    def from_raw_model(data: dict) -> "ResponsePolicySearchResult":
        """Build a search result, splitting ``parentIds`` into ints.

        :param data: Raw ResponsePolicySearchResult dict from a BAM endpoint.
        :return: Response policy search result object.
        """
        raw = copy.deepcopy(data)
        raw["parentIds"] = [int(part) for part in raw.get("parentIds").split(",")]
        return ResponsePolicySearchResult(raw)
class APIData(dict):
    """Model for the BAM API object type APIData.

    Recognized keys: ``name`` (str, the probe that collected the data) and
    ``properties`` (list, additional probe properties).
    """

    @staticmethod
    def from_raw_model(data: dict) -> "APIData":
        """Build an APIData object, JSON-decoding the ``properties`` string.

        :param data: Raw APIData dict as returned by a BAM endpoint.
        :return: API Data object.
        """
        raw = copy.deepcopy(data)
        raw["properties"] = json.loads(raw["properties"])
        return APIData(raw)
| 40.643541 | 147 | 0.647949 |
import copy
import json
from .serialization import (
deserialize_joined_key_value_pairs,
serialize_joined_key_value_pairs,
serialize_joined_values,
serialize_possible_list,
deserialize_possible_list,
)
class APIEntity(dict):
    """Model for the BAM API object type APIEntity (id, name, type, properties)."""
    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Return a copy with ``properties`` joined into BAM's wire string form."""
        data = copy.deepcopy(data)
        data["properties"] = serialize_joined_key_value_pairs(data.get("properties"))
        return data
    @staticmethod
    def from_raw_model(data: dict) -> "Optional[APIEntity]":
        """Build an APIEntity from a raw BAM dict; ``id`` 0 means "no entity"."""
        data = copy.deepcopy(data)
        if data["id"] == 0:
            return None
        # Absent/empty properties become an empty dict rather than None.
        data["properties"] = (
            deserialize_joined_key_value_pairs(data["properties"]) if data.get("properties") else {}
        )
        return APIEntity(data)
class APIAccessRight(dict):
    """Model for the BAM API object type APIAccessRight (entityId, userId, value, overrides, properties)."""
    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Return a copy with dict-valued fields joined into BAM's string form."""
        data = copy.deepcopy(data)
        data["overrides"] = serialize_joined_key_value_pairs(data.get("overrides"))
        data["properties"] = serialize_joined_key_value_pairs(data.get("properties"))
        return data
    @staticmethod
    def from_raw_model(data: dict) -> "APIAccessRight":
        """Build an APIAccessRight from a raw BAM dict."""
        data = copy.deepcopy(data)
        # Absent/empty joined strings become empty dicts.
        data["overrides"] = (
            deserialize_joined_key_value_pairs(data["overrides"]) if data.get("overrides") else {}
        )
        data["properties"] = (
            deserialize_joined_key_value_pairs(data["properties"]) if data.get("properties") else {}
        )
        return APIAccessRight(data)
class APIDeploymentRole(dict):
    """Model for the BAM API object type APIDeploymentRole (id, type, service, entityId, serviceInterfaceId, properties)."""
    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Return a copy with ``properties`` joined into BAM's string form."""
        data = copy.deepcopy(data)
        data["properties"] = serialize_joined_key_value_pairs(data.get("properties"))
        return data
    @staticmethod
    def from_raw_model(data: dict) -> "Optional[APIDeploymentRole]":
        """Build a deployment role from a raw BAM dict; ``id`` 0 yields None."""
        data = copy.deepcopy(data)
        if data.get("id") == 0:
            return None
        data["properties"] = (
            deserialize_joined_key_value_pairs(data["properties"]) if data.get("properties") else {}
        )
        return APIDeploymentRole(data)
class APIDeploymentOption(dict):
    """Model for the BAM API object type APIDeploymentOption (id, type, name, value, properties)."""
    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Return a copy with ``value`` and ``properties`` serialized for the wire."""
        data = copy.deepcopy(data)
        data["value"] = serialize_possible_list(data.get("value", ""))
        data["properties"] = serialize_joined_key_value_pairs(data.get("properties"))
        return data
    @staticmethod
    def from_raw_model(data: dict) -> "Optional[APIDeploymentOption]":
        """Build a deployment option from a raw BAM dict; ``id`` 0 yields None."""
        data = copy.deepcopy(data)
        if data.get("id") == 0:
            return None
        data["value"] = deserialize_possible_list(data.get("value", ""))
        data["properties"] = (
            deserialize_joined_key_value_pairs(data["properties"]) if data.get("properties") else {}
        )
        return APIDeploymentOption(data)
class APIUserDefinedField(dict):
    """Model for the BAM API object type APIUserDefinedField (name, displayName, type, defaultValue, required, hideFromSearch, validatorProperties, predefinedValues, properties)."""
    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Return a copy with list/dict-valued fields joined into wire strings."""
        data = copy.deepcopy(data)
        # Preset values are pipe-separated on the wire.
        data["predefinedValues"] = serialize_joined_values(
            data.get("predefinedValues"), item_sep="|"
        )
        # Validator settings use a comma separator between properties.
        data["validatorProperties"] = serialize_joined_key_value_pairs(
            data.get("validatorProperties"), item_sep=","
        )
        data["properties"] = serialize_joined_key_value_pairs(data.get("properties"))
        return data
    @staticmethod
    def from_raw_model(data: dict) -> "APIUserDefinedField":
        """Build a UDF object from a raw BAM dict."""
        data = copy.deepcopy(data)
        # Split the pipe-joined preset values, discarding empty entries.
        if data.get("predefinedValues"):
            data["predefinedValues"] = list(filter(None, data.get("predefinedValues").split("|")))
        else:
            data["predefinedValues"] = []
        if data.get("validatorProperties"):
            data["validatorProperties"] = deserialize_joined_key_value_pairs(
                data.get("validatorProperties"), item_sep=","
            )
        else:
            data["validatorProperties"] = {}
        data["properties"] = (
            deserialize_joined_key_value_pairs(data["properties"]) if data.get("properties") else {}
        )
        return APIUserDefinedField(data)
class UDLDefinition(dict):
    """Model describing a User-Defined Link definition (linkType, displayName, sourceEntityTypes, destinationEntityTypes)."""
    @staticmethod
    def to_raw_model(data: dict) -> str:
        """Return the JSON string form accepted by BAM endpoints."""
        return json.dumps(data)
    @staticmethod
    def from_raw_model(data: dict) -> "UDLDefinition":
        """Wrap an already JSON-deserialized UDL dict."""
        return UDLDefinition(data)
class UDLRelationship(dict):
    """Model describing a User-Defined Link relationship (linkType, sourceEntityId, destinationEntityId)."""
    @staticmethod
    def to_raw_model(data: dict) -> str:
        """Return the JSON string form accepted by BAM endpoints."""
        return json.dumps(data)
    @staticmethod
    def from_raw_model(data: dict) -> "UDLRelationship":
        """Wrap an already JSON-deserialized relationship dict."""
        return UDLRelationship(data)
class RetentionSettings(dict):
    """Model for BAM history retention settings (days kept for admin, sessionEvent, ddns, dhcp)."""
    @staticmethod
    def to_raw_model(data: dict) -> dict:
        """Expand the settings into BAM's value-plus-update-flag wire form.

        Each ``update*`` flag is True exactly when the field was supplied.
        """
        data = copy.deepcopy(data)
        update_admin = data.get("admin") is not None
        update_session_event = data.get("sessionEvent") is not None
        update_ddns = data.get("ddns") is not None
        update_dhcp = data.get("dhcp") is not None
        return dict(
            admin=data.get("admin"),
            updateAdmin=update_admin,
            sessionEvent=data.get("sessionEvent"),
            updateSessionEvent=update_session_event,
            ddns=data.get("ddns"),
            updateDdns=update_ddns,
            dhcp=data.get("dhcp"),
            updateDhcp=update_dhcp,
        )
    @staticmethod
    def from_raw_model(data: str) -> "RetentionSettings":
        """Parse the comma-separated "admin,sessionEvent,ddns,dhcp" string."""
        admin, session_event, ddns, dhcp = list(map(int, data.split(",")))
        return RetentionSettings(
            admin=admin,
            sessionEvent=session_event,
            ddns=ddns,
            dhcp=dhcp,
        )
class ResponsePolicySearchResult(dict):
    """Model for the BAM API object type ResponsePolicySearchResult (configId, parentIds, name, category, policyType)."""
    @staticmethod
    def from_raw_model(data: dict) -> "ResponsePolicySearchResult":
        """Build a search result, splitting the comma-joined ``parentIds`` into ints."""
        data = copy.deepcopy(data)
        data["parentIds"] = list(map(int, data.get("parentIds").split(",")))
        return ResponsePolicySearchResult(data)
class APIData(dict):
    """Model for the BAM API object type APIData (probe name plus properties list)."""
    @staticmethod
    def from_raw_model(data: dict) -> "APIData":
        """Build an APIData object, JSON-decoding the ``properties`` string."""
        data = copy.deepcopy(data)
        data["properties"] = json.loads(data["properties"])
        return APIData(data)
| true | true |
1c2ef4f5f308b647a88f60219f1a73c6c1f9e275 | 8,111 | py | Python | tests/app/main/helpers/validation/test_g7_declaration.py | uk-gov-mirror/alphagov.digitalmarketplace-supplier-frontend | cf9d06cffe95c436f056cc9c967e9ef8a25381a4 | [
"MIT"
] | 7 | 2015-11-21T20:43:37.000Z | 2020-07-22T13:20:18.000Z | tests/app/main/helpers/validation/test_g7_declaration.py | uk-gov-mirror/alphagov.digitalmarketplace-supplier-frontend | cf9d06cffe95c436f056cc9c967e9ef8a25381a4 | [
"MIT"
] | 783 | 2015-04-07T16:34:57.000Z | 2021-07-27T12:13:02.000Z | tests/app/main/helpers/validation/test_g7_declaration.py | uk-gov-mirror/alphagov.digitalmarketplace-supplier-frontend | cf9d06cffe95c436f056cc9c967e9ef8a25381a4 | [
"MIT"
] | 20 | 2015-06-13T15:37:23.000Z | 2021-04-10T18:02:09.000Z | # -*- coding: utf-8 -*-
from app.main.helpers.validation import G7Validator, get_validator
from app.main import content_loader
# A fully-populated, valid G-Cloud 7 supplier declaration used as the
# baseline in the tests below; each test copies it and removes or blanks a
# single answer to trigger a specific validation error.
FULL_G7_SUBMISSION = {
    "PR1": True,
    "PR2": True,
    "PR3": True,
    "PR4": True,
    "PR5": True,
    "SQ1-1i-i": True,
    "SQ2-1abcd": True,
    "SQ2-1e": True,
    "SQ2-1f": True,
    "SQ2-1ghijklmn": True,
    "SQ2-2a": True,
    "SQ3-1a": True,
    "SQ3-1b": True,
    "SQ3-1c": True,
    "SQ3-1d": True,
    "SQ3-1e": True,
    "SQ3-1f": True,
    "SQ3-1g": True,
    "SQ3-1h-i": True,
    "SQ3-1h-ii": True,
    "SQ3-1i-i": True,
    "SQ3-1i-ii": True,
    "SQ3-1j": True,
    "SQ3-1k": "Blah",
    "SQ4-1a": True,
    "SQ4-1b": True,
    "SQ5-2a": True,
    "SQD2b": True,
    "SQD2d": True,
    "SQ1-1a": "Blah",
    "SQ1-1b": "Blah",
    "SQ1-1cii": "Blah",
    "SQ1-1d": "Blah",
    "SQ1-1d-i": "Blah",
    "SQ1-1d-ii": "Blah",
    "SQ1-1e": "Blah",
    "SQ1-1h": "999999999",
    "SQ1-1i-ii": "Blah",
    "SQ1-1j-ii": "Blah",
    "SQ1-1p-i": "Blah",
    "SQ1-1k": "Blah",
    "SQ1-1n": "Blah",
    "SQ1-1o": "valid@email.com",
    "SQ1-2a": "Blah",
    "SQ1-2b": "valid@email.com",
    "SQ2-2b": "Blah",
    "SQ4-1c": "Blah",
    "SQD2c": "Blah",
    "SQD2e": "Blah",
    "SQ1-1ci": "public limited company",
    "SQ1-1j-i": ["licensed?"],
    "SQ1-1m": "micro",
    "SQ1-3": ["on-demand self-service. blah blah"],
    "SQ5-1a": u"Yes – your organisation has, blah blah",
    "SQC2": [
        "race?",
        "sexual orientation?",
        "disability?",
        "age equality?",
        "religion or belief?",
        "gender (sex)?",
        "gender reassignment?",
        "marriage or civil partnership?",
        "pregnancy or maternity?",
        "human rights?"
    ],
    "SQC3": True,
    "SQA2": True,
    "SQA3": True,
    "SQA4": True,
    "SQA5": True,
    "AQA3": True,
    "SQE2a": ["as a prime contractor, using third parties (subcontractors) to provide some services"]
}
def test_error_if_required_field_is_missing():
    """A required boolean answer that is absent must be reported."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration.pop('SQ3-1i-i')
    assert G7Validator(manifest, declaration).errors() == {'SQ3-1i-i': 'answer_required'}
def test_error_if_required_text_field_is_empty():
    """An empty string does not satisfy a required free-text question."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration['SQ1-2b'] = ""
    assert G7Validator(manifest, declaration).errors() == {'SQ1-2b': 'answer_required'}
def test_no_error_if_optional_field_is_missing():
    """Leaving out an optional question produces no validation errors."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration.pop('SQ1-1p-i')
    assert G7Validator(manifest, declaration).errors() == {}
def test_trading_status_details_error_depends_on_trading_status():
    """SQ1-1cii ('other' trading-status details) is required only when the
    trading status answer (SQ1-1ci) is the free-text 'other' option.

    Note: the original test built a throwaway G7Validator before setting
    SQ1-1ci and never inspected it; that dead instantiation is removed here.
    """
    content = content_loader.get_manifest('g-cloud-7', 'declaration')
    submission = FULL_G7_SUBMISSION.copy()
    del submission['SQ1-1cii']

    submission['SQ1-1ci'] = "something"
    validator = G7Validator(content, submission)
    assert validator.errors() == {}

    submission['SQ1-1ci'] = "other (please specify)"
    validator = G7Validator(content, submission)
    assert validator.errors() == {'SQ1-1cii': 'answer_required'}
def test_trade_registers_details_error_depends_on_trade_registers():
    """Register details (SQ1-1i-ii) are required only when SQ1-1i-i is True."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration.pop('SQ1-1i-ii')

    declaration['SQ1-1i-i'] = False
    assert G7Validator(manifest, declaration).errors() == {}

    declaration['SQ1-1i-i'] = True
    assert G7Validator(manifest, declaration).errors() == {'SQ1-1i-ii': 'answer_required'}
def test_licenced_details_error_depends_on_licenced():
    """Licence details (SQ1-1j-ii) are required only once SQ1-1j-i is answered."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration.pop('SQ1-1j-ii')
    declaration.pop('SQ1-1j-i')
    assert G7Validator(manifest, declaration).errors() == {}

    declaration['SQ1-1j-i'] = ["licensed"]
    assert G7Validator(manifest, declaration).errors() == {'SQ1-1j-ii': 'answer_required'}
def test_no_error_if_no_tax_issues_and_no_details():
    """With both tax-issue flags off, the details field (SQ4-1c) may be omitted."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration['SQ4-1a'] = False
    declaration['SQ4-1b'] = False
    declaration.pop('SQ4-1c')
    assert G7Validator(manifest, declaration).errors() == {}
def test_error_if_tax_issues_and_no_details():
    """Details (SQ4-1c) are required when either tax-issue flag is set."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration.pop('SQ4-1c')

    for flag_a, flag_b in [(True, False), (False, True)]:
        declaration['SQ4-1a'] = flag_a
        declaration['SQ4-1b'] = flag_b
        assert G7Validator(manifest, declaration).errors() == {'SQ4-1c': 'answer_required'}
def test_error_if_mitigation_factors_not_provided_when_required():
    """SQ3-1k becomes required as soon as any single dependent answer is True."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration.pop('SQ3-1k')
    dependent_fields = [
        'SQ2-2a', 'SQ3-1a', 'SQ3-1b', 'SQ3-1c', 'SQ3-1d', 'SQ3-1e', 'SQ3-1f', 'SQ3-1g',
        'SQ3-1h-i', 'SQ3-1h-ii', 'SQ3-1i-i', 'SQ3-1i-ii', 'SQ3-1j'
    ]
    for trigger in dependent_fields:
        # Reset every dependent answer so only `trigger` can cause the error
        declaration.update((name, False) for name in dependent_fields)
        declaration[trigger] = True
        assert G7Validator(manifest, declaration).errors() == {'SQ3-1k': 'answer_required'}
def test_mitigation_factors_not_required():
    """SQ3-1k may be omitted when every dependent question is answered False."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration.pop('SQ3-1k')
    dependent_fields = [
        'SQ2-2a', 'SQ3-1a', 'SQ3-1b', 'SQ3-1c', 'SQ3-1d', 'SQ3-1e', 'SQ3-1f', 'SQ3-1g',
        'SQ3-1h-i', 'SQ3-1h-ii', 'SQ3-1i-i', 'SQ3-1i-ii', 'SQ3-1j'
    ]
    declaration.update((name, False) for name in dependent_fields)
    assert G7Validator(manifest, declaration).errors() == {}
def test_fields_only_relevant_to_non_uk():
    """SQ1-1i-i becomes required when SQ5-2a (UK-based) is answered False."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration['SQ5-2a'] = False
    declaration.pop('SQ1-1i-i')
    assert G7Validator(manifest, declaration).errors() == {'SQ1-1i-i': 'answer_required'}
def test_invalid_email_addresses_cause_errors():
    """Malformed email answers are flagged with 'invalid_format'."""
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    declaration['SQ1-1o'] = '@invalid.com'
    declaration['SQ1-2b'] = 'some.user.missed.their.at.com'
    assert G7Validator(manifest, declaration).errors() == {
        'SQ1-1o': 'invalid_format',
        'SQ1-2b': 'invalid_format',
    }
def test_character_limit_errors():
    """Free-text answers are rejected once they exceed their character limit."""
    limited_fields = [
        ("SQ1-1a", 5000),
        ("SQ1-1cii", 5000),
        ("SQ1-1d-i", 5000),
        ("SQ1-1d-ii", 5000),
        ("SQ1-1i-ii", 5000),
        ("SQ3-1k", 5000),
    ]
    manifest = content_loader.get_manifest('g-cloud-7', 'declaration')
    declaration = FULL_G7_SUBMISSION.copy()
    for name, maximum in limited_fields:
        # One character over the limit fails ...
        declaration[name] = "a" * (maximum + 1)
        assert G7Validator(manifest, declaration).errors() == {name: 'under_character_limit'}

        # ... exactly at the limit passes.
        declaration[name] = "a" * maximum
        assert G7Validator(manifest, declaration).errors() == {}
def test_get_validator():
    """get_validator selects the G7 validator for the 'g-cloud-7' slug."""
    chosen = get_validator({"slug": "g-cloud-7"}, None, None)
    assert type(chosen) is G7Validator
| 30.264925 | 101 | 0.635063 |
from app.main.helpers.validation import G7Validator, get_validator
from app.main import content_loader
FULL_G7_SUBMISSION = {
"PR1": True,
"PR2": True,
"PR3": True,
"PR4": True,
"PR5": True,
"SQ1-1i-i": True,
"SQ2-1abcd": True,
"SQ2-1e": True,
"SQ2-1f": True,
"SQ2-1ghijklmn": True,
"SQ2-2a": True,
"SQ3-1a": True,
"SQ3-1b": True,
"SQ3-1c": True,
"SQ3-1d": True,
"SQ3-1e": True,
"SQ3-1f": True,
"SQ3-1g": True,
"SQ3-1h-i": True,
"SQ3-1h-ii": True,
"SQ3-1i-i": True,
"SQ3-1i-ii": True,
"SQ3-1j": True,
"SQ3-1k": "Blah",
"SQ4-1a": True,
"SQ4-1b": True,
"SQ5-2a": True,
"SQD2b": True,
"SQD2d": True,
"SQ1-1a": "Blah",
"SQ1-1b": "Blah",
"SQ1-1cii": "Blah",
"SQ1-1d": "Blah",
"SQ1-1d-i": "Blah",
"SQ1-1d-ii": "Blah",
"SQ1-1e": "Blah",
"SQ1-1h": "999999999",
"SQ1-1i-ii": "Blah",
"SQ1-1j-ii": "Blah",
"SQ1-1p-i": "Blah",
"SQ1-1k": "Blah",
"SQ1-1n": "Blah",
"SQ1-1o": "valid@email.com",
"SQ1-2a": "Blah",
"SQ1-2b": "valid@email.com",
"SQ2-2b": "Blah",
"SQ4-1c": "Blah",
"SQD2c": "Blah",
"SQD2e": "Blah",
"SQ1-1ci": "public limited company",
"SQ1-1j-i": ["licensed?"],
"SQ1-1m": "micro",
"SQ1-3": ["on-demand self-service. blah blah"],
"SQ5-1a": u"Yes – your organisation has, blah blah",
"SQC2": [
"race?",
"sexual orientation?",
"disability?",
"age equality?",
"religion or belief?",
"gender (sex)?",
"gender reassignment?",
"marriage or civil partnership?",
"pregnancy or maternity?",
"human rights?"
],
"SQC3": True,
"SQA2": True,
"SQA3": True,
"SQA4": True,
"SQA5": True,
"AQA3": True,
"SQE2a": ["as a prime contractor, using third parties (subcontractors) to provide some services"]
}
def test_error_if_required_field_is_missing():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
del submission['SQ3-1i-i']
validator = G7Validator(content, submission)
assert validator.errors() == {'SQ3-1i-i': 'answer_required'}
def test_error_if_required_text_field_is_empty():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
submission['SQ1-2b'] = ""
validator = G7Validator(content, submission)
assert validator.errors() == {'SQ1-2b': 'answer_required'}
def test_no_error_if_optional_field_is_missing():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
del submission['SQ1-1p-i']
validator = G7Validator(content, submission)
assert validator.errors() == {}
def test_trading_status_details_error_depends_on_trading_status():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
del submission['SQ1-1cii']
validator = G7Validator(content, submission)
submission['SQ1-1ci'] = "something"
validator = G7Validator(content, submission)
assert validator.errors() == {}
submission['SQ1-1ci'] = "other (please specify)"
validator = G7Validator(content, submission)
assert validator.errors() == {'SQ1-1cii': 'answer_required'}
def test_trade_registers_details_error_depends_on_trade_registers():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
del submission['SQ1-1i-ii']
submission['SQ1-1i-i'] = False
validator = G7Validator(content, submission)
assert validator.errors() == {}
submission['SQ1-1i-i'] = True
validator = G7Validator(content, submission)
assert validator.errors() == {'SQ1-1i-ii': 'answer_required'}
def test_licenced_details_error_depends_on_licenced():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
del submission['SQ1-1j-ii']
del submission['SQ1-1j-i']
validator = G7Validator(content, submission)
assert validator.errors() == {}
submission['SQ1-1j-i'] = ["licensed"]
validator = G7Validator(content, submission)
assert validator.errors() == {'SQ1-1j-ii': 'answer_required'}
def test_no_error_if_no_tax_issues_and_no_details():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
submission['SQ4-1a'] = False
submission['SQ4-1b'] = False
del submission['SQ4-1c']
validator = G7Validator(content, submission)
assert validator.errors() == {}
def test_error_if_tax_issues_and_no_details():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
del submission['SQ4-1c']
submission['SQ4-1a'] = True
submission['SQ4-1b'] = False
validator = G7Validator(content, submission)
assert validator.errors() == {'SQ4-1c': 'answer_required'}
submission['SQ4-1a'] = False
submission['SQ4-1b'] = True
validator = G7Validator(content, submission)
assert validator.errors() == {'SQ4-1c': 'answer_required'}
def test_error_if_mitigation_factors_not_provided_when_required():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
del submission['SQ3-1k']
dependent_fields = [
'SQ2-2a', 'SQ3-1a', 'SQ3-1b', 'SQ3-1c', 'SQ3-1d', 'SQ3-1e', 'SQ3-1f', 'SQ3-1g',
'SQ3-1h-i', 'SQ3-1h-ii', 'SQ3-1i-i', 'SQ3-1i-ii', 'SQ3-1j'
]
for field in dependent_fields:
for other in dependent_fields:
submission[other] = False
submission[field] = True
validator = G7Validator(content, submission)
assert validator.errors() == {'SQ3-1k': 'answer_required'}
def test_mitigation_factors_not_required():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
del submission['SQ3-1k']
dependent_fields = [
'SQ2-2a', 'SQ3-1a', 'SQ3-1b', 'SQ3-1c', 'SQ3-1d', 'SQ3-1e', 'SQ3-1f', 'SQ3-1g',
'SQ3-1h-i', 'SQ3-1h-ii', 'SQ3-1i-i', 'SQ3-1i-ii', 'SQ3-1j'
]
for field in dependent_fields:
submission[field] = False
validator = G7Validator(content, submission)
assert validator.errors() == {}
def test_fields_only_relevant_to_non_uk():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
submission['SQ5-2a'] = False
del submission['SQ1-1i-i']
validator = G7Validator(content, submission)
assert validator.errors() == {'SQ1-1i-i': 'answer_required'}
def test_invalid_email_addresses_cause_errors():
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
submission['SQ1-1o'] = '@invalid.com'
submission['SQ1-2b'] = 'some.user.missed.their.at.com'
validator = G7Validator(content, submission)
assert validator.errors() == {
'SQ1-1o': 'invalid_format',
'SQ1-2b': 'invalid_format',
}
def test_character_limit_errors():
cases = [
("SQ1-1a", 5000),
("SQ1-1cii", 5000),
("SQ1-1d-i", 5000),
("SQ1-1d-ii", 5000),
("SQ1-1i-ii", 5000),
("SQ3-1k", 5000),
]
content = content_loader.get_manifest('g-cloud-7', 'declaration')
submission = FULL_G7_SUBMISSION.copy()
for field, limit in cases:
submission[field] = "a" * (limit + 1)
validator = G7Validator(content, submission)
assert validator.errors() == {field: 'under_character_limit'}
submission[field] = "a" * limit
validator = G7Validator(content, submission)
assert validator.errors() == {}
def test_get_validator():
validator = get_validator({"slug": "g-cloud-7"}, None, None)
assert type(validator) is G7Validator
| true | true |
1c2ef6dccb38beef35f29ad77ea7c53c573111c8 | 9,323 | py | Python | armi/reactor/assemblyLists.py | MattGreav/test | f6bc7dcefd8b498b71fb92808ee70496f2206231 | [
"Apache-2.0"
] | null | null | null | armi/reactor/assemblyLists.py | MattGreav/test | f6bc7dcefd8b498b71fb92808ee70496f2206231 | [
"Apache-2.0"
] | null | null | null | armi/reactor/assemblyLists.py | MattGreav/test | f6bc7dcefd8b498b71fb92808ee70496f2206231 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Module containing :py:class:`AssemblyList` and related classes.
Assembly Lists are core-like objects that store collections of Assemblies. They were
originally developed to serve as things like spent-fuel pools and the like, where
spatial location of Assemblies need not be quite as precise.
Presently, the :py:class:`armi.reactor.reactors.Core` constructs a pair of these as
`self.sfp` and `self.cfp` (charged-fuel pool). We are in the process of removing these
as instance attributes of the ``Core``, and moving them into sibling systems on the root
:py:class:`armi.reactor.reactors.Reactor` object.
"""
import abc
import itertools
from armi import runLog
from armi.utils import units
from armi.reactor import grids
from armi.reactor import composites
from armi.reactor.flags import Flags
class AutoFiller(abc.ABC):
    """
    Strategy for automatically choosing a grid location when an Assembly is
    added without one being explicitly requested.

    This is kept separate from the ``AssemblyList`` class itself to promote
    composition over inheritance; reasonable implementations of auto-fill
    strategies will have their own state, which subclasses of ``AssemblyList``
    should not have to manage.

    NOTE(review): although this derives from ``abc.ABC``, neither method is
    decorated with ``@abstractmethod``, so subclasses are not forced to
    override them — confirm whether that is intentional.
    """

    def getNextLocation(self, a) -> grids.LocationBase:
        """
        Return the next automatic location for assembly ``a``.

        Base implementation is a no-op (returns None).
        """

    def assemblyAdded(self, a):
        """
        Register that an assembly has been added.

        This allows an ``AutoFiller`` to be notified that an assembly has been added
        manually.
        """
class RowMajorAutoFiller(AutoFiller):
    """
    :py:class:`AutoFiller` that places Assemblies row by row.

    Rows of the associated :py:class:`AssemblyList`'s
    :py:class:`armi.reactor.grids.Grid` are filled with ``nMajor`` assemblies
    each before moving on to the next row.
    """

    def __init__(self, aList, nMajor):
        self._nMajor = nMajor
        self._aList = aList

    def getNextLocation(self, _a):
        occupied = {assem.spatialLocator for assem in self._aList}
        grid = self._aList.spatialGrid
        slot = 0
        while True:
            # Row-major order: i walks across a row, j advances per row.
            candidate = grid[slot % self._nMajor, slot // self._nMajor, 0]
            if candidate not in occupied:
                return candidate
            slot += 1

    def assemblyAdded(self, a):
        """
        Do nothing.

        A more optimal implementation would cache things that would be affected
        by this.
        """
class AssemblyList(composites.Composite):
    """
    A quasi-arbitrary collection of Assemblies.

    The AssemblyList is similar to a Core, in that it is designed to store Assembly
    objects. Unlike the Core, they have far fewer convenience functions, and permit
    looser control over where assemblies actually live.
    """

    def __init__(self, name, parent=None):
        composites.Composite.__init__(self, name)
        self.parent = parent
        # make a Cartesian assembly rack by default. Anything that really cares about
        # the layout should specify one manually or in Blueprints
        self.spatialGrid = grids.CartesianGrid.fromRectangle(50.0, 50.0)
        self._filler = RowMajorAutoFiller(self, 10)

    @property
    def r(self):
        """The ancestor Reactor, found dynamically via the composite tree."""
        # This needs to be here until we remove the dependency of Reactor upon
        # AssemblyLists
        from armi.reactor import reactors

        return self.getAncestor(fn=lambda x: isinstance(x, reactors.Reactor))

    def __repr__(self):
        return "<AssemblyList object: {0}>".format(self.name)

    def add(self, assem, loc=None):
        """
        Add an Assembly to the list.

        Parameters
        ----------
        assem : Assembly
            The Assembly to add to the list
        loc : LocationBase, optional
            If provided, the assembly is inserted at that location, similarly to how a
            Core would function. If it is not provided, the locator on the Assembly
            object will be used. If the Assembly's locator belongs to
            ``self.spatialGrid``, the Assembly's existing locator will not be used.
            This is unlike the Core, which would try to use the same indices, but move
            the locator to the Core's grid. If no locator is passed, or if the
            Assembly's locator is not in the AssemblyList's grid, then the Assembly will
            be automatically located in the grid using the associated ``AutoFiller``
            object.

        Raises
        ------
        ValueError
            If ``loc`` belongs to a grid other than ``self.spatialGrid``.
        """
        if loc is not None and loc.grid is not self.spatialGrid:
            raise ValueError(
                "An assembly cannot be added to {} using a spatial locator "
                "from another grid".format(self)
            )

        locProvided = loc is not None or (
            assem.spatialLocator is not None
            and assem.spatialLocator.grid is self.spatialGrid
        )

        if locProvided:
            loc = loc or assem.spatialLocator
        else:
            loc = self._filler.getNextLocation(assem)

        super().add(assem)
        assem.spatialLocator = loc
        self._filler.assemblyAdded(assem)

    def getAssembly(self, name):
        """
        Get a specific Assembly by name.

        Returns None if no child assembly has that name.
        """
        for a in self.getChildren():
            if a.getName() == name:
                return a

    def count(self):
        """
        Log the number of assemblies moved at each distinct move time.

        Fix: the original implementation only reported a group when a *different*
        time value was encountered, so the final group (or all assemblies, when
        every move time was equal) was never reported. The last group is now
        flushed after the loop.
        """
        if not self.getChildren():
            return
        runLog.important("Count:")
        totCount = 0
        thisTimeCount = 0
        a = self.getChildren()[0]
        lastTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime

        for a in self.getChildren():
            thisTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime

            if thisTime != lastTime:
                runLog.important(
                    "Number of assemblies moved at t={0:6.2f}: {1:04d}. Cumulative: {2:04d}".format(
                        lastTime, thisTimeCount, totCount
                    )
                )
                lastTime = thisTime
                thisTimeCount = 0
            totCount += 1
            thisTimeCount += 1

        # Flush the final time group, which the loop above never reports.
        runLog.important(
            "Number of assemblies moved at t={0:6.2f}: {1:04d}. Cumulative: {2:04d}".format(
                lastTime, thisTimeCount, totCount
            )
        )
class SpentFuelPool(AssemblyList):
    """A place to put assemblies when they've been discharged. Can tell you inventory stats, etc."""

    def report(self):
        """Log one discharge-summary line per assembly, then the total fissile inventory.

        NOTE(review): the per-assembly line labels ``getFissileMass() * multiplicity``
        as kg, while the running total divides that same product by 1000 to get kg —
        one of the two unit labels looks inconsistent; confirm the units returned by
        ``getFissileMass``.
        """
        title = "{0} Report".format(self.name)
        runLog.important("-" * len(title))
        runLog.important(title)
        runLog.important("-" * len(title))

        totFis = 0.0
        for a in self.getChildren():
            runLog.important(
                "{assembly:15s} discharged at t={dTime:10f} after {residence:10f} yrs. It entered at cycle: {cycle}. "
                "It has {fiss:10f} kg (x {mult}) fissile and peak BU={bu:.2f} %.".format(
                    assembly=a,
                    dTime=a.p.dischargeTime,
                    residence=(a.p.dischargeTime - a.p.chargeTime),
                    cycle=a.p.chargeCycle,
                    fiss=a.getFissileMass() * a.p.multiplicity,
                    bu=a.getMaxParam("percentBu"),
                    mult=a.p.multiplicity,
                )
            )
            totFis += a.getFissileMass() * a.p.multiplicity / 1000  # convert to kg
        # totFis is accumulated in kg; divide by 1000 again to report metric tons.
        runLog.important(
            "Total full-core fissile inventory of {0} is {1:.4E} MT".format(
                self, totFis / 1000.0
            )
        )
class ChargedFuelPool(AssemblyList):
    """A place to put boosters so you can see how much you added. Can tell you inventory stats, etc."""

    def report(self):
        """Log a table of charged assemblies with a running cumulative fissile total (MT)."""
        title = "{0} Report".format(self.name)
        runLog.important("-" * len(title))
        runLog.important(title)
        runLog.important("-" * len(title))

        totFis = 0.0
        # Table header; columns match the per-assembly format string below.
        runLog.important(
            "{assembly:15s} {dTime:10s} {cycle:3s} {bu:5s} {fiss:13s} {cum:13s}".format(
                assembly="Assem. Name",
                dTime="Charge Time",
                cycle="Charge cyc",
                bu="BU",
                fiss="kg fis (full core)",
                cum="Cumulative fis (full, MT)",
            )
        )
        for a in self.getChildren():
            # Accumulate before printing so `cum` includes the current assembly.
            totFis += a.p.chargeFis * a.p.multiplicity / 1000.0
            runLog.important(
                "{assembly:15s} {dTime:10f} {cycle:3f} {bu:5.2f} {fiss:13.4f} {cum:13.4f}".format(
                    assembly=a,
                    dTime=a.p.chargeTime,
                    cycle=a.p.chargeCycle,
                    fiss=a.p.chargeFis,
                    bu=a.p.chargeBu,
                    cum=totFis,
                )
            )
        runLog.important(
            "Total full core fissile inventory of {0} is {1:.4E} MT".format(
                self, totFis
            )
        )
| 35.448669 | 118 | 0.602059 |
import abc
import itertools
from armi import runLog
from armi.utils import units
from armi.reactor import grids
from armi.reactor import composites
from armi.reactor.flags import Flags
class AutoFiller(abc.ABC):
def getNextLocation(self, a) -> grids.LocationBase:
def assemblyAdded(self, a):
class RowMajorAutoFiller(AutoFiller):
def __init__(self, aList, nMajor):
self._nMajor = nMajor
self._aList = aList
def getNextLocation(self, _a):
filledLocations = {a.spatialLocator for a in self._aList}
grid = self._aList.spatialGrid
for idx in itertools.count():
j = idx // self._nMajor
i = idx % self._nMajor
loc = grid[i, j, 0]
if loc not in filledLocations:
return loc
def assemblyAdded(self, a):
class AssemblyList(composites.Composite):
def __init__(self, name, parent=None):
composites.Composite.__init__(self, name)
self.parent = parent
self.spatialGrid = grids.CartesianGrid.fromRectangle(50.0, 50.0)
self._filler = RowMajorAutoFiller(self, 10)
@property
def r(self):
from armi.reactor import reactors
return self.getAncestor(fn=lambda x: isinstance(x, reactors.Reactor))
def __repr__(self):
return "<AssemblyList object: {0}>".format(self.name)
def add(self, assem, loc=None):
if loc is not None and loc.grid is not self.spatialGrid:
raise ValueError(
"An assembly cannot be added to {} using a spatial locator "
"from another grid".format(self)
)
locProvided = loc is not None or (
assem.spatialLocator is not None
and assem.spatialLocator.grid is self.spatialGrid
)
if locProvided:
loc = loc or assem.spatialLocator
else:
loc = self._filler.getNextLocation(assem)
super().add(assem)
assem.spatialLocator = loc
self._filler.assemblyAdded(assem)
def getAssembly(self, name):
for a in self.getChildren():
if a.getName() == name:
return a
def count(self):
if not self.getChildren():
return
runLog.important("Count:")
totCount = 0
thisTimeCount = 0
a = self.getChildren()[0]
lastTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime
for a in self.getChildren():
thisTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime
if thisTime != lastTime:
runLog.important(
"Number of assemblies moved at t={0:6.2f}: {1:04d}. Cumulative: {2:04d}".format(
lastTime, thisTimeCount, totCount
)
)
lastTime = thisTime
thisTimeCount = 0
totCount += 1
thisTimeCount += 1
class SpentFuelPool(AssemblyList):
def report(self):
title = "{0} Report".format(self.name)
runLog.important("-" * len(title))
runLog.important(title)
runLog.important("-" * len(title))
totFis = 0.0
for a in self.getChildren():
runLog.important(
"{assembly:15s} discharged at t={dTime:10f} after {residence:10f} yrs. It entered at cycle: {cycle}. "
"It has {fiss:10f} kg (x {mult}) fissile and peak BU={bu:.2f} %.".format(
assembly=a,
dTime=a.p.dischargeTime,
residence=(a.p.dischargeTime - a.p.chargeTime),
cycle=a.p.chargeCycle,
fiss=a.getFissileMass() * a.p.multiplicity,
bu=a.getMaxParam("percentBu"),
mult=a.p.multiplicity,
)
)
totFis += a.getFissileMass() * a.p.multiplicity / 1000
runLog.important(
"Total full-core fissile inventory of {0} is {1:.4E} MT".format(
self, totFis / 1000.0
)
)
class ChargedFuelPool(AssemblyList):
def report(self):
title = "{0} Report".format(self.name)
runLog.important("-" * len(title))
runLog.important(title)
runLog.important("-" * len(title))
totFis = 0.0
runLog.important(
"{assembly:15s} {dTime:10s} {cycle:3s} {bu:5s} {fiss:13s} {cum:13s}".format(
assembly="Assem. Name",
dTime="Charge Time",
cycle="Charge cyc",
bu="BU",
fiss="kg fis (full core)",
cum="Cumulative fis (full, MT)",
)
)
for a in self.getChildren():
totFis += a.p.chargeFis * a.p.multiplicity / 1000.0
runLog.important(
"{assembly:15s} {dTime:10f} {cycle:3f} {bu:5.2f} {fiss:13.4f} {cum:13.4f}".format(
assembly=a,
dTime=a.p.chargeTime,
cycle=a.p.chargeCycle,
fiss=a.p.chargeFis,
bu=a.p.chargeBu,
cum=totFis,
)
)
runLog.important(
"Total full core fissile inventory of {0} is {1:.4E} MT".format(
self, totFis
)
)
| true | true |
1c2ef76024d7b737707cdc6ecf0cee5754fa74c3 | 2,400 | py | Python | 14/recipe.py | Keilan/advent-of-code-2018 | 3f3b4952c3633df4008e734da15e219fa67ec635 | [
"MIT"
] | null | null | null | 14/recipe.py | Keilan/advent-of-code-2018 | 3f3b4952c3633df4008e734da15e219fa67ec635 | [
"MIT"
] | null | null | null | 14/recipe.py | Keilan/advent-of-code-2018 | 3f3b4952c3633df4008e734da15e219fa67ec635 | [
"MIT"
def perform_attempt(scoreboard, index1, index2):
    """Create new recipes from the two current ones and advance both elves.

    The digits of the two current scores' sum are appended to `scoreboard`
    (in place); each elf then moves forward 1 + (their current score) steps,
    wrapping around the board.

    Returns (new_index1, new_index2, list_of_scores_appended).
    """
    total = scoreboard[index1] + scoreboard[index2]
    tens, ones = divmod(total, 10)

    # The sum of two digits is at most 18, so `tens` is 0 or 1; a leading
    # zero digit is only written when the sum has two digits.
    appended = [tens, ones] if tens else [ones]
    scoreboard.extend(appended)

    new1 = (index1 + 1 + scoreboard[index1]) % len(scoreboard)
    new2 = (index2 + 1 + scoreboard[index2]) % len(scoreboard)
    return new1, new2, appended
def score_after(attempts):
    """Print and return the scores of the 10 recipes made after `attempts` recipes.

    Fix/generalization: the original printed the answer but returned None,
    making it untestable; the formatted 10-digit string is now also returned
    (backward compatible — callers ignoring the return value are unaffected).
    """
    # Setup initial score
    scoreboard = [3, 7]
    elf1 = 0
    elf2 = 1

    # Generate recipes until the 10-recipe answer window is fully populated.
    while len(scoreboard) < attempts + 10:
        elf1, elf2, _ = perform_attempt(scoreboard, elf1, elf2)

    result = ''.join(str(i) for i in scoreboard[attempts:attempts + 10])
    print('The 10 recipes after recipe {} have scores: {}'.format(
        attempts, result))
    return result
def find_sequence(sequence):
    """Print and return how many recipe scores precede the first occurrence of
    `sequence` (a string of digits) on the scoreboard.

    Fixes vs. the original:
    - The old partial-match tracker restarted with ``elif score == sequence[0]:
      idx = 1`` on a mismatch, which misses overlapping partial matches (e.g. a
      target beginning with a repeated digit) and could overshoot the first
      occurrence. This version checks the board's tail directly, which is
      correct for any target.
    - Dead commented-out code removed; the result is now also returned
      (backward compatible).

    Assumes the target is longer than the two-recipe starting board, as in
    every call in this script.
    """
    target = [int(ch) for ch in sequence]
    n = len(target)

    scoreboard = [3, 7]
    elf1 = 0
    elf2 = 1

    while True:
        elf1, elf2, scores_added = perform_attempt(scoreboard, elf1, elf2)
        # Up to two scores may have been appended, so the target can end either
        # one-from-the-end or at the end; check the earlier position first so
        # the FIRST occurrence wins.
        if len(scores_added) == 2 and scoreboard[-n - 1:-1] == target:
            scores_before = len(scoreboard) - n - 1
            break
        if scoreboard[-n:] == target:
            scores_before = len(scoreboard) - n
            break

    print('The sequence {} first appears after {} recipes'.format(target, scores_before))
    return scores_before
if __name__ == "__main__":
    # Guarding the driver calls prevents the (slow) puzzle solutions from
    # running as a side effect of importing this module.
    # Part 1: puzzle input.
    score_after(323081)
    # Part 2: worked examples from the puzzle statement, then the real input.
    find_sequence('5158916')
    find_sequence('01245')
    find_sequence('92510')
    find_sequence('59414')
find_sequence('323081') | 28.235294 | 91 | 0.619583 | def perform_attempt(scoreboard, index1, index2):
combined = scoreboard[index1] + scoreboard[index2]
score1 = combined // 10
score2 = combined % 10
scores_added = []
if score1 != 0:
scores_added.append(score1)
scores_added.append(score2)
scoreboard.extend(scores_added)
index1 = (index1 + 1 + scoreboard[index1]) % len(scoreboard)
index2 = (index2 + 1 + scoreboard[index2]) % len(scoreboard)
return index1, index2, scores_added
def score_after(attempts):
scoreboard = [3,7]
elf1 = 0
elf2 = 1
while len(scoreboard) < attempts+10:
elf1, elf2, _ = perform_attempt(scoreboard, elf1, elf2)
last_10 = scoreboard[attempts:attempts+10]
print('The 10 recipes after recipe {} have scores: {}'.format(
attempts, ''.join(str(i) for i in last_10)))
def find_sequence(sequence):
sequence = [int(i) for i in sequence]
scoreboard = [3,7]
elf1 = 0
elf2 = 1
idx = 0
# Check for initial sequence
for score in scoreboard:
if score == sequence[idx]:
idx += 1
else:
idx = 0
#Perform initial attempts
while idx < len(sequence):
elf1, elf2, scores_added = perform_attempt(scoreboard, elf1, elf2)
for score in scores_added:
if score == sequence[idx]:
idx += 1
elif score == sequence[0]:
idx = 1
else:
idx = 0
#Break if finished
if idx == len(sequence):
break
# Find amount before
scores_before = len(scoreboard) - len(sequence)
if scoreboard[-len(sequence):] != sequence:
scores_before -= 1 #Ignore final idx if unused
print('The sequence {} first appears after {} recipes'.format(sequence, scores_before))
#last_10 = scoreboard[attempts:attempts+10]
#print('The 10 recipes after recipe {} have scores: {}'.format(
#attempts, ''.join(str(i) for i in last_10)))
score_after(323081)
find_sequence('5158916')
find_sequence('01245')
find_sequence('92510')
find_sequence('59414')
find_sequence('323081') | true | true |
1c2ef7a70c702537518d5a8441086e69cd29de52 | 1,875 | py | Python | parser/team27/G-27/execution/function/trigonometric/asin.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/team27/G-27/execution/function/trigonometric/asin.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/team27/G-27/execution/function/trigonometric/asin.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | from execution.abstract.function import *
from execution.symbol.typ import *
from libraries.trigonometric_functions import asin
class Asin(Function):
    """SQL ASIN(): arc sine of a numeric value, or of each value in a list.

    Inputs must be of type INT or DECIMAL and lie in the domain [-1, 1];
    results are returned with type DECIMAL.

    Fixes vs. the original:
    - ``"El valor " + value['value']`` raised TypeError for non-string values
      in the type-error branch of the list path; ``str()`` is now applied.
    - The out-of-range message claimed the accepted range was "[1,infinito]",
      but the code checks [-1, 1]; the message now states the correct range.
    - The list branch returned the input's type while the scalar branch
      returned Type.DECIMAL; both now return Type.DECIMAL (asin yields a
      non-integer result).
    """

    def __init__(self, input, row, column):
        Function.__init__(self, row, column)
        self.input = input

    def execute(self, environment):
        # A list input produces one result dict per element.
        if isinstance(self.input, list):
            respuesta = []
            for val in self.input:
                value = val.execute(environment)
                error = self._validar(value)
                if error is not None:
                    return error
                respuesta.append({'value': asin(value['value']), 'typ': Type.DECIMAL})
            return respuesta
        # Scalar input.
        value = self.input.execute(environment)
        error = self._validar(value)
        if error is not None:
            return error
        return {'value': asin(value['value']), 'typ': Type.DECIMAL}

    def _validar(self, value):
        """Return an error dict if `value` is not numeric or not in [-1, 1]; else None."""
        if value['typ'] != Type.INT and value['typ'] != Type.DECIMAL:
            return {'Error': "El valor " + str(value['value']) + " no es decimal o entero",
                    'linea': self.row, 'columna': self.column}
        if value['value'] < -1 or value['value'] > 1:
            return {'Error': "El valor " + str(value['value']) +
                    " no entra en el rango de [-1,1] que son aceptados por la funcion asin()",
                    'linea': self.row, 'columna': self.column}
        return None
| 52.083333 | 193 | 0.5632 | from execution.abstract.function import *
from execution.symbol.typ import *
from libraries.trigonometric_functions import asin
class Asin(Function):
    """SQL ASIN() function node: arc sine of a numeric expression (or of each
    element of a list of expressions)."""
    def __init__(self, input, row, column):
        Function.__init__(self, row, column)
        self.input = input

    def execute(self, environment):
        """Evaluate the node; returns result dict(s) or an error dict."""
        if isinstance(self.input, list):
            results = []
            for val in self.input:
                value = val.execute(environment)
                # str() avoids a TypeError when concatenating a non-string value.
                if value['typ'] != Type.INT and value['typ'] != Type.DECIMAL:
                    return {'Error': "El valor " + str(value['value']) + " no es decimal o entero", 'linea': self.row, 'columna': self.column}
                # asin() is only defined on [-1, 1] (original message wrongly
                # advertised "[1,infinito]").
                if value['value'] < -1 or value['value'] > 1:
                    return {'Error': "El valor " + str(value['value']) + " no entra en el rango de [-1,1] que son aceptados por la funcion asin()", 'linea': self.row, 'columna': self.column}
                # asin always yields a fractional number, so label it DECIMAL
                # (consistent with the scalar branch below).
                results.append({'value': asin(value['value']), 'typ': Type.DECIMAL})
            return results
        value = self.input.execute(environment)
        if value['typ'] != Type.INT and value['typ'] != Type.DECIMAL:
            return {'Error': "El valor " + str(value['value']) + " no es decimal o entero", 'linea': self.row, 'columna': self.column}
        if value['value'] < -1 or value['value'] > 1:
            return {'Error': "El valor " + str(value['value']) + " no entra en el rango de [-1,1] que son aceptados por la funcion asin()", 'linea': self.row, 'columna': self.column}
        return {'value': asin(value['value']), 'typ': Type.DECIMAL}
| true | true |
1c2ef813d7f16c4fe3c34d616adbed3c7bba2819 | 18,954 | py | Python | google/ads/googleads/v8/services/services/product_bidding_category_constant_service/client.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 285 | 2018-10-05T16:47:58.000Z | 2022-03-31T00:58:39.000Z | google/ads/googleads/v8/services/services/product_bidding_category_constant_service/client.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 425 | 2018-09-10T13:32:41.000Z | 2022-03-31T14:50:05.000Z | google/ads/googleads/v8/services/services/product_bidding_category_constant_service/client.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 369 | 2018-11-28T07:01:00.000Z | 2022-03-28T09:53:22.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import (
product_bidding_category_constant,
)
from google.ads.googleads.v8.services.types import (
product_bidding_category_constant_service,
)
from .transports.base import (
ProductBiddingCategoryConstantServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import ProductBiddingCategoryConstantServiceGrpcTransport
class ProductBiddingCategoryConstantServiceClientMeta(type):
    """Metaclass for the ProductBiddingCategoryConstantService client.

    Maintains a class-level registry of transport implementations so that
    support objects (e.g. the transport) can be resolved without polluting
    client instances.
    """

    _transport_registry: Dict[
        str, Type[ProductBiddingCategoryConstantServiceTransport]
    ] = OrderedDict()
    _transport_registry["grpc"] = ProductBiddingCategoryConstantServiceGrpcTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[ProductBiddingCategoryConstantServiceTransport]:
        """Return the transport class registered under ``label``.

        When no label is given, the first-registered transport is used as
        the default.  An unknown label raises ``KeyError``.
        """
        registry = cls._transport_registry
        if not label:
            # No explicit choice: hand back the default (first-registered)
            # transport class.
            return next(iter(registry.values()))
        return registry[label]
class ProductBiddingCategoryConstantServiceClient(
    metaclass=ProductBiddingCategoryConstantServiceClientMeta
):
    """Service to fetch Product Bidding Categories."""
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert api endpoint to mTLS endpoint.
        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )
        m = mtls_endpoint_re.match(api_endpoint)
        # NOTE(review): assumes the endpoint matches the pattern above; a value
        # the regex rejects (e.g. one starting with ".") would make ``m`` None
        # and raise here -- confirm callers only pass well-formed endpoints.
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            return api_endpoint
        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
    DEFAULT_ENDPOINT = "googleads.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials info.
        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            ProductBiddingCategoryConstantServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(
            info
        )
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            ProductBiddingCategoryConstantServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename
        )
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    # Alias of from_service_account_file: a JSON keyfile path behaves the same.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> ProductBiddingCategoryConstantServiceTransport:
        """Return the transport used by the client instance.
        Returns:
            ProductBiddingCategoryConstantServiceTransport: The transport used by the client instance.
        """
        return self._transport
    @staticmethod
    def product_bidding_category_constant_path(
        country_code: str, level: str, id: str,
    ) -> str:
        """Return a fully-qualified product_bidding_category_constant string."""
        return "productBiddingCategoryConstants/{country_code}~{level}~{id}".format(
            country_code=country_code, level=level, id=id,
        )
    @staticmethod
    def parse_product_bidding_category_constant_path(
        path: str,
    ) -> Dict[str, str]:
        """Parse a product_bidding_category_constant path into its component segments."""
        m = re.match(
            r"^productBiddingCategoryConstants/(?P<country_code>.+?)~(?P<level>.+?)~(?P<id>.+?)$",
            path,
        )
        return m.groupdict() if m else {}
    @staticmethod
    def common_billing_account_path(billing_account: str,) -> str:
        """Return a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(
            billing_account=billing_account,
        )
    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_folder_path(folder: str,) -> str:
        """Return a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder,)
    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_organization_path(organization: str,) -> str:
        """Return a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization,)
    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_project_path(project: str,) -> str:
        """Return a fully-qualified project string."""
        return "projects/{project}".format(project=project,)
    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_location_path(project: str, location: str,) -> str:
        """Return a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(
            project=project, location=location,
        )
    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(
            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
        )
        return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[
            str, ProductBiddingCategoryConstantServiceTransport, None
        ] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the product bidding category constant service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.ProductBiddingCategoryConstantServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        # strtobool only accepts recognized truthy/falsy strings and raises
        # ValueError for anything else.
        use_client_cert = bool(
            util.strtobool(
                os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
            )
        )
        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                import grpc  # type: ignore
                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None
        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT
                    if is_mtls
                    else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(
            transport, ProductBiddingCategoryConstantServiceTransport
        ):
            # transport is a ProductBiddingCategoryConstantServiceTransport instance.
            if credentials:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            self._transport = transport
        elif isinstance(transport, str):
            # Named transport: note the resolved mTLS endpoint is NOT used in
            # this branch; the transport is built against DEFAULT_ENDPOINT.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials, host=self.DEFAULT_ENDPOINT
            )
        else:
            self._transport = ProductBiddingCategoryConstantServiceGrpcTransport(
                credentials=credentials,
                host=api_endpoint,
                ssl_channel_credentials=ssl_credentials,
                client_info=client_info,
            )
    def get_product_bidding_category_constant(
        self,
        request: product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest = None,
        *,
        resource_name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> product_bidding_category_constant.ProductBiddingCategoryConstant:
        r"""Returns the requested Product Bidding Category in full detail.
        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `HeaderError <>`__
        `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
        Args:
            request (:class:`google.ads.googleads.v8.services.types.GetProductBiddingCategoryConstantRequest`):
                The request object. Request message for
                [ProductBiddingCategoryConstantService.GetProductBiddingCategoryConstant][google.ads.googleads.v8.services.ProductBiddingCategoryConstantService.GetProductBiddingCategoryConstant].
            resource_name (:class:`str`):
                Required. Resource name of the
                Product Bidding Category to fetch.
                This corresponds to the ``resource_name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.ads.googleads.v8.resources.types.ProductBiddingCategoryConstant:
                A Product Bidding Category.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        if request is not None and any([resource_name]):
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(
            request,
            product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest,
        ):
            request = product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest(
                request
            )
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if resource_name is not None:
                request.resource_name = resource_name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.get_product_bidding_category_constant
        ]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource_name", request.resource_name),)
            ),
        )
        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,
        )
        # Done; return the response.
        return response
__all__ = ("ProductBiddingCategoryConstantServiceClient",)
| 41.565789 | 196 | 0.647937 |
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials
from google.auth.transport import mtls
from google.auth.transport.grpc import SslCredentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.ads.googleads.v8.resources.types import (
product_bidding_category_constant,
)
from google.ads.googleads.v8.services.types import (
product_bidding_category_constant_service,
)
from .transports.base import (
ProductBiddingCategoryConstantServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import ProductBiddingCategoryConstantServiceGrpcTransport
class ProductBiddingCategoryConstantServiceClientMeta(type):
    """Metaclass for the ProductBiddingCategoryConstantService client.

    Holds the class-level registry of transport implementations so support
    objects can be resolved without polluting client instances.
    """
    # Registry mapping transport label -> transport class; insertion order
    # determines the default transport.
    _transport_registry = (
        OrderedDict()
    )
    _transport_registry[
        "grpc"
    ] = ProductBiddingCategoryConstantServiceGrpcTransport
    def get_transport_class(
        cls, label: str = None,
    ) -> Type[ProductBiddingCategoryConstantServiceTransport]:
        """Return the transport class for ``label``.

        Falls back to the first-registered transport when no label is given;
        raises KeyError for an unknown label.
        """
        if label:
            return cls._transport_registry[label]
        # No transport requested: use the default (first-registered) one.
        return next(iter(cls._transport_registry.values()))
class ProductBiddingCategoryConstantServiceClient(
    metaclass=ProductBiddingCategoryConstantServiceClientMeta
):
    """Service client to fetch Product Bidding Category constants."""
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert a regular API endpoint into its mTLS variant.

        "*.googleapis.com" / "*.sandbox.googleapis.com" become
        "*.mtls.googleapis.com" / "*.mtls.sandbox.googleapis.com".
        """
        if not api_endpoint:
            return api_endpoint
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )
        m = mtls_endpoint_re.match(api_endpoint)
        # NOTE(review): assumes the endpoint matches the pattern; a value the
        # regex rejects would make ``m`` None and raise here -- confirm inputs.
        name, mtls, sandbox, googledomain = m.groups()
        # Already mTLS, or not a googleapis.com domain: return unchanged.
        if mtls or not googledomain:
            return api_endpoint
        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
    DEFAULT_ENDPOINT = "googleads.googleapis.com"
    # Compute the default mTLS endpoint from the default endpoint; __func__
    # unwraps the staticmethod so it can be called at class-body time.
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(
        DEFAULT_ENDPOINT
    )
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Build a client from service-account credentials given as a dict."""
        credentials = service_account.Credentials.from_service_account_info(
            info
        )
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Build a client from a service-account private-key JSON file path."""
        credentials = service_account.Credentials.from_service_account_file(
            filename
        )
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    # Alias of from_service_account_file: a JSON keyfile path behaves the same.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> ProductBiddingCategoryConstantServiceTransport:
        """The transport instance used by this client."""
        return self._transport
    @staticmethod
    def product_bidding_category_constant_path(
        country_code: str, level: str, id: str,
    ) -> str:
        """Return a fully-qualified product_bidding_category_constant path string."""
        return "productBiddingCategoryConstants/{country_code}~{level}~{id}".format(
            country_code=country_code, level=level, id=id,
        )
    @staticmethod
    def parse_product_bidding_category_constant_path(
        path: str,
    ) -> Dict[str, str]:
        """Parse a product_bidding_category_constant path into its segments.

        Returns an empty dict when ``path`` does not match the expected shape.
        """
        m = re.match(
            r"^productBiddingCategoryConstants/(?P<country_code>.+?)~(?P<level>.+?)~(?P<id>.+?)$",
            path,
        )
        return m.groupdict() if m else {}
    @staticmethod
    def common_billing_account_path(billing_account: str,) -> str:
        """Return a fully-qualified billing_account path string."""
        return "billingAccounts/{billing_account}".format(
            billing_account=billing_account,
        )
    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its segments (empty dict on mismatch)."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_folder_path(folder: str,) -> str:
        """Return a fully-qualified folder path string."""
        return "folders/{folder}".format(folder=folder,)
    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its segments (empty dict on mismatch)."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_organization_path(organization: str,) -> str:
        """Return a fully-qualified organization path string."""
        return "organizations/{organization}".format(organization=organization,)
    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse an organization path into its segments (empty dict on mismatch)."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_project_path(project: str,) -> str:
        """Return a fully-qualified project path string."""
        return "projects/{project}".format(project=project,)
    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its segments (empty dict on mismatch)."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_location_path(project: str, location: str,) -> str:
        """Return a fully-qualified location path string."""
        return "projects/{project}/locations/{location}".format(
            project=project, location=location,
        )
    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its segments (empty dict on mismatch)."""
        m = re.match(
            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
        )
        return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[
            str, ProductBiddingCategoryConstantServiceTransport, None
        ] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the client.

        ``client_options`` may override the API endpoint; the
        GOOGLE_API_USE_MTLS_ENDPOINT ("always"/"never"/"auto") and
        GOOGLE_API_USE_CLIENT_CERTIFICATE ("true"/"false") environment
        variables steer mTLS behavior.  Raises MutualTLSChannelError for an
        unsupported GOOGLE_API_USE_MTLS_ENDPOINT value.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Stage 1: decide whether to use a client certificate (mutual TLS).
        use_client_cert = bool(
            util.strtobool(
                os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
            )
        )
        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                import grpc
                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                # Fall back to the default SSL client certificate, if present.
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None
        # Stage 2: resolve the API endpoint (explicit option wins over env var).
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT
                    if is_mtls
                    else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )
        # Stage 3: save or instantiate the transport.
        if isinstance(
            transport, ProductBiddingCategoryConstantServiceTransport
        ):
            # A ready-made transport instance carries its own credentials.
            if credentials:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            self._transport = transport
        elif isinstance(transport, str):
            # Named transport: built against DEFAULT_ENDPOINT (the resolved
            # mTLS endpoint is not used in this branch).
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials, host=self.DEFAULT_ENDPOINT
            )
        else:
            self._transport = ProductBiddingCategoryConstantServiceGrpcTransport(
                credentials=credentials,
                host=api_endpoint,
                ssl_channel_credentials=ssl_credentials,
                client_info=client_info,
            )
    def get_product_bidding_category_constant(
        self,
        request: product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest = None,
        *,
        resource_name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> product_bidding_category_constant.ProductBiddingCategoryConstant:
        """Return the requested Product Bidding Category in full detail.

        Accepts either a ``request`` proto or the flattened ``resource_name``
        keyword (but not both); raises ValueError if both are supplied.
        """
        # A request object and flattened field arguments are mutually exclusive.
        if request is not None and any([resource_name]):
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Coerce plain inputs into the request proto; flattened fields are
        # only applied when we constructed the request ourselves.
        if not isinstance(
            request,
            product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest,
        ):
            request = product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest(
                request
            )
            if resource_name is not None:
                request.resource_name = resource_name
        # Wrapped method adds retry/timeout handling configured on the transport.
        rpc = self._transport._wrapped_methods[
            self._transport.get_product_bidding_category_constant
        ]
        # Routing header: the server uses resource_name for request routing.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource_name", request.resource_name),)
            ),
        )
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,
        )
        return response
__all__ = ("ProductBiddingCategoryConstantServiceClient",)
| true | true |
1c2ef81485b97a49069d91271d4f6e8e453305b3 | 1,961 | py | Python | capstone/capdb/tests/test_versioning.py | jcushman/capstone | ef3ced77f69aabe14c89ab67003a6e88736bf777 | [
"MIT"
] | null | null | null | capstone/capdb/tests/test_versioning.py | jcushman/capstone | ef3ced77f69aabe14c89ab67003a6e88736bf777 | [
"MIT"
] | 4 | 2021-09-02T20:54:31.000Z | 2022-02-27T14:04:06.000Z | capstone/capdb/tests/test_versioning.py | jcushman/capstone | ef3ced77f69aabe14c89ab67003a6e88736bf777 | [
"MIT"
] | null | null | null | from copy import deepcopy
import pytest
from django.db import transaction
from scripts.helpers import parse_xml, serialize_xml
@pytest.mark.parametrize('versioned_fixture_name', [
    'volume_xml',
    'case_xml',
    'page_xml'
])
@pytest.mark.django_db(transaction=True)
def test_versioning(versioned_fixture_name, request):
    """Saving a versioned model in a new transaction creates a history record
    that preserves the old values and whose sys_period abuts the current row's.
    """
    # load initial volume_xml/case_xml/page_xml
    # (request.getfuncargvalue was deprecated and removed in pytest 4;
    # getfixturevalue is the supported replacement with identical semantics)
    versioned_instance = request.getfixturevalue(versioned_fixture_name)
    original_instance = deepcopy(versioned_instance)
    # starts with no history
    assert versioned_instance.history.count() == 0
    # versions are only created once per transaction.
    # since tests run in transactions, run an initial sub-transaction to
    # make sure our next save causes a new version to be created.
    # note that this is not sufficient when using the temporal_tables
    # extension, which additionally requires (transaction=True) as an
    # argument to the pytest.mark.django_db decorator
    with transaction.atomic(using='capdb'):
        versioned_instance.save()
    # make some modifications:
    versioned_instance.s3_key = 'changed'
    parsed = parse_xml(versioned_instance.orig_xml)
    parsed('mets').append("<new_element/>")
    versioned_instance.orig_xml = serialize_xml(parsed)
    # save modified version:
    with transaction.atomic(using='capdb'):
        versioned_instance.save()
    # historical version should now exist:
    previous_version = versioned_instance.history.first()
    assert previous_version
    # current version's sys_period should start where historical version's sys_period ends:
    versioned_instance.refresh_from_db()  # load current sys_period
    assert versioned_instance.sys_period.lower == previous_version.sys_period.upper
    # historical version should have values from before latest save:
    assert previous_version.s3_key == original_instance.s3_key
    assert previous_version.orig_xml == original_instance.orig_xml
| 37 | 91 | 0.760836 | from copy import deepcopy
import pytest
from django.db import transaction
from scripts.helpers import parse_xml, serialize_xml
@pytest.mark.parametrize('versioned_fixture_name', [
    'volume_xml',
    'case_xml',
    'page_xml'
])
@pytest.mark.django_db(transaction=True)
def test_versioning(versioned_fixture_name, request):
    """A second save in a fresh sub-transaction must create a history record
    preserving the pre-save field values, with adjacent sys_period ranges."""
    # getfuncargvalue was removed in pytest 4; getfixturevalue is the
    # supported replacement with identical semantics.
    versioned_instance = request.getfixturevalue(versioned_fixture_name)
    original_instance = deepcopy(versioned_instance)
    assert versioned_instance.history.count() == 0
    # Versions are created once per transaction; commit an initial
    # sub-transaction so the next save produces a new version.
    with transaction.atomic(using='capdb'):
        versioned_instance.save()
    # Modify both a scalar field and the XML payload.
    versioned_instance.s3_key = 'changed'
    parsed = parse_xml(versioned_instance.orig_xml)
    parsed('mets').append("<new_element/>")
    versioned_instance.orig_xml = serialize_xml(parsed)
    with transaction.atomic(using='capdb'):
        versioned_instance.save()
    # A historical version should now exist with the pre-save values.
    previous_version = versioned_instance.history.first()
    assert previous_version
    versioned_instance.refresh_from_db()
    assert versioned_instance.sys_period.lower == previous_version.sys_period.upper
    assert previous_version.s3_key == original_instance.s3_key
    assert previous_version.orig_xml == original_instance.orig_xml
| true | true |
1c2ef995372fc9c30e32044a35e8087c8c35d3f7 | 3,507 | py | Python | tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/metric.py | CyberFlameGO/milvus | c6ebae89598c4198fa44ea02f8a60219b21fbffd | [
"Apache-2.0"
] | 10,504 | 2019-09-16T12:20:11.000Z | 2022-03-31T15:07:56.000Z | tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/metric.py | CyberFlameGO/milvus | c6ebae89598c4198fa44ea02f8a60219b21fbffd | [
"Apache-2.0"
] | 13,389 | 2019-09-16T06:49:53.000Z | 2022-03-31T18:01:24.000Z | tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/metric.py | CyberFlameGO/milvus | c6ebae89598c4198fa44ea02f8a60219b21fbffd | [
"Apache-2.0"
] | 1,792 | 2019-09-18T04:27:42.000Z | 2022-03-31T14:37:20.000Z | import time
import datetime
import json
import hashlib
from .env import Env
from .server import Server
from .hardware import Hardware
class Metric(object):
    """Accumulator for one benchmark-run report document.

    Collects run identity (``run_id``/``mode``), deployment info (``server``,
    ``hardware``, ``env``), workload description (``collection``/``index``/
    ``search``/``run_params``) and the measured ``metrics`` into one object
    that is later serialized and stored, e.g.::

        {"_type": "case", "run_id": 1629914593, "mode": "local",
         "status": "RUN_SUCC",
         "metrics": {"type": "ann_accuracy", "value": {"acc": 0.377}}, ...}
    """
    def __init__(self):
        self._version = '0.1'
        self._type = 'metric'
        self.run_id = None
        self.mode = None
        self.server = Server()
        self.hardware = Hardware()
        self.env = Env()
        self.status = "INIT"
        self.err_message = ""
        self.collection = {}
        self.index = {}
        self.search = {}
        self.run_params = {}
        # "value" is a placeholder until the first result is reported.
        self.metrics = {
            "type": "",
            "value": None,
        }
        self.datetime = str(datetime.datetime.now())
    def set_run_id(self):
        """Stamp the run with the current Unix time in whole seconds."""
        self.run_id = int(time.time())
    def set_mode(self, mode):
        """Record the deployment mode (e.g. "local")."""
        self.mode = mode
    # including: metric, suite_metric
    def set_case_metric_type(self):
        """Mark this document as a per-case metric ("case" vs the default "metric")."""
        self._type = "case"
    def json_md5(self):
        """MD5 hex digest of the sorted-key JSON dump of this object's attributes.

        NOTE(review): assumes every attribute is JSON-serializable; the nested
        Server/Hardware/Env objects must be reduced to plain values first --
        confirm against the reporting pipeline.
        """
        json_str = json.dumps(vars(self), sort_keys=True)
        return hashlib.md5(json_str.encode('utf-8')).hexdigest()
    def update_status(self, status):
        """Record the run status (e.g. "RUN_SUCC")."""
        self.status = status
    def update_result(self, result):
        """Merge ``result`` into ``metrics["value"]``.

        The value dict is created lazily: __init__ sets it to None, so the
        original ``.update()`` call raised AttributeError on the first report.
        """
        if self.metrics["value"] is None:
            self.metrics["value"] = {}
        self.metrics["value"].update(result)
    def update_message(self, err_message):
        """Record an error message for a failed run."""
        self.err_message = err_message
import datetime
import json
import hashlib
from .env import Env
from .server import Server
from .hardware import Hardware
class Metric(object):
    """Serializable benchmark-result record (one document per run).

    Aggregates run identity (run_id/mode), environment snapshots
    (server/hardware/env), benchmark inputs (collection/index/search/
    run_params) and the measured result (metrics).
    """
    def __init__(self):
        """Initialize an empty record with default/unset fields."""
        self._version = '0.1'
        self._type = 'metric'
        self.run_id = None
        self.mode = None
        self.server = Server()
        self.hardware = Hardware()
        self.env = Env()
        self.status = "INIT"
        self.err_message = ""
        self.collection = {}
        self.index = {}
        self.search = {}
        self.run_params = {}
        self.metrics = {
            "type": "",
            "value": None,
        }
        self.datetime = str(datetime.datetime.now())
    def set_run_id(self):
        """Stamp this record with the current epoch second as its run id."""
        self.run_id = int(time.time())
    def set_mode(self, mode):
        """Record the deployment mode for this run (e.g. "single")."""
        self.mode = mode
    def set_case_metric_type(self):
        """Tag this record as a per-case result ("case" instead of "metric")."""
        self._type = "case"
    def json_md5(self):
        """MD5 fingerprint of the sorted JSON dump of all attributes.

        NOTE(review): vars(self) contains Server/Hardware/Env instances;
        json.dumps raises TypeError unless they are JSON-serializable.
        """
        json_str = json.dumps(vars(self), sort_keys=True)
        return hashlib.md5(json_str.encode('utf-8')).hexdigest()
    def update_status(self, status):
        """Overwrite the lifecycle status (e.g. "RUN_SUCC")."""
        self.status = status
    def update_result(self, result):
        """Merge `result` into metrics["value"] (must already be a dict)."""
        self.metrics["value"].update(result)
    def update_message(self, err_message):
        """Store the error detail accompanying a failed status."""
        self.err_message = err_message
1c2ef9b593c702ed9b0441bcdae7e77d0db8f9b9 | 16,106 | py | Python | jax_md/space.py | pmistani/jax-md | 125c6922c1bc09df33d6a9934f50ea1321e02e73 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax_md/space.py | pmistani/jax-md | 125c6922c1bc09df33d6a9934f50ea1321e02e73 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax_md/space.py | pmistani/jax-md | 125c6922c1bc09df33d6a9934f50ea1321e02e73 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spaces in which particles are simulated.
Spaces are pairs of functions containing:
* `displacement_fn(Ra, Rb, **kwargs)`
Computes displacements between pairs of particles. Ra and Rb should
be ndarrays of shape [spatial_dim]. Returns an ndarray of shape
[spatial_dim]. To compute the displacement over more than one particle
at a time see the `map_product`, `map_bond`, and `map_neighbor` functions.
* `shift_fn(R, dR, **kwargs)` Moves points at position R by an amount dR.
Spaces can accept keyword arguments allowing the space to be changed over the
course of a simulation. For an example of this use see `periodic_general`.
Although displacement functions compute the displacement between two
points, it is often useful to compute displacements between multiple particles
in a vectorized fashion. To do this we provide three functions: `map_product`,
`map_bond`, and `map_neighbor`.
* `map_product` computes displacements between all pairs of points such that if
Ra has shape [n, spatial_dim] and Rb has shape `[m, spatial_dim]` then the
output has shape `[n, m, spatial_dim]`.
* `map_bond` computes displacements between all points in a list such that if
Ra has shape [n, spatial_dim] and Rb has shape [m, spatial_dim] then the
output has shape [n, spatial_dim].
* `map_neighbor` computes displacements between points and all of their
neighbors such that if Ra has shape [n, spatial_dim] and Rb has shape
[n, neighbors, spatial_dim] then the output has shape
[n, neighbors, spatial_dim].
"""
from typing import Callable, Union, Tuple, Any, Optional
from jax.abstract_arrays import ShapedArray
from jax import eval_shape
from jax import vmap
from jax import custom_jvp
import jax
import jax.numpy as jnp
from jax_md.util import Array
from jax_md.util import f32
from jax_md.util import f64
from jax_md.util import safe_mask
# Types
DisplacementFn = Callable[[Array, Array], Array]
MetricFn = Callable[[Array, Array], float]
DisplacementOrMetricFn = Union[DisplacementFn, MetricFn]
ShiftFn = Callable[[Array, Array], Array]
Space = Tuple[DisplacementFn, ShiftFn]
Box = Array
# Primitive Spatial Transforms
def inverse(box: Box) -> Box:
  """Compute the inverse of an affine transformation.

  Scalars and size-1 or 1-d boxes invert elementwise; only a full 2-d
  matrix needs a linear-algebra inverse.
  """
  if jnp.isscalar(box) or box.size == 1 or box.ndim == 1:
    return 1 / box
  if box.ndim == 2:
    return jnp.linalg.inv(box)
  raise ValueError(('Box must be either: a scalar, a vector, or a matrix. '
                    f'Found {box}.'))
def _get_free_indices(n: int) -> str:
return ''.join([chr(ord('a') + i) for i in range(n)])
def raw_transform(box: Box, R: Array) -> Array:
  """Apply an affine transformation to positions.

  See `periodic_general` for a description of the semantics of `box`.

  Args:
    box: An affine transformation described in `periodic_general`.
    R: Array of positions. Should have shape `(..., spatial_dimension)`.
  Returns:
    A transformed array positions of shape `(..., spatial_dimension)`.
  """
  if jnp.isscalar(box) or box.size == 1:
    # Scalar box: uniform scaling.
    return R * box
  if box.ndim == 1:
    # Vector box: per-axis scaling (diagonal transform) on the trailing axis.
    batch = _get_free_indices(R.ndim - 1)
    return jnp.einsum(f'i,{batch}i->{batch}i', box, R)
  if box.ndim == 2:
    # Matrix box: general affine map applied to the trailing axis.
    batch = _get_free_indices(R.ndim - 1)
    return jnp.einsum(f'ij,{batch}j->{batch}i', box, R)
  raise ValueError(('Box must be either: a scalar, a vector, or a matrix. '
                    f'Found {box}.'))
@custom_jvp
def transform(box: Box, R: Array) -> Array:
  """Apply an affine transformation to positions.

  See `periodic_general` for a description of the semantics of `box`.

  Differentiation-aware: the custom JVP (`transform_jvp` below) treats the
  tangent of `R` as already living in the transformed space rather than the
  unit cube.

  Args:
    box: An affine transformation described in `periodic_general`.
    R: Array of positions. Should have shape `(..., spatial_dimension)`.
  Returns:
    A transformed array positions of shape `(..., spatial_dimension)`.
  """
  return raw_transform(box, R)
@transform.defjvp
def transform_jvp(primals, tangents):
  # Custom JVP for `transform`. A literal product rule would give
  # transform(box, dR) + transform(dbox, R); here the dR term is deliberately
  # NOT multiplied by `box`, so position tangents are interpreted as living in
  # the transformed (real) space rather than the unit cube — see the
  # discussion of derivatives in the `periodic_general` docstring.
  box, R = primals
  dbox, dR = tangents
  return (transform(box, R), dR + transform(dbox, R))
def pairwise_displacement(Ra: Array, Rb: Array) -> Array:
  """Displacement `Ra - Rb` for a single pair of positions.

  Args:
    Ra: Vector of positions; ndarray(shape=[spatial_dim]).
    Rb: Vector of positions; ndarray(shape=[spatial_dim]).
  Returns:
    Matrix of displacements; ndarray(shape=[spatial_dim]).
  Raises:
    ValueError: if either input is not rank-1 or the shapes differ.
  """
  if len(Ra.shape) != 1:
    raise ValueError(
        'Can only compute displacements between vectors. To compute '
        'displacements between sets of vectors use vmap or TODO.'
    )
  if Ra.shape != Rb.shape:
    raise ValueError('Can only compute displacement between vectors of equal dimension.')
  return Ra - Rb
def periodic_displacement(side: Box, dR: Array) -> Array:
  """Wraps displacement vectors into a hypercube.

  Args:
    side: Specification of hypercube size. Either,
      (a) float if all sides have equal length.
      (b) ndarray(spatial_dim) if sides have different lengths.
    dR: Matrix of displacements; ndarray(shape=[..., spatial_dim]).
  Returns:
    Matrix of wrapped displacements; ndarray(shape=[..., spatial_dim]).
  """
  # Shift by side/2 before taking the modulus so the result lands in
  # [-side/2, side/2) — the minimum-image displacement.
  return jnp.mod(dR + side * f32(0.5), side) - f32(0.5) * side
def square_distance(dR: Array) -> Array:
  """Squared Euclidean length of displacements along the trailing axis.

  Args:
    dR: Matrix of displacements; ndarray(shape=[..., spatial_dim]).
  Returns:
    Matrix of squared distances; ndarray(shape=[...]).
  """
  return jnp.sum(jnp.square(dR), axis=-1)
def distance(dR: Array) -> Array:
  """Computes distances.

  Args:
    dR: Matrix of displacements; ndarray(shape=[..., spatial_dim]).
  Returns:
    Matrix of distances; ndarray(shape=[...]).
  """
  dr = square_distance(dR)
  # `safe_mask` applies sqrt only where dr > 0; this keeps the gradient
  # finite at dr == 0 (d/dx sqrt(x) diverges at the origin).
  return safe_mask(dr > 0, jnp.sqrt, dr)
def periodic_shift(side: Box, R: Array, dR: Array) -> Array:
  """Apply shift `dR` to positions `R`, wrapping back into the periodic box."""
  shifted = R + dR
  return jnp.mod(shifted, side)
""" Spaces """
def free() -> Space:
  """Free (non-periodic) boundary conditions."""
  def displacement_fn(Ra: Array, Rb: Array, perturbation: Optional[Array]=None,
                      **unused_kwargs) -> Array:
    # Plain Euclidean displacement, optionally pushed through an affine
    # perturbation (used e.g. for strain derivatives).
    displacement = pairwise_displacement(Ra, Rb)
    if perturbation is None:
      return displacement
    return raw_transform(perturbation, displacement)
  def shift_fn(R: Array, dR: Array, **unused_kwargs) -> Array:
    return R + dR
  return displacement_fn, shift_fn
def periodic(side: Box, wrapped: bool=True) -> Space:
  """Periodic boundary conditions on a hypercube of sidelength side.

  Args:
    side: Either a float or an ndarray of shape [spatial_dimension] specifying
      the size of each side of the periodic box.
    wrapped: A boolean specifying whether or not particle positions are
      remapped back into the box after each step

  Returns:
    (displacement_fn, shift_fn) tuple.
  """
  def _check_no_box(kwargs):
    # The box of `periodic` is fixed at construction; a `box` kwarg almost
    # certainly means the caller wanted `periodic_general`, so fail loudly
    # instead of silently ignoring it. (Previously this check was duplicated
    # in three closures and the message was missing a space between
    # sentences.)
    if 'box' in kwargs:
      raise ValueError('`space.periodic` does not accept a box argument. '
                       'Perhaps you meant to use `space.periodic_general`?')

  def displacement_fn(Ra: Array, Rb: Array,
                      perturbation: Optional[Array] = None,
                      **unused_kwargs) -> Array:
    _check_no_box(unused_kwargs)
    dR = periodic_displacement(side, pairwise_displacement(Ra, Rb))
    if perturbation is not None:
      dR = raw_transform(perturbation, dR)
    return dR

  if wrapped:
    def shift_fn(R: Array, dR: Array, **unused_kwargs) -> Array:
      _check_no_box(unused_kwargs)
      return periodic_shift(side, R, dR)
  else:
    def shift_fn(R: Array, dR: Array, **unused_kwargs) -> Array:
      _check_no_box(unused_kwargs)
      return R + dR

  return displacement_fn, shift_fn
def periodic_general(box: Box,
                     fractional_coordinates: bool=True,
                     wrapped: bool=True) -> Space:
  """Periodic boundary conditions on a parallelepiped.
  This function defines a simulation on a parallelepiped, :math:`X`, formed by
  applying an affine transformation, :math:`T`, to the unit hypercube
  :math:`U = [0, 1]^d` along with periodic boundary conditions across all
  of the faces.
  Formally, the space is defined such that :math:`X = {Tu : u \in [0, 1]^d}`.
  The affine transformation, :math:`T`, can be specified in a number of different
  ways. For a parallelepiped that is: 1) a cube of side length :math:`L`, the affine
  transformation can simply be a scalar; 2) an orthorhombic unit cell can be
  specified by a vector `[Lx, Ly, Lz]` of lengths for each axis; 3) a general
  triclinic cell can be specified by an upper triangular matrix.
  There are a number of ways to parameterize a simulation on :math:`X`.
  `periodic_general` supports two parametrizations of :math:`X` that can be selected
  using the `fractional_coordinates` keyword argument.
  1) When `fractional_coordinates=True`, particle positions are stored in the
  unit cube, :math:`u\in U`. Here, the displacement function computes the
  displacement between :math:`x, y \in X` as :math:`d_X(x, y) = Td_U(u, v)` where
  :math:`d_U` is the displacement function on the unit cube, :math:`U`, :math:`x = Tu`, and
  :math:`v = Tv` with :math:`u, v \in U`. The derivative of the displacement function
  is defined so that derivatives live in :math:`X` (as opposed to being
  backpropagated to :math:`U`). The shift function, `shift_fn(R, dR)` is defined
  so that :math:`R` is expected to lie in :math:`U` while :math:`dR` should lie in :math:`X`. This
  combination enables code such as `shift_fn(R, force_fn(R))` to work as
  intended.
  2) When `fractional_coordinates=False`, particle positions are stored in
  the parallelepiped :math:`X`. Here, for :math:`x, y \in X`, the displacement function
  is defined as :math:`d_X(x, y) = Td_U(T^{-1}x, T^{-1}y)`. Since there is an
  extra multiplication by :math:`T^{-1}`, this parameterization is typically
  slower than `fractional_coordinates=True`. As in 1), the displacement
  function is defined to compute derivatives in :math:`X`. The shift function
  is defined so that :math:`R` and :math:`dR` should both lie in :math:`X`.
  Example:
  .. code-block:: python
    from jax import random
    side_length = 10.0
    disp_frac, shift_frac = periodic_general(side_length,
                                             fractional_coordinates=True)
    disp_real, shift_real = periodic_general(side_length,
                                             fractional_coordinates=False)
    # Instantiate random positions in both parameterizations.
    R_frac = random.uniform(random.PRNGKey(0), (4, 3))
    R_real = side_length * R_frac
    # Make some shift vectors.
    dR = random.normal(random.PRNGKey(0), (4, 3))
    disp_real(R_real[0], R_real[1]) == disp_frac(R_frac[0], R_frac[1])
    transform(side_length, shift_frac(R_frac, 1.0)) == shift_real(R_real, 1.0)
  It is often desirable to deform a simulation cell either: using a finite
  deformation during a simulation, or using an infinitesimal deformation while
  computing elastic constants. To do this using fractional coordinates, we can
  supply a new affine transformation as `displacement_fn(Ra, Rb, box=new_box)`.
  When using real coordinates, we can specify positions in a space :math:`X` defined
  by an affine transformation :math:`T` and compute displacements in a deformed space
  :math:`X'` defined by an affine transformation :math:`T'`. This is done by writing
  `displacement_fn(Ra, Rb, new_box=new_box)`.
  There are a few caveats when using `periodic_general`. `periodic_general`
  uses the minimum image convention, and so it will fail for potentials whose
  cutoff is longer than the half of the side-length of the box. It will also
  fail to find the correct image when the box is too deformed. We hope to add a
  more robust box for small simulations soon (TODO) along with better error
  checking. In the meantime caution is recommended.
  Args:
    box: A `(spatial_dim, spatial_dim)` affine transformation.
    fractional_coordinates: A boolean specifying whether positions are stored
      in the parallelepiped or the unit cube.
    wrapped: A boolean specifying whether or not particle positions are
      remapped back into the box after each step
  Returns:
    (displacement_fn, shift_fn) tuple.
  """
  # Cache the inverse of the construction-time box; it is recomputed below
  # only when a per-call `box` override makes it stale.
  inv_box = inverse(box)
  def displacement_fn(Ra, Rb, perturbation=None, **kwargs):
    _box, _inv_box = box, inv_box
    # `box` overrides the cell for this call.
    if 'box' in kwargs:
      _box = kwargs['box']
      # Only real-space coordinates need the inverse (to map back to U).
      if not fractional_coordinates:
        _inv_box = inverse(_box)
    # `new_box` deforms the output space while positions stay in the old one.
    if 'new_box' in kwargs:
      _box = kwargs['new_box']
    # Real-space positions are first mapped into the unit cube.
    if not fractional_coordinates:
      Ra = transform(_inv_box, Ra)
      Rb = transform(_inv_box, Rb)
    # Minimum-image displacement in U, then mapped out to X.
    dR = periodic_displacement(f32(1.0), pairwise_displacement(Ra, Rb))
    dR = transform(_box, dR)
    if perturbation is not None:
      dR = raw_transform(perturbation, dR)
    return dR
  def u(R, dR):
    # Shift in the unit cube, optionally wrapping back into [0, 1)^d.
    if wrapped:
      return periodic_shift(f32(1.0), R, dR)
    return R + dR
  def shift_fn(R, dR, **kwargs):
    # Fast path: unwrapped real-space shifts never touch the box.
    if not fractional_coordinates and not wrapped:
      return R + dR
    _box, _inv_box = box, inv_box
    if 'box' in kwargs:
      _box = kwargs['box']
      _inv_box = inverse(_box)
    if 'new_box' in kwargs:
      _box = kwargs['new_box']
    # dR arrives in real space X; map it into the unit cube to shift there.
    dR = transform(_inv_box, dR)
    if not fractional_coordinates:
      R = transform(_inv_box, R)
    R = u(R, dR)
    if not fractional_coordinates:
      R = transform(_box, R)
    return R
  return displacement_fn, shift_fn
def metric(displacement: DisplacementFn) -> MetricFn:
  """Takes a displacement function and creates a metric (distance) function."""
  def metric_fn(Ra, Rb, **kwargs):
    return distance(displacement(Ra, Rb, **kwargs))
  return metric_fn
def map_product(metric_or_displacement: DisplacementOrMetricFn
                ) -> DisplacementOrMetricFn:
  """Vectorizes a metric or displacement function over all pairs of points."""
  over_first = vmap(metric_or_displacement, in_axes=(0, None), out_axes=0)
  return vmap(over_first, in_axes=(None, 0), out_axes=0)
def map_bond(metric_or_displacement: DisplacementOrMetricFn
             ) -> DisplacementOrMetricFn:
  """Vectorizes a metric or displacement function over a list of bonds."""
  return vmap(metric_or_displacement, in_axes=(0, 0), out_axes=0)
def map_neighbor(metric_or_displacement: DisplacementOrMetricFn
                 ) -> DisplacementOrMetricFn:
  """Vectorizes a metric or displacement function over neighborhoods."""
  # Outer vmap pairs each point with its own neighbor list; inner vmap runs
  # over the neighbors of that point.
  mapped = vmap(vmap(metric_or_displacement, in_axes=(None, 0)))
  def wrapped_fn(Ra, Rb, **kwargs):
    # NOTE(review): both inputs are negated before the call, matching the
    # original implementation — presumably a sign convention for neighbor
    # displacements; confirm against callers before changing.
    return mapped(-Ra, -Rb, **kwargs)
  return wrapped_fn
def canonicalize_displacement_or_metric(displacement_or_metric):
  """Checks whether a displacement or a metric function was provided.

  The function is probed abstractly (no FLOPs, via `eval_shape`) with rank-1
  inputs of spatial dimension 1 through 3: a scalar result means it is already
  a metric and is returned unchanged; a vector result means it is a
  displacement and it gets wrapped with `metric`.

  Raises:
    ValueError: if the probe fails for every dimension tried.
  """
  for dim in range(1, 4):
    try:
      R = ShapedArray((dim,), f32)
      dR_or_dr = eval_shape(displacement_or_metric, R, R, t=0)
      if len(dR_or_dr.shape) == 0:
        return displacement_or_metric
      else:
        return metric(displacement_or_metric)
    except TypeError:
      continue
    except ValueError:
      continue
  # Bug fix: the two literals previously concatenated to "...largerthan 4."
  # (missing space), and the loop above only probes dimensions 1-3.
  raise ValueError(
      'Canonicalize displacement not implemented for spatial dimension larger '
      'than 3.')
| 35.870824 | 103 | 0.688191 |
from typing import Callable, Union, Tuple, Any, Optional
from jax.abstract_arrays import ShapedArray
from jax import eval_shape
from jax import vmap
from jax import custom_jvp
import jax
import jax.numpy as jnp
from jax_md.util import Array
from jax_md.util import f32
from jax_md.util import f64
from jax_md.util import safe_mask
DisplacementFn = Callable[[Array, Array], Array]
MetricFn = Callable[[Array, Array], float]
DisplacementOrMetricFn = Union[DisplacementFn, MetricFn]
ShiftFn = Callable[[Array, Array], Array]
Space = Tuple[DisplacementFn, ShiftFn]
Box = Array
def inverse(box: Box) -> Box:
  """Compute the inverse of an affine transformation.

  Scalars and vectors invert elementwise; a 2-d matrix uses `jnp.linalg.inv`.
  """
  if jnp.isscalar(box) or box.size == 1:
    return 1 / box
  elif box.ndim == 1:
    return 1 / box
  elif box.ndim == 2:
    return jnp.linalg.inv(box)
  raise ValueError(('Box must be either: a scalar, a vector, or a matrix. '
                    f'Found {box}.'))
def _get_free_indices(n: int) -> str:
  """Return the first `n` letters starting at 'a', used as einsum batch indices."""
  return ''.join([chr(ord('a') + i) for i in range(n)])
def raw_transform(box: Box, R: Array) -> Array:
  """Apply an affine transformation `box` to positions `R` of shape (..., dim)."""
  if jnp.isscalar(box) or box.size == 1:
    # Scalar box: uniform scaling.
    return R * box
  elif box.ndim == 1:
    # Vector box: per-axis (diagonal) scaling of the trailing axis.
    indices = _get_free_indices(R.ndim - 1) + 'i'
    return jnp.einsum(f'i,{indices}->{indices}', box, R)
  elif box.ndim == 2:
    # Matrix box: general affine map applied to the trailing axis.
    free_indices = _get_free_indices(R.ndim - 1)
    left_indices = free_indices + 'j'
    right_indices = free_indices + 'i'
    return jnp.einsum(f'ij,{left_indices}->{right_indices}', box, R)
  raise ValueError(('Box must be either: a scalar, a vector, or a matrix. '
                    f'Found {box}.'))
@custom_jvp
def transform(box: Box, R: Array) -> Array:
  """Affine-transform positions; differentiation uses the custom JVP below."""
  return raw_transform(box, R)
@transform.defjvp
def transform_jvp(primals, tangents):
  # The dR term is deliberately not multiplied by `box`: position tangents
  # are treated as already living in the transformed (real) space.
  box, R = primals
  dbox, dR = tangents
  return (transform(box, R), dR + transform(dbox, R))
def pairwise_displacement(Ra: Array, Rb: Array) -> Array:
  """Displacement `Ra - Rb` for a single pair of rank-1 position vectors.

  Raises:
    ValueError: if either input is not rank-1 or the shapes differ.
  """
  if len(Ra.shape) != 1:
    msg = (
        'Can only compute displacements between vectors. To compute '
        'displacements between sets of vectors use vmap or TODO.'
    )
    raise ValueError(msg)
  if Ra.shape != Rb.shape:
    msg = 'Can only compute displacement between vectors of equal dimension.'
    raise ValueError(msg)
  return Ra - Rb
def periodic_displacement(side: Box, dR: Array) -> Array:
  """Wrap displacements into [-side/2, side/2) — the minimum-image convention."""
  return jnp.mod(dR + side * f32(0.5), side) - f32(0.5) * side
def square_distance(dR: Array) -> Array:
  """Squared Euclidean length of displacements along the trailing axis."""
  return jnp.sum(dR ** 2, axis=-1)
def distance(dR: Array) -> Array:
  """Euclidean length of displacements along the trailing axis."""
  dr = square_distance(dR)
  # safe_mask keeps the sqrt gradient finite at dr == 0.
  return safe_mask(dr > 0, jnp.sqrt, dr)
def periodic_shift(side: Box, R: Array, dR: Array) -> Array:
  """Shift positions by `dR`, wrapping back into the periodic hypercube."""
  return jnp.mod(R + dR, side)
def free() -> Space:
  """Free (non-periodic) boundary conditions; returns (displacement, shift)."""
  def displacement_fn(Ra: Array, Rb: Array, perturbation: Optional[Array]=None,
                      **unused_kwargs) -> Array:
    dR = pairwise_displacement(Ra, Rb)
    if perturbation is not None:
      dR = raw_transform(perturbation, dR)
    return dR
  def shift_fn(R: Array, dR: Array, **unused_kwargs) -> Array:
    return R + dR
  return displacement_fn, shift_fn
def periodic(side: Box, wrapped: bool=True) -> Space:
  """Periodic boundary conditions on a hypercube of sidelength `side`.

  Returns a (displacement_fn, shift_fn) tuple; `wrapped` controls whether
  shifted positions are remapped back into the box.
  """
  def displacement_fn(Ra: Array, Rb: Array,
                      perturbation: Optional[Array] = None,
                      **unused_kwargs) -> Array:
    # A `box` kwarg indicates the caller wanted `periodic_general`.
    if 'box' in unused_kwargs:
      raise ValueError(('`space.periodic` does not accept a box argument.'
                        'Perhaps you meant to use `space.periodic_general`?'))
    dR = periodic_displacement(side, pairwise_displacement(Ra, Rb))
    if perturbation is not None:
      dR = raw_transform(perturbation, dR)
    return dR
  if wrapped:
    def shift_fn(R: Array, dR: Array, **unused_kwargs) -> Array:
      if 'box' in unused_kwargs:
        raise ValueError(('`space.periodic` does not accept a box argument.'
                          'Perhaps you meant to use `space.periodic_general`?'))
      return periodic_shift(side, R, dR)
  else:
    def shift_fn(R: Array, dR: Array, **unused_kwargs) -> Array:
      if 'box' in unused_kwargs:
        raise ValueError(('`space.periodic` does not accept a box argument.'
                          'Perhaps you meant to use `space.periodic_general`?'))
      return R + dR
  return displacement_fn, shift_fn
def periodic_general(box: Box,
                     fractional_coordinates: bool=True,
                     wrapped: bool=True) -> Space:
  """Periodic boundary conditions on a parallelepiped defined by affine `box`.

  With `fractional_coordinates=True` positions live in the unit cube and are
  mapped out by `box`; with False they live in real space and are mapped into
  the unit cube with `inverse(box)` first. Per-call kwargs `box`/`new_box`
  override or deform the cell. Returns (displacement_fn, shift_fn).
  """
  # Cache the inverse of the construction-time box.
  inv_box = inverse(box)
  def displacement_fn(Ra, Rb, perturbation=None, **kwargs):
    _box, _inv_box = box, inv_box
    if 'box' in kwargs:
      _box = kwargs['box']
      # Only real-space coordinates need the (stale) inverse recomputed.
      if not fractional_coordinates:
        _inv_box = inverse(_box)
    if 'new_box' in kwargs:
      _box = kwargs['new_box']
    if not fractional_coordinates:
      Ra = transform(_inv_box, Ra)
      Rb = transform(_inv_box, Rb)
    # Minimum-image displacement in the unit cube, mapped out to real space.
    dR = periodic_displacement(f32(1.0), pairwise_displacement(Ra, Rb))
    dR = transform(_box, dR)
    if perturbation is not None:
      dR = raw_transform(perturbation, dR)
    return dR
  def u(R, dR):
    # Shift in the unit cube, optionally wrapping back into [0, 1)^d.
    if wrapped:
      return periodic_shift(f32(1.0), R, dR)
    return R + dR
  def shift_fn(R, dR, **kwargs):
    # Fast path: unwrapped real-space shifts never touch the box.
    if not fractional_coordinates and not wrapped:
      return R + dR
    _box, _inv_box = box, inv_box
    if 'box' in kwargs:
      _box = kwargs['box']
      _inv_box = inverse(_box)
    if 'new_box' in kwargs:
      _box = kwargs['new_box']
    # dR arrives in real space; map it into the unit cube to shift there.
    dR = transform(_inv_box, dR)
    if not fractional_coordinates:
      R = transform(_inv_box, R)
    R = u(R, dR)
    if not fractional_coordinates:
      R = transform(_box, R)
    return R
  return displacement_fn, shift_fn
def metric(displacement: DisplacementFn) -> MetricFn:
  """Turn a displacement function into a metric (distance) function."""
  return lambda Ra, Rb, **kwargs: distance(displacement(Ra, Rb, **kwargs))
def map_product(metric_or_displacement: DisplacementOrMetricFn
                ) -> DisplacementOrMetricFn:
  """Vectorize a metric or displacement function over all pairs of points."""
  return vmap(vmap(metric_or_displacement, (0, None), 0), (None, 0), 0)
def map_bond(metric_or_displacement: DisplacementOrMetricFn
             ) -> DisplacementOrMetricFn:
  """Vectorize a metric or displacement function over a list of bonds."""
  return vmap(metric_or_displacement, (0, 0), 0)
def map_neighbor(metric_or_displacement: DisplacementOrMetricFn
                 ) -> DisplacementOrMetricFn:
  """Vectorize a metric or displacement function over neighborhoods."""
  def wrapped_fn(Ra, Rb, **kwargs):
    # NOTE(review): inputs are negated before the call — presumably a sign
    # convention for neighbor displacements; confirm against callers.
    return vmap(vmap(metric_or_displacement, (None, 0)))(-Ra, -Rb, **kwargs)
  return wrapped_fn
def canonicalize_displacement_or_metric(displacement_or_metric):
  """Checks whether a displacement or a metric function was provided.

  Probes the function abstractly (via `eval_shape`) for spatial dimensions
  1-3: a scalar result is already a metric and is returned unchanged; a
  vector result is a displacement and gets wrapped with `metric`.
  """
  for dim in range(1, 4):
    try:
      R = ShapedArray((dim,), f32)
      dR_or_dr = eval_shape(displacement_or_metric, R, R, t=0)
      if len(dR_or_dr.shape) == 0:
        return displacement_or_metric
      else:
        return metric(displacement_or_metric)
    except TypeError:
      continue
    except ValueError:
      continue
  # Bug fix: the literals previously concatenated to "...largerthan 4."
  # (missing space); the loop above only probes dimensions 1-3.
  raise ValueError(
      'Canonicalize displacement not implemented for spatial dimension larger '
      'than 3.')
| true | true |
1c2efae827fb9292634f95c87c7625ff9d6887a5 | 9,423 | py | Python | src/datadog_api_client/v1/model/notebook_update_cell.py | rchenzheng/datadog-api-client-python | 2e86ac098c6f0c7fdd90ed218224587c0f8eafef | [
"Apache-2.0"
] | null | null | null | src/datadog_api_client/v1/model/notebook_update_cell.py | rchenzheng/datadog-api-client-python | 2e86ac098c6f0c7fdd90ed218224587c0f8eafef | [
"Apache-2.0"
] | null | null | null | src/datadog_api_client/v1/model/notebook_update_cell.py | rchenzheng/datadog-api-client-python | 2e86ac098c6f0c7fdd90ed218224587c0f8eafef | [
"Apache-2.0"
] | null | null | null | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the notebook-cell model classes at call time and publish them
    into this module's globals.

    NOTE(review): deferral is presumably to avoid circular imports between
    generated model modules — confirm against the generator's conventions.
    """
    from datadog_api_client.v1.model.notebook_cell_create_request import NotebookCellCreateRequest
    from datadog_api_client.v1.model.notebook_cell_resource_type import NotebookCellResourceType
    from datadog_api_client.v1.model.notebook_cell_update_request import NotebookCellUpdateRequest
    from datadog_api_client.v1.model.notebook_cell_update_request_attributes import NotebookCellUpdateRequestAttributes

    module_globals = globals()
    module_globals["NotebookCellCreateRequest"] = NotebookCellCreateRequest
    module_globals["NotebookCellResourceType"] = NotebookCellResourceType
    module_globals["NotebookCellUpdateRequest"] = NotebookCellUpdateRequest
    module_globals["NotebookCellUpdateRequestAttributes"] = NotebookCellUpdateRequestAttributes
class NotebookUpdateCell(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {}
@cached_property
def discriminator():
return None
attribute_map = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NotebookUpdateCell - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
attributes (NotebookCellUpdateRequestAttributes): [optional] # noqa: E501
type (NotebookCellResourceType): [optional] # noqa: E501
id (str): Notebook cell ID.. [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_spec_property_naming": _spec_property_naming,
"_configuration": _configuration,
"_visited_composed_classes": self._visited_composed_classes,
}
required_args = {}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
"anyOf": [],
"allOf": [],
"oneOf": [
NotebookCellCreateRequest,
NotebookCellUpdateRequest,
],
}
| 40.969565 | 119 | 0.609148 |
import re
import sys
from datadog_api_client.v1.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the notebook-cell model classes at call time and publish them
    via globals().

    NOTE(review): deferral is presumably to avoid circular imports between
    generated model modules — confirm against the generator's conventions.
    """
    from datadog_api_client.v1.model.notebook_cell_create_request import NotebookCellCreateRequest
    from datadog_api_client.v1.model.notebook_cell_resource_type import NotebookCellResourceType
    from datadog_api_client.v1.model.notebook_cell_update_request import NotebookCellUpdateRequest
    from datadog_api_client.v1.model.notebook_cell_update_request_attributes import NotebookCellUpdateRequestAttributes
    globals()["NotebookCellCreateRequest"] = NotebookCellCreateRequest
    globals()["NotebookCellResourceType"] = NotebookCellResourceType
    globals()["NotebookCellUpdateRequest"] = NotebookCellUpdateRequest
    globals()["NotebookCellUpdateRequestAttributes"] = NotebookCellUpdateRequestAttributes
class NotebookUpdateCell(ModelComposed):
    """Generated oneOf model: a notebook cell update payload.

    Per _composed_schemas below, an instance must match exactly one of
    NotebookCellCreateRequest or NotebookCellUpdateRequest. The methods
    decorated with @cached_property take no ``self`` — they are resolved
    as class-level cached properties by the generated model runtime.
    """
    # No enum-restricted or range-validated fields on this composed model.
    allowed_values = {}
    validations = {}
    @cached_property
    def additional_properties_type():
        """Types accepted for properties not declared in the schema."""
        # lazy_import() must run before the composed classes are referenced
        # anywhere; see lazy_import's docstring for why it is deferred.
        lazy_import()
        return (
            bool,
            date,
            datetime,
            dict,
            float,
            int,
            list,
            str,
            none_type,
        )
    _nullable = False
    @cached_property
    def openapi_types():
        # The composed model declares no properties of its own; they live
        # on the oneOf member classes.
        return {}
    @cached_property
    def discriminator():
        # No discriminator field — oneOf resolution is done by validation.
        return None
    attribute_map = {}
    # Internal attribute names that must never be treated as model data.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_spec_property_naming",
            "_path_to_item",
            "_configuration",
            "_visited_composed_classes",
            "_composed_instances",
            "_var_name_to_model_instances",
            "_additional_properties_model_instances",
        ]
    )
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Build the composed instance from keyword arguments only.

        Pops the underscore-prefixed bookkeeping kwargs (_check_type,
        _spec_property_naming, _path_to_item, _configuration,
        _visited_composed_classes), rejects any positional arguments, then
        delegates oneOf/anyOf/allOf resolution to
        validate_get_composed_info().
        """
        _check_type = kwargs.pop("_check_type", True)
        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
        _path_to_item = kwargs.pop("_path_to_item", ())
        _configuration = kwargs.pop("_configuration", None)
        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
        # Positional args are not supported by generated models.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
                % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Track classes already visited to guard against infinite recursion
        # through mutually composed schemas.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            "_check_type": _check_type,
            "_path_to_item": _path_to_item,
            "_spec_property_naming": _spec_property_naming,
            "_configuration": _configuration,
            "_visited_composed_classes": self._visited_composed_classes,
        }
        required_args = {}
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Resolve which oneOf member(s) the kwargs satisfy.
        composed_info = validate_get_composed_info(constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            # When the configuration says to discard unknown keys and no
            # additional-properties model accepted them, skip unused kwargs.
            if (
                var_name in unused_args
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and not self._additional_properties_model_instances
            ):
                continue
            setattr(self, var_name, var_value)
    @cached_property
    def _composed_schemas():
        # Must be computed lazily: the member classes are defined in modules
        # that may still be loading when this module is imported.
        # loading
        lazy_import()
        return {
            "anyOf": [],
            "allOf": [],
            "oneOf": [
                NotebookCellCreateRequest,
                NotebookCellUpdateRequest,
            ],
        }
| true | true |
1c2efb5426c722d309f0b0ac145a4d76849b91f0 | 3,310 | py | Python | infra/bots/recipes/sync_and_compile.py | mohad12211/skia | 042a53aa094715e031ebad4da072524ace316744 | [
"BSD-3-Clause"
] | 3 | 2019-03-07T17:01:23.000Z | 2021-07-03T22:01:36.000Z | infra/bots/recipes/sync_and_compile.py | mohad12211/skia | 042a53aa094715e031ebad4da072524ace316744 | [
"BSD-3-Clause"
] | 2 | 2021-09-10T03:50:52.000Z | 2021-09-10T07:10:19.000Z | infra/bots/recipes/sync_and_compile.py | mohad12211/skia | 042a53aa094715e031ebad4da072524ace316744 | [
"BSD-3-Clause"
] | 14 | 2015-07-17T17:23:53.000Z | 2020-07-06T21:06:57.000Z | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe module for Skia Swarming compile.
DEPS = [
'build',
'checkout',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
'run',
'vars',
]
def RunSteps(api):
  """Sync the source tree, build Skia, and copy outputs to the swarm dir.

  Builder-name substrings drive the behavior: 'NoDEPS' skips bot_update,
  'CommandBuffer' also checks out Chromium, 'Flutter' (optionally with
  'Android') checks out Flutter and redirects the output directory.
  """
  api.vars.setup()

  # Check out code.
  bot_update = True
  checkout_root = api.checkout.default_checkout_root
  checkout_chromium = False
  checkout_flutter = False
  flutter_android = False

  if 'NoDEPS' in api.properties['buildername']:
    # NoDEPS builders use a plain git checkout directly in start_dir.
    bot_update = False
    checkout_root = api.path['start_dir']
  if 'CommandBuffer' in api.vars.builder_name:
    checkout_chromium = True
  if 'Flutter' in api.vars.builder_name:
    # Flutter builds nest the checkout under a 'flutter' directory.
    checkout_root = checkout_root.join('flutter')
    checkout_flutter = True
    if 'Android' in api.vars.builder_name:
      flutter_android = True

  if bot_update:
    api.checkout.bot_update(
        checkout_root=checkout_root,
        checkout_chromium=checkout_chromium,
        checkout_flutter=checkout_flutter,
        flutter_android=flutter_android)
  else:
    api.checkout.git(checkout_root=checkout_root)

  api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir)

  out_dir = checkout_root.join(
      'skia', 'out', api.vars.builder_name, api.vars.configuration)
  if 'Flutter' in api.vars.builder_name:
    # Flutter's own build system writes to src/out/android_release.
    out_dir = checkout_root.join('src', 'out', 'android_release')

  try:
    api.build(checkout_root=checkout_root, out_dir=out_dir)

    # TODO(borenet): Move this out of the try/finally.
    dst = api.vars.swarming_out_dir
    api.build.copy_build_products(out_dir=out_dir, dst=dst)
    if 'SKQP' in api.vars.extra_tokens:
      wlist = checkout_root.join(
          'skia', 'infra','cts', 'whitelist_devices.json')
      api.file.copy('copy whitelist', wlist, dst)
  finally:
    if 'Win' in api.vars.builder_cfg.get('os', ''):
      # On Windows, kill lingering toolchain processes that would otherwise
      # hold file locks and break subsequent task cleanup.
      # NOTE(review): inside the inline program, p.name is accessed as an
      # attribute; in psutil >= 2.0 it is a method -- confirm the pinned
      # psutil 5.4.7 behavior before relying on this filter.
      api.python.inline(
          name='cleanup',
          program='''
# [VPYTHON:BEGIN]
# wheel: <
#   name: "infra/python/wheels/psutil/${vpython_platform}"
#   version: "version:5.4.7"
# >
# [VPYTHON:END]

import psutil
for p in psutil.process_iter():
  try:
    if p.name in ('mspdbsrv.exe', 'vctip.exe', 'cl.exe', 'link.exe'):
      p.kill()
  except psutil._error.AccessDenied:
    pass
''',
          infra_step=True,
          venv=True)

  api.run.check_failure()
TEST_BUILDERS = [
'Build-Debian9-Clang-universal-devrel-Android_SKQP',
'Build-Debian9-Clang-arm-Release-Flutter_Android',
'Build-Mac-Clang-x86_64-Debug-CommandBuffer',
'Build-Win10-Clang-x86_64-Release-NoDEPS',
]
def GenTests(api):
  """Yield one simulation test case per builder in TEST_BUILDERS."""
  for builder in TEST_BUILDERS:
    test = (
        api.test(builder) +
        api.properties(buildername=builder,
                       repository='https://skia.googlesource.com/skia.git',
                       revision='abc123',
                       path_config='kitchen',
                       swarm_out_dir='[SWARM_OUT_DIR]') +
        api.path.exists(
            api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
        )
    )
    if 'Win' in builder:
      # Windows builders need the simulated platform set for the cleanup step.
      test += api.platform('win', 64)
    yield test
| 27.131148 | 73 | 0.667976 |
DEPS = [
'build',
'checkout',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
'run',
'vars',
]
def RunSteps(api):
api.vars.setup()
bot_update = True
checkout_root = api.checkout.default_checkout_root
checkout_chromium = False
checkout_flutter = False
flutter_android = False
if 'NoDEPS' in api.properties['buildername']:
bot_update = False
checkout_root = api.path['start_dir']
if 'CommandBuffer' in api.vars.builder_name:
checkout_chromium = True
if 'Flutter' in api.vars.builder_name:
checkout_root = checkout_root.join('flutter')
checkout_flutter = True
if 'Android' in api.vars.builder_name:
flutter_android = True
if bot_update:
api.checkout.bot_update(
checkout_root=checkout_root,
checkout_chromium=checkout_chromium,
checkout_flutter=checkout_flutter,
flutter_android=flutter_android)
else:
api.checkout.git(checkout_root=checkout_root)
api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir)
out_dir = checkout_root.join(
'skia', 'out', api.vars.builder_name, api.vars.configuration)
if 'Flutter' in api.vars.builder_name:
out_dir = checkout_root.join('src', 'out', 'android_release')
try:
api.build(checkout_root=checkout_root, out_dir=out_dir)
dst = api.vars.swarming_out_dir
api.build.copy_build_products(out_dir=out_dir, dst=dst)
if 'SKQP' in api.vars.extra_tokens:
wlist = checkout_root.join(
'skia', 'infra','cts', 'whitelist_devices.json')
api.file.copy('copy whitelist', wlist, dst)
finally:
if 'Win' in api.vars.builder_cfg.get('os', ''):
api.python.inline(
name='cleanup',
program='''
# [VPYTHON:BEGIN]
# wheel: <
# name: "infra/python/wheels/psutil/${vpython_platform}"
# version: "version:5.4.7"
# >
# [VPYTHON:END]
import psutil
for p in psutil.process_iter():
try:
if p.name in ('mspdbsrv.exe', 'vctip.exe', 'cl.exe', 'link.exe'):
p.kill()
except psutil._error.AccessDenied:
pass
''',
infra_step=True,
venv=True)
api.run.check_failure()
TEST_BUILDERS = [
'Build-Debian9-Clang-universal-devrel-Android_SKQP',
'Build-Debian9-Clang-arm-Release-Flutter_Android',
'Build-Mac-Clang-x86_64-Debug-CommandBuffer',
'Build-Win10-Clang-x86_64-Release-NoDEPS',
]
def GenTests(api):
for builder in TEST_BUILDERS:
test = (
api.test(builder) +
api.properties(buildername=builder,
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
)
)
if 'Win' in builder:
test += api.platform('win', 64)
yield test
| true | true |
1c2efb9c64196093b7fed95dedf803456c33d37b | 522 | py | Python | src/flash/demo_lcd.py | gr4viton/esp_fun | 28f74ce50c16555705ecee97336ae28c7fd86704 | [
"MIT"
] | 1 | 2020-02-23T22:28:32.000Z | 2020-02-23T22:28:32.000Z | src/flash/demo_lcd.py | gr4viton/esp_fun | 28f74ce50c16555705ecee97336ae28c7fd86704 | [
"MIT"
] | null | null | null | src/flash/demo_lcd.py | gr4viton/esp_fun | 28f74ce50c16555705ecee97336ae28c7fd86704 | [
"MIT"
] | null | null | null | import time
import machine
from .esp8266_i2c_lcd import I2cLcd
# Software I2C bus on pins 5 and 4 -- presumably SCL=5, SDA=4 per the
# positional machine.I2C(-1, scl, sda) signature; TODO confirm wiring.
i2c = machine.I2C(-1, machine.Pin(5), machine.Pin(4), freq=400000)
# 16x2 character LCD at I2C address 63 (0x3F).
lcd = I2cLcd(i2c, 63, 2, 16)
lcd.clear()
ls = ['Eat', 'Sleep', 'Rave', 'Repeat']
i = 1
# Blink loop: odd ticks show the next word for 0.5 s with backlight on,
# even ticks blank the display for 0.2 s with backlight off.
while True:
    if i % 2:
        index = int(i / 2) % 4  # advance through ls once per on/off cycle
        txt = ls[index]
        lcd.putstr(txt)
        lcd.backlight_on()
        period = 0.5
    else:
        txt = ""
        lcd.clear()
        lcd.backlight_off()
        period = 0.2
    print(i, txt)
    time.sleep(period)
    i += 1
| 18.642857 | 66 | 0.538314 | import time
import machine
from .esp8266_i2c_lcd import I2cLcd
i2c = machine.I2C(-1, machine.Pin(5), machine.Pin(4), freq=400000)
lcd = I2cLcd(i2c, 63, 2, 16)
lcd.clear()
ls = ['Eat', 'Sleep', 'Rave', 'Repeat']
i = 1
while True:
if i % 2:
index = int(i / 2) % 4
txt = ls[index]
lcd.putstr(txt)
lcd.backlight_on()
period = 0.5
else:
txt = ""
lcd.clear()
lcd.backlight_off()
period = 0.2
print(i, txt)
time.sleep(period)
i += 1
| true | true |
1c2efcf844b577545dbab2aa4f2ce84e15c4ed03 | 1,754 | py | Python | leetcode/easy/logger-rate-limiter.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 8 | 2019-05-14T12:50:29.000Z | 2022-03-01T09:08:27.000Z | leetcode/easy/logger-rate-limiter.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 46 | 2019-03-24T20:59:29.000Z | 2019-04-09T16:28:43.000Z | leetcode/easy/logger-rate-limiter.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 1 | 2022-01-28T12:46:29.000Z | 2022-01-28T12:46:29.000Z | """
Design a logger system that receive stream of messages along with its timestamps,
each message should be printed if and only if it is not printed in the last 10 seconds.
Given a message and a timestamp (in seconds granularity),
return true if the message should be printed in the given timestamp, otherwise returns false.
It is possible that several messages arrive roughly at the same time.
Example:
Logger logger = new Logger();
// logging string "foo" at timestamp 1
logger.shouldPrintMessage(1, "foo"); returns true;
// logging string "bar" at timestamp 2
logger.shouldPrintMessage(2,"bar"); returns true;
// logging string "foo" at timestamp 3
logger.shouldPrintMessage(3,"foo"); returns false;
// logging string "bar" at timestamp 8
logger.shouldPrintMessage(8,"bar"); returns false;
// logging string "foo" at timestamp 10
logger.shouldPrintMessage(10,"foo"); returns false;
// logging string "foo" at timestamp 11
logger.shouldPrintMessage(11,"foo"); returns true;
"""
class Logger:
    """Rate limiter: each distinct message may print at most once per 10 s."""

    def __init__(self):
        # Maps each message to the timestamp at which it was last printed.
        self._last_printed = {}

    def shouldPrintMessage(self, timestamp, message):
        """Return True if `message` may be printed at `timestamp`.

        A message is allowed when it has never been printed before, or when
        its previous print happened 10 or more seconds ago. Allowing a
        message records `timestamp` as its new last-printed time.
        """
        previous = self._last_printed.get(message)
        if previous is not None and timestamp - previous < 10:
            return False
        self._last_printed[message] = timestamp
        return True
# Your Logger object will be instantiated and called as such:
# obj = Logger()
# param_1 = obj.shouldPrintMessage(timestamp,message)
| 29.728814 | 102 | 0.704105 |
class Logger:
def __init__(self):
self.store = {}
def shouldPrintMessage(self, timestamp, message):
last_log = self.store.get(message)
if last_log is None or timestamp - last_log >= 10:
self.store[message] = timestamp
return True
return False
| true | true |
1c2efd68e4e2211b4edeea61fbb1535efeada2c3 | 1,726 | py | Python | configs/cityscapes256.py | TimK1998/SemanticSynthesisForScoreBasedModels | b575ab646dd5a599d173b44a3585429082d0620d | [
"Apache-2.0"
] | null | null | null | configs/cityscapes256.py | TimK1998/SemanticSynthesisForScoreBasedModels | b575ab646dd5a599d173b44a3585429082d0620d | [
"Apache-2.0"
] | null | null | null | configs/cityscapes256.py | TimK1998/SemanticSynthesisForScoreBasedModels | b575ab646dd5a599d173b44a3585429082d0620d | [
"Apache-2.0"
] | null | null | null | import ml_collections
import torch
def get_default_configs():
    """Build the default ConfigDict for the cityscapes256 experiments.

    Returns an ml_collections.ConfigDict with `training`, `sampling`,
    `data`, `model` and `optim` sub-configs plus the torch device
    (CUDA when available, else CPU).
    """
    config = ml_collections.ConfigDict()

    # training
    config.training = training = ml_collections.ConfigDict()
    # Consistency fix: was `config.training.batch_size` while every other
    # field uses the `training` alias (same underlying object).
    training.batch_size = 8
    training.epochs = 2000
    # Frequencies measured in epochs
    training.checkpoint_save_freq = 50
    training.sampling_freq = 25
    # Frequencies measured in steps
    training.log_freq = 50
    training.eval_freq = 5000
    training.snapshot_sampling = True
    training.reduce_mean = False

    # sampling
    config.sampling = sampling = ml_collections.ConfigDict()
    sampling.n_steps_each = 1
    sampling.noise_removal = True
    sampling.probability_flow = False
    sampling.snr = 0.1
    sampling.batch_size = 1
    sampling.sampling_height = 256
    sampling.sampling_width = 512
    sampling.sem_seg_scale = 0.02

    # data
    config.data = data = ml_collections.ConfigDict()
    data.dataset = 'cityscapes256'
    data.image_size = 256
    data.random_flip = False
    data.n_channels = 3
    data.n_labels = 20
    data.crop_to_square = False

    # model
    config.model = model = ml_collections.ConfigDict()
    model.sigma_min = 0.01
    model.sigma_max = 338
    model.n_scales = 2000
    model.beta_min = 0.1
    model.beta_max = 20.
    model.dropout = 0.
    model.embedding_type = 'fourier'
    model.bilinear = True
    model.conditional = True

    # optimization
    config.optim = optim = ml_collections.ConfigDict()
    optim.weight_decay = 0
    optim.lr = 2e-4
    optim.beta1 = 0.9
    optim.eps = 1e-8
    optim.warmup = 5000
    optim.grad_clip = 1.
    optim.mixed_prec = True

    config.device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')

    return config
| 26.151515 | 96 | 0.680185 | import ml_collections
import torch
def get_default_configs():
config = ml_collections.ConfigDict()
config.training = training = ml_collections.ConfigDict()
config.training.batch_size = 8
training.epochs = 2000
training.checkpoint_save_freq = 50
training.sampling_freq = 25
training.log_freq = 50
training.eval_freq = 5000
training.snapshot_sampling = True
training.reduce_mean = False
config.sampling = sampling = ml_collections.ConfigDict()
sampling.n_steps_each = 1
sampling.noise_removal = True
sampling.probability_flow = False
sampling.snr = 0.1
sampling.batch_size = 1
sampling.sampling_height = 256
sampling.sampling_width = 512
sampling.sem_seg_scale = 0.02
config.data = data = ml_collections.ConfigDict()
data.dataset = 'cityscapes256'
data.image_size = 256
data.random_flip = False
data.n_channels = 3
data.n_labels = 20
data.crop_to_square = False
config.model = model = ml_collections.ConfigDict()
model.sigma_min = 0.01
model.sigma_max = 338
model.n_scales = 2000
model.beta_min = 0.1
model.beta_max = 20.
model.dropout = 0.
model.embedding_type = 'fourier'
model.bilinear = True
model.conditional = True
config.optim = optim = ml_collections.ConfigDict()
optim.weight_decay = 0
optim.lr = 2e-4
optim.beta1 = 0.9
optim.eps = 1e-8
optim.warmup = 5000
optim.grad_clip = 1.
optim.mixed_prec = True
config.device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
return config
| true | true |
1c2efdac056e4002d95567fded27a1c73c74ebec | 653 | py | Python | src/settlers/urls.py | dakrauth/django-settlers | 3754296ee979a95fbd5885964cc0c1bfe301a3a0 | [
"MIT"
] | null | null | null | src/settlers/urls.py | dakrauth/django-settlers | 3754296ee979a95fbd5885964cc0c1bfe301a3a0 | [
"MIT"
] | null | null | null | src/settlers/urls.py | dakrauth/django-settlers | 3754296ee979a95fbd5885964cc0c1bfe301a3a0 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = 'settlers'

# URL routes for the settlers app. Each route must have a unique `name`
# so reverse()/{% url %} lookups are unambiguous.
urlpatterns = [
    path('', views.ListingView.as_view(), name='listing'),
    path('api/<int:pk>/', views.api, name='api'),
    path('demo/', views.GameDemoView.as_view(), name='demo'),
    path('random/', views.RandomView.as_view(), name='random'),
    path('new/', views.NewView.as_view(), name='new'),
    # Fix: this route previously reused name='new', which made
    # reverse('settlers:new') ambiguous and left the seafarers route
    # unaddressable by name. Give it its own name.
    path('seafarers/', views.SeafarersView.as_view(), name='seafarers'),
    path('<int:pk>/', views.GameDetailView.as_view(), name='detail'),
    path('<int:pk>/data/', views.game_state, name='detail-data'),
    path('<int:pk>/email/', views.game_email, name='detail-email')
]
| 34.368421 | 69 | 0.646248 | from django.urls import path
from . import views
app_name = 'settlers'
urlpatterns = [
path('', views.ListingView.as_view(), name='listing'),
path('api/<int:pk>/', views.api, name='api'),
path('demo/', views.GameDemoView.as_view(), name='demo'),
path('random/', views.RandomView.as_view(), name='random'),
path('new/', views.NewView.as_view(), name='new'),
path('seafarers/', views.SeafarersView.as_view(), name='new'),
path('<int:pk>/', views.GameDetailView.as_view(), name='detail'),
path('<int:pk>/data/', views.game_state, name='detail-data'),
path('<int:pk>/email/', views.game_email, name='detail-email')
]
| true | true |
1c2efdf299dfd0aeabb652193373d0ccc20decac | 773 | py | Python | django_comments_xtd/urls.py | lyoniionly/django-comments-xtd | bc62a7359b9b460185e0fe4a7a1958bc9ef5599c | [
"BSD-2-Clause"
] | null | null | null | django_comments_xtd/urls.py | lyoniionly/django-comments-xtd | bc62a7359b9b460185e0fe4a7a1958bc9ef5599c | [
"BSD-2-Clause"
] | null | null | null | django_comments_xtd/urls.py | lyoniionly/django-comments-xtd | bc62a7359b9b460185e0fe4a7a1958bc9ef5599c | [
"BSD-2-Clause"
] | null | null | null | #-*- coding: utf-8 -*-
from django import VERSION as DJANGO_VERSION
if DJANGO_VERSION[0:2] < (1, 4):
from django.conf.urls.defaults import include, patterns, url
else:
from django.conf.urls import include, patterns, url
from django.views import generic
from django_comments_xtd import views, models
from django_comments_xtd.conf import settings
# Base comment URLs from django.contrib.comments plus this app's
# sent/confirm views (legacy Django patterns() API, pre-1.8 style).
urlpatterns = patterns('',
    url(r'', include("django.contrib.comments.urls")),
    url(r'^sent/$', views.sent, name='comments-xtd-sent'),
    url(r'^confirm/(?P<key>[^/]+)$', views.confirm, name='comments-xtd-confirm'),
)

# The reply view only makes sense when threaded comments are enabled.
if settings.COMMENTS_XTD_MAX_THREAD_LEVEL > 0:
    urlpatterns += patterns("",
        url(r'^reply/(?P<cid>[\d]+)$', views.reply, name='comments-xtd-reply'),
    )
| 30.92 | 83 | 0.668823 |
from django import VERSION as DJANGO_VERSION
if DJANGO_VERSION[0:2] < (1, 4):
from django.conf.urls.defaults import include, patterns, url
else:
from django.conf.urls import include, patterns, url
from django.views import generic
from django_comments_xtd import views, models
from django_comments_xtd.conf import settings
urlpatterns = patterns('',
url(r'', include("django.contrib.comments.urls")),
url(r'^sent/$', views.sent, name='comments-xtd-sent'),
url(r'^confirm/(?P<key>[^/]+)$', views.confirm, name='comments-xtd-confirm'),
)
if settings.COMMENTS_XTD_MAX_THREAD_LEVEL > 0:
urlpatterns += patterns("",
url(r'^reply/(?P<cid>[\d]+)$', views.reply, name='comments-xtd-reply'),
)
| true | true |
1c2efee9b1418b6bb88e9e69777335068e1783f5 | 1,012 | py | Python | model.py | qfuggett/people-manager-flask | 97511e14c26a90df5b3dc2117c504c7572532761 | [
"Unlicense"
] | null | null | null | model.py | qfuggett/people-manager-flask | 97511e14c26a90df5b3dc2117c504c7572532761 | [
"Unlicense"
] | null | null | null | model.py | qfuggett/people-manager-flask | 97511e14c26a90df5b3dc2117c504c7572532761 | [
"Unlicense"
] | null | null | null | from flask_sqlalchemy import SQLAlchemy
import datetime
db = SQLAlchemy()
class User(db.Model):
    """A user record: name, email, birthday and ZIP code."""

    __tablename__ = "users"

    user_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String, nullable=False)
    email = db.Column(db.String, nullable=False)
    # Defaults to 1923-10-16 when no birthday is supplied.
    birthday = db.Column(db.Date, nullable=False,
                         default=datetime.date(1923, 10, 16))
    zip_code = db.Column(db.Integer, nullable=False)

    def __repr__(self):
        # Fix: the original f-string opened with '<' but never closed the
        # repr with '>'.
        return f'<User user_id={self.user_id} name={self.name} email={self.email} birthday={self.birthday} zip_code={self.zip_code}>'
def connect_to_db(flask_app, db_uri='postgresql:///people-flask', echo=True):
    """Bind the module-level SQLAlchemy instance `db` to a Flask app.

    flask_app: the Flask application to configure.
    db_uri: SQLAlchemy database URI (defaults to a local Postgres database).
    echo: when True, SQLAlchemy echoes every emitted SQL statement.
    """
    flask_app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
    flask_app.config['SQLALCHEMY_ECHO'] = echo
    # Disable the modification-tracking feature (not used here).
    flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

    db.app = flask_app
    db.init_app(flask_app)
    # NOTE(review): init_app only registers the app; no connection is
    # actually opened at this point.
    print('Connected to the db!')
from server import app
| 28.914286 | 132 | 0.699605 | from flask_sqlalchemy import SQLAlchemy
import datetime
db = SQLAlchemy()
class User(db.Model):
__tablename__ = "users"
user_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String, nullable=False)
email = db.Column(db.String, nullable=False)
birthday = db.Column(db.Date, nullable=False,
default=datetime.date(1923, 10, 16))
zip_code = db.Column(db.Integer, nullable=False)
def __repr__(self):
return f'<User user_id={self.user_id} name={self.name} email={self.email} birthday={self.birthday} zip_code={self.zip_code}'
def connect_to_db(flask_app, db_uri='postgresql:///people-flask', echo=True):
flask_app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
flask_app.config['SQLALCHEMY_ECHO'] = echo
flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.app = flask_app
db.init_app(flask_app)
print('Connected to the db!')
if __name__ == "__main__":
from server import app
| true | true |
1c2eff32b3ed455e7d06f354d6a40def7e71a731 | 2,722 | py | Python | redash/handlers/favorites.py | frextrite/redash | 74beed80d20d858b51b5560e7984b20d5d2c874e | [
"BSD-2-Clause"
] | 8 | 2019-05-05T10:33:43.000Z | 2021-07-14T11:21:52.000Z | redash/handlers/favorites.py | frextrite/redash | 74beed80d20d858b51b5560e7984b20d5d2c874e | [
"BSD-2-Clause"
] | 10 | 2017-10-17T09:17:53.000Z | 2019-12-05T07:13:41.000Z | redash/handlers/favorites.py | tradingfoe/redash-clone | 94065b8dce0e27f6f40a7adc2b99e078b03115b3 | [
"BSD-2-Clause"
] | 15 | 2019-06-29T13:58:00.000Z | 2022-02-27T14:57:03.000Z | from flask import request
from sqlalchemy.exc import IntegrityError
from redash import models
from redash.handlers.base import (BaseResource,
get_object_or_404, paginate)
from redash.permissions import require_access, view_only
class QueryFavoriteResource(BaseResource):
    """REST resource for marking/unmarking a query as a user favorite."""

    def post(self, query_id):
        """Favorite the query for the current user (idempotent)."""
        query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
        require_access(query, self.current_user, view_only)
        fav = models.Favorite(org_id=self.current_org.id, object=query, user=self.current_user)
        models.db.session.add(fav)

        try:
            models.db.session.commit()
        except IntegrityError as e:
            # A duplicate favorite violates the unique constraint; treat it
            # as a no-op rather than an error.
            # NOTE(review): e.message exists only on Python 2; use str(e)
            # if this code runs under Python 3 -- confirm runtime.
            if 'unique_favorite' in e.message:
                models.db.session.rollback()
            else:
                raise e

        self.record_event({
            'action': 'favorite',
            'object_id': query.id,
            'object_type': 'query'
        })

    def delete(self, query_id):
        """Remove the current user's favorite mark from the query."""
        query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
        require_access(query, self.current_user, view_only)

        models.Favorite.query.filter(
            models.Favorite.object_id == query_id,
            models.Favorite.object_type == u'Query',
            models.Favorite.user == self.current_user,
        ).delete()
        models.db.session.commit()

        self.record_event({
            # Fix: deleting a favorite is an 'unfavorite' action; 'favorite'
            # here was a copy-paste slip (DashboardFavoriteResource.delete
            # correctly logs 'unfavorite').
            'action': 'unfavorite',
            'object_id': query.id,
            'object_type': 'query'
        })
class DashboardFavoriteResource(BaseResource):
    """REST resource for favoriting/unfavoriting a dashboard (by slug)."""

    def post(self, object_id):
        """Favorite the dashboard for the current user (idempotent)."""
        # NOTE(review): unlike QueryFavoriteResource.post, no
        # require_access() check is performed here -- confirm intent.
        dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, object_id, self.current_org)
        fav = models.Favorite(org_id=self.current_org.id, object=dashboard, user=self.current_user)
        models.db.session.add(fav)
        try:
            models.db.session.commit()
        except IntegrityError as e:
            # A duplicate favorite violates the unique constraint; treat it
            # as a no-op rather than an error.
            # NOTE(review): e.message exists only on Python 2 -- confirm runtime.
            if 'unique_favorite' in e.message:
                models.db.session.rollback()
            else:
                raise e
        self.record_event({
            'action': 'favorite',
            'object_id': dashboard.id,
            'object_type': 'dashboard'
        })

    def delete(self, object_id):
        """Remove the current user's favorite mark from the dashboard."""
        dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, object_id, self.current_org)
        models.Favorite.query.filter(models.Favorite.object == dashboard, models.Favorite.user == self.current_user).delete()
        models.db.session.commit()
        self.record_event({
            'action': 'unfavorite',
            'object_id': dashboard.id,
            'object_type': 'dashboard'
        })
| 34.455696 | 125 | 0.621234 | from flask import request
from sqlalchemy.exc import IntegrityError
from redash import models
from redash.handlers.base import (BaseResource,
get_object_or_404, paginate)
from redash.permissions import require_access, view_only
class QueryFavoriteResource(BaseResource):
def post(self, query_id):
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query, self.current_user, view_only)
fav = models.Favorite(org_id=self.current_org.id, object=query, user=self.current_user)
models.db.session.add(fav)
try:
models.db.session.commit()
except IntegrityError as e:
if 'unique_favorite' in e.message:
models.db.session.rollback()
else:
raise e
self.record_event({
'action': 'favorite',
'object_id': query.id,
'object_type': 'query'
})
def delete(self, query_id):
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query, self.current_user, view_only)
models.Favorite.query.filter(
models.Favorite.object_id == query_id,
models.Favorite.object_type == u'Query',
models.Favorite.user == self.current_user,
).delete()
models.db.session.commit()
self.record_event({
'action': 'favorite',
'object_id': query.id,
'object_type': 'query'
})
class DashboardFavoriteResource(BaseResource):
def post(self, object_id):
dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, object_id, self.current_org)
fav = models.Favorite(org_id=self.current_org.id, object=dashboard, user=self.current_user)
models.db.session.add(fav)
try:
models.db.session.commit()
except IntegrityError as e:
if 'unique_favorite' in e.message:
models.db.session.rollback()
else:
raise e
self.record_event({
'action': 'favorite',
'object_id': dashboard.id,
'object_type': 'dashboard'
})
def delete(self, object_id):
dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, object_id, self.current_org)
models.Favorite.query.filter(models.Favorite.object == dashboard, models.Favorite.user == self.current_user).delete()
models.db.session.commit()
self.record_event({
'action': 'unfavorite',
'object_id': dashboard.id,
'object_type': 'dashboard'
})
| true | true |
1c2eff74b8846d1e08285bd192df1f547ecba4fe | 5,323 | py | Python | userbot/modules/misc.py | ronaldyganteng/NightCore | 81c2172996248bb8b4c016222a418e405865e989 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/misc.py | ronaldyganteng/NightCore | 81c2172996248bb8b4c016222a418e405865e989 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/misc.py | ronaldyganteng/NightCore | 81c2172996248bb8b4c016222a418e405865e989 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
# You can find misc modules, which dont fit in anything xD
""" Userbot module for other small commands. """
import io
import sys
from os import execl
from random import randint
from time import sleep
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP, bot
from userbot.events import register
from userbot.utils import time_formatter
@register(outgoing=True, pattern=r"^\.random")
async def randomise(items):
    """ For .random command, get a random item from the list of items. """
    itemo = (items.text[8:]).split()
    if len(itemo) < 2:
        return await items.edit(
            "`2 or more items are required! Check .help random for more info.`"
        )
    # Fix: randint(1, len-1) could never pick the first item. Valid indices
    # for a list of N items are 0..N-1 (randint bounds are inclusive).
    index = randint(0, len(itemo) - 1)
    await items.edit(
        "**Query: **\n`" + items.text[8:] + "`\n**Output: **\n`" + itemo[index] + "`"
    )
@register(outgoing=True, pattern=r"^\.sleep ([0-9]+)$")
async def sleepybot(time):
    """ For .sleep command, let the userbot snooze for a few seconds. """
    import asyncio  # local import: keeps this fix self-contained

    counter = int(time.pattern_match.group(1))
    await time.edit("`I am sulking and snoozing...`")
    if BOTLOG:
        str_counter = time_formatter(counter)
        await time.client.send_message(
            BOTLOG_CHATID,
            f"You put the bot to sleep for {str_counter}.",
        )
    # Fix: the blocking time.sleep() froze the entire event loop (no other
    # handler could run while sleeping); asyncio.sleep() yields control.
    await asyncio.sleep(counter)
    await time.edit("`OK, I'm awake now.`")
@register(outgoing=True, pattern=r"^\.shutdown$")
async def killthebot(event):
    """ For .shutdown command, shut the bot down."""
    await event.edit("`Goodbye...`")
    if BOTLOG:
        # Record the shutdown in the log channel before disconnecting.
        await event.client.send_message(BOTLOG_CHATID, "#SHUTDOWN \n" "Bot shut down")
    # Disconnecting the client ends the bot's event loop.
    await bot.disconnect()
@register(outgoing=True, pattern=r"^\.restart$")
async def killdabot(event):
    """ For .restart command, restart the bot by re-exec'ing the process. """
    await event.edit("`*i would be back in a moment*`")
    if BOTLOG:
        await event.client.send_message(BOTLOG_CHATID, "#RESTART \n" "Bot Restarted")
    await bot.disconnect()
    # Spin a new instance of bot
    # os.execl replaces the current process image in place, so on success
    # control never returns from this call.
    execl(sys.executable, sys.executable, *sys.argv)
    # Shut the existing one down
    # Safety net: only reached if execl itself fails.
    exit()
@register(outgoing=True, pattern=r"^\.readme$")
async def reedme(e):
    """ For .readme command, reply with links to the project docs and guides. """
    await e.edit(
        "Here's something for you to read:\n"
        "\n[NightCore's README.md file](https://github.com/IrhamFadzillah/NightCore/blob/master/README.md)"
        "\n[Setup Guide - Basic](https://telegra.ph/How-to-host-a-Telegram-Userbot-11-02)"
        "\n[Setup Guide - Google Drive](https://telegra.ph/How-To-Setup-Google-Drive-04-03)"
        "\n[Setup Guide - LastFM Module](https://telegra.ph/How-to-set-up-LastFM-module-for-Paperplane-userbot-11-02)"
        "\n[Setup Guide - How to get Deezer ARL TOKEN](https://notabug.org/RemixDevs/DeezloaderRemix/wiki/Login+via+userToken)"
        "\n[Special - Note](https://telegra.ph/Special-Note-11-02)"
    )
# Copyright (c) Gegham Zakaryan | 2019
@register(outgoing=True, pattern=r"^\.repeat (.*)")
async def repeat(rep):
    """ For .repeat command, echo the given text a number of times. """
    count, text = rep.pattern_match.group(1).split(" ", 1)
    # One copy of the text per line. At least one line is always produced,
    # matching the original loop, which seeded the buffer with one copy
    # before iterating count - 1 more times.
    await rep.edit((text + "\n") * max(int(count), 1))
@register(outgoing=True, pattern=r"^\.repo$")
async def repo_is_here(wannasee):
    """ For .repo command, just returns the repo URL. """
    # Markdown links: repository, owner contact, and the support group.
    await wannasee.edit(
        "My Repo: [NightCore](https://github.com/IrhamFadzillah/NightCore)\nOwner: [Irham](https://t.me/StayWithMe69)\nSupport: [Group](https://t.me/NightCoreUserbot)"
    )
@register(outgoing=True, pattern=r"^\.raw$")
async def raw(event):
    """ For .raw command, dump a message's raw (stringified) data to the log chat.

    Uses the replied-to message when there is one, otherwise the .raw
    message itself; the dump is sent as a text-file attachment to the
    BOTLOG chat, replying to the inspected message.
    """
    the_real_message = None
    reply_to_id = None
    if event.reply_to_msg_id:
        # Inspect the message being replied to.
        previous_message = await event.get_reply_message()
        the_real_message = previous_message.stringify()
        reply_to_id = event.reply_to_msg_id
    else:
        # No reply: inspect the command message itself.
        the_real_message = event.stringify()
        reply_to_id = event.message.id

    # Wrap the dump in an in-memory file so it can be sent as a document.
    with io.BytesIO(str.encode(the_real_message)) as out_file:
        out_file.name = "raw_message_data.txt"
        await event.edit("`Check the userbot log for the decoded message data !!`")
        await event.client.send_file(
            BOTLOG_CHATID,
            out_file,
            force_document=True,
            allow_cache=False,
            reply_to=reply_to_id,
            caption="`Here's the decoded message data !!`",
        )
# Register .help entries for every command defined in this module.
CMD_HELP.update(
    {
        "random": ">`.random <item1> <item2> ... <itemN>`"
        "\nUsage: Get a random item from the list of items.",
        "sleep": ">`.sleep <seconds>`" "\nUsage: Let yours snooze for a few seconds.",
        "shutdown": ">`.shutdown`" "\nUsage: Shutdown bot",
        "repo": ">`.repo`" "\nUsage: Github Repo of this bot",
        "readme": ">`.readme`"
        "\nUsage: Provide links to setup the userbot and it's modules.",
        "repeat": ">`.repeat <no> <text>`"
        "\nUsage: Repeats the text for a number of times. Don't confuse this with spam tho.",
        "restart": ">`.restart`" "\nUsage: Restarts the bot !!",
        "raw": ">`.raw`"
        "\nUsage: Get detailed JSON-like formatted data about replied message.",
    }
)
| 36.458904 | 167 | 0.642871 |
import io
import sys
from os import execl
from random import randint
from time import sleep
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP, bot
from userbot.events import register
from userbot.utils import time_formatter
@register(outgoing=True, pattern=r"^\.random")
async def randomise(items):
    """Handle ``.random``: pick one item uniformly from the given list.

    ``items.text[8:]`` strips the leading ``.random `` (8 chars), so every
    element of ``itemo`` is a candidate item.
    """
    itemo = (items.text[8:]).split()
    if len(itemo) < 2:
        return await items.edit(
            "`2 or more items are required! Check .help random for more info.`"
        )
    # Bug fix: the lower bound was 1, so the first item could never be
    # selected. Index 0 is a real candidate (the command prefix is already
    # stripped above), so draw from the full range.
    index = randint(0, len(itemo) - 1)
    await items.edit(
        "**Query: **\n`" + items.text[8:] + "`\n**Output: **\n`" + itemo[index] + "`"
    )
@register(outgoing=True, pattern=r"^\.sleep ([0-9]+)$")
async def sleepybot(time):
    """Handle ``.sleep <seconds>``: pause this handler for the given time.

    Optionally logs the sleep duration to the bot log chat first.
    """
    # Local import keeps the fix self-contained (asyncio is stdlib).
    import asyncio
    counter = int(time.pattern_match.group(1))
    await time.edit("`I am sulking and snoozing...`")
    if BOTLOG:
        str_counter = time_formatter(counter)
        await time.client.send_message(
            BOTLOG_CHATID,
            f"You put the bot to sleep for {str_counter}.",
        )
    # Bug fix: the blocking time.sleep() froze the entire event loop,
    # stalling every other handler for the whole duration. asyncio.sleep
    # yields control so the bot stays responsive while waiting.
    await asyncio.sleep(counter)
    await time.edit("`OK, I'm awake now.`")
@register(outgoing=True, pattern=r"^\.shutdown$")
async def killthebot(event):
    """Handle ``.shutdown``: announce, optionally log, then disconnect the client."""
    await event.edit("`Goodbye...`")
    if BOTLOG:
        shutdown_note = "#SHUTDOWN \nBot shut down"
        await event.client.send_message(BOTLOG_CHATID, shutdown_note)
    await bot.disconnect()
@register(outgoing=True, pattern=r"^\.restart$")
async def killdabot(event):
    """Handle ``.restart``: disconnect, then re-exec the current process."""
    await event.edit("`*i would be back in a moment*`")
    if BOTLOG:
        restart_note = "#RESTART \nBot Restarted"
        await event.client.send_message(BOTLOG_CHATID, restart_note)
    await bot.disconnect()
    # Replace this process with a fresh interpreter running the same argv.
    execl(sys.executable, sys.executable, *sys.argv)
    # Unreachable when execl succeeds; kept as a safety net if it fails.
    exit()
@register(outgoing=True, pattern=r"^\.readme$")
async def reedme(e):
    """Handle ``.readme``: reply with README and setup-guide links."""
    reading_list = (
        "Here's something for you to read:\n"
        "\n[NightCore's README.md file](https://github.com/IrhamFadzillah/NightCore/blob/master/README.md)"
        "\n[Setup Guide - Basic](https://telegra.ph/How-to-host-a-Telegram-Userbot-11-02)"
        "\n[Setup Guide - Google Drive](https://telegra.ph/How-To-Setup-Google-Drive-04-03)"
        "\n[Setup Guide - LastFM Module](https://telegra.ph/How-to-set-up-LastFM-module-for-Paperplane-userbot-11-02)"
        "\n[Setup Guide - How to get Deezer ARL TOKEN](https://notabug.org/RemixDevs/DeezloaderRemix/wiki/Login+via+userToken)"
        "\n[Special - Note](https://telegra.ph/Special-Note-11-02)"
    )
    await e.edit(reading_list)
# Copyright (c) Gegham Zakaryan | 2019
@register(outgoing=True, pattern=r"^\.repeat (.*)")
async def repeat(rep):
    """Handle ``.repeat <count> <text>``: reply with ``text`` repeated ``count`` times.

    Counts below 1 still produce a single copy, matching the original
    loop behaviour (the old loop always emitted at least one line).
    """
    cnt, txt = rep.pattern_match.group(1).split(" ", 1)
    # Build the message in one multiplication instead of the old quadratic
    # string += loop; max(..., 1) preserves the "at least one copy" edge case.
    reply_text = (txt + "\n") * max(int(cnt), 1)
    await rep.edit(reply_text)
@register(outgoing=True, pattern=r"^\.repo$")
async def repo_is_here(wannasee):
await wannasee.edit(
"My Repo: [NightCore](https://github.com/IrhamFadzillah/NightCore)\nOwner: [Irham](https://t.me/StayWithMe69)\nSupport: [Group](https://t.me/NightCoreUserbot)"
)
@register(outgoing=True, pattern=r"^\.raw$")
async def raw(event):
the_real_message = None
reply_to_id = None
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
the_real_message = previous_message.stringify()
reply_to_id = event.reply_to_msg_id
else:
the_real_message = event.stringify()
reply_to_id = event.message.id
with io.BytesIO(str.encode(the_real_message)) as out_file:
out_file.name = "raw_message_data.txt"
await event.edit("`Check the userbot log for the decoded message data !!`")
await event.client.send_file(
BOTLOG_CHATID,
out_file,
force_document=True,
allow_cache=False,
reply_to=reply_to_id,
caption="`Here's the decoded message data !!`",
)
CMD_HELP.update(
{
"random": ">`.random <item1> <item2> ... <itemN>`"
"\nUsage: Get a random item from the list of items.",
"sleep": ">`.sleep <seconds>`" "\nUsage: Let yours snooze for a few seconds.",
"shutdown": ">`.shutdown`" "\nUsage: Shutdown bot",
"repo": ">`.repo`" "\nUsage: Github Repo of this bot",
"readme": ">`.readme`"
"\nUsage: Provide links to setup the userbot and it's modules.",
"repeat": ">`.repeat <no> <text>`"
"\nUsage: Repeats the text for a number of times. Don't confuse this with spam tho.",
"restart": ">`.restart`" "\nUsage: Restarts the bot !!",
"raw": ">`.raw`"
"\nUsage: Get detailed JSON-like formatted data about replied message.",
}
)
| true | true |
1c2effdc4ff98d786d5290890c85cc3aad50030c | 5,913 | py | Python | Visualizaion/hands_association.py | DanLuoNEU/CLASP2 | 262fb1f151c14bfe3b1c452cdf65187d8caa10bd | [
"MIT"
] | 1 | 2019-11-17T21:38:54.000Z | 2019-11-17T21:38:54.000Z | Visualizaion/hands_association.py | DanLuoNEU/CLASP2 | 262fb1f151c14bfe3b1c452cdf65187d8caa10bd | [
"MIT"
] | null | null | null | Visualizaion/hands_association.py | DanLuoNEU/CLASP2 | 262fb1f151c14bfe3b1c452cdf65187d8caa10bd | [
"MIT"
] | null | null | null | # Build up association between hands and persons ID,
# depending on IOU between skeleton and person bounding boxes
# Intersection part reference: https://github.com/amdegroot/ssd.pytorch/blob/master/layers/box_utils.py#L48
# Points to improve:
# 1. same hands for two persons using IOU, they should be unique
# Dan, 09/29/2019
########## Import ##########
import os
import cv2
import json
import pickle
import numpy as np
import scipy.io as sio
from progress.bar import Bar
from numpy.core.records import fromarrays
########## Configuration ##########
# Input: per-frame person tracks, one "frame,id,x1,y1,x2,y2" line each.
file_people = 'CLASP-DATA-102419/cam09exp2_logs_fullv1.txt'
# Input: pickled per-frame joint estimations (dict keyed by frame number).
file_joints = 'data/joints_all_cam09exp2_102419.pkl'
# Output: pickled persons dict with associated hand joints filled in.
file_save = 'data/hands_id_cam09exp2_102419.pkl'
# Load Person Detection result{'bbox':,
# 'id':,
# 'bins':
# }
# persons_joints: dictionary
# {- keys: frame
# - values: dictionary {
# ['image_name'],string of /path/to/image
# ['people'],list of joints
# }
# }
def jaccard(box_a, boxes_b):
    """Compute the jaccard overlap (IoU) of one box against a list of boxes.

    The jaccard overlap is simply the intersection over union of two boxes.
    Here we operate on a person box and skeleton boxes.
    E.g.:
        A n B / A U B = A n B / (area(A) + area(B) - A n B)
    Args:
        box_a: (list) Person bounding box [x1, y1, x2, y2], Shape: [4,]
        boxes_b: (list) Skeleton bounding boxes, Shape: [num_skeletons, 4]
    Return:
        (np.ndarray) IoU of box_a with each box of boxes_b,
        Shape: [num_skeletons,].  Degenerate pairs (zero union area)
        get an IoU of 0 instead of NaN.
    """
    b_a = np.asarray(box_a, dtype=np.float64)[np.newaxis, :]  # (1, 4)
    b_b = np.asarray(boxes_b, dtype=np.float64)               # (n, 4)
    # Intersection rectangle via broadcasting (the old expand_dims/repeat
    # tiling was redundant: NumPy broadcasts (1, 2) against (n, 2) directly).
    min_xy = np.maximum(b_a[:, :2], b_b[:, :2])
    max_xy = np.minimum(b_a[:, 2:], b_b[:, 2:])
    # Clamp negative extents to 0 so disjoint boxes get zero intersection.
    wh = np.clip(max_xy - min_xy, 0, None)
    inter = wh[:, 0] * wh[:, 1]
    area_a = (b_a[:, 2] - b_a[:, 0]) * (b_a[:, 3] - b_a[:, 1])  # (1,)
    area_b = (b_b[:, 2] - b_b[:, 0]) * (b_b[:, 3] - b_b[:, 1])  # (n,)
    union = area_a + area_b - inter
    # Robustness: avoid 0/0 -> NaN for zero-area, non-overlapping pairs.
    iou = np.zeros_like(union)
    np.divide(inter, union, out=iou, where=union > 0)
    return iou
def main():
    """Associate detected hand joints with tracked person IDs per frame.

    Reads person tracks from `file_people` and pickled joint estimations
    from `file_joints`, matches each person box to the skeleton whose
    bounding box has the largest IoU, attaches that skeleton's two hand
    joints to the person, and pickles the result to `file_save`.
    """
    # Load persons data: one "frame,id,x1,y1,x2,y2" line per detection.
    with open(file_people, 'r') as f:
        # frame, id, x1,y1,x2,y2
        lines = f.readlines()
    # Parallel per-frame lists: entry i of each list describes person i.
    persons = {'id':{}, 'bbox':{},'bins':{},'hands':{}}
    for line in lines:
        splitted = line.split(',')
        frame_num = int(splitted[0])
        pid = splitted[1]
        x1 = int(splitted[2])
        y1 = int(splitted[3])
        x2 = int(splitted[4])
        y2 = int(splitted[5])
        if(frame_num not in persons['id'].keys()):
            persons['id'][frame_num] = []
            persons['bbox'][frame_num] = []
            persons['hands'][frame_num] = []
            persons['bins'][frame_num] = []
        persons['id'][frame_num].append(pid)
        persons['bbox'][frame_num].append([x1,y1,x2,y2])
        persons['hands'][frame_num].append([])
        # NOTE(review): 'bins' is initialized but never filled in this script.
        persons['bins'][frame_num].append([])
    # Load joints estimation results, .pkl file.
    # persons_joints: dict keyed by frame, each value holding
    # ['image_name'] and ['people'] (list of skeleton joint arrays).
    with open( file_joints, 'rb') as f:
        persons_joints = pickle.load(f)
    # For every frame, for every person bbox, for every skeleton:
    # compute IOU between person bbox and skeleton bbox,
    # then attach the best-matching skeleton's hand joints to the person.
    # `Bar` comes from the third-party `progress` package (module-level import).
    bar = Bar('Processing hands association:', max=len(persons['id']))
    for frame_id in persons['id'].keys():
        # Build a bounding box for each skeleton.
        # REMEMBER to filter the (0,0) joints (missing detections).
        bboxes_skeleton = []
        if len(persons_joints[frame_id]['people']) == 0:
            bar.next()
            continue
        for skeleton in persons_joints[frame_id]['people']:
            ## First pass seeds min/max from a valid joint so that the
            ## sentinel (0,0) point never becomes the top-left corner.
            # NOTE(review): if a skeleton has no valid joint, x_min/y_min/
            # x_max/y_max are either undefined (first skeleton) or stale
            # from the previous skeleton — verify upstream guarantees at
            # least one nonzero joint per skeleton.
            for joint in skeleton:
                if joint[0] != 0 and joint[1] != 0:
                    x_min, x_max = joint[0],joint[0]
                    y_min, y_max = joint[1],joint[1]
            # Second pass expands the box over all valid joints.
            for joint in skeleton:
                if joint[0] != 0 and joint[1] != 0:
                    if joint[0] < x_min: x_min = joint[0]
                    elif joint[0] > x_max: x_max = joint[0]
                    if joint[1] < y_min: y_min = joint[1]
                    elif joint[1] > y_max: y_max = joint[1]
            # Index into bboxes_skeleton must stay aligned with
            # persons_joints[frame_id]['people'] (used below via argmax).
            bboxes_skeleton.append([int(x_min), int(y_min), int(x_max), int(y_max)])
        # Find the skeleton with the largest IOU with each person box.
        for ind in range(len(persons['bbox'][frame_id])):
            bbox = persons['bbox'][frame_id][ind]
            # compute IOU against every skeleton box
            IOUs = jaccard(bbox, bboxes_skeleton)
            skeleton_id = np.argmax(IOUs)
            if IOUs[skeleton_id] != 0:
                # Rows 4 and 7 are presumably the two wrist keypoints
                # (OpenPose-style ordering) — TODO confirm.
                persons['hands'][frame_id][ind]=(persons_joints[frame_id]['people'][skeleton_id][[4,7]]).astype(int)
        bar.next()
    bar.finish()
    # Save the augmented persons dict with pickle.
    with open(file_save,'wb') as f:
        pickle.dump(persons, f)
    print("Well Done!")
main() | 39.684564 | 116 | 0.562659 |
nd_dims(b_b[:,:2], 0),num_a,axis=0)
max_xy_a = np.repeat(np.expand_dims(b_a[:,2:], 1),num_b,axis=1)
max_xy_b = np.repeat(np.expand_dims(b_b[:,2:], 0),num_a,axis=0)
min_xy = np.maximum(min_xy_a, min_xy_b)
max_xy = np.minimum(max_xy_a, max_xy_b)
inter_xy = np.clip((max_xy - min_xy), 0, np.inf)
inter = inter_xy[:,:,0] * inter_xy[:,:,1]
area_a = np.repeat(np.expand_dims(((b_a[:, 2]-b_a[:, 0]) * (b_a[:, 3]-b_a[:, 1])), 1),num_b,axis=1)
area_b = np.repeat(np.expand_dims(((b_b[:, 2]-b_b[:, 0]) * (b_b[:, 3]-b_b[:, 1])), 0),num_a,axis=0)
union = area_a + area_b - inter
return (inter/union)[0,:]
def main():
with open(file_people, 'r') as f:
lines = f.readlines()
persons = {'id':{}, 'bbox':{},'bins':{},'hands':{}}
for line in lines:
splitted = line.split(',')
frame_num = int(splitted[0])
pid = splitted[1]
x1 = int(splitted[2])
y1 = int(splitted[3])
x2 = int(splitted[4])
y2 = int(splitted[5])
if(frame_num not in persons['id'].keys()):
persons['id'][frame_num] = []
persons['bbox'][frame_num] = []
persons['hands'][frame_num] = []
persons['bins'][frame_num] = []
persons['id'][frame_num].append(pid)
persons['bbox'][frame_num].append([x1,y1,x2,y2])
persons['hands'][frame_num].append([])
persons['bins'][frame_num].append([])
'rb') as f:
persons_joints = pickle.load(f)
bar = Bar('Processing hands association:', max=len(persons['id']))
for frame_id in persons['id'].keys():
bboxes_skeleton = []
if len(persons_joints[frame_id]['people']) == 0:
bar.next()
continue
for skeleton in persons_joints[frame_id]['people']:
f joint[0] != 0 and joint[1] != 0:
x_min, x_max = joint[0],joint[0]
y_min, y_max = joint[1],joint[1]
for joint in skeleton:
if joint[0] != 0 and joint[1] != 0:
if joint[0] < x_min: x_min = joint[0]
elif joint[0] > x_max: x_max = joint[0]
if joint[1] < y_min: y_min = joint[1]
elif joint[1] > y_max: y_max = joint[1]
bboxes_skeleton.append([int(x_min), int(y_min), int(x_max), int(y_max)])
for ind in range(len(persons['bbox'][frame_id])):
bbox = persons['bbox'][frame_id][ind]
IOUs = jaccard(bbox, bboxes_skeleton)
skeleton_id = np.argmax(IOUs)
if IOUs[skeleton_id] != 0:
persons['hands'][frame_id][ind]=(persons_joints[frame_id]['people'][skeleton_id][[4,7]]).astype(int)
bar.next()
bar.finish()
with open(file_save,'wb') as f:
pickle.dump(persons, f)
ne!")
if __name__ == "__main__":
main() | true | true |
1c2f00d423ac28a4cc888bde6820527261a6f12c | 541 | py | Python | app/core/migrations/0005_auto_20210404_0300.py | matelanbo/recipe-app-api | c050b0a0dc430484c6ca91588048eae0ab5c647b | [
"MIT"
] | null | null | null | app/core/migrations/0005_auto_20210404_0300.py | matelanbo/recipe-app-api | c050b0a0dc430484c6ca91588048eae0ab5c647b | [
"MIT"
] | null | null | null | app/core/migrations/0005_auto_20210404_0300.py | matelanbo/recipe-app-api | c050b0a0dc430484c6ca91588048eae0ab5c647b | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2021-04-04 03:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_recipe'),
]
operations = [
migrations.RenameField(
model_name='recipe',
old_name='time_miniutes',
new_name='time_minutes',
),
migrations.AlterField(
model_name='recipe',
name='ingredients',
field=models.ManyToManyField(to='core.Ingredient'),
),
]
| 22.541667 | 63 | 0.573013 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_recipe'),
]
operations = [
migrations.RenameField(
model_name='recipe',
old_name='time_miniutes',
new_name='time_minutes',
),
migrations.AlterField(
model_name='recipe',
name='ingredients',
field=models.ManyToManyField(to='core.Ingredient'),
),
]
| true | true |
1c2f01fcf53219c692092a28ca321bd5e3ab2dd1 | 5,227 | py | Python | webStorm-APICloud/python_tools/Lib/sched.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Lib/sched.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Lib/sched.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | """A generally useful event scheduler class.
Each instance of this class manages its own queue.
No multi-threading is implied; you are supposed to hack that
yourself, or use a single instance per application.
Each instance is parametrized with two functions, one that is
supposed to return the current time, one that is supposed to
implement a delay. You can implement real-time scheduling by
substituting time and sleep from built-in module time, or you can
implement simulated time by writing your own functions. This can
also be used to integrate scheduling with STDWIN events; the delay
function is allowed to modify the queue. Time can be expressed as
integers or floating point numbers, as long as it is consistent.
Events are specified by tuples (time, priority, action, argument).
As in UNIX, lower priority numbers mean higher priority; in this
way the queue can be maintained as a priority queue. Execution of the
event means calling the action function, passing it the argument
sequence in "argument" (remember that in Python, multiple function
arguments are be packed in a sequence).
The action function may be an instance method so it
has another way to reference private data (besides global variables).
"""
# XXX The timefunc and delayfunc should have been defined as methods
# XXX so you can define new kinds of schedulers using subclassing
# XXX instead of having to define a module or class just to hold
# XXX the global state of your particular time and delay functions.
import heapq
from collections import namedtuple
__all__ = ["scheduler"]
# Event records are plain named tuples; the heap orders them by time,
# then priority (lower number = higher priority).
# NOTE: if two events share time AND priority, heap comparison falls
# through to the action field, which may not be orderable.
Event = namedtuple('Event', 'time, priority, action, argument')


class scheduler:

    def __init__(self, timefunc, delayfunc):
        """Initialize a new instance, passing the time and delay
        functions"""
        self._queue = []
        self.timefunc = timefunc
        self.delayfunc = delayfunc

    def enterabs(self, time, priority, action, argument):
        """Enter a new event in the queue at an absolute time.

        Returns an ID for the event which can be used to remove it,
        if necessary.
        """
        event = Event(time, priority, action, argument)
        heapq.heappush(self._queue, event)
        return event  # The ID

    def enter(self, delay, priority, action, argument):
        """A variant that specifies the time as a relative time.

        This is actually the more commonly used interface.
        """
        time = self.timefunc() + delay
        return self.enterabs(time, priority, action, argument)

    def cancel(self, event):
        """Remove an event from the queue.

        This must be presented the ID as returned by enter().
        If the event is not in the queue, this raises ValueError.
        """
        # Doc fix: list.remove raises ValueError (the old docstring
        # incorrectly claimed RuntimeError).
        self._queue.remove(event)
        heapq.heapify(self._queue)

    def empty(self):
        """Check whether the queue is empty."""
        return not self._queue

    def run(self):
        """Execute events until the queue is empty.

        When there is a positive delay until the first event, the
        delay function is called and the event is left in the queue;
        otherwise, the event is removed from the queue and executed
        (its action function is called, passing it the argument).  If
        the delay function returns prematurely, it is simply
        restarted.

        It is legal for both the delay function and the action
        function to modify the queue or to raise an exception;
        exceptions are not caught but the scheduler's state remains
        well-defined so run() may be called again.

        A questionable hack is added to allow other threads to run:
        just after an event is executed, a delay of 0 is executed, to
        avoid monopolizing the CPU when other threads are also
        runnable.
        """
        # localize variable access to minimize overhead
        # and to improve thread safety
        q = self._queue
        delayfunc = self.delayfunc
        timefunc = self.timefunc
        pop = heapq.heappop
        while q:
            time, priority, action, argument = checked_event = q[0]
            now = timefunc()
            if now < time:
                delayfunc(time - now)
            else:
                event = pop(q)
                # Verify that the event was not removed or altered
                # by another thread after we last looked at q[0].
                if event is checked_event:
                    action(*argument)
                    delayfunc(0)   # Let other threads run
                else:
                    heapq.heappush(q, event)

    @property
    def queue(self):
        """An ordered list of upcoming events.

        Events are named tuples with fields for:
            time, priority, action, arguments
        """
        # Use heapq to sort the queue rather than using 'sorted(self._queue)'.
        # With heapq, two events scheduled at the same time will show in
        # the actual order they would be retrieved.
        # Bug fix: the old `map(heapq.heappop, [events]*len(events))`
        # returns a lazy iterator on Python 3, contradicting the
        # documented "ordered list" contract; materialize it explicitly.
        events = self._queue[:]
        return [heapq.heappop(events) for _ in range(len(events))]
| 38.718519 | 79 | 0.647599 |
import heapq
from collections import namedtuple
__all__ = ["scheduler"]
Event = namedtuple('Event', 'time, priority, action, argument')
class scheduler:
def __init__(self, timefunc, delayfunc):
self._queue = []
self.timefunc = timefunc
self.delayfunc = delayfunc
def enterabs(self, time, priority, action, argument):
event = Event(time, priority, action, argument)
heapq.heappush(self._queue, event)
return event
def enter(self, delay, priority, action, argument):
time = self.timefunc() + delay
return self.enterabs(time, priority, action, argument)
def cancel(self, event):
self._queue.remove(event)
heapq.heapify(self._queue)
def empty(self):
return not self._queue
def run(self):
q = self._queue
delayfunc = self.delayfunc
timefunc = self.timefunc
pop = heapq.heappop
while q:
time, priority, action, argument = checked_event = q[0]
now = timefunc()
if now < time:
delayfunc(time - now)
else:
event = pop(q)
if event is checked_event:
action(*argument)
delayfunc(0)
else:
heapq.heappush(q, event)
@property
def queue(self):
events = self._queue[:]
return map(heapq.heappop, [events]*len(events))
| true | true |
1c2f02aed167f8a57906e18c482a7a0d68e37add | 24,267 | py | Python | torch/ao/quantization/_dbr/auto_trace.py | xiaohanhuang/pytorch | a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | [
"Intel"
] | null | null | null | torch/ao/quantization/_dbr/auto_trace.py | xiaohanhuang/pytorch | a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | [
"Intel"
] | null | null | null | torch/ao/quantization/_dbr/auto_trace.py | xiaohanhuang/pytorch | a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | [
"Intel"
] | 1 | 2021-12-07T12:36:25.000Z | 2021-12-07T12:36:25.000Z | import logging
from typing import Tuple, Any, List, Dict
import torch
from torch.fx.node import map_aggregate
from .quantization_state import (
AutoQuantizationState,
)
from .utils import (
trace_with_inputs,
is_leaf,
HookType,
get_torch_function_hook_type,
get_module_hook_type,
)
from .model_utils import (
pack_weights_for_functionals,
attach_scale_zp_values_to_model,
attach_op_convert_info_to_model,
)
from . import auto_trace_rewriter
# Module-level logger used by the tracing hooks below.
logger = logging.getLogger('auto_trace')
# NOTE(review): calling basicConfig at import time configures the root
# logger as a side effect for every consumer of this module; consider
# moving this to the application entry point.  Swap in the commented
# INFO line to reduce verbosity.
logging.basicConfig(level=logging.DEBUG)
# logging.basicConfig(level=logging.INFO)

# Debug-trace switch checked before every logger.debug call in this module.
# enabling this tanks performance, make sure to disable for benchmarking
# TODO(future PR): clean this up
enable_logging = False
# enable_logging = True
def add_auto_observation(
    model : torch.nn.Module,
    example_inputs: Tuple[Any],
    input_dtypes: Any = (torch.float,),  # must be same structure as model inputs
    output_dtypes: Any = (torch.float,),  # must be same structure as model outputs
) -> torch.nn.Module:
    """Prepare `model` for quantization via dynamic tracing.

    Attaches an `AutoQuantizationState` to every non-leaf submodule that
    has a qconfig, swaps the model's class for a tracing subclass, and
    runs one trace with `example_inputs` to record quantizeable ops.
    Returns the same `model` object, modified in place.
    """
    def convert_to_interception_proxy(x):
        # Wrap tensors in the tracing proxy; leave everything else untouched.
        if isinstance(x, torch.Tensor):
            return x.as_subclass(QuantizationPrepareTensorProxy)  # type: ignore[arg-type]
        else:
            return x

    # Shared tracing state, captured by the nested classes below via closure.
    cur_module = None
    first_call = True
    module_stack : List[torch.nn.Module] = []
    # Counter for tensor IDs, will be modified inplace by quant state.
    # This is used to track tensors from output ops to input ops. For example,
    # if op_n had a tensor output with id=1, and op_n+2 had a tensor input with
    # id=1, we know that the output of op_n is the input to op_n+2. Note,
    # this is a list because it needs to be incremented inplace.
    qtensor_id = [0]
    module_id_to_fqn: Dict[int, str] = {}

    # Counter for global quantizeable ops, useful for intermediate activation
    # logging.  Also a one-element list so nested scopes can mutate it.
    global_op_idx = [0]

    class QuantizationPrepareTensorProxy(torch.Tensor):
        """
        An override of `torch.Tensor` to enable dynamic tracing for
        quantization.

        For each function with a `__torch_function__` override, this proxy does
        the following for functions which need quantization:

        1. calls `_auto_quant_state.validate_cur_op` to validate that
           the currently seen op is the same as what was recorded during tracing
        2. calls `_auto_quant_state.op_prepare_before_hook`
        3. executes the original function
        4. calls `_auto_quant_state.op_prepare_after_hook`
        5. calls `_auto_quant_state.mark_cur_op_complete` to increment
           the current op index in preparation for the next op

        Otherwise, calls the original function.
        """

        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            # to prevent printing things from going into an infinite loop
            if func == torch.Tensor.__repr__:
                return super().__torch_function__(func, types, args, kwargs)
            if enable_logging:
                logger.debug(f'__torch_function__ {str(func)} len_args {len(args)}')

            nonlocal qtensor_id
            nonlocal cur_module
            kwargs = kwargs if kwargs else {}
            # if we are in a function, the current module is always a parent
            parent_module = cur_module
            hook_type = get_torch_function_hook_type(parent_module, func)
            if hook_type is HookType.OP_HOOKS:
                qstate = parent_module._auto_quant_state  # type: ignore[attr-defined]
                fqn = module_id_to_fqn[id(parent_module)] if parent_module else None
                # On the first (recording) call there is nothing to validate yet.
                if not first_call:
                    qstate.validate_cur_op(func)
                # run "before" hook
                args, kwargs = qstate.op_prepare_before_hook(
                    func, args, kwargs, first_call, qtensor_id, fqn, parent_module)
                # forward
                output = super().__torch_function__(func, types, args, kwargs)
                # run "after" hook
                output = qstate.op_prepare_after_hook(
                    func, output, args, first_call, qtensor_id, parent_module,
                    global_op_idx)
                qstate.mark_cur_op_complete(func)
            else:
                output = super().__torch_function__(func, types, args, kwargs)
            # TODO: is this right? Don't really understand this
            if output is NotImplemented:
                # Re-dispatch without __torch_function__ and re-wrap the result.
                with torch._C.DisableTorchFunction():
                    output = func(*args, **kwargs).as_subclass(
                        QuantizationPrepareTensorProxy)
                assert output is not NotImplemented
            return output

        def __repr__(self):
            return f'QuantizationPrepareTensorProxy({super().__repr__()})'

        # TODO(future PR): add other math overrides

    class QuantizationInterceptionModule(type(model)):  # type: ignore[misc]
        """
        An override of user defined subclass of `nn.Module` to enable
        dynamic tracing for quantization.

        `cur_module` keeps track of the current module in the stack.

        During the first call, an `AutoQuantizationState` object is created and
        attached to each non-leaf module which we need to check for
        quantizeable operations.

        We override the `__call__` function to do the following for each
        module:

        If the module is an op which needs quantization:

        1. calls `_auto_quant_state.validate_cur_op` to validate that
           the currently seen op is the same as what was recorded during tracing
        2. calls parent module's `._auto_quant_state.op_prepare_before_hook`
        3. executes the original module forward
        4. calls parent module's `_auto_quant_state.op_prepare_after_hook`
        5. calls `_auto_quant_state.mark_cur_op_complete` to increment
           the current op index in preparation for the next op

        If the module can contain children ops that need quantization:

        1. calls `_auto_quant_state.inputs_prepare_hook` (not implemented yet)
        2. executes the original module forward
        3. calls `_auto_quant_state.outputs_prepare_hook`

        Otherwise, calls the original module forward.
        """

        def __call__(self, *args, **kwargs):
            new_args = map_aggregate(args, convert_to_interception_proxy)
            new_kwargs = map_aggregate(kwargs, convert_to_interception_proxy)
            orig_module_call = torch.nn.Module.__call__
            orig_nn_sequential_forward = torch.nn.Sequential.forward

            def _patched_module_call(self, *args, **kwargs):
                if enable_logging:
                    logger.debug(f"_patched_module_call: {type(self)}")

                nonlocal cur_module
                old_module = cur_module
                cur_module = self
                try:
                    parent_module = module_stack[-1] if len(module_stack) else None
                    module_stack.append(self)
                    fqn = module_id_to_fqn.get(id(self), None)
                    if enable_logging:
                        fqn = module_id_to_fqn.get(id(self), None)
                        logger.debug(f"\nstarting fqn {fqn}")
                    hook_type = get_module_hook_type(parent_module, cur_module)
                    if hook_type is HookType.OP_HOOKS:
                        # This module is itself a quantizeable op; the parent
                        # module's quant state records/validates it.
                        parent_qstate: AutoQuantizationState = \
                            parent_module._auto_quant_state  # type: ignore[union-attr, assignment]
                        # before hooks
                        if not first_call:
                            parent_qstate.validate_cur_op(cur_module)
                        args, kwargs = parent_qstate.op_prepare_before_hook(
                            cur_module, args, kwargs, first_call, qtensor_id,
                            fqn, cur_module)
                        # original forward
                        output = orig_module_call(self, *args, **kwargs)
                        # after hooks
                        # TODO is it correct to call_cur_module twice here?
                        output = parent_qstate.op_prepare_after_hook(
                            cur_module, output, args, first_call, qtensor_id,
                            cur_module, global_op_idx)
                        parent_qstate.mark_cur_op_complete(cur_module)
                    elif hook_type is HookType.MODULE_IO_HOOKS:
                        # This module owns its own quant state and only its
                        # outputs are hooked here.
                        # TODO(future PR): add inputs io hook
                        cur_qstate = cur_module._auto_quant_state
                        cur_qstate.reset_to_new_call()
                        # original forward
                        output = orig_module_call(self, *args, **kwargs)
                        # after hooks
                        output = cur_qstate.outputs_prepare_hook(
                            output, first_call, qtensor_id)
                        cur_qstate.validate_is_at_last_seen_idx()
                    elif hook_type is HookType.ARG_DEQUANTS:
                        output = orig_module_call(self, *args, **kwargs)
                        # if this fp32 was inplace, make sure to set the output dtype
                        # back to torch.float
                        if hasattr(output, '_qtensor_info'):
                            del output._qtensor_info
                    else:
                        output = orig_module_call(self, *args, **kwargs)
                    if enable_logging:
                        fqn = module_id_to_fqn.get(id(self), None)
                        logger.debug(f"\nending fqn {fqn}")
                    return output
                finally:
                    # Always restore the module stack/current module, even if
                    # a hook or the forward raised.
                    module_stack.pop()
                    cur_module = old_module

            torch.nn.Module.__call__ = _patched_module_call
            # NOTE(review): _nn_sequential_patched_forward is not defined in
            # this scope as shown — presumably provided alongside
            # _patched_module_call; verify before relying on this path.
            torch.nn.Sequential.forward = _nn_sequential_patched_forward  # type: ignore[assignment]
            nonlocal first_call
            try:
                if first_call:
                    # Create a list before iterating because we are adding new
                    # named modules inside the loop.
                    named_modules = list(self.named_modules())
                    for k, v in named_modules:
                        # k is the global FQN, i.e. 'foo.bar.baz'
                        # v is the module instance
                        #
                        # we need to associate the global FQN with SeenOp
                        # for modules, this is the module FQN
                        # for functions, this is the parent module FQN
                        module_id_to_fqn[id(v)] = k
                        has_qconfig = hasattr(v, 'qconfig') and v.qconfig is not None
                        if has_qconfig and not is_leaf(v):
                            if v is self:
                                # for the top level module only, specify input
                                # and output dtypes
                                v._auto_quant_state = AutoQuantizationState(
                                    v.qconfig, input_dtypes, output_dtypes)
                                # NOTE(review): this `pass` is dead code.
                                pass
                            else:
                                v._auto_quant_state = AutoQuantizationState(
                                    v.qconfig)
                global_op_idx[0] = 0
                output = super().__call__(*new_args, **new_kwargs)
                return output
            finally:
                # Undo the monkey-patches no matter what happened above.
                torch.nn.Module.__call__ = orig_module_call
                torch.nn.Sequential.forward = orig_nn_sequential_forward  # type: ignore[assignment]
                first_call = False

    model.__class__ = QuantizationInterceptionModule
    # create the graph
    trace_with_inputs(model, example_inputs)
    return model
# TODO(future PR): add serialization support
def add_auto_convert(module : torch.nn.Module) -> torch.nn.Module:
def convert_to_dispatch_proxy(x):
if isinstance(x, torch.Tensor):
return x.as_subclass(QuantizationConvertTensorProxy) # type: ignore[arg-type]
else:
return x
module_id_to_fqn: Dict[int, str] = {}
# Counter for global quantizeable ops, useful for intermediate activation
# logging.
global_op_idx = [0]
class QuantizationConvertTensorProxy(torch.Tensor):
"""
An override of `torch.Tensor` to enable dynamic dispatch for
quantization inference.
For each function with a `__torch_fuction__` override, this proxy does
the following for functions which need quantization:
1. calls `_auto_quant_state.validate_cur_op` to validate that
the currently seen op is the same as what was recorded during tracing
2. calls `_auto_quant_state.op_convert_before_hook`.
3. executes the function, with target, args and kwargs possibly modified
by (2)
4. calls `_auto_quant_state.inference_function_after_hook`.
5. calls `_auto_quant_state.mark_cur_op_complete` to increment
the current op index in preparation for the next op
Otherwise, calls the original function.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
# to prevent printing things from going into an infinite loop
if func == torch.Tensor.__repr__:
return super().__torch_function__(func, types, args, kwargs)
kwargs = kwargs if kwargs else {}
# if we are in a function, the current module is always a parent
parent_module = cur_module
hook_type = get_torch_function_hook_type(parent_module, func)
if enable_logging:
with torch._C.DisableTorchFunction():
logger.debug(
f"__torch_function__ {func} " +
f"hook_type {hook_type} " +
# f"arg_types {[type(arg) for arg in args]}) " +
f"arg_dtypes {[arg.dtype if isinstance(arg, torch.Tensor) else None for arg in args]}")
if hook_type is HookType.OP_HOOKS:
qstate: AutoQuantizationState = parent_module._auto_quant_state # type: ignore[union-attr]
# before hooks
qstate.validate_cur_op(func)
func, args, kwargs = qstate.op_convert_before_hook(
func, args, kwargs, parent_module) # type: ignore[arg-type]
# forward
output = super().__torch_function__(func, types, args, kwargs)
# after hooks
output = qstate.op_convert_after_hook(
func, output, global_op_idx)
qstate.mark_cur_op_complete(func)
elif hook_type is HookType.ARG_DEQUANTS:
# disabling torch function to prevent infinite recursion on
# getset
# TODO(future PR): handle more dtypes
with torch._C.DisableTorchFunction():
new_args = []
for arg in args:
if isinstance(arg, torch.Tensor) and arg.is_quantized:
new_args.append(arg.dequantize())
else:
new_args.append(arg)
args = tuple(new_args)
output = super().__torch_function__(func, types, args, kwargs)
else: # HookType.NONE
output = super().__torch_function__(func, types, args, kwargs)
# TODO: is this right? Don't really understand this
if output is NotImplemented:
with torch._C.DisableTorchFunction():
output = func(*args, **kwargs).as_subclass(
QuantizationConvertTensorProxy)
assert output is not NotImplemented
if enable_logging:
out_dtype = None
if isinstance(output, torch.Tensor):
out_dtype = output.dtype
logger.debug(f"__torch_function__ {func} out {out_dtype} end")
return output
def __repr__(self):
return f'QuantizationConvertTensorProxy({super().__repr__()})'
cur_module = None
module_stack : List[torch.nn.Module] = []
assert len(module.__class__.__bases__) == 1
class QuantizationDispatchModule(module.__class__.__bases__[0]):  # type: ignore[name-defined]
    """
    An override of user defined subclass of `nn.Module` to enable
    dynamic tracing for quantization, after model conversion
    to quantized domain.

    `cur_module` keeps track of the current module in the stack.

    Tensor arguments are converted to `QuantizationConvertTensorProxy`.

    We override the `__call__` function to do the following for each
    module:

    If the module is an op which needs quantization:

        1. calls `_auto_quant_state.validate_cur_op` to validate that
           the currently seen op is the same as what was recorded during tracing
        2. calls parent module's `._auto_quant_state.op_convert_before_hook`
        3. executes the original module forward
        4. calls parent module's `_auto_quant_state.op_convert_after_hook`
        5. calls `_auto_quant_state.mark_cur_op_complete` to increment
           the current op index in preparation for the next op

    If the module can contain children ops that need quantization:

        1. calls `_auto_quant_state.inputs_convert_hook` (not implemented yet)
        2. executes the original module forward
        3. calls `_auto_quant_state.outputs_convert_hook`

    Otherwise, calls the original module forward.
    """

    def __call__(self, *args, **kwargs):
        # Wrap every tensor input in a dispatch proxy so the quantization
        # hooks fire on each torch function call during this forward pass.
        new_args = map_aggregate(args, convert_to_dispatch_proxy)
        new_kwargs = map_aggregate(kwargs, convert_to_dispatch_proxy)
        orig_module_call = torch.nn.Module.__call__
        orig_nn_sequential_forward = torch.nn.Sequential.forward

        def _patched_module_call(self, *args, **kwargs):
            # Replacement for nn.Module.__call__ that routes each child
            # module call through the appropriate convert-time hooks.
            if enable_logging:
                fqn = module_id_to_fqn.get(id(self), None)
                logger.debug(f"\nstarting fqn {fqn}")
            nonlocal cur_module
            old_module = cur_module
            cur_module = self
            try:
                parent_module = module_stack[-1] if len(module_stack) else None
                module_stack.append(self)
                hook_type = get_module_hook_type(parent_module, cur_module)
                if enable_logging:
                    logger.debug(
                        f"_patched_module_call {type(self)} " +
                        # f"arg_types {[type(arg) for arg in args]} " +
                        f"arg_dtypes {[arg.dtype if isinstance(arg, torch.Tensor) else None for arg in args]} " +
                        f"hook_type {hook_type}")
                if hook_type is HookType.OP_HOOKS:
                    # before hooks
                    qstate: AutoQuantizationState = \
                        parent_module._auto_quant_state  # type: ignore[union-attr, assignment]
                    if enable_logging:
                        logger.debug(qstate)
                    qstate.validate_cur_op(cur_module)
                    _, args, kwargs = qstate.op_convert_before_hook(
                        cur_module, args, kwargs, cur_module)
                    # forward
                    output = orig_module_call(self, *args, **kwargs)
                    # after hooks
                    output = qstate.op_convert_after_hook(
                        cur_module, output, global_op_idx)
                    qstate.mark_cur_op_complete(cur_module)
                elif hook_type is HookType.MODULE_IO_HOOKS:
                    cur_qstate: AutoQuantizationState = cur_module._auto_quant_state
                    if enable_logging:
                        logger.debug(cur_qstate)
                    cur_qstate.reset_to_new_call()
                    # before hooks (TODO)
                    # forward
                    output = orig_module_call(self, *args, **kwargs)
                    # after hooks
                    output = cur_qstate.outputs_convert_hook(output)
                    cur_qstate.validate_is_at_last_seen_idx()
                elif hook_type is HookType.ARG_DEQUANTS:
                    # disabling torch function to prevent infinite recursion on
                    # getset
                    # TODO(future PR): handle more dtypes
                    with torch._C.DisableTorchFunction():
                        new_args = []
                        for arg in args:
                            if isinstance(arg, torch.Tensor) and arg.is_quantized:
                                dequant = arg.dequantize().as_subclass(
                                    QuantizationConvertTensorProxy)  # type: ignore[arg-type]
                                new_args.append(dequant)
                            else:
                                new_args.append(arg)
                        args = tuple(new_args)
                    output = orig_module_call(self, *args, **kwargs)
                else:
                    output = orig_module_call(self, *args, **kwargs)
                if enable_logging:
                    logger.debug(
                        f"_patched_module_call {type(self)} " +
                        # f"out {type(output)} " +
                        f"dtype {output.dtype if isinstance(output, torch.Tensor) else None} " +
                        "end")
                    logger.debug(f"ending fqn {fqn}\n")
                return output
            finally:
                # Always restore stack/current-module bookkeeping, even when
                # a hook or the forward raises.
                module_stack.pop()
                cur_module = old_module

        # Monkey-patch module dispatch for the duration of this call only;
        # the originals are restored in the finally block below.
        torch.nn.Module.__call__ = _patched_module_call
        torch.nn.Sequential.forward = _nn_sequential_patched_forward  # type: ignore[assignment]
        try:
            global_op_idx[0] = 0
            needs_io_hooks = hasattr(self, '_auto_quant_state')
            # handle module input dtype conversions
            # TODO(implement)
            output = super().__call__(*new_args, **new_kwargs)
            # handle module output dtype conversions
            if needs_io_hooks:
                qstate = self._auto_quant_state
                assert isinstance(qstate, AutoQuantizationState)
                output = qstate.outputs_convert_hook(output)

            def unwrap_proxy(a):
                # Strip the proxy subclass from outputs so callers see
                # plain tensors.
                if isinstance(a, QuantizationConvertTensorProxy):
                    a.__class__ = torch.Tensor  # type: ignore[assignment]
                return a
            output = map_aggregate(output, unwrap_proxy)
            return output
        finally:
            torch.nn.Module.__call__ = orig_module_call
            torch.nn.Sequential.forward = orig_nn_sequential_forward  # type: ignore[assignment]

    def rewrite_for_scripting(self):
        # Delegate to the rewriter module to produce a scriptable variant.
        return auto_trace_rewriter.rewrite_for_scripting(self)
pack_weights_for_functionals(module)
attach_scale_zp_values_to_model(module)
attach_op_convert_info_to_model(module)
module.__class__ = QuantizationDispatchModule
return module
# AutoQuantizationState lives in parent module's _modules.
# Currently, `torch.nn.Sequential`'s forward iterates over all
# items in _modules. To avoid changing the meaning of the program, for
# now we patch the forward to ignore our quantization state.
# Note: this is a hackedy hack, before launching we should consider
# checking the fix into `torch.nn.Sequential` to avoid the patch.
def _nn_sequential_patched_forward(cls, input):
    """Forward for `torch.nn.Sequential` that skips `AutoQuantizationState`.

    The quantization state object lives in the parent's ``_modules``, so a
    plain Sequential forward would try to call it like a layer; this
    replacement threads the activation through every real child module and
    simply ignores the state object.
    """
    result = input
    for child in cls:
        if isinstance(child, AutoQuantizationState):
            continue
        result = child(result)
    return result
| 43.411449 | 117 | 0.576915 | import logging
from typing import Tuple, Any, List, Dict
import torch
from torch.fx.node import map_aggregate
from .quantization_state import (
AutoQuantizationState,
)
from .utils import (
trace_with_inputs,
is_leaf,
HookType,
get_torch_function_hook_type,
get_module_hook_type,
)
from .model_utils import (
pack_weights_for_functionals,
attach_scale_zp_values_to_model,
attach_op_convert_info_to_model,
)
from . import auto_trace_rewriter
logger = logging.getLogger('auto_trace')
logging.basicConfig(level=logging.DEBUG)
enable_logging = False
def add_auto_observation(
model : torch.nn.Module,
example_inputs: Tuple[Any],
input_dtypes: Any = (torch.float,),
output_dtypes: Any = (torch.float,),
) -> torch.nn.Module:
def convert_to_interception_proxy(x):
if isinstance(x, torch.Tensor):
return x.as_subclass(QuantizationPrepareTensorProxy)
else:
return x
cur_module = None
first_call = True
module_stack : List[torch.nn.Module] = []
qtensor_id = [0]
module_id_to_fqn: Dict[int, str] = {}
global_op_idx = [0]
class QuantizationPrepareTensorProxy(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if func == torch.Tensor.__repr__:
return super().__torch_function__(func, types, args, kwargs)
if enable_logging:
logger.debug(f'__torch_function__ {str(func)} len_args {len(args)}')
nonlocal qtensor_id
nonlocal cur_module
kwargs = kwargs if kwargs else {}
parent_module = cur_module
hook_type = get_torch_function_hook_type(parent_module, func)
if hook_type is HookType.OP_HOOKS:
qstate = parent_module._auto_quant_state
fqn = module_id_to_fqn[id(parent_module)] if parent_module else None
if not first_call:
qstate.validate_cur_op(func)
args, kwargs = qstate.op_prepare_before_hook(
func, args, kwargs, first_call, qtensor_id, fqn, parent_module)
output = super().__torch_function__(func, types, args, kwargs)
output = qstate.op_prepare_after_hook(
func, output, args, first_call, qtensor_id, parent_module,
global_op_idx)
qstate.mark_cur_op_complete(func)
else:
output = super().__torch_function__(func, types, args, kwargs)
if output is NotImplemented:
with torch._C.DisableTorchFunction():
output = func(*args, **kwargs).as_subclass(
QuantizationPrepareTensorProxy)
assert output is not NotImplemented
return output
def __repr__(self):
return f'QuantizationPrepareTensorProxy({super().__repr__()})'
# TODO(future PR): add other math overrides
class QuantizationInterceptionModule(type(model)): # type: ignore[misc]
def __call__(self, *args, **kwargs):
new_args = map_aggregate(args, convert_to_interception_proxy)
new_kwargs = map_aggregate(kwargs, convert_to_interception_proxy)
orig_module_call = torch.nn.Module.__call__
orig_nn_sequential_forward = torch.nn.Sequential.forward
def _patched_module_call(self, *args, **kwargs):
if enable_logging:
logger.debug(f"_patched_module_call: {type(self)}")
nonlocal cur_module
old_module = cur_module
cur_module = self
try:
parent_module = module_stack[-1] if len(module_stack) else None
module_stack.append(self)
fqn = module_id_to_fqn.get(id(self), None)
if enable_logging:
fqn = module_id_to_fqn.get(id(self), None)
logger.debug(f"\nstarting fqn {fqn}")
hook_type = get_module_hook_type(parent_module, cur_module)
if hook_type is HookType.OP_HOOKS:
parent_qstate: AutoQuantizationState = \
parent_module._auto_quant_state # type: ignore[union-attr, assignment]
# before hooks
if not first_call:
parent_qstate.validate_cur_op(cur_module)
args, kwargs = parent_qstate.op_prepare_before_hook(
cur_module, args, kwargs, first_call, qtensor_id,
fqn, cur_module)
# original forward
output = orig_module_call(self, *args, **kwargs)
# after hooks
# TODO is it correct to call_cur_module twice here?
output = parent_qstate.op_prepare_after_hook(
cur_module, output, args, first_call, qtensor_id,
cur_module, global_op_idx)
parent_qstate.mark_cur_op_complete(cur_module)
elif hook_type is HookType.MODULE_IO_HOOKS:
# TODO(future PR): add inputs io hook
cur_qstate = cur_module._auto_quant_state
cur_qstate.reset_to_new_call()
# original forward
output = orig_module_call(self, *args, **kwargs)
# after hooks
output = cur_qstate.outputs_prepare_hook(
output, first_call, qtensor_id)
cur_qstate.validate_is_at_last_seen_idx()
elif hook_type is HookType.ARG_DEQUANTS:
output = orig_module_call(self, *args, **kwargs)
# if this fp32 was inplace, make sure to set the output dtype
# back to torch.float
if hasattr(output, '_qtensor_info'):
del output._qtensor_info
else:
output = orig_module_call(self, *args, **kwargs)
if enable_logging:
fqn = module_id_to_fqn.get(id(self), None)
logger.debug(f"\nending fqn {fqn}")
return output
finally:
module_stack.pop()
cur_module = old_module
torch.nn.Module.__call__ = _patched_module_call
torch.nn.Sequential.forward = _nn_sequential_patched_forward # type: ignore[assignment]
nonlocal first_call
try:
if first_call:
# Create a list before iterating because we are adding new
# named modules inside the loop.
named_modules = list(self.named_modules())
for k, v in named_modules:
# k is the global FQN, i.e. 'foo.bar.baz'
# v is the module instance
#
# we need to associate the global FQN with SeenOp
# for modules, this is the module FQN
# for functions, this is the parent module FQN
module_id_to_fqn[id(v)] = k
has_qconfig = hasattr(v, 'qconfig') and v.qconfig is not None
if has_qconfig and not is_leaf(v):
if v is self:
# for the top level module only, specify input
# and output dtypes
v._auto_quant_state = AutoQuantizationState(
v.qconfig, input_dtypes, output_dtypes)
pass
else:
v._auto_quant_state = AutoQuantizationState(
v.qconfig)
global_op_idx[0] = 0
output = super().__call__(*new_args, **new_kwargs)
return output
finally:
torch.nn.Module.__call__ = orig_module_call
torch.nn.Sequential.forward = orig_nn_sequential_forward # type: ignore[assignment]
first_call = False
model.__class__ = QuantizationInterceptionModule
# create the graph
trace_with_inputs(model, example_inputs)
return model
# TODO(future PR): add serialization support
def add_auto_convert(module : torch.nn.Module) -> torch.nn.Module:
def convert_to_dispatch_proxy(x):
if isinstance(x, torch.Tensor):
return x.as_subclass(QuantizationConvertTensorProxy) # type: ignore[arg-type]
else:
return x
module_id_to_fqn: Dict[int, str] = {}
# Counter for global quantizeable ops, useful for intermediate activation
# logging.
global_op_idx = [0]
class QuantizationConvertTensorProxy(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
# to prevent printing things from going into an infinite loop
if func == torch.Tensor.__repr__:
return super().__torch_function__(func, types, args, kwargs)
kwargs = kwargs if kwargs else {}
# if we are in a function, the current module is always a parent
parent_module = cur_module
hook_type = get_torch_function_hook_type(parent_module, func)
if enable_logging:
with torch._C.DisableTorchFunction():
logger.debug(
f"__torch_function__ {func} " +
f"hook_type {hook_type} " +
# f"arg_types {[type(arg) for arg in args]}) " +
f"arg_dtypes {[arg.dtype if isinstance(arg, torch.Tensor) else None for arg in args]}")
if hook_type is HookType.OP_HOOKS:
qstate: AutoQuantizationState = parent_module._auto_quant_state # type: ignore[union-attr]
# before hooks
qstate.validate_cur_op(func)
func, args, kwargs = qstate.op_convert_before_hook(
func, args, kwargs, parent_module) # type: ignore[arg-type]
# forward
output = super().__torch_function__(func, types, args, kwargs)
# after hooks
output = qstate.op_convert_after_hook(
func, output, global_op_idx)
qstate.mark_cur_op_complete(func)
elif hook_type is HookType.ARG_DEQUANTS:
# disabling torch function to prevent infinite recursion on
# getset
# TODO(future PR): handle more dtypes
with torch._C.DisableTorchFunction():
new_args = []
for arg in args:
if isinstance(arg, torch.Tensor) and arg.is_quantized:
new_args.append(arg.dequantize())
else:
new_args.append(arg)
args = tuple(new_args)
output = super().__torch_function__(func, types, args, kwargs)
else: # HookType.NONE
output = super().__torch_function__(func, types, args, kwargs)
# TODO: is this right? Don't really understand this
if output is NotImplemented:
with torch._C.DisableTorchFunction():
output = func(*args, **kwargs).as_subclass(
QuantizationConvertTensorProxy)
assert output is not NotImplemented
if enable_logging:
out_dtype = None
if isinstance(output, torch.Tensor):
out_dtype = output.dtype
logger.debug(f"__torch_function__ {func} out {out_dtype} end")
return output
def __repr__(self):
return f'QuantizationConvertTensorProxy({super().__repr__()})'
cur_module = None
module_stack : List[torch.nn.Module] = []
assert len(module.__class__.__bases__) == 1
class QuantizationDispatchModule(module.__class__.__bases__[0]):
def __call__(self, *args, **kwargs):
new_args = map_aggregate(args, convert_to_dispatch_proxy)
new_kwargs = map_aggregate(kwargs, convert_to_dispatch_proxy)
orig_module_call = torch.nn.Module.__call__
orig_nn_sequential_forward = torch.nn.Sequential.forward
def _patched_module_call(self, *args, **kwargs):
if enable_logging:
fqn = module_id_to_fqn.get(id(self), None)
logger.debug(f"\nstarting fqn {fqn}")
nonlocal cur_module
old_module = cur_module
cur_module = self
try:
parent_module = module_stack[-1] if len(module_stack) else None
module_stack.append(self)
hook_type = get_module_hook_type(parent_module, cur_module)
if enable_logging:
logger.debug(
f"_patched_module_call {type(self)} " +
f"arg_dtypes {[arg.dtype if isinstance(arg, torch.Tensor) else None for arg in args]} " +
f"hook_type {hook_type}")
if hook_type is HookType.OP_HOOKS:
qstate: AutoQuantizationState = \
parent_module._auto_quant_state
if enable_logging:
logger.debug(qstate)
qstate.validate_cur_op(cur_module)
_, args, kwargs = qstate.op_convert_before_hook(
cur_module, args, kwargs, cur_module)
output = orig_module_call(self, *args, **kwargs)
output = qstate.op_convert_after_hook(
cur_module, output, global_op_idx)
qstate.mark_cur_op_complete(cur_module)
elif hook_type is HookType.MODULE_IO_HOOKS:
cur_qstate: AutoQuantizationState = cur_module._auto_quant_state
if enable_logging:
logger.debug(cur_qstate)
cur_qstate.reset_to_new_call()
output = orig_module_call(self, *args, **kwargs)
output = cur_qstate.outputs_convert_hook(output)
cur_qstate.validate_is_at_last_seen_idx()
elif hook_type is HookType.ARG_DEQUANTS:
with torch._C.DisableTorchFunction():
new_args = []
for arg in args:
if isinstance(arg, torch.Tensor) and arg.is_quantized:
dequant = arg.dequantize().as_subclass(
QuantizationConvertTensorProxy)
new_args.append(dequant)
else:
new_args.append(arg)
args = tuple(new_args)
output = orig_module_call(self, *args, **kwargs)
else:
output = orig_module_call(self, *args, **kwargs)
if enable_logging:
logger.debug(
f"_patched_module_call {type(self)} " +
f"dtype {output.dtype if isinstance(output, torch.Tensor) else None} " +
"end")
logger.debug(f"ending fqn {fqn}\n")
return output
finally:
module_stack.pop()
cur_module = old_module
torch.nn.Module.__call__ = _patched_module_call
torch.nn.Sequential.forward = _nn_sequential_patched_forward
try:
global_op_idx[0] = 0
needs_io_hooks = hasattr(self, '_auto_quant_state')
output = super().__call__(*new_args, **new_kwargs)
if needs_io_hooks:
qstate = self._auto_quant_state
assert isinstance(qstate, AutoQuantizationState)
output = qstate.outputs_convert_hook(output)
def unwrap_proxy(a):
if isinstance(a, QuantizationConvertTensorProxy):
a.__class__ = torch.Tensor
return a
output = map_aggregate(output, unwrap_proxy)
return output
finally:
torch.nn.Module.__call__ = orig_module_call
torch.nn.Sequential.forward = orig_nn_sequential_forward
def rewrite_for_scripting(self):
return auto_trace_rewriter.rewrite_for_scripting(self)
pack_weights_for_functionals(module)
attach_scale_zp_values_to_model(module)
attach_op_convert_info_to_model(module)
module.__class__ = QuantizationDispatchModule
return module
# Currently, `torch.nn.Sequential`'s forward iterates over all
def _nn_sequential_patched_forward(cls, input):
for module in cls:
if not isinstance(module, AutoQuantizationState):
input = module(input)
return input
| true | true |
1c2f02b3852faad8ec8d5d7b16940faac3b83676 | 6,172 | py | Python | backend/corpora/lambdas/api/v1/dataset.py | chanzuckerberg/single-cell-data-portal | d8901ef978ad96de75510d5eb0e459a4790197ea | [
"MIT"
] | 7 | 2021-09-17T23:44:31.000Z | 2022-03-25T22:36:07.000Z | backend/corpora/lambdas/api/v1/dataset.py | chanzuckerberg/single-cell-data-portal | d8901ef978ad96de75510d5eb0e459a4790197ea | [
"MIT"
] | 784 | 2021-08-18T23:38:09.000Z | 2022-03-31T21:18:54.000Z | backend/corpora/lambdas/api/v1/dataset.py | chanzuckerberg/single-cell-data-portal | d8901ef978ad96de75510d5eb0e459a4790197ea | [
"MIT"
] | 2 | 2021-09-07T19:04:17.000Z | 2021-12-23T21:51:36.000Z | from flask import make_response, jsonify, g
from ....common.corpora_orm import CollectionVisibility, DatasetArtifactFileType
from ....common.entities import Dataset, Collection
from ....common.entities.geneset import GenesetDatasetLink
from ....api_server.db import dbconnect
from ....common.utils.exceptions import (
NotFoundHTTPException,
ServerErrorHTTPException,
ForbiddenHTTPException,
CorporaException,
)
from backend.corpora.lambdas.api.v1.collection import _owner_or_allowed
@dbconnect
def post_dataset_asset(dataset_uuid: str, asset_uuid: str):
    """Return download metadata (name, size, presigned URL) for one asset.

    Raises NotFoundHTTPException when the dataset or the asset does not
    exist, and ServerErrorHTTPException when the S3 file size or presigned
    URL cannot be produced.
    """
    db_session = g.db_session

    # Look up the dataset, then the requested artifact on it.
    dataset = Dataset.get(db_session, dataset_uuid)
    if not dataset:
        raise NotFoundHTTPException(f"'dataset/{dataset_uuid}' not found.")
    asset = dataset.get_asset(asset_uuid)
    if not asset:
        raise NotFoundHTTPException(f"'dataset/{dataset_uuid}/asset/{asset_uuid}' not found.")

    # Both S3 calls must succeed for the asset to be downloadable.
    file_size = asset.get_file_size()
    if not file_size:
        raise ServerErrorHTTPException()
    presigned_url = asset.generate_file_url()
    if not presigned_url:
        raise ServerErrorHTTPException()

    payload = jsonify(
        dataset_id=dataset_uuid,
        file_name=asset.filename,
        file_size=file_size,
        presigned_url=presigned_url,
    )
    return make_response(payload, 200)
@dbconnect
def get_dataset_assets(dataset_uuid: str):
    """List the artifacts (assets) attached to a dataset.

    Returns a JSON body of the form ``{"assets": [...]}``.

    Raises NotFoundHTTPException when no dataset exists with the given
    uuid. Previously a missing dataset fell through to an AttributeError
    on ``None`` (an HTTP 500); the guard matches the behavior of the
    sibling endpoint post_dataset_asset().
    """
    db_session = g.db_session
    dataset = Dataset.get(db_session, dataset_uuid)
    if not dataset:
        raise NotFoundHTTPException(f"'dataset/{dataset_uuid}' not found.")
    assets = dataset.get_assets()
    return make_response(jsonify(assets=assets))
@dbconnect
def get_status(dataset_uuid: str, user: str):
    """Return a dataset's processing status as JSON.

    Access control: the caller must own (or be otherwise allowed on) the
    dataset's collection; both a missing dataset and a non-owned collection
    are reported as 403 so existence is not leaked.
    """
    db_session = g.db_session
    dataset = Dataset.get(db_session, dataset_uuid)
    if not dataset:
        raise ForbiddenHTTPException()
    collection = Collection.get_collection(
        db_session,
        dataset.collection.id,
        dataset.collection.visibility,
        owner=_owner_or_allowed(user),
    )
    if not collection:
        raise ForbiddenHTTPException()
    status = dataset.processing_status.to_dict(remove_none=True)
    # Drop internal bookkeeping fields from the payload. Use pop's default
    # so a key already stripped by remove_none=True does not raise KeyError
    # (which would surface as a 500).
    for remove in ["dataset", "created_at", "updated_at"]:
        status.pop(remove, None)
    return make_response(jsonify(status), 200)
@dbconnect
def get_datasets_index():
    """Return the index listing of all datasets as a JSON 200 response."""
    session = g.db_session
    listing = Dataset.list_for_index(session)
    return make_response(jsonify(listing), 200)
@dbconnect
def delete_dataset(dataset_uuid: str, user: str):
    """
    Deletes an existing dataset or cancels an in progress upload.

    - 403 when the dataset does not exist or the caller does not own (or
      is not otherwise allowed on) its collection.
    - 405 when the dataset belongs to a public collection.
    - Otherwise, for a non-tombstoned dataset: a published dataset is
      tombstoned (tombstone=True, published=False) instead of removed; an
      unpublished one has its assets deleted and its row removed. When the
      unpublished dataset has an original_id, a revision of the original is
      recreated first — presumably to restore the revision's copy of the
      published dataset (TODO confirm against Dataset.create_revision).
    - Responds 202 with an empty body.
    """
    db_session = g.db_session
    dataset = Dataset.get(db_session, dataset_uuid, include_tombstones=True)
    if not dataset:
        raise ForbiddenHTTPException()
    collection = Collection.get_collection(
        db_session,
        dataset.collection.id,
        dataset.collection.visibility,
        owner=_owner_or_allowed(user),
    )
    if not collection:
        raise ForbiddenHTTPException()
    if dataset.collection_visibility == CollectionVisibility.PUBLIC:
        return make_response(jsonify("Can not delete a public dataset"), 405)
    if dataset.tombstone is False:
        if dataset.published:
            # Soft-delete: keep the row but mark it tombstoned/unpublished.
            dataset.update(tombstone=True, published=False)
        else:
            if dataset.original_id:
                # This dataset is a revision of another dataset; recreate
                # the original's revision before deleting this copy.
                original = Dataset.get(db_session, dataset.original_id)
                original.create_revision()
            dataset.asset_deletion()
            dataset.delete()
    return "", 202
@dbconnect
def get_dataset_identifiers(url: str):
    """Look up a dataset by its explorer URL and return its identifiers.

    The payload carries the most recent CXG artifact's s3 uri (None when no
    CXG artifact exists), the dataset/collection ids, the collection
    visibility, and the tombstone flag. Raises NotFoundHTTPException when no
    dataset matches the url.
    """
    db_session = g.db_session
    dataset = Dataset.get_by_explorer_url(db_session, url)
    if not dataset:
        raise NotFoundHTTPException()
    artifact = dataset.get_most_recent_artifact(filetype=DatasetArtifactFileType.CXG)
    payload = {
        "s3_uri": artifact.s3_uri if artifact else None,
        "dataset_id": dataset.id,
        "collection_id": dataset.collection_id,
        "collection_visibility": dataset.collection_visibility,
        "tombstoned": dataset.tombstone,
    }
    return make_response(jsonify(payload), 200)
@dbconnect
def post_dataset_gene_sets(dataset_uuid: str, body: object, user: str):
    """Update the geneset links of a dataset.

    ``body`` carries ``{"add": [...], "remove": [...]}`` geneset ids. Only
    the owner (or an allowed user) of the dataset's *private* collection may
    update links; otherwise 403. Returns the dataset's resulting genesets
    (with collection/timestamp/genes fields stripped) and HTTP 202.
    """
    db_session = g.db_session
    dataset = Dataset.get(db_session, dataset_uuid)
    if not dataset:
        raise ForbiddenHTTPException()
    collection = Collection.get_collection(
        db_session, dataset.collection.id, CollectionVisibility.PRIVATE.name, owner=_owner_or_allowed(user)
    )
    if not collection:
        raise ForbiddenHTTPException()
    # 404 unless every requested id is consistent with the collection and
    # the dataset's current links.
    validate_genesets_in_collection_and_linked_to_dataset(dataset, collection, body)
    try:
        GenesetDatasetLink.update_links_for_a_dataset(db_session, dataset_uuid, add=body["add"], remove=body["remove"])
    except CorporaException:
        raise NotFoundHTTPException()
    gene_sets = [
        x.to_dict(
            remove_attr=[
                "collection",
                "collection_visibility",
                "collection_id",
                "created_at",
                "updated_at",
                "genes",
            ]
        )
        for x in dataset.genesets
    ]
    return make_response(jsonify(gene_sets), 202)
def validate_genesets_in_collection_and_linked_to_dataset(dataset, collection, update_list):
    """Validate a geneset-link update request against a dataset/collection.

    ``update_list`` carries ``{"add": [...], "remove": [...]}`` geneset ids.

    Raises NotFoundHTTPException when:
      * any id in "add" or "remove" is not a geneset of the collection, or
      * any id in "remove" is not currently linked to the dataset, or
      * any id in "add" is already linked to the dataset.
    Returns None when the update is consistent.
    """
    # Build id sets once so each membership test is O(1); the original
    # scanned the id lists once per requested id (O(n*m)).
    dataset_geneset_ids = {x.id for x in dataset.genesets}
    collection_geneset_ids = {x.id for x in collection.genesets}
    add_ids = set(update_list["add"])
    remove_ids = set(update_list["remove"])
    if not (add_ids <= collection_geneset_ids and remove_ids <= collection_geneset_ids):
        raise NotFoundHTTPException()
    if not remove_ids <= dataset_geneset_ids:
        raise NotFoundHTTPException()
    if add_ids & dataset_geneset_ids:
        raise NotFoundHTTPException()
| 33.912088 | 119 | 0.700907 | from flask import make_response, jsonify, g
from ....common.corpora_orm import CollectionVisibility, DatasetArtifactFileType
from ....common.entities import Dataset, Collection
from ....common.entities.geneset import GenesetDatasetLink
from ....api_server.db import dbconnect
from ....common.utils.exceptions import (
NotFoundHTTPException,
ServerErrorHTTPException,
ForbiddenHTTPException,
CorporaException,
)
from backend.corpora.lambdas.api.v1.collection import _owner_or_allowed
@dbconnect
def post_dataset_asset(dataset_uuid: str, asset_uuid: str):
db_session = g.db_session
dataset = Dataset.get(db_session, dataset_uuid)
if not dataset:
raise NotFoundHTTPException(f"'dataset/{dataset_uuid}' not found.")
asset = dataset.get_asset(asset_uuid)
if not asset:
raise NotFoundHTTPException(f"'dataset/{dataset_uuid}/asset/{asset_uuid}' not found.")
file_size = asset.get_file_size()
if not file_size:
raise ServerErrorHTTPException()
presigned_url = asset.generate_file_url()
if not presigned_url:
raise ServerErrorHTTPException()
return make_response(
jsonify(
dataset_id=dataset_uuid,
file_name=asset.filename,
file_size=file_size,
presigned_url=presigned_url,
),
200,
)
@dbconnect
def get_dataset_assets(dataset_uuid: str):
db_session = g.db_session
dataset = Dataset.get(db_session, dataset_uuid)
assets = dataset.get_assets()
return make_response(jsonify(assets=assets))
@dbconnect
def get_status(dataset_uuid: str, user: str):
db_session = g.db_session
dataset = Dataset.get(db_session, dataset_uuid)
if not dataset:
raise ForbiddenHTTPException()
collection = Collection.get_collection(
db_session,
dataset.collection.id,
dataset.collection.visibility,
owner=_owner_or_allowed(user),
)
if not collection:
raise ForbiddenHTTPException()
status = dataset.processing_status.to_dict(remove_none=True)
for remove in ["dataset", "created_at", "updated_at"]:
status.pop(remove)
return make_response(jsonify(status), 200)
@dbconnect
def get_datasets_index():
db_session = g.db_session
datasets = Dataset.list_for_index(db_session)
return make_response(jsonify(datasets), 200)
@dbconnect
def delete_dataset(dataset_uuid: str, user: str):
db_session = g.db_session
dataset = Dataset.get(db_session, dataset_uuid, include_tombstones=True)
if not dataset:
raise ForbiddenHTTPException()
collection = Collection.get_collection(
db_session,
dataset.collection.id,
dataset.collection.visibility,
owner=_owner_or_allowed(user),
)
if not collection:
raise ForbiddenHTTPException()
if dataset.collection_visibility == CollectionVisibility.PUBLIC:
return make_response(jsonify("Can not delete a public dataset"), 405)
if dataset.tombstone is False:
if dataset.published:
dataset.update(tombstone=True, published=False)
else:
if dataset.original_id:
original = Dataset.get(db_session, dataset.original_id)
original.create_revision()
dataset.asset_deletion()
dataset.delete()
return "", 202
@dbconnect
def get_dataset_identifiers(url: str):
db_session = g.db_session
dataset = Dataset.get_by_explorer_url(db_session, url)
if not dataset:
raise NotFoundHTTPException()
artifact = dataset.get_most_recent_artifact(filetype=DatasetArtifactFileType.CXG)
s3_uri = artifact.s3_uri if artifact else None
dataset_identifiers = {
"s3_uri": s3_uri,
"dataset_id": dataset.id,
"collection_id": dataset.collection_id,
"collection_visibility": dataset.collection_visibility,
"tombstoned": dataset.tombstone,
}
return make_response(jsonify(dataset_identifiers), 200)
@dbconnect
def post_dataset_gene_sets(dataset_uuid: str, body: object, user: str):
db_session = g.db_session
dataset = Dataset.get(db_session, dataset_uuid)
if not dataset:
raise ForbiddenHTTPException()
collection = Collection.get_collection(
db_session, dataset.collection.id, CollectionVisibility.PRIVATE.name, owner=_owner_or_allowed(user)
)
if not collection:
raise ForbiddenHTTPException()
validate_genesets_in_collection_and_linked_to_dataset(dataset, collection, body)
try:
GenesetDatasetLink.update_links_for_a_dataset(db_session, dataset_uuid, add=body["add"], remove=body["remove"])
except CorporaException:
raise NotFoundHTTPException()
gene_sets = [
x.to_dict(
remove_attr=[
"collection",
"collection_visibility",
"collection_id",
"created_at",
"updated_at",
"genes",
]
)
for x in dataset.genesets
]
return make_response(jsonify(gene_sets), 202)
def validate_genesets_in_collection_and_linked_to_dataset(dataset, collection, update_list):
dataset_geneset_ids = [x.id for x in dataset.genesets]
collection_geneset_ids = [x.id for x in collection.genesets]
add_list_in_collection = all(item in collection_geneset_ids for item in update_list["add"])
remove_list_in_collection = all(item in collection_geneset_ids for item in update_list["remove"])
if not (add_list_in_collection and remove_list_in_collection):
raise NotFoundHTTPException()
remove_list_in_dataset = all(item in dataset_geneset_ids for item in update_list["remove"])
if not remove_list_in_dataset:
raise NotFoundHTTPException()
add_list_in_dataset = any(item in dataset_geneset_ids for item in update_list["add"])
if add_list_in_dataset:
raise NotFoundHTTPException()
| true | true |
1c2f034d6dfba5b487fa562eb6a82da3b9d57793 | 78,620 | py | Python | mesonbuild/modules/gnome.py | megatux/meson | 047db1c64cd5b7ef070f73e1d580e36236ac9613 | [
"Apache-2.0"
] | null | null | null | mesonbuild/modules/gnome.py | megatux/meson | 047db1c64cd5b7ef070f73e1d580e36236ac9613 | [
"Apache-2.0"
] | null | null | null | mesonbuild/modules/gnome.py | megatux/meson | 047db1c64cd5b7ef070f73e1d580e36236ac9613 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides helper functions for Gnome/GLib related
functionality such as gobject-introspection, gresources and gtk-doc'''
import os
import copy
import subprocess
from .. import build
from .. import mlog
from .. import mesonlib
from .. import compilers
from .. import interpreter
from . import GResourceTarget, GResourceHeaderTarget, GirTarget, TypelibTarget, VapiTarget
from . import get_include_args
from . import ExtensionModule
from . import ModuleReturnValue
from ..mesonlib import MesonException, OrderedSet, Popen_safe, extract_as_list
from ..dependencies import Dependency, PkgConfigDependency, InternalDependency
from ..interpreterbase import noKwargs, permittedKwargs, FeatureNew, FeatureNewKwargs
# gresource compilation is broken due to the way
# the resource compiler and Ninja clash about it
#
# https://github.com/ninja-build/ninja/issues/1184
# https://bugzilla.gnome.org/show_bug.cgi?id=774368
gresource_dep_needed_version = '>= 2.51.1'
native_glib_version = None
girwarning_printed = False
gdbuswarning_printed = False
gresource_warning_printed = False
_gir_has_extra_lib_arg = None
def gir_has_extra_lib_arg(intr_obj):
    # Return True when the available g-ir-scanner supports --extra-library.
    # The answer is probed once by running `g-ir-scanner --help` and then
    # memoized in the module-level _gir_has_extra_lib_arg.
    global _gir_has_extra_lib_arg
    if _gir_has_extra_lib_arg is not None:
        return _gir_has_extra_lib_arg

    _gir_has_extra_lib_arg = False
    try:
        g_ir_scanner = intr_obj.find_program_impl('g-ir-scanner').get_command()
        opts = Popen_safe(g_ir_scanner + ['--help'], stderr=subprocess.STDOUT)[1]
        _gir_has_extra_lib_arg = '--extra-library' in opts
    except (MesonException, FileNotFoundError, subprocess.CalledProcessError):
        # Scanner missing or not runnable: conservatively report no support.
        pass
    return _gir_has_extra_lib_arg
class GnomeModule(ExtensionModule):
gir_dep = None
@staticmethod
def _get_native_glib_version(state):
    """Return the build machine's glib-2.0 version, probing pkg-config once
    and caching the result in the module-level ``native_glib_version``."""
    global native_glib_version
    if native_glib_version is None:
        glib_dep = PkgConfigDependency('glib-2.0', state.environment,
                                       {'native': True, 'required': False})
        if not glib_dep.found():
            # Fall back to a safe assumption when pkg-config cannot find glib.
            mlog.warning('Could not detect glib version, assuming 2.54. '
                         'You may get build errors if your glib is older.')
            native_glib_version = '2.54'
        else:
            native_glib_version = glib_dep.get_version()
    return native_glib_version
def __print_gresources_warning(self, state):
    # Emit (at most once per run) a warning when the native glib is older
    # than gresource_dep_needed_version, in which case gresource dependency
    # tracking is unreliable; see the linked upstream bug.
    global gresource_warning_printed
    if not gresource_warning_printed:
        if not mesonlib.version_compare(self._get_native_glib_version(state), gresource_dep_needed_version):
            mlog.warning('GLib compiled dependencies do not work reliably with \n'
                         'the current version of GLib. See the following upstream issue:',
                         mlog.bold('https://bugzilla.gnome.org/show_bug.cgi?id=774368'))
            gresource_warning_printed = True
    # Returns an empty list so callers can splice the result into arg lists.
    return []
@staticmethod
def _print_gdbus_warning():
global gdbuswarning_printed
if not gdbuswarning_printed:
mlog.warning('Code generated with gdbus_codegen() requires the root directory be added to\n'
' include_directories of targets with GLib < 2.51.3:',
mlog.bold('https://github.com/mesonbuild/meson/issues/1387'))
gdbuswarning_printed = True
    @FeatureNewKwargs('gnome.compile_resources', '0.37.0', ['gresource_bundle', 'export', 'install_header'])
    @permittedKwargs({'source_dir', 'c_name', 'dependencies', 'export', 'gresource_bundle', 'install_header',
                      'install', 'install_dir', 'extra_args', 'build_by_default'})
    def compile_resources(self, state, args, kwargs):
        """Compile a GResource XML (args[1]) with glib-compile-resources.

        args[0] is the output basename.  With 'gresource_bundle' a single
        .gresource target is returned; otherwise a C source target and a
        header target are returned as a pair.
        """
        self.__print_gresources_warning(state)
        glib_version = self._get_native_glib_version(state)

        cmd = ['glib-compile-resources', '@INPUT@']

        source_dirs, dependencies = mesonlib.extract_as_list(kwargs, 'source_dir', 'dependencies', pop=True)

        if len(args) < 2:
            raise MesonException('Not enough arguments; the name of the resource '
                                 'and the path to the XML file are required')

        # Validate dependencies
        for (ii, dep) in enumerate(dependencies):
            if hasattr(dep, 'held_object'):
                dependencies[ii] = dep = dep.held_object
            if not isinstance(dep, (mesonlib.File, build.CustomTarget, build.CustomTargetIndex)):
                m = 'Unexpected dependency type {!r} for gnome.compile_resources() ' \
                    '"dependencies" argument.\nPlease pass the return value of ' \
                    'custom_target() or configure_file()'
                raise MesonException(m.format(dep))

            if isinstance(dep, (build.CustomTarget, build.CustomTargetIndex)):
                # Generated-file dependencies only work when glib can emit a
                # depfile itself (see gresource_dep_needed_version above).
                if not mesonlib.version_compare(glib_version, gresource_dep_needed_version):
                    m = 'The "dependencies" argument of gnome.compile_resources() can not\n' \
                        'be used with the current version of glib-compile-resources due to\n' \
                        '<https://bugzilla.gnome.org/show_bug.cgi?id=774368>'
                    raise MesonException(m)

        ifile = args[1]
        if isinstance(ifile, mesonlib.File):
            # glib-compile-resources will be run inside the source dir,
            # so we need either 'src_to_build' or the absolute path.
            # Absolute path is the easiest choice.
            if ifile.is_built:
                ifile = os.path.join(state.environment.get_build_dir(), ifile.subdir, ifile.fname)
            else:
                ifile = os.path.join(ifile.subdir, ifile.fname)
        elif isinstance(ifile, str):
            ifile = os.path.join(state.subdir, ifile)
        elif isinstance(ifile, (interpreter.CustomTargetHolder,
                                interpreter.CustomTargetIndexHolder,
                                interpreter.GeneratedObjectsHolder)):
            m = 'Resource xml files generated at build-time cannot be used ' \
                'with gnome.compile_resources() because we need to scan ' \
                'the xml for dependencies. Use configure_file() instead ' \
                'to generate it at configure-time.'
            raise MesonException(m)
        else:
            raise MesonException('Invalid file argument: {!r}'.format(ifile))

        depend_files, depends, subdirs = self._get_gresource_dependencies(
            state, ifile, source_dirs, dependencies)

        # Make source dirs relative to build dir now
        source_dirs = [os.path.join(state.build_to_src, state.subdir, d) for d in source_dirs]
        # Always include current directory, but after paths set by user
        source_dirs.append(os.path.join(state.build_to_src, state.subdir))
        # Ensure build directories of generated deps are included
        source_dirs += subdirs

        for source_dir in OrderedSet(source_dirs):
            cmd += ['--sourcedir', source_dir]

        if 'c_name' in kwargs:
            cmd += ['--c-name', kwargs.pop('c_name')]
        export = kwargs.pop('export', False)
        if not export:
            cmd += ['--internal']

        cmd += ['--generate', '--target', '@OUTPUT@']

        cmd += mesonlib.stringlistify(kwargs.pop('extra_args', []))

        gresource = kwargs.pop('gresource_bundle', False)
        if gresource:
            output = args[0] + '.gresource'
            name = args[0] + '_gresource'
        else:
            output = args[0] + '.c'
            name = args[0] + '_c'

        if kwargs.get('install', False) and not gresource:
            raise MesonException('The install kwarg only applies to gresource bundles, see install_header')

        install_header = kwargs.pop('install_header', False)
        if install_header and gresource:
            raise MesonException('The install_header kwarg does not apply to gresource bundles')
        if install_header and not export:
            raise MesonException('GResource header is installed yet export is not enabled')

        # The remaining kwargs are forwarded to the custom target.
        kwargs['input'] = args[1]
        kwargs['output'] = output
        kwargs['depends'] = depends
        if not mesonlib.version_compare(glib_version, gresource_dep_needed_version):
            # This will eventually go out of sync if dependencies are added
            kwargs['depend_files'] = depend_files
            kwargs['command'] = cmd
        else:
            # Newer glib can emit a depfile itself, which is more accurate.
            depfile = kwargs['output'] + '.d'
            kwargs['depfile'] = depfile
            kwargs['command'] = copy.copy(cmd) + ['--dependency-file', '@DEPFILE@']
        target_c = GResourceTarget(name, state.subdir, state.subproject, kwargs)

        if gresource: # Only one target for .gresource files
            return ModuleReturnValue(target_c, [target_c])

        h_kwargs = {
            'command': cmd,
            'input': args[1],
            'output': args[0] + '.h',
            # The header doesn't actually care about the files yet it errors if missing
            'depends': depends
        }
        if 'build_by_default' in kwargs:
            h_kwargs['build_by_default'] = kwargs['build_by_default']
        if install_header:
            h_kwargs['install'] = install_header
            h_kwargs['install_dir'] = kwargs.get('install_dir',
                                                 state.environment.coredata.get_builtin_option('includedir'))
        target_h = GResourceHeaderTarget(args[0] + '_h', state.subdir, state.subproject, h_kwargs)
        rv = [target_c, target_h]
        return ModuleReturnValue(rv, rv)
def _get_gresource_dependencies(self, state, input_file, source_dirs, dependencies):
cmd = ['glib-compile-resources',
input_file,
'--generate-dependencies']
# Prefer generated files over source files
cmd += ['--sourcedir', state.subdir] # Current build dir
for source_dir in source_dirs:
cmd += ['--sourcedir', os.path.join(state.subdir, source_dir)]
pc, stdout, stderr = Popen_safe(cmd, cwd=state.environment.get_source_dir())
if pc.returncode != 0:
m = 'glib-compile-resources failed to get dependencies for {}:\n{}'
mlog.warning(m.format(cmd[1], stderr))
raise subprocess.CalledProcessError(pc.returncode, cmd)
dep_files = stdout.split('\n')[:-1]
depends = []
subdirs = []
for resfile in dep_files[:]:
resbasename = os.path.basename(resfile)
for dep in dependencies:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, mesonlib.File):
if dep.fname != resbasename:
continue
dep_files.remove(resfile)
dep_files.append(dep)
subdirs.append(dep.subdir)
break
elif isinstance(dep, (build.CustomTarget, build.CustomTargetIndex)):
fname = None
outputs = {(o, os.path.basename(o)) for o in dep.get_outputs()}
for o, baseo in outputs:
if baseo == resbasename:
fname = o
break
if fname is not None:
dep_files.remove(resfile)
depends.append(dep)
subdirs.append(dep.get_subdir())
break
else:
# In generate-dependencies mode, glib-compile-resources doesn't raise
# an error for missing resources but instead prints whatever filename
# was listed in the input file. That's good because it means we can
# handle resource files that get generated as part of the build, as
# follows.
#
# If there are multiple generated resource files with the same basename
# then this code will get confused.
try:
f = mesonlib.File.from_source_file(state.environment.get_source_dir(),
".", resfile)
except MesonException:
raise MesonException(
'Resource "%s" listed in "%s" was not found. If this is a '
'generated file, pass the target that generates it to '
'gnome.compile_resources() using the "dependencies" '
'keyword argument.' % (resfile, input_file))
dep_files.remove(resfile)
dep_files.append(f)
return dep_files, depends, subdirs
def _get_link_args(self, state, lib, depends, include_rpath=False,
use_gir_args=False):
link_command = []
# Construct link args
if isinstance(lib, build.SharedLibrary):
libdir = os.path.join(state.environment.get_build_dir(), state.backend.get_target_dir(lib))
link_command.append('-L' + libdir)
# Needed for the following binutils bug:
# https://github.com/mesonbuild/meson/issues/1911
# However, g-ir-scanner does not understand -Wl,-rpath
# so we need to use -L instead
for d in state.backend.determine_rpath_dirs(lib):
d = os.path.join(state.environment.get_build_dir(), d)
link_command.append('-L' + d)
if include_rpath:
link_command.append('-Wl,-rpath,' + d)
if include_rpath:
link_command.append('-Wl,-rpath,' + libdir)
depends.append(lib)
if gir_has_extra_lib_arg(self.interpreter) and use_gir_args:
link_command.append('--extra-library=' + lib.name)
else:
link_command.append('-l' + lib.name)
return link_command
    def _get_dependencies_flags(self, deps, state, depends, include_rpath=False,
                                use_gir_args=False, separate_nodedup=False):
        """Recursively collect compile/link flags and gir include dirs from
        *deps*.

        Returns (cflags, internal_ldflags, external_ldflags, gi_includes),
        or, when *separate_nodedup* is true, the same tuple with an extra
        external_ldflags_nodedup list before gi_includes.  Build targets
        encountered along the way are appended to *depends*.
        """
        cflags = OrderedSet()
        internal_ldflags = OrderedSet()
        external_ldflags = OrderedSet()
        # External linker flags that can't be de-duped reliably because they
        # require two args in order, such as -framework AVFoundation
        external_ldflags_nodedup = []
        gi_includes = OrderedSet()
        deps = mesonlib.listify(deps, unholder=True)

        for dep in deps:
            if isinstance(dep, InternalDependency):
                cflags.update(dep.get_compile_args())
                cflags.update(get_include_args(dep.include_directories))
                for lib in dep.libraries:
                    if hasattr(lib, 'held_object'):
                        lib = lib.held_object
                    if isinstance(lib, build.SharedLibrary):
                        internal_ldflags.update(self._get_link_args(state, lib, depends, include_rpath))
                        # Recurse into the library's own external deps.
                        libdepflags = self._get_dependencies_flags(lib.get_external_deps(), state, depends, include_rpath,
                                                                   use_gir_args, True)
                        cflags.update(libdepflags[0])
                        internal_ldflags.update(libdepflags[1])
                        external_ldflags.update(libdepflags[2])
                        external_ldflags_nodedup += libdepflags[3]
                        gi_includes.update(libdepflags[4])
                extdepflags = self._get_dependencies_flags(dep.ext_deps, state, depends, include_rpath,
                                                           use_gir_args, True)
                cflags.update(extdepflags[0])
                internal_ldflags.update(extdepflags[1])
                external_ldflags.update(extdepflags[2])
                external_ldflags_nodedup += extdepflags[3]
                gi_includes.update(extdepflags[4])
                for source in dep.sources:
                    if hasattr(source, 'held_object'):
                        source = source.held_object
                    if isinstance(source, GirTarget):
                        gi_includes.update([os.path.join(state.environment.get_build_dir(),
                                            source.get_subdir())])
            # This should be any dependency other than an internal one.
            elif isinstance(dep, Dependency):
                cflags.update(dep.get_compile_args())
                ldflags = iter(dep.get_link_args(raw=True))
                for lib in ldflags:
                    if (os.path.isabs(lib) and
                            # For PkgConfigDependency only:
                            getattr(dep, 'is_libtool', False)):
                        # Translate an absolute libtool path into -L/-l form.
                        lib_dir = os.path.dirname(lib)
                        external_ldflags.update(["-L%s" % lib_dir])
                        if include_rpath:
                            external_ldflags.update(['-Wl,-rpath {}'.format(lib_dir)])
                        libname = os.path.basename(lib)
                        if libname.startswith("lib"):
                            libname = libname[3:]
                        libname = libname.split(".so")[0]
                        lib = "-l%s" % libname
                    # FIXME: Hack to avoid passing some compiler options in
                    if lib.startswith("-W"):
                        continue
                    # If it's a framework arg, slurp the framework name too
                    # to preserve the order of arguments
                    if lib == '-framework':
                        external_ldflags_nodedup += [lib, next(ldflags)]
                    else:
                        external_ldflags.update([lib])

                if isinstance(dep, PkgConfigDependency):
                    girdir = dep.get_pkgconfig_variable("girdir", {'default': ''})
                    if girdir:
                        gi_includes.update([girdir])
            elif isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
                cflags.update(get_include_args(dep.get_include_dirs()))
                depends.append(dep)
            else:
                mlog.log('dependency {!r} not handled to build gir files'.format(dep))
                continue

        if gir_has_extra_lib_arg(self.interpreter) and use_gir_args:
            # g-ir-scanner wants --extra-library= instead of -l.
            def fix_ldflags(ldflags):
                fixed_ldflags = OrderedSet()
                for ldflag in ldflags:
                    if ldflag.startswith("-l"):
                        ldflag = ldflag.replace('-l', '--extra-library=', 1)
                    fixed_ldflags.add(ldflag)
                return fixed_ldflags
            internal_ldflags = fix_ldflags(internal_ldflags)
            external_ldflags = fix_ldflags(external_ldflags)
        if not separate_nodedup:
            external_ldflags.update(external_ldflags_nodedup)
            return cflags, internal_ldflags, external_ldflags, gi_includes
        else:
            return cflags, internal_ldflags, external_ldflags, external_ldflags_nodedup, gi_includes
def _unwrap_gir_target(self, girtarget):
while hasattr(girtarget, 'held_object'):
girtarget = girtarget.held_object
if not isinstance(girtarget, (build.Executable, build.SharedLibrary)):
raise MesonException('Gir target must be an executable or shared library')
return girtarget
def _get_gir_dep(self, state):
try:
gir_dep = self.gir_dep or PkgConfigDependency('gobject-introspection-1.0',
state.environment,
{'native': True})
pkgargs = gir_dep.get_compile_args()
except Exception:
raise MesonException('gobject-introspection dependency was not found, gir cannot be generated.')
return gir_dep, pkgargs
def _scan_header(self, kwargs):
ret = []
header = kwargs.pop('header', None)
if header:
if not isinstance(header, str):
raise MesonException('header must be a string')
ret = ['--c-include=' + header]
return ret
def _scan_extra_args(self, kwargs):
return mesonlib.stringlistify(kwargs.pop('extra_args', []))
def _scan_link_withs(self, state, depends, kwargs):
ret = []
if 'link_with' in kwargs:
link_with = mesonlib.extract_as_list(kwargs, 'link_with', pop = True)
for link in link_with:
ret += self._get_link_args(state, link.held_object, depends,
use_gir_args=True)
return ret
# May mutate depends and gir_inc_dirs
def _scan_include(self, state, depends, gir_inc_dirs, kwargs):
ret = []
if 'includes' in kwargs:
includes = mesonlib.extract_as_list(kwargs, 'includes', pop = True)
for inc in includes:
if hasattr(inc, 'held_object'):
inc = inc.held_object
if isinstance(inc, str):
ret += ['--include=%s' % (inc, )]
elif isinstance(inc, GirTarget):
gir_inc_dirs += [
os.path.join(state.environment.get_build_dir(),
inc.get_subdir()),
]
ret += [
"--include-uninstalled=%s" % (os.path.join(inc.get_subdir(), inc.get_basename()), )
]
depends += [inc]
else:
raise MesonException(
'Gir includes must be str, GirTarget, or list of them')
return ret
def _scan_symbol_prefix(self, kwargs):
ret = []
if 'symbol_prefix' in kwargs:
sym_prefixes = mesonlib.stringlistify(kwargs.pop('symbol_prefix', []))
ret += ['--symbol-prefix=%s' % sym_prefix for sym_prefix in sym_prefixes]
return ret
def _scan_identifier_prefix(self, kwargs):
ret = []
if 'identifier_prefix' in kwargs:
identifier_prefix = kwargs.pop('identifier_prefix')
if not isinstance(identifier_prefix, str):
raise MesonException('Gir identifier prefix must be str')
ret += ['--identifier-prefix=%s' % identifier_prefix]
return ret
def _scan_export_packages(self, kwargs):
ret = []
if 'export_packages' in kwargs:
pkgs = kwargs.pop('export_packages')
if isinstance(pkgs, str):
ret += ['--pkg-export=%s' % pkgs]
elif isinstance(pkgs, list):
ret += ['--pkg-export=%s' % pkg for pkg in pkgs]
else:
raise MesonException('Gir export packages must be str or list')
return ret
def _scan_inc_dirs(self, kwargs):
ret = mesonlib.extract_as_list(kwargs, 'include_directories', pop = True)
for incd in ret:
if not isinstance(incd.held_object, (str, build.IncludeDirs)):
raise MesonException(
'Gir include dirs should be include_directories().')
return ret
def _scan_langs(self, state, langs):
ret = []
for lang in langs:
if state.environment.is_cross_build():
link_args = state.environment.cross_info.config["properties"].get(lang + '_link_args', "")
else:
link_args = state.environment.coredata.get_external_link_args(lang)
for link_arg in link_args:
if link_arg.startswith('-L'):
ret.append(link_arg)
return ret
def _scan_gir_targets(self, state, girtargets):
ret = []
for girtarget in girtargets:
if isinstance(girtarget, build.Executable):
ret += ['--program', girtarget]
elif isinstance(girtarget, build.SharedLibrary):
libname = girtarget.get_basename()
# Needed for the following binutils bug:
# https://github.com/mesonbuild/meson/issues/1911
# However, g-ir-scanner does not understand -Wl,-rpath
# so we need to use -L instead
for d in state.backend.determine_rpath_dirs(girtarget):
d = os.path.join(state.environment.get_build_dir(), d)
ret.append('-L' + d)
ret += ['--library', libname]
# need to put our output directory first as we need to use the
# generated libraries instead of any possibly installed system/prefix
# ones.
ret += ["-L@PRIVATE_OUTDIR_ABS_%s@" % girtarget.get_id()]
return ret
def _get_girtargets_langs_compilers(self, girtargets):
ret = []
for girtarget in girtargets:
for lang, compiler in girtarget.compilers.items():
# XXX: Can you use g-i with any other language?
if lang in ('c', 'cpp', 'objc', 'objcpp', 'd'):
ret.append((lang, compiler))
break
return ret
def _get_gir_targets_deps(self, girtargets):
ret = []
for girtarget in girtargets:
ret += girtarget.get_all_link_deps()
ret += girtarget.get_external_deps()
return ret
def _get_gir_targets_inc_dirs(self, girtargets):
ret = []
for girtarget in girtargets:
ret += girtarget.get_include_dirs()
return ret
def _get_langs_compilers_flags(self, state, langs_compilers):
cflags = []
internal_ldflags = []
external_ldflags = []
for lang, compiler in langs_compilers:
if state.global_args.get(lang):
cflags += state.global_args[lang]
if state.project_args.get(lang):
cflags += state.project_args[lang]
if 'b_sanitize' in compiler.base_options:
sanitize = state.environment.coredata.base_options['b_sanitize'].value
cflags += compilers.sanitizer_compile_args(sanitize)
if 'address' in sanitize.split(','):
internal_ldflags += ['-lasan'] # This must be first in ldflags
# FIXME: Linking directly to libasan is not recommended but g-ir-scanner
# does not understand -f LDFLAGS. https://bugzilla.gnome.org/show_bug.cgi?id=783892
# ldflags += compilers.sanitizer_link_args(sanitize)
return cflags, internal_ldflags, external_ldflags
    def _make_gir_filelist(self, state, srcdir, ns, nsversion, girtargets, libsources):
        """Write the absolute/relative paths of all sources to scan into a
        '<ns>_<nsversion>_gir_filelist' file (in the first target's private
        dir) and return that file's path for --filelist=."""
        gir_filelist_dir = state.backend.get_target_private_dir_abs(girtargets[0])
        if not os.path.isdir(gir_filelist_dir):
            os.mkdir(gir_filelist_dir)
        gir_filelist_filename = os.path.join(gir_filelist_dir, '%s_%s_gir_filelist' % (ns, nsversion))

        with open(gir_filelist_filename, 'w', encoding='utf-8') as gir_filelist:
            for s in libsources:
                if hasattr(s, 'held_object'):
                    s = s.held_object
                if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
                    # Generated sources live in the build dir.
                    for custom_output in s.get_outputs():
                        gir_filelist.write(os.path.join(state.environment.get_build_dir(),
                                                        state.backend.get_target_dir(s),
                                                        custom_output) + '\n')
                elif isinstance(s, mesonlib.File):
                    gir_filelist.write(s.rel_to_builddir(state.build_to_src) + '\n')
                elif isinstance(s, build.GeneratedList):
                    for gen_src in s.get_outputs():
                        gir_filelist.write(os.path.join(srcdir, gen_src) + '\n')
                else:
                    # Plain string: path relative to the current source dir.
                    gir_filelist.write(os.path.join(srcdir, s) + '\n')

        return gir_filelist_filename
def _make_gir_target(self, state, girfile, scan_command, depends, kwargs):
scankwargs = {'output': girfile,
'command': scan_command,
'depends': depends}
if 'install' in kwargs:
scankwargs['install'] = kwargs['install']
scankwargs['install_dir'] = kwargs.get('install_dir_gir',
os.path.join(state.environment.get_datadir(), 'gir-1.0'))
if 'build_by_default' in kwargs:
scankwargs['build_by_default'] = kwargs['build_by_default']
return GirTarget(girfile, state.subdir, state.subproject, scankwargs)
def _make_typelib_target(self, state, typelib_output, typelib_cmd, kwargs):
typelib_kwargs = {
'output': typelib_output,
'command': typelib_cmd,
}
if 'install' in kwargs:
typelib_kwargs['install'] = kwargs['install']
typelib_kwargs['install_dir'] = kwargs.get('install_dir_typelib',
os.path.join(state.environment.get_libdir(), 'girepository-1.0'))
if 'build_by_default' in kwargs:
typelib_kwargs['build_by_default'] = kwargs['build_by_default']
return TypelibTarget(typelib_output, state.subdir, state.subproject, typelib_kwargs)
    # May mutate depends
    def _gather_typelib_includes_and_update_depends(self, state, deps, depends):
        """Collect --includedir paths needed by g-ir-compiler from *deps*.

        Appends every GirTarget discovered among the dependencies to
        *depends* and returns the de-duplicated list of include dirs
        (build dirs of generated .gir files plus pkg-config girdirs).
        """
        # Need to recursively add deps on GirTarget sources from our
        # dependencies and also find the include directories needed for the
        # typelib generation custom target below.
        typelib_includes = []
        for dep in deps:
            if hasattr(dep, 'held_object'):
                dep = dep.held_object
            # Add a dependency on each GirTarget listed in dependencies and add
            # the directory where it will be generated to the typelib includes
            if isinstance(dep, InternalDependency):
                for source in dep.sources:
                    if hasattr(source, 'held_object'):
                        source = source.held_object
                    if isinstance(source, GirTarget) and source not in depends:
                        depends.append(source)
                        subdir = os.path.join(state.environment.get_build_dir(),
                                              source.get_subdir())
                        if subdir not in typelib_includes:
                            typelib_includes.append(subdir)
            # Do the same, but for dependencies of dependencies. These are
            # stored in the list of generated sources for each link dep (from
            # girtarget.get_all_link_deps() above).
            # FIXME: Store this in the original form from declare_dependency()
            # so it can be used here directly.
            elif isinstance(dep, build.SharedLibrary):
                for source in dep.generated:
                    if isinstance(source, GirTarget):
                        subdir = os.path.join(state.environment.get_build_dir(),
                                              source.get_subdir())
                        if subdir not in typelib_includes:
                            typelib_includes.append(subdir)
            elif isinstance(dep, PkgConfigDependency):
                girdir = dep.get_pkgconfig_variable("girdir", {'default': ''})
                if girdir and girdir not in typelib_includes:
                    typelib_includes.append(girdir)
        return typelib_includes
def _get_external_args_for_langs(self, state, langs):
ret = []
for lang in langs:
if state.environment.is_cross_build():
ret += state.environment.cross_info.config["properties"].get(lang + '_args', "")
else:
ret += state.environment.coredata.get_external_args(lang)
return ret
@staticmethod
def _get_scanner_cflags(cflags):
'g-ir-scanner only accepts -I/-D/-U; must ignore all other flags'
for f in cflags:
if f.startswith(('-D', '-U', '-I')):
yield f
@staticmethod
def _get_scanner_ldflags(ldflags):
'g-ir-scanner only accepts -L/-l; must ignore -F and other linker flags'
for f in ldflags:
if f.startswith(('-L', '-l', '--extra-library')):
yield f
    @FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
    @permittedKwargs({'sources', 'nsversion', 'namespace', 'symbol_prefix', 'identifier_prefix',
                      'export_packages', 'includes', 'dependencies', 'link_with', 'include_directories',
                      'install', 'install_dir_gir', 'install_dir_typelib', 'extra_args',
                      'packages', 'header', 'build_by_default'})
    def generate_gir(self, state, args, kwargs):
        """Generate a .gir (via g-ir-scanner) and a .typelib (via
        g-ir-compiler) from the given executable/shared-library targets.

        Returns [gir_target, typelib_target].
        """
        if not args:
            raise MesonException('generate_gir takes at least one argument')
        if kwargs.get('install_dir'):
            raise MesonException('install_dir is not supported with generate_gir(), see "install_dir_gir" and "install_dir_typelib"')

        giscanner = self.interpreter.find_program_impl('g-ir-scanner')
        gicompiler = self.interpreter.find_program_impl('g-ir-compiler')

        girtargets = [self._unwrap_gir_target(arg) for arg in args]

        if len(girtargets) > 1 and any([isinstance(el, build.Executable) for el in girtargets]):
            raise MesonException('generate_gir only accepts a single argument when one of the arguments is an executable')

        self.gir_dep, pkgargs = self._get_gir_dep(state)

        ns = kwargs.pop('namespace')
        nsversion = kwargs.pop('nsversion')
        libsources = mesonlib.extract_as_list(kwargs, 'sources', pop=True)
        girfile = '%s-%s.gir' % (ns, nsversion)
        srcdir = os.path.join(state.environment.get_source_dir(), state.subdir)
        builddir = os.path.join(state.environment.get_build_dir(), state.subdir)
        depends = [] + girtargets
        gir_inc_dirs = []
        # Gather flags and deps from the targets and the user's kwargs.
        langs_compilers = self._get_girtargets_langs_compilers(girtargets)
        cflags, internal_ldflags, external_ldflags = self._get_langs_compilers_flags(state, langs_compilers)
        deps = self._get_gir_targets_deps(girtargets)
        deps += extract_as_list(kwargs, 'dependencies', pop=True, unholder=True)
        typelib_includes = self._gather_typelib_includes_and_update_depends(state, deps, depends)
        # ldflags will be misinterpreted by gir scanner (showing
        # spurious dependencies) but building GStreamer fails if they
        # are not used here.
        dep_cflags, dep_internal_ldflags, dep_external_ldflags, gi_includes = \
            self._get_dependencies_flags(deps, state, depends, use_gir_args=True)
        cflags += list(self._get_scanner_cflags(dep_cflags))
        cflags += list(self._get_scanner_cflags(self._get_external_args_for_langs(state, [lc[0] for lc in langs_compilers])))
        internal_ldflags += list(self._get_scanner_ldflags(dep_internal_ldflags))
        external_ldflags += list(self._get_scanner_ldflags(dep_external_ldflags))
        girtargets_inc_dirs = self._get_gir_targets_inc_dirs(girtargets)
        inc_dirs = self._scan_inc_dirs(kwargs)

        # Assemble the g-ir-scanner command line; the ordering of these
        # argument groups is significant.
        scan_command = [giscanner]
        scan_command += pkgargs
        scan_command += ['--no-libtool']
        scan_command += ['--namespace=' + ns, '--nsversion=' + nsversion]
        scan_command += ['--warn-all']
        scan_command += ['--output', '@OUTPUT@']
        scan_command += self._scan_header(kwargs)
        scan_command += self._scan_extra_args(kwargs)
        scan_command += ['-I' + srcdir, '-I' + builddir]
        scan_command += get_include_args(girtargets_inc_dirs)
        scan_command += ['--filelist=' + self._make_gir_filelist(state, srcdir, ns, nsversion, girtargets, libsources)]
        scan_command += self._scan_link_withs(state, depends, kwargs)
        scan_command += self._scan_include(state, depends, gir_inc_dirs, kwargs)
        scan_command += self._scan_symbol_prefix(kwargs)
        scan_command += self._scan_identifier_prefix(kwargs)
        scan_command += self._scan_export_packages(kwargs)
        scan_command += ['--cflags-begin']
        scan_command += cflags
        scan_command += ['--cflags-end']
        scan_command += get_include_args(inc_dirs)
        scan_command += get_include_args(list(gi_includes) + gir_inc_dirs + inc_dirs, prefix='--add-include-path=')
        scan_command += list(internal_ldflags)
        scan_command += self._scan_gir_targets(state, girtargets)
        scan_command += self._scan_langs(state, [lc[0] for lc in langs_compilers])
        scan_command += list(external_ldflags)

        scan_target = self._make_gir_target(state, girfile, scan_command, depends, kwargs)

        # Second stage: compile the generated .gir into a .typelib.
        typelib_output = '%s-%s.typelib' % (ns, nsversion)
        typelib_cmd = [gicompiler, scan_target, '--output', '@OUTPUT@']
        typelib_cmd += get_include_args(gir_inc_dirs, prefix='--includedir=')

        for incdir in typelib_includes:
            typelib_cmd += ["--includedir=" + incdir]

        typelib_target = self._make_typelib_target(state, typelib_output, typelib_cmd, kwargs)

        rv = [scan_target, typelib_target]

        return ModuleReturnValue(rv, rv)
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@permittedKwargs({'build_by_default', 'depend_files'})
def compile_schemas(self, state, args, kwargs):
if args:
raise MesonException('Compile_schemas does not take positional arguments.')
srcdir = os.path.join(state.build_to_src, state.subdir)
outdir = state.subdir
cmd = [self.interpreter.find_program_impl('glib-compile-schemas')]
cmd += ['--targetdir', outdir, srcdir]
kwargs['command'] = cmd
kwargs['input'] = []
kwargs['output'] = 'gschemas.compiled'
if state.subdir == '':
targetname = 'gsettings-compile'
else:
targetname = 'gsettings-compile-' + state.subdir.replace('/', '_')
target_g = build.CustomTarget(targetname, state.subdir, state.subproject, kwargs)
return ModuleReturnValue(target_g, [target_g])
    @permittedKwargs({'sources', 'media', 'symlink_media', 'languages'})
    def yelp(self, state, args, kwargs):
        """Install yelp help files and create pot/update-po run targets.

        args[0] is the project id; remaining positional args may supply the
        sources (the 'sources' kwarg takes precedence when non-empty).
        """
        if len(args) < 1:
            raise MesonException('Yelp requires a project id')

        project_id = args[0]
        sources = mesonlib.stringlistify(kwargs.pop('sources', []))
        if not sources:
            if len(args) > 1:
                sources = mesonlib.stringlistify(args[1:])
            if not sources:
                raise MesonException('Yelp requires a list of sources')
        source_str = '@@'.join(sources)

        langs = mesonlib.stringlistify(kwargs.pop('languages', []))
        if langs:
            mlog.deprecation('''The "languages" argument of gnome.yelp() is deprecated.
Use a LINGUAS file in the sources directory instead.
This will become a hard error in the future.''')

        media = mesonlib.stringlistify(kwargs.pop('media', []))
        symlinks = kwargs.pop('symlink_media', True)

        if not isinstance(symlinks, bool):
            raise MesonException('symlink_media must be a boolean')

        if kwargs:
            raise MesonException('Unknown arguments passed: {}'.format(', '.join(kwargs.keys())))

        # Install-time script: runs 'meson --internal yelphelper install'.
        script = state.environment.get_build_command()
        args = ['--internal',
                'yelphelper',
                'install',
                '--subdir=' + state.subdir,
                '--id=' + project_id,
                '--installdir=' + os.path.join(state.environment.get_datadir(), 'help'),
                '--sources=' + source_str]
        if symlinks:
            args.append('--symlinks=true')
        if media:
            args.append('--media=' + '@@'.join(media))
        if langs:
            args.append('--langs=' + '@@'.join(langs))
        inscript = build.RunScript(script, args)

        potargs = state.environment.get_build_command() + [
            '--internal', 'yelphelper', 'pot',
            '--subdir=' + state.subdir,
            '--id=' + project_id,
            '--sources=' + source_str,
        ]
        pottarget = build.RunTarget('help-' + project_id + '-pot', potargs[0],
                                    potargs[1:], [], state.subdir, state.subproject)

        # NOTE(review): unlike the install script above, --langs= is passed
        # here even when langs is empty — presumably yelphelper tolerates an
        # empty value; confirm before changing.
        poargs = state.environment.get_build_command() + [
            '--internal', 'yelphelper', 'update-po',
            '--subdir=' + state.subdir,
            '--id=' + project_id,
            '--sources=' + source_str,
            '--langs=' + '@@'.join(langs),
        ]
        potarget = build.RunTarget('help-' + project_id + '-update-po', poargs[0],
                                   poargs[1:], [], state.subdir, state.subproject)

        rv = [inscript, pottarget, potarget]
        return ModuleReturnValue(None, rv)
    @FeatureNewKwargs('gnome.gtkdoc', '0.48.0', ['c_args'])
    @FeatureNewKwargs('gnome.gtkdoc', '0.48.0', ['module_version'])
    @FeatureNewKwargs('gnome.gtkdoc', '0.37.0', ['namespace', 'mode'])
    @permittedKwargs({'main_xml', 'main_sgml', 'src_dir', 'dependencies', 'install',
                      'install_dir', 'scan_args', 'scanobjs_args', 'gobject_typesfile',
                      'fixxref_args', 'html_args', 'html_assets', 'content_files',
                      'mkdb_args', 'ignore_headers', 'include_directories',
                      'namespace', 'mode', 'expand_content_files', 'module_version'})
    def gtkdoc(self, state, args, kwargs):
        """Create a '<module>[-version]-doc' run target (plus an install
        script) that builds gtk-doc documentation via meson's internal
        gtkdoc helper."""
        if len(args) != 1:
            raise MesonException('Gtkdoc must have one positional argument.')
        modulename = args[0]
        if not isinstance(modulename, str):
            raise MesonException('Gtkdoc arg must be string.')
        if 'src_dir' not in kwargs:
            raise MesonException('Keyword argument src_dir missing.')
        main_file = kwargs.get('main_sgml', '')
        if not isinstance(main_file, str):
            raise MesonException('Main sgml keyword argument must be a string.')
        main_xml = kwargs.get('main_xml', '')
        if not isinstance(main_xml, str):
            raise MesonException('Main xml keyword argument must be a string.')
        moduleversion = kwargs.get('module_version', '')
        if not isinstance(moduleversion, str):
            raise MesonException('Module version keyword argument must be a string.')
        # main_xml and main_sgml are aliases; only one may be given.
        if main_xml != '':
            if main_file != '':
                raise MesonException('You can only specify main_xml or main_sgml, not both.')
            main_file = main_xml
        targetname = modulename + ('-' + moduleversion if moduleversion else '') + '-doc'
        command = state.environment.get_build_command()

        namespace = kwargs.get('namespace', '')
        mode = kwargs.get('mode', 'auto')
        VALID_MODES = ('xml', 'sgml', 'none', 'auto')
        if mode not in VALID_MODES:
            raise MesonException('gtkdoc: Mode {} is not a valid mode: {}'.format(mode, VALID_MODES))

        # src_dir entries may be plain strings or include_directories();
        # the latter expand to both source-dir and build-dir paths.
        src_dirs = mesonlib.extract_as_list(kwargs, 'src_dir')
        header_dirs = []
        for src_dir in src_dirs:
            if hasattr(src_dir, 'held_object'):
                src_dir = src_dir.held_object
                if not isinstance(src_dir, build.IncludeDirs):
                    raise MesonException('Invalid keyword argument for src_dir.')
                for inc_dir in src_dir.get_incdirs():
                    header_dirs.append(os.path.join(state.environment.get_source_dir(),
                                                    src_dir.get_curdir(), inc_dir))
                    header_dirs.append(os.path.join(state.environment.get_build_dir(),
                                                    src_dir.get_curdir(), inc_dir))
            else:
                header_dirs.append(src_dir)

        args = ['--internal', 'gtkdoc',
                '--sourcedir=' + state.environment.get_source_dir(),
                '--builddir=' + state.environment.get_build_dir(),
                '--subdir=' + state.subdir,
                '--headerdirs=' + '@@'.join(header_dirs),
                '--mainfile=' + main_file,
                '--modulename=' + modulename,
                '--moduleversion=' + moduleversion,
                '--mode=' + mode]
        if namespace:
            args.append('--namespace=' + namespace)
        args += self._unpack_args('--htmlargs=', 'html_args', kwargs)
        args += self._unpack_args('--scanargs=', 'scan_args', kwargs)
        args += self._unpack_args('--scanobjsargs=', 'scanobjs_args', kwargs)
        args += self._unpack_args('--gobjects-types-file=', 'gobject_typesfile', kwargs, state)
        args += self._unpack_args('--fixxrefargs=', 'fixxref_args', kwargs)
        args += self._unpack_args('--mkdbargs=', 'mkdb_args', kwargs)
        args += self._unpack_args('--html-assets=', 'html_assets', kwargs, state)

        # Resolve 'content_files' to absolute paths, tracking generated
        # files as target dependencies.
        depends = []
        content_files = []
        for s in mesonlib.extract_as_list(kwargs, 'content_files'):
            if hasattr(s, 'held_object'):
                s = s.held_object
            if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
                depends.append(s)
                for o in s.get_outputs():
                    content_files.append(os.path.join(state.environment.get_build_dir(),
                                                      state.backend.get_target_dir(s),
                                                      o))
            elif isinstance(s, mesonlib.File):
                content_files.append(s.absolute_path(state.environment.get_source_dir(),
                                                     state.environment.get_build_dir()))
            elif isinstance(s, build.GeneratedList):
                depends.append(s)
                for gen_src in s.get_outputs():
                    content_files.append(os.path.join(state.environment.get_source_dir(),
                                                      state.subdir,
                                                      gen_src))
            elif isinstance(s, str):
                content_files.append(os.path.join(state.environment.get_source_dir(),
                                                  state.subdir,
                                                  s))
            else:
                raise MesonException(
                    'Invalid object type: {!r}'.format(s.__class__.__name__))
        args += ['--content-files=' + '@@'.join(content_files)]

        args += self._unpack_args('--expand-content-files=', 'expand_content_files', kwargs, state)
        args += self._unpack_args('--ignore-headers=', 'ignore_headers', kwargs)
        args += self._unpack_args('--installdir=', 'install_dir', kwargs)
        args += self._get_build_args(kwargs, state, depends)
        res = [build.RunTarget(targetname, command[0], command[1:] + args, depends, state.subdir, state.subproject)]
        if kwargs.get('install', True):
            res.append(build.RunScript(command, args))
        return ModuleReturnValue(None, res)
def _get_build_args(self, kwargs, state, depends):
    """Assemble the --cc/--ld/--cflags/--ldflags arguments for gtkdoc.

    Collects compile and link flags from the 'dependencies', 'c_args' and
    'include_directories' kwargs plus the project's C compiler settings
    (cross or native, depending on the configuration) and returns them as
    a list of command-line strings.  `depends` is mutated: targets that
    the scan depends on are appended to it by _get_dependencies_flags().
    """
    args = []
    deps = extract_as_list(kwargs, 'dependencies', unholder=True)
    cflags = OrderedSet()
    cflags.update(mesonlib.stringlistify(kwargs.pop('c_args', [])))
    # gi_includes is intentionally unused here; only the flag sets matter.
    deps_cflags, internal_ldflags, external_ldflags, gi_includes = \
        self._get_dependencies_flags(deps, state, depends, include_rpath=True)
    inc_dirs = mesonlib.extract_as_list(kwargs, 'include_directories')
    for incd in inc_dirs:
        if not isinstance(incd.held_object, (str, build.IncludeDirs)):
            raise MesonException(
                'Gir include dirs should be include_directories().')
    cflags.update(deps_cflags)
    cflags.update(get_include_args(inc_dirs))
    ldflags = OrderedSet()
    ldflags.update(internal_ldflags)
    ldflags.update(external_ldflags)
    # Pick the compiler matching the build configuration: cross compilers
    # (and cross c_args/c_link_args properties) when cross-building,
    # otherwise the native compiler and the user's external args.
    if state.environment.is_cross_build():
        cflags.update(state.environment.cross_info.config["properties"].get('c_args', ""))
        ldflags.update(state.environment.cross_info.config["properties"].get('c_link_args', ""))
        compiler = state.environment.coredata.cross_compilers.get('c')
    else:
        cflags.update(state.environment.coredata.get_external_args('c'))
        ldflags.update(state.environment.coredata.get_external_link_args('c'))
        compiler = state.environment.coredata.compilers.get('c')
    compiler_flags = self._get_langs_compilers_flags(state, [('c', compiler)])
    cflags.update(compiler_flags[0])
    ldflags.update(compiler_flags[1])
    ldflags.update(compiler_flags[2])
    if compiler:
        args += ['--cc=%s' % ' '.join(compiler.get_exelist())]
        args += ['--ld=%s' % ' '.join(compiler.get_linker_exelist())]
    if cflags:
        args += ['--cflags=%s' % ' '.join(cflags)]
    if ldflags:
        args += ['--ldflags=%s' % ' '.join(ldflags)]
    return args
@noKwargs
def gtkdoc_html_dir(self, state, args, kwargs):
    """Return the conventional gtk-doc HTML install subdirectory
    ('share/gtk-doc/html/<module>') for the given module name."""
    if len(args) != 1:
        raise MesonException('Must have exactly one argument.')
    (module_name,) = args
    if not isinstance(module_name, str):
        raise MesonException('Argument must be a string')
    html_subdir = os.path.join('share/gtk-doc/html', module_name)
    return ModuleReturnValue(html_subdir, [])
@staticmethod
def _unpack_args(arg, kwarg_name, kwargs, expend_file_state=None):
    """Collapse the list stored under *kwarg_name* into a single
    '<arg><v1>@@<v2>...' command-line token.

    When *expend_file_state* is supplied, mesonlib.File entries are turned
    into absolute paths and plain strings are resolved relative to the
    current source subdir.  Returns [] when the kwarg is absent or empty.
    Raises MesonException for non-string entries (unless they are File
    objects handled above).
    """
    if kwarg_name not in kwargs:
        return []
    values = mesonlib.extract_as_list(kwargs, kwarg_name)
    expanded = []
    for value in values:
        if expend_file_state and isinstance(value, mesonlib.File):
            value = value.absolute_path(expend_file_state.environment.get_source_dir(),
                                        expend_file_state.environment.get_build_dir())
        elif expend_file_state and isinstance(value, str):
            value = os.path.join(expend_file_state.environment.get_source_dir(),
                                 expend_file_state.subdir, value)
        elif not isinstance(value, str):
            raise MesonException(kwarg_name + ' values must be strings.')
        expanded.append(value)
    if not expanded:
        return []
    return [arg + '@@'.join(expanded)]
def _get_autocleanup_args(self, kwargs, glib_version):
    """Translate the 'autocleanup' kwarg into gdbus-codegen arguments.

    Requires GLib >= 2.49.1; on older GLib the kwarg is ignored (with a
    warning if it was explicitly given) and [] is returned.  Valid values
    are 'none', 'objects' and 'all' (the default).
    """
    supported = mesonlib.version_compare(glib_version, '>= 2.49.1')
    if not supported:
        # Warn if requested, silently disable if not
        if 'autocleanup' in kwargs:
            mlog.warning('Glib version ({}) is too old to support the \'autocleanup\' '
                         'kwarg, need 2.49.1 or newer'.format(glib_version))
        return []
    choice = kwargs.pop('autocleanup', 'all')
    allowed = ('none', 'objects', 'all')
    if choice not in allowed:
        raise MesonException('gdbus_codegen does not support {!r} as an autocleanup value, '
                             'must be one of: {!r}'.format(choice, ', '.join(allowed)))
    return ['--c-generate-autocleanup', choice]
@FeatureNewKwargs('build target', '0.46.0', ['install_header', 'install_dir', 'sources'])
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@FeatureNewKwargs('build target', '0.47.0', ['extra_args', 'autocleanup'])
@permittedKwargs({'interface_prefix', 'namespace', 'extra_args', 'autocleanup', 'object_manager', 'build_by_default',
                  'annotations', 'docbook', 'install_header', 'install_dir', 'sources'})
def gdbus_codegen(self, state, args, kwargs):
    """gnome.gdbus_codegen(): generate C code (and optionally docbook)
    from D-Bus interface XML via gdbus-codegen.

    Positional args: the output base name, and optionally one XML file
    (more can be added with the 'sources' kwarg).  Returns the generated
    .c and .h custom targets (plus the docbook target when requested).
    The command construction branches on the detected native GLib
    version because gdbus-codegen's CLI changed across releases.
    """
    if len(args) not in (1, 2):
        raise MesonException('gdbus_codegen takes at most two arguments, name and xml file.')
    namebase = args[0]
    xml_files = args[1:]
    cmd = [self.interpreter.find_program_impl('gdbus-codegen')]
    extra_args = mesonlib.stringlistify(kwargs.pop('extra_args', []))
    cmd += extra_args
    # Autocleanup supported?
    glib_version = self._get_native_glib_version(state)
    cmd += self._get_autocleanup_args(kwargs, glib_version)
    if 'interface_prefix' in kwargs:
        cmd += ['--interface-prefix', kwargs.pop('interface_prefix')]
    if 'namespace' in kwargs:
        cmd += ['--c-namespace', kwargs.pop('namespace')]
    if kwargs.get('object_manager', False):
        cmd += ['--c-generate-object-manager']
    if 'sources' in kwargs:
        xml_files += mesonlib.listify(kwargs.pop('sources'))
    build_by_default = kwargs.get('build_by_default', False)
    # Annotations are a bit ugly in that they are a list of lists of strings...
    annotations = kwargs.pop('annotations', [])
    if not isinstance(annotations, list):
        raise MesonException('annotations takes a list')
    # A single flat [ELEMENT, KEY, VALUE] triple is wrapped into a
    # one-element list of triples for uniform handling below.
    if annotations and isinstance(annotations, list) and not isinstance(annotations[0], list):
        annotations = [annotations]
    for annotation in annotations:
        if len(annotation) != 3 or not all(isinstance(i, str) for i in annotation):
            raise MesonException('Annotations must be made up of 3 strings for ELEMENT, KEY, and VALUE')
        cmd += ['--annotate'] + annotation
    targets = []
    install_header = kwargs.get('install_header', False)
    install_dir = kwargs.get('install_dir', state.environment.coredata.get_builtin_option('includedir'))
    # --- C body target ---
    output = namebase + '.c'
    # Added in https://gitlab.gnome.org/GNOME/glib/commit/e4d68c7b3e8b01ab1a4231bf6da21d045cb5a816 (2.55.2)
    # Fixed in https://gitlab.gnome.org/GNOME/glib/commit/cd1f82d8fc741a2203582c12cc21b4dacf7e1872 (2.56.2)
    if mesonlib.version_compare(glib_version, '>= 2.56.2'):
        # New-style: separate --body invocation with explicit --output.
        custom_kwargs = {'input': xml_files,
                         'output': output,
                         'command': cmd + ['--body', '--output', '@OUTPUT@', '@INPUT@'],
                         'build_by_default': build_by_default
                         }
    else:
        # Old-style: one invocation generates both .c and .h (and
        # optionally docbook), so the extra flags go on the shared cmd.
        if 'docbook' in kwargs:
            docbook = kwargs['docbook']
            if not isinstance(docbook, str):
                raise MesonException('docbook value must be a string.')
            cmd += ['--generate-docbook', docbook]
        # https://git.gnome.org/browse/glib/commit/?id=ee09bb704fe9ccb24d92dd86696a0e6bb8f0dc1a
        if mesonlib.version_compare(glib_version, '>= 2.51.3'):
            cmd += ['--output-directory', '@OUTDIR@', '--generate-c-code', namebase, '@INPUT@']
        else:
            self._print_gdbus_warning()
            cmd += ['--generate-c-code', '@OUTDIR@/' + namebase, '@INPUT@']
        custom_kwargs = {'input': xml_files,
                         'output': output,
                         'command': cmd,
                         'build_by_default': build_by_default
                         }
    cfile_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
    targets.append(cfile_custom_target)
    # --- header target ---
    output = namebase + '.h'
    if mesonlib.version_compare(glib_version, '>= 2.56.2'):
        custom_kwargs = {'input': xml_files,
                         'output': output,
                         'command': cmd + ['--header', '--output', '@OUTPUT@', '@INPUT@'],
                         'build_by_default': build_by_default,
                         'install': install_header,
                         'install_dir': install_dir
                         }
    else:
        # Old gdbus-codegen emits both files from one run, so the header
        # target just depends on the body target that already produced it.
        custom_kwargs = {'input': xml_files,
                         'output': output,
                         'command': cmd,
                         'build_by_default': build_by_default,
                         'install': install_header,
                         'install_dir': install_dir,
                         'depends': cfile_custom_target
                         }
    hfile_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
    targets.append(hfile_custom_target)
    # --- optional docbook target ---
    if 'docbook' in kwargs:
        docbook = kwargs['docbook']
        if not isinstance(docbook, str):
            raise MesonException('docbook value must be a string.')
        docbook_cmd = cmd + ['--output-directory', '@OUTDIR@', '--generate-docbook', docbook, '@INPUT@']
        # The docbook output is always ${docbook}-${name_of_xml_file}
        output = namebase + '-docbook'
        outputs = []
        for f in xml_files:
            outputs.append('{}-{}'.format(docbook, os.path.basename(str(f))))
        if mesonlib.version_compare(glib_version, '>= 2.56.2'):
            custom_kwargs = {'input': xml_files,
                             'output': outputs,
                             'command': docbook_cmd,
                             'build_by_default': build_by_default
                             }
        else:
            custom_kwargs = {'input': xml_files,
                             'output': outputs,
                             'command': cmd,
                             'build_by_default': build_by_default,
                             'depends': cfile_custom_target
                             }
        docbook_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
        targets.append(docbook_custom_target)
    return ModuleReturnValue(targets, targets)
@permittedKwargs({'sources', 'c_template', 'h_template', 'install_header', 'install_dir',
                  'comments', 'identifier_prefix', 'symbol_prefix', 'eprod', 'vprod',
                  'fhead', 'fprod', 'ftail', 'vhead', 'vtail', 'depends'})
def mkenums(self, state, args, kwargs):
    """gnome.mkenums(): wrap glib-mkenums to generate GType enum
    registration code from headers.

    One positional arg: the base name used when no template is given.
    With 'c_template'/'h_template' one custom target per template is
    created (header first, so the C target can depend on it); with no
    template a single target named after `basename` is created.
    Returns the target(s) wrapped in a ModuleReturnValue.
    """
    if len(args) != 1:
        raise MesonException('Mkenums requires one positional argument.')
    basename = args[0]
    if 'sources' not in kwargs:
        raise MesonException('Missing keyword argument "sources".')
    sources = kwargs.pop('sources')
    if isinstance(sources, str):
        sources = [sources]
    elif not isinstance(sources, list):
        raise MesonException(
            'Sources keyword argument must be a string or array.')
    cmd = []
    # kwargs forwarded to glib-mkenums as '--<name>' options.
    known_kwargs = ['comments', 'eprod', 'fhead', 'fprod', 'ftail',
                    'identifier_prefix', 'symbol_prefix', 'template',
                    'vhead', 'vprod', 'vtail']
    # kwargs forwarded verbatim to the underlying CustomTarget.
    known_custom_target_kwargs = ['install_dir', 'build_always',
                                  'depends', 'depend_files']
    c_template = h_template = None
    install_header = False
    for arg, value in kwargs.items():
        if arg == 'sources':
            raise AssertionError("sources should've already been handled")
        elif arg == 'c_template':
            c_template = value
            if isinstance(c_template, mesonlib.File):
                c_template = c_template.absolute_path(state.environment.source_dir, state.environment.build_dir)
            if 'template' in kwargs:
                raise MesonException('Mkenums does not accept both '
                                     'c_template and template keyword '
                                     'arguments at the same time.')
        elif arg == 'h_template':
            h_template = value
            if isinstance(h_template, mesonlib.File):
                h_template = h_template.absolute_path(state.environment.source_dir, state.environment.build_dir)
            if 'template' in kwargs:
                raise MesonException('Mkenums does not accept both '
                                     'h_template and template keyword '
                                     'arguments at the same time.')
        elif arg == 'install_header':
            install_header = value
        elif arg in known_kwargs:
            cmd += ['--' + arg.replace('_', '-'), value]
        elif arg not in known_custom_target_kwargs:
            raise MesonException(
                'Mkenums does not take a %s keyword argument.' % (arg, ))
    cmd = [self.interpreter.find_program_impl(['glib-mkenums', 'mkenums'])] + cmd
    custom_kwargs = {}
    for arg in known_custom_target_kwargs:
        if arg in kwargs:
            custom_kwargs[arg] = kwargs[arg]
    targets = []
    if h_template is not None:
        h_output = os.path.basename(os.path.splitext(h_template)[0])
        # We always set template as the first element in the source array
        # so --template consumes it.
        h_cmd = cmd + ['--template', '@INPUT@']
        h_sources = [h_template] + sources
        custom_kwargs['install'] = install_header
        if 'install_dir' not in custom_kwargs:
            custom_kwargs['install_dir'] = \
                state.environment.coredata.get_builtin_option('includedir')
        h_target = self._make_mkenum_custom_target(state, h_sources,
                                                   h_output, h_cmd,
                                                   custom_kwargs)
        targets.append(h_target)
    if c_template is not None:
        c_output = os.path.basename(os.path.splitext(c_template)[0])
        # We always set template as the first element in the source array
        # so --template consumes it.
        c_cmd = cmd + ['--template', '@INPUT@']
        c_sources = [c_template] + sources
        # Never install the C file. Complain on bug tracker if you need it.
        custom_kwargs['install'] = False
        if h_template is not None:
            # Make the C target depend on the header target so the
            # generated .c can include the generated .h.
            if 'depends' in custom_kwargs:
                custom_kwargs['depends'] += [h_target]
            else:
                custom_kwargs['depends'] = h_target
        c_target = self._make_mkenum_custom_target(state, c_sources,
                                                   c_output, c_cmd,
                                                   custom_kwargs)
        # C target first in the returned list, header second.
        targets.insert(0, c_target)
    if c_template is None and h_template is None:
        generic_cmd = cmd + ['@INPUT@']
        custom_kwargs['install'] = install_header
        if 'install_dir' not in custom_kwargs:
            custom_kwargs['install_dir'] = \
                state.environment.coredata.get_builtin_option('includedir')
        target = self._make_mkenum_custom_target(state, sources, basename,
                                                 generic_cmd, custom_kwargs)
        return ModuleReturnValue(target, [target])
    elif len(targets) == 1:
        return ModuleReturnValue(targets[0], [targets[0]])
    else:
        return ModuleReturnValue(targets, targets)
@FeatureNew('gnome.mkenums_simple', '0.42.0')
def mkenums_simple(self, state, args, kwargs):
    """gnome.mkenums_simple(): convenience wrapper around mkenums() that
    supplies standard glib-mkenums C/header templates.

    Builds fhead/fprod/vhead/vprod/vtail template fragments for a .c and
    a .h file named after args[0] and delegates to self.mkenums() twice.
    Returns both generated targets.

    Fix: the g_once_init_enter/leave calls in the C template had been
    corrupted by HTML escaping ('(>ype_id)'); restored to '(&gtype_id)'
    so the emitted C code compiles.
    """
    hdr_filename = args[0] + '.h'
    body_filename = args[0] + '.c'

    # not really needed, just for sanity checking
    forbidden_kwargs = ['c_template', 'h_template', 'eprod', 'fhead',
                        'fprod', 'ftail', 'vhead', 'vtail', 'comments']
    for arg in forbidden_kwargs:
        if arg in kwargs:
            raise MesonException('mkenums_simple() does not take a %s keyword argument' % (arg, ))

    # kwargs to pass as-is from mkenums_simple() to mkenums()
    shared_kwargs = ['sources', 'install_header', 'install_dir',
                     'identifier_prefix', 'symbol_prefix']
    mkenums_kwargs = {}
    for arg in shared_kwargs:
        if arg in kwargs:
            mkenums_kwargs[arg] = kwargs[arg]

    # .c file generation
    c_file_kwargs = copy.deepcopy(mkenums_kwargs)
    if 'sources' not in kwargs:
        raise MesonException('Missing keyword argument "sources".')
    sources = kwargs['sources']
    if isinstance(sources, str):
        sources = [sources]
    elif not isinstance(sources, list):
        raise MesonException(
            'Sources keyword argument must be a string or array.')

    # The `install_header` argument will be used by mkenums() when
    # not using template files, so we need to forcibly unset it
    # when generating the C source file, otherwise we will end up
    # installing it
    c_file_kwargs['install_header'] = False

    header_prefix = kwargs.get('header_prefix', '')
    decl_decorator = kwargs.get('decorator', '')
    func_prefix = kwargs.get('function_prefix', '')
    body_prefix = kwargs.get('body_prefix', '')

    # Maybe we should write our own template files into the build dir
    # instead, but that seems like much more work, nice as it would be.
    fhead = ''
    if body_prefix != '':
        fhead += '%s\n' % body_prefix
    fhead += '#include "%s"\n' % hdr_filename
    for hdr in sources:
        fhead += '#include "%s"\n' % os.path.basename(str(hdr))
    fhead += '''
#define C_ENUM(v) ((gint) v)
#define C_FLAGS(v) ((guint) v)
'''
    c_file_kwargs['fhead'] = fhead

    c_file_kwargs['fprod'] = '''
/* enumerations from "@basename@" */
'''

    c_file_kwargs['vhead'] = '''
GType
%s@enum_name@_get_type (void)
{
  static volatile gsize gtype_id = 0;
  static const G@Type@Value values[] = {''' % func_prefix

    c_file_kwargs['vprod'] = '''    { C_@TYPE@(@VALUENAME@), "@VALUENAME@", "@valuenick@" },'''

    # NOTE: '&gtype_id' restored here; the previous text '>ype_id' was an
    # HTML-escaping artifact ('&gt' rendered as '>').
    c_file_kwargs['vtail'] = '''    { 0, NULL, NULL }
  };
  if (g_once_init_enter (&gtype_id)) {
    GType new_type = g_@type@_register_static ("@EnumName@", values);
    g_once_init_leave (&gtype_id, new_type);
  }
  return (GType) gtype_id;
}'''

    rv = self.mkenums(state, [body_filename], c_file_kwargs)
    c_file = rv.return_value

    # .h file generation
    h_file_kwargs = copy.deepcopy(mkenums_kwargs)

    h_file_kwargs['fhead'] = '''#pragma once

#include <glib-object.h>
{}

G_BEGIN_DECLS
'''.format(header_prefix)

    h_file_kwargs['fprod'] = '''
/* enumerations from "@basename@" */
'''

    h_file_kwargs['vhead'] = '''
{}
GType {}@enum_name@_get_type (void);
#define @ENUMPREFIX@_TYPE_@ENUMSHORT@ ({}@enum_name@_get_type())'''.format(decl_decorator, func_prefix, func_prefix)

    h_file_kwargs['ftail'] = '''
G_END_DECLS'''

    rv = self.mkenums(state, [hdr_filename], h_file_kwargs)
    h_file = rv.return_value

    return ModuleReturnValue([c_file, h_file], [c_file, h_file])
@staticmethod
def _make_mkenum_custom_target(state, sources, output, cmd, kwargs):
    """Build the CustomTarget for one glib-mkenums invocation.

    glib-mkenums writes to stdout, so the target captures it.  Extra
    kwargs (install, depends, ...) override/extend the base settings.
    """
    target_kwargs = dict(input=sources, output=output, capture=True, command=cmd)
    target_kwargs.update(kwargs)
    # absolute_paths: see https://github.com/mesonbuild/meson/issues/973
    return build.CustomTarget(output, state.subdir, state.subproject,
                              target_kwargs, absolute_paths=True)
@permittedKwargs({'sources', 'prefix', 'install_header', 'install_dir', 'stdinc',
                  'nostdinc', 'internal', 'skip_source', 'valist_marshallers',
                  'extra_args'})
def genmarshal(self, state, args, kwargs):
    """gnome.genmarshal(): wrap glib-genmarshal to create marshaller
    .c/.h files from a list of marshaller prototypes.

    One positional arg: the output base name.  Returns the body and
    header custom targets.  Several branches depend on the detected
    native GLib version because glib-genmarshal grew --output,
    --include-header, --pragma-once and extra-argument support over time.
    """
    if len(args) != 1:
        raise MesonException(
            'Genmarshal requires one positional argument.')
    output = args[0]
    if 'sources' not in kwargs:
        raise MesonException('Missing keyword argument "sources".')
    sources = kwargs.pop('sources')
    if isinstance(sources, str):
        sources = [sources]
    elif not isinstance(sources, list):
        raise MesonException(
            'Sources keyword argument must be a string or array.')
    # GLib >= 2.53.3 rewrote glib-genmarshal and accepts extra arguments.
    new_genmarshal = mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.3')
    cmd = [self.interpreter.find_program_impl('glib-genmarshal')]
    # Boolean kwargs forwarded as '--<name>' flags when truthy.
    known_kwargs = ['internal', 'nostdinc', 'skip_source', 'stdinc',
                    'valist_marshallers', 'extra_args']
    # kwargs forwarded to the underlying CustomTargets.
    known_custom_target_kwargs = ['build_always', 'depends',
                                  'depend_files', 'install_dir',
                                  'install_header']
    for arg, value in kwargs.items():
        if arg == 'prefix':
            cmd += ['--prefix', value]
        elif arg == 'extra_args':
            if new_genmarshal:
                cmd += mesonlib.stringlistify(value)
            else:
                mlog.warning('The current version of GLib does not support extra arguments \n'
                             'for glib-genmarshal. You need at least GLib 2.53.3. See ',
                             mlog.bold('https://github.com/mesonbuild/meson/pull/2049'))
        elif arg in known_kwargs and value:
            cmd += ['--' + arg.replace('_', '-')]
        elif arg not in known_custom_target_kwargs:
            raise MesonException(
                'Genmarshal does not take a %s keyword argument.' % (
                    arg, ))
    install_header = kwargs.pop('install_header', False)
    install_dir = kwargs.pop('install_dir', None)
    custom_kwargs = {
        'input': sources,
    }
    # https://github.com/GNOME/glib/commit/0fbc98097fac4d3e647684f344e508abae109fdf
    if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.51.0'):
        cmd += ['--output', '@OUTPUT@']
    else:
        # Old glib-genmarshal writes to stdout only.
        custom_kwargs['capture'] = True
    for arg in known_custom_target_kwargs:
        if arg in kwargs:
            custom_kwargs[arg] = kwargs[arg]
    header_file = output + '.h'
    custom_kwargs['command'] = cmd + ['--body', '@INPUT@']
    if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.4'):
        # Silence any warnings about missing prototypes
        custom_kwargs['command'] += ['--include-header', header_file]
    custom_kwargs['output'] = output + '.c'
    body = build.CustomTarget(output + '_c', state.subdir, state.subproject, custom_kwargs)
    # custom_kwargs is reused for the header target with install settings
    # applied only here (the body is never installed).
    custom_kwargs['install'] = install_header
    if install_dir is not None:
        custom_kwargs['install_dir'] = install_dir
    if new_genmarshal:
        cmd += ['--pragma-once']
    custom_kwargs['command'] = cmd + ['--header', '@INPUT@']
    custom_kwargs['output'] = header_file
    header = build.CustomTarget(output + '_h', state.subdir, state.subproject, custom_kwargs)
    rv = [body, header]
    return ModuleReturnValue(rv, rv)
@staticmethod
def _vapi_args_to_command(prefix, variable, kwargs, accept_vapi=False):
    """Prefix every string stored under kwargs[variable] with *prefix*.

    Raises MesonException for non-string entries.  Fix: the error message
    previously read "All X must be " when accept_vapi was False, because
    ``'strings' + ' or InternalDependencys' if accept_vapi else ''``
    parses as ``('strings' + '...') if accept_vapi else ''`` — the
    conditional must only select the suffix.
    """
    arg_list = mesonlib.extract_as_list(kwargs, variable)
    ret = []
    for arg in arg_list:
        if not isinstance(arg, str):
            types = 'strings' + (' or InternalDependencys' if accept_vapi else '')
            raise MesonException('All {} must be {}'.format(variable, types))
        ret.append(prefix + arg)
    return ret
def _extract_vapi_packages(self, state, kwargs):
    '''
    Packages are special because we need to:
    - Get a list of packages for the .deps file
    - Get a list of depends for any VapiTargets
    - Get package name from VapiTargets
    - Add include dirs for any VapiTargets

    Returns a 4-tuple:
      (vapigen command-line args, VapiTarget depends,
       package names for the .deps file, include dirs for VapiTargets).
    Side effect: kwargs['packages'] is rewritten to contain only the
    plain (non-InternalDependency) package entries.
    '''
    arg_list = kwargs.get('packages')
    if not arg_list:
        return [], [], [], []
    arg_list = mesonlib.listify(arg_list)
    vapi_depends = []
    vapi_packages = []
    vapi_includes = []
    ret = []
    remaining_args = []
    for arg in arg_list:
        if hasattr(arg, 'held_object'):
            arg = arg.held_object
        if isinstance(arg, InternalDependency):
            # A dependency produced by generate_vapi(): pull the vapi/gir
            # dirs and package name out of its VapiTarget sources.
            targets = [t for t in arg.sources if isinstance(t, VapiTarget)]
            for target in targets:
                srcdir = os.path.join(state.environment.get_source_dir(),
                                      target.get_subdir())
                outdir = os.path.join(state.environment.get_build_dir(),
                                      target.get_subdir())
                outfile = target.get_outputs()[0][:-5] # Strip .vapi
                ret.append('--vapidir=' + outdir)
                ret.append('--girdir=' + outdir)
                ret.append('--pkg=' + outfile)
                vapi_depends.append(target)
                vapi_packages.append(outfile)
                vapi_includes.append(srcdir)
        else:
            vapi_packages.append(arg)
            remaining_args.append(arg)
    # Only plain package names remain; they become '--pkg=' args below.
    kwargs['packages'] = remaining_args
    vapi_args = ret + self._vapi_args_to_command('--pkg=', 'packages', kwargs, accept_vapi=True)
    return vapi_args, vapi_depends, vapi_packages, vapi_includes
def _generate_deps(self, state, library, packages, install_dir):
    """Write '<library>.deps' (one package name per line) into the
    scratch dir and return it as a build.Data object to be installed
    into *install_dir*."""
    scratch = state.environment.scratch_dir
    deps_path = os.path.join(scratch, library + '.deps')
    with open(deps_path, 'w') as deps_file:
        deps_file.write(''.join(pkg + '\n' for pkg in packages))
    return build.Data(mesonlib.File(True, scratch, deps_path), install_dir)
def _get_vapi_link_with(self, target):
    """Recursively collect the shared libraries a GirTarget's
    dependency graph links against."""
    libs = []
    for dependency in target.get_target_dependencies():
        if isinstance(dependency, build.SharedLibrary):
            libs.append(dependency)
        elif isinstance(dependency, GirTarget):
            libs.extend(self._get_vapi_link_with(dependency))
    return libs
@permittedKwargs({'sources', 'packages', 'metadata_dirs', 'gir_dirs',
                  'vapi_dirs', 'install', 'install_dir'})
def generate_vapi(self, state, args, kwargs):
    """gnome.generate_vapi(): run vapigen over .gir files / sources to
    produce a .vapi for *library* (the single positional argument).

    Returns an InternalDependency bundling the VapiTarget, its include
    dirs and the shared libraries to link with, so the result can be
    consumed directly as a dependency.  A companion .deps file is also
    created and (optionally) installed.
    """
    if len(args) != 1:
        raise MesonException('The library name is required')
    if not isinstance(args[0], str):
        raise MesonException('The first argument must be the name of the library')
    created_values = []
    library = args[0]
    build_dir = os.path.join(state.environment.get_build_dir(), state.subdir)
    source_dir = os.path.join(state.environment.get_source_dir(), state.subdir)
    pkg_cmd, vapi_depends, vapi_packages, vapi_includes = self._extract_vapi_packages(state, kwargs)
    # The VAPIGEN environment variable overrides the tool lookup.
    if 'VAPIGEN' in os.environ:
        cmd = [self.interpreter.find_program_impl(os.environ['VAPIGEN'])]
    else:
        cmd = [self.interpreter.find_program_impl('vapigen')]
    cmd += ['--quiet', '--library=' + library, '--directory=' + build_dir]
    cmd += self._vapi_args_to_command('--vapidir=', 'vapi_dirs', kwargs)
    cmd += self._vapi_args_to_command('--metadatadir=', 'metadata_dirs', kwargs)
    cmd += self._vapi_args_to_command('--girdir=', 'gir_dirs', kwargs)
    cmd += pkg_cmd
    cmd += ['--metadatadir=' + source_dir]
    if 'sources' not in kwargs:
        raise MesonException('sources are required to generate the vapi file')
    inputs = mesonlib.extract_as_list(kwargs, 'sources')
    link_with = []
    for i in inputs:
        if isinstance(i, str):
            cmd.append(os.path.join(source_dir, i))
        elif hasattr(i, 'held_object') and isinstance(i.held_object, GirTarget):
            # A generated .gir: link against its libraries and pass the
            # built .gir path to vapigen.
            link_with += self._get_vapi_link_with(i.held_object)
            subdir = os.path.join(state.environment.get_build_dir(),
                                  i.held_object.get_subdir())
            gir_file = os.path.join(subdir, i.held_object.get_outputs()[0])
            cmd.append(gir_file)
        else:
            raise MesonException('Input must be a str or GirTarget')
    vapi_output = library + '.vapi'
    custom_kwargs = {
        'command': cmd,
        'input': inputs,
        'output': vapi_output,
        'depends': vapi_depends,
    }
    install_dir = kwargs.get('install_dir',
                             os.path.join(state.environment.coredata.get_builtin_option('datadir'),
                                          'vala', 'vapi'))
    if kwargs.get('install'):
        custom_kwargs['install'] = kwargs['install']
        custom_kwargs['install_dir'] = install_dir
        # We shouldn't need this locally but we install it
        deps_target = self._generate_deps(state, library, vapi_packages, install_dir)
        created_values.append(deps_target)
    vapi_target = VapiTarget(vapi_output, state.subdir, state.subproject, custom_kwargs)
    # So to try our best to get this to just work we need:
    # - link with with the correct library
    # - include the vapi and dependent vapi files in sources
    # - add relevant directories to include dirs
    incs = [build.IncludeDirs(state.subdir, ['.'] + vapi_includes, False)]
    sources = [vapi_target] + vapi_depends
    rv = InternalDependency(None, incs, [], [], link_with, [], sources, [])
    created_values.append(rv)
    return ModuleReturnValue(rv, created_values)
def initialize(*args, **kwargs):
    # Module entry point invoked by Meson's module loader; arguments are
    # forwarded unchanged to the GnomeModule constructor.
    return GnomeModule(*args, **kwargs)
| 47.049671 | 133 | 0.578008 |
import os
import copy
import subprocess
from .. import build
from .. import mlog
from .. import mesonlib
from .. import compilers
from .. import interpreter
from . import GResourceTarget, GResourceHeaderTarget, GirTarget, TypelibTarget, VapiTarget
from . import get_include_args
from . import ExtensionModule
from . import ModuleReturnValue
from ..mesonlib import MesonException, OrderedSet, Popen_safe, extract_as_list
from ..dependencies import Dependency, PkgConfigDependency, InternalDependency
from ..interpreterbase import noKwargs, permittedKwargs, FeatureNew, FeatureNewKwargs
gresource_dep_needed_version = '>= 2.51.1'
native_glib_version = None
girwarning_printed = False
gdbuswarning_printed = False
gresource_warning_printed = False
_gir_has_extra_lib_arg = None
def gir_has_extra_lib_arg(intr_obj):
    """Return True if the available g-ir-scanner supports --extra-library.

    The result is probed once by running 'g-ir-scanner --help' and cached
    in the module-level _gir_has_extra_lib_arg; failures to find or run
    the tool are treated as 'not supported'.
    """
    global _gir_has_extra_lib_arg
    if _gir_has_extra_lib_arg is not None:
        return _gir_has_extra_lib_arg
    _gir_has_extra_lib_arg = False
    try:
        g_ir_scanner = intr_obj.find_program_impl('g-ir-scanner').get_command()
        opts = Popen_safe(g_ir_scanner + ['--help'], stderr=subprocess.STDOUT)[1]
        _gir_has_extra_lib_arg = '--extra-library' in opts
    except (MesonException, FileNotFoundError, subprocess.CalledProcessError):
        # Tool missing or unrunnable: keep the cached False.
        pass
    return _gir_has_extra_lib_arg
class GnomeModule(ExtensionModule):
gir_dep = None
@staticmethod
def _get_native_glib_version(state):
    """Return the native glib-2.0 version string, probed once via
    pkg-config and cached in the module-level native_glib_version.

    Falls back to '2.54' (with a warning) when glib cannot be found.
    """
    global native_glib_version
    if native_glib_version is None:
        glib_dep = PkgConfigDependency('glib-2.0', state.environment,
                                       {'native': True, 'required': False})
        if glib_dep.found():
            native_glib_version = glib_dep.get_version()
        else:
            mlog.warning('Could not detect glib version, assuming 2.54. '
                         'You may get build errors if your glib is older.')
            native_glib_version = '2.54'
    return native_glib_version
def __print_gresources_warning(self, state):
    """Warn (once per run) when the native GLib is too old for reliable
    gresource dependency tracking.  Returns an empty list for historical
    call-site convenience."""
    global gresource_warning_printed
    if not gresource_warning_printed:
        if not mesonlib.version_compare(self._get_native_glib_version(state), gresource_dep_needed_version):
            mlog.warning('GLib compiled dependencies do not work reliably with \n'
                         'the current version of GLib. See the following upstream issue:',
                         mlog.bold('https://bugzilla.gnome.org/show_bug.cgi?id=774368'))
        gresource_warning_printed = True
    return []
@staticmethod
def _print_gdbus_warning():
    # Warn (once per run, tracked via the module-level flag) about the
    # include_directories requirement for gdbus_codegen with GLib < 2.51.3.
    global gdbuswarning_printed
    if not gdbuswarning_printed:
        mlog.warning('Code generated with gdbus_codegen() requires the root directory be added to\n'
                     '  include_directories of targets with GLib < 2.51.3:',
                     mlog.bold('https://github.com/mesonbuild/meson/issues/1387'))
        gdbuswarning_printed = True
@FeatureNewKwargs('gnome.compile_resources', '0.37.0', ['gresource_bundle', 'export', 'install_header'])
@permittedKwargs({'source_dir', 'c_name', 'dependencies', 'export', 'gresource_bundle', 'install_header',
                  'install', 'install_dir', 'extra_args', 'build_by_default'})
def compile_resources(self, state, args, kwargs):
    """gnome.compile_resources(): compile a GResource XML description
    with glib-compile-resources.

    Positional args: target base name and the resource XML file.  With
    'gresource_bundle' a single .gresource target is returned; otherwise
    a .c target plus a .h target are returned.  Dependency discovery
    either uses glib's --dependency-file (new enough GLib) or a
    configure-time scan via _get_gresource_dependencies().
    """
    self.__print_gresources_warning(state)
    glib_version = self._get_native_glib_version(state)
    cmd = ['glib-compile-resources', '@INPUT@']
    source_dirs, dependencies = mesonlib.extract_as_list(kwargs, 'source_dir', 'dependencies', pop=True)
    if len(args) < 2:
        raise MesonException('Not enough arguments; the name of the resource '
                             'and the path to the XML file are required')
    # Validate dependencies: only files and custom targets are allowed,
    # and custom targets need a GLib new enough for --dependency-file.
    for (ii, dep) in enumerate(dependencies):
        if hasattr(dep, 'held_object'):
            dependencies[ii] = dep = dep.held_object
        if not isinstance(dep, (mesonlib.File, build.CustomTarget, build.CustomTargetIndex)):
            m = 'Unexpected dependency type {!r} for gnome.compile_resources() ' \
                '"dependencies" argument.\nPlease pass the return value of ' \
                'custom_target() or configure_file()'
            raise MesonException(m.format(dep))
        if isinstance(dep, (build.CustomTarget, build.CustomTargetIndex)):
            if not mesonlib.version_compare(glib_version, gresource_dep_needed_version):
                m = 'The "dependencies" argument of gnome.compile_resources() can not\n' \
                    'be used with the current version of glib-compile-resources due to\n' \
                    '<https://bugzilla.gnome.org/show_bug.cgi?id=774368>'
                raise MesonException(m)
    # Resolve the input XML path; build-time-generated XML is rejected
    # because the dependency scan below needs to read it now.
    ifile = args[1]
    if isinstance(ifile, mesonlib.File):
        if ifile.is_built:
            ifile = os.path.join(state.environment.get_build_dir(), ifile.subdir, ifile.fname)
        else:
            ifile = os.path.join(ifile.subdir, ifile.fname)
    elif isinstance(ifile, str):
        ifile = os.path.join(state.subdir, ifile)
    elif isinstance(ifile, (interpreter.CustomTargetHolder,
                            interpreter.CustomTargetIndexHolder,
                            interpreter.GeneratedObjectsHolder)):
        m = 'Resource xml files generated at build-time cannot be used ' \
            'with gnome.compile_resources() because we need to scan ' \
            'the xml for dependencies. Use configure_file() instead ' \
            'to generate it at configure-time.'
        raise MesonException(m)
    else:
        raise MesonException('Invalid file argument: {!r}'.format(ifile))
    depend_files, depends, subdirs = self._get_gresource_dependencies(
        state, ifile, source_dirs, dependencies)
    # Make source dirs relative to the build dir; the current subdir is
    # always searched (after user-specified dirs), plus dirs of deps.
    source_dirs = [os.path.join(state.build_to_src, state.subdir, d) for d in source_dirs]
    source_dirs.append(os.path.join(state.build_to_src, state.subdir))
    source_dirs += subdirs
    for source_dir in OrderedSet(source_dirs):
        cmd += ['--sourcedir', source_dir]
    if 'c_name' in kwargs:
        cmd += ['--c-name', kwargs.pop('c_name')]
    export = kwargs.pop('export', False)
    if not export:
        cmd += ['--internal']
    cmd += ['--generate', '--target', '@OUTPUT@']
    cmd += mesonlib.stringlistify(kwargs.pop('extra_args', []))
    gresource = kwargs.pop('gresource_bundle', False)
    if gresource:
        output = args[0] + '.gresource'
        name = args[0] + '_gresource'
    else:
        output = args[0] + '.c'
        name = args[0] + '_c'
    if kwargs.get('install', False) and not gresource:
        raise MesonException('The install kwarg only applies to gresource bundles, see install_header')
    install_header = kwargs.pop('install_header', False)
    if install_header and gresource:
        raise MesonException('The install_header kwarg does not apply to gresource bundles')
    if install_header and not export:
        raise MesonException('GResource header is installed yet export is not enabled')
    kwargs['input'] = args[1]
    kwargs['output'] = output
    kwargs['depends'] = depends
    if not mesonlib.version_compare(glib_version, gresource_dep_needed_version):
        # Old GLib: rely on the configure-time dependency scan.
        kwargs['depend_files'] = depend_files
        kwargs['command'] = cmd
    else:
        # New GLib: let glib-compile-resources emit a depfile.
        depfile = kwargs['output'] + '.d'
        kwargs['depfile'] = depfile
        kwargs['command'] = copy.copy(cmd) + ['--dependency-file', '@DEPFILE@']
    target_c = GResourceTarget(name, state.subdir, state.subproject, kwargs)
    if gresource:
        # Only one target for .gresource bundles.
        return ModuleReturnValue(target_c, [target_c])
    h_kwargs = {
        'command': cmd,
        'input': args[1],
        'output': args[0] + '.h',
        'depends': depends
    }
    if 'build_by_default' in kwargs:
        h_kwargs['build_by_default'] = kwargs['build_by_default']
    if install_header:
        h_kwargs['install'] = install_header
        h_kwargs['install_dir'] = kwargs.get('install_dir',
                                             state.environment.coredata.get_builtin_option('includedir'))
    target_h = GResourceHeaderTarget(args[0] + '_h', state.subdir, state.subproject, h_kwargs)
    rv = [target_c, target_h]
    return ModuleReturnValue(rv, rv)
def _get_gresource_dependencies(self, state, input_file, source_dirs, dependencies):
    """Run 'glib-compile-resources --generate-dependencies' on the XML
    and resolve every listed resource.

    Returns (dep_files, depends, subdirs): dep_files contains source
    File objects (resolved from the scan output), depends the custom
    targets that generate resources, and subdirs extra directories to be
    passed as --sourcedir to the real compile step.
    """
    cmd = ['glib-compile-resources',
           input_file,
           '--generate-dependencies']
    # Prefer generated files over source files
    cmd += ['--sourcedir', state.subdir] # Current build dir
    for source_dir in source_dirs:
        cmd += ['--sourcedir', os.path.join(state.subdir, source_dir)]
    pc, stdout, stderr = Popen_safe(cmd, cwd=state.environment.get_source_dir())
    if pc.returncode != 0:
        m = 'glib-compile-resources failed to get dependencies for {}:\n{}'
        mlog.warning(m.format(cmd[1], stderr))
        raise subprocess.CalledProcessError(pc.returncode, cmd)
    dep_files = stdout.split('\n')[:-1]
    depends = []
    subdirs = []
    # Match every scanned resource path against the user-supplied
    # dependencies by basename; iterate over a copy because dep_files
    # is mutated inside the loop.
    for resfile in dep_files[:]:
        resbasename = os.path.basename(resfile)
        for dep in dependencies:
            if hasattr(dep, 'held_object'):
                dep = dep.held_object
            if isinstance(dep, mesonlib.File):
                if dep.fname != resbasename:
                    continue
                dep_files.remove(resfile)
                dep_files.append(dep)
                subdirs.append(dep.subdir)
                break
            elif isinstance(dep, (build.CustomTarget, build.CustomTargetIndex)):
                fname = None
                outputs = {(o, os.path.basename(o)) for o in dep.get_outputs()}
                for o, baseo in outputs:
                    if baseo == resbasename:
                        fname = o
                        break
                if fname is not None:
                    dep_files.remove(resfile)
                    depends.append(dep)
                    subdirs.append(dep.get_subdir())
                    break
        else:
            # for-else: no user dependency matched this resource, so it
            # must exist in the source tree.  If it doesn't, it is likely
            # a generated file the user forgot to declare.
            # NOTE: if multiple generated resource files share a basename,
            # the matching above can pick the wrong one.
            try:
                f = mesonlib.File.from_source_file(state.environment.get_source_dir(),
                                                   ".", resfile)
            except MesonException:
                raise MesonException(
                    'Resource "%s" listed in "%s" was not found. If this is a '
                    'generated file, pass the target that generates it to '
                    'gnome.compile_resources() using the "dependencies" '
                    'keyword argument.' % (resfile, input_file))
            dep_files.remove(resfile)
            dep_files.append(f)
    return dep_files, depends, subdirs
def _get_link_args(self, state, lib, depends, include_rpath=False,
                   use_gir_args=False):
    """Build the linker argument list for linking against *lib*.

    When *lib* is a shared library it is appended to *depends* (the
    caller's list is mutated) and -L (plus optional -Wl,-rpath) entries
    are emitted for its build and rpath dirs.  With use_gir_args=True
    and a capable g-ir-scanner, --extra-library= is used instead of -l.
    """
    link_command = []
    # Construct link args
    if isinstance(lib, build.SharedLibrary):
        libdir = os.path.join(state.environment.get_build_dir(), state.backend.get_target_dir(lib))
        link_command.append('-L' + libdir)
        # Needed for the following binutils bug:
        # https://github.com/mesonbuild/meson/issues/1911
        # However, g-ir-scanner does not understand -Wl,-rpath
        # so we need to use -L instead
        for d in state.backend.determine_rpath_dirs(lib):
            d = os.path.join(state.environment.get_build_dir(), d)
            link_command.append('-L' + d)
            if include_rpath:
                link_command.append('-Wl,-rpath,' + d)
        if include_rpath:
            link_command.append('-Wl,-rpath,' + libdir)
        depends.append(lib)
    if gir_has_extra_lib_arg(self.interpreter) and use_gir_args:
        link_command.append('--extra-library=' + lib.name)
    else:
        link_command.append('-l' + lib.name)
    return link_command
def _get_dependencies_flags(self, deps, state, depends, include_rpath=False,
                            use_gir_args=False, separate_nodedup=False):
    """Recursively collect compile/link flags and gir include dirs for *deps*.

    May append to *depends* (directly and via _get_link_args).  Returns
    a 4-tuple (cflags, internal_ldflags, external_ldflags, gi_includes),
    or with separate_nodedup=True a 5-tuple that keeps the order-sensitive
    external ldflags in their own non-deduplicated list.
    """
    cflags = OrderedSet()
    internal_ldflags = OrderedSet()
    external_ldflags = OrderedSet()
    # External linker flags that can't be de-duped reliably because they
    # are two-argument pairs whose order matters, e.g. "-framework Foo".
    external_ldflags_nodedup = []
    gi_includes = OrderedSet()
    deps = mesonlib.listify(deps, unholder=True)
    for dep in deps:
        if isinstance(dep, InternalDependency):
            cflags.update(dep.get_compile_args())
            cflags.update(get_include_args(dep.include_directories))
            for lib in dep.libraries:
                if hasattr(lib, 'held_object'):
                    lib = lib.held_object
                if isinstance(lib, build.SharedLibrary):
                    internal_ldflags.update(self._get_link_args(state, lib, depends, include_rpath))
                    # Recurse into the library's own external deps,
                    # keeping nodedup flags separate (last arg True).
                    libdepflags = self._get_dependencies_flags(lib.get_external_deps(), state, depends, include_rpath,
                                                               use_gir_args, True)
                    cflags.update(libdepflags[0])
                    internal_ldflags.update(libdepflags[1])
                    external_ldflags.update(libdepflags[2])
                    external_ldflags_nodedup += libdepflags[3]
                    gi_includes.update(libdepflags[4])
            extdepflags = self._get_dependencies_flags(dep.ext_deps, state, depends, include_rpath,
                                                       use_gir_args, True)
            cflags.update(extdepflags[0])
            internal_ldflags.update(extdepflags[1])
            external_ldflags.update(extdepflags[2])
            external_ldflags_nodedup += extdepflags[3]
            gi_includes.update(extdepflags[4])
            for source in dep.sources:
                if hasattr(source, 'held_object'):
                    source = source.held_object
                if isinstance(source, GirTarget):
                    gi_includes.update([os.path.join(state.environment.get_build_dir(),
                                                     source.get_subdir())])
        elif isinstance(dep, Dependency):
            cflags.update(dep.get_compile_args())
            ldflags = iter(dep.get_link_args(raw=True))
            for lib in ldflags:
                if (os.path.isabs(lib) and
                        getattr(dep, 'is_libtool', False)):
                    # Absolute libtool path: convert to -L<dir> + -l<name>.
                    lib_dir = os.path.dirname(lib)
                    external_ldflags.update(["-L%s" % lib_dir])
                    if include_rpath:
                        external_ldflags.update(['-Wl,-rpath {}'.format(lib_dir)])
                    libname = os.path.basename(lib)
                    if libname.startswith("lib"):
                        libname = libname[3:]
                    libname = libname.split(".so")[0]
                    lib = "-l%s" % libname
                # Skip -W* flags; g-ir-scanner does not understand them.
                if lib.startswith("-W"):
                    continue
                # If it's a framework arg also consume the framework name
                # to preserve the order of arguments
                if lib == '-framework':
                    external_ldflags_nodedup += [lib, next(ldflags)]
                else:
                    external_ldflags.update([lib])
            if isinstance(dep, PkgConfigDependency):
                girdir = dep.get_pkgconfig_variable("girdir", {'default': ''})
                if girdir:
                    gi_includes.update([girdir])
        elif isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
            cflags.update(get_include_args(dep.get_include_dirs()))
            depends.append(dep)
        else:
            mlog.log('dependency {!r} not handled to build gir files'.format(dep))
            continue
    if gir_has_extra_lib_arg(self.interpreter) and use_gir_args:
        # Newer g-ir-scanner wants --extra-library= instead of -l.
        def fix_ldflags(ldflags):
            fixed_ldflags = OrderedSet()
            for ldflag in ldflags:
                if ldflag.startswith("-l"):
                    ldflag = ldflag.replace('-l', '--extra-library=', 1)
                fixed_ldflags.add(ldflag)
            return fixed_ldflags
        internal_ldflags = fix_ldflags(internal_ldflags)
        external_ldflags = fix_ldflags(external_ldflags)
    if not separate_nodedup:
        external_ldflags.update(external_ldflags_nodedup)
        return cflags, internal_ldflags, external_ldflags, gi_includes
    else:
        return cflags, internal_ldflags, external_ldflags, external_ldflags_nodedup, gi_includes
def _unwrap_gir_target(self, girtarget):
    """Strip interpreter holder wrappers and validate the target type.

    Raises MesonException unless the unwrapped object is an executable
    or a shared library.
    """
    target = girtarget
    while hasattr(target, 'held_object'):
        target = target.held_object
    acceptable = (build.Executable, build.SharedLibrary)
    if not isinstance(target, acceptable):
        raise MesonException('Gir target must be an executable or shared library')
    return target
def _get_gir_dep(self, state):
    """Return (dependency, compile args) for gobject-introspection-1.0.

    Reuses the cached self.gir_dep when already looked up; raises
    MesonException when the dependency cannot be found.
    """
    try:
        dep = self.gir_dep
        if not dep:
            dep = PkgConfigDependency('gobject-introspection-1.0',
                                      state.environment,
                                      {'native': True})
        pkgargs = dep.get_compile_args()
    except Exception:
        raise MesonException('gobject-introspection dependency was not found, gir cannot be generated.')
    return dep, pkgargs
def _scan_header(self, kwargs):
    """Pop the optional 'header' kwarg and turn it into a --c-include flag."""
    header = kwargs.pop('header', None)
    if not header:
        return []
    if not isinstance(header, str):
        raise MesonException('header must be a string')
    return ['--c-include=' + header]
def _scan_extra_args(self, kwargs):
    """Pop the optional 'extra_args' kwarg and normalize it to a string list."""
    extra = kwargs.pop('extra_args', [])
    return mesonlib.stringlistify(extra)
def _scan_link_withs(self, state, depends, kwargs):
    """Translate the optional 'link_with' kwarg into scanner link flags.

    Delegates to _get_link_args, which may append to *depends*.
    """
    if 'link_with' not in kwargs:
        return []
    args = []
    for target in mesonlib.extract_as_list(kwargs, 'link_with', pop = True):
        args += self._get_link_args(state, target.held_object, depends,
                                    use_gir_args=True)
    return args
# May mutate depends and gir_inc_dirs
def _scan_include(self, state, depends, gir_inc_dirs, kwargs):
    """Translate the 'includes' kwarg into --include/--include-uninstalled flags.

    String entries become --include=<name>.  GirTarget entries are
    appended to *depends* and their build dirs to *gir_inc_dirs* (both
    mutated in place) and referenced via --include-uninstalled.
    Raises MesonException for any other entry type.
    """
    ret = []
    if 'includes' in kwargs:
        includes = mesonlib.extract_as_list(kwargs, 'includes', pop = True)
        for inc in includes:
            if hasattr(inc, 'held_object'):
                inc = inc.held_object
            if isinstance(inc, str):
                ret += ['--include=%s' % (inc, )]
            elif isinstance(inc, GirTarget):
                gir_inc_dirs += [
                    os.path.join(state.environment.get_build_dir(),
                                 inc.get_subdir()),
                ]
                ret += [
                    "--include-uninstalled=%s" % (os.path.join(inc.get_subdir(), inc.get_basename()), )
                ]
                depends += [inc]
            else:
                raise MesonException(
                    'Gir includes must be str, GirTarget, or list of them')
    return ret
def _scan_symbol_prefix(self, kwargs):
    """Pop 'symbol_prefix' and emit one --symbol-prefix flag per entry."""
    if 'symbol_prefix' not in kwargs:
        return []
    prefixes = mesonlib.stringlistify(kwargs.pop('symbol_prefix', []))
    return ['--symbol-prefix=%s' % prefix for prefix in prefixes]
def _scan_identifier_prefix(self, kwargs):
    """Pop the optional 'identifier_prefix' kwarg into a scanner flag."""
    if 'identifier_prefix' not in kwargs:
        return []
    prefix = kwargs.pop('identifier_prefix')
    if not isinstance(prefix, str):
        raise MesonException('Gir identifier prefix must be str')
    return ['--identifier-prefix=%s' % prefix]
def _scan_export_packages(self, kwargs):
    """Pop the optional 'export_packages' kwarg into --pkg-export flags.

    Accepts a single string or a list of strings.  Raises MesonException
    for any other type, including non-string list elements — previously
    those were silently formatted into broken '--pkg-export=' arguments.
    """
    if 'export_packages' not in kwargs:
        return []
    pkgs = kwargs.pop('export_packages')
    if isinstance(pkgs, str):
        pkgs = [pkgs]
    elif not isinstance(pkgs, list) or not all(isinstance(p, str) for p in pkgs):
        raise MesonException('Gir export packages must be str or list')
    return ['--pkg-export=%s' % pkg for pkg in pkgs]
def _scan_inc_dirs(self, kwargs):
    """Pop 'include_directories', validating each entry holds a str or IncludeDirs."""
    dirs = mesonlib.extract_as_list(kwargs, 'include_directories', pop = True)
    for d in dirs:
        if isinstance(d.held_object, (str, build.IncludeDirs)):
            continue
        raise MesonException(
            'Gir include dirs should be include_directories().')
    return dirs
def _scan_langs(self, state, langs):
    """Collect -L search paths from the configured link args of *langs*."""
    ret = []
    for lang in langs:
        if state.environment.is_cross_build():
            # Cross builds read link args from the cross-file properties.
            link_args = state.environment.cross_info.config["properties"].get(lang + '_link_args', "")
        else:
            link_args = state.environment.coredata.get_external_link_args(lang)
        for link_arg in link_args:
            # Only library search paths are forwarded to g-ir-scanner.
            if link_arg.startswith('-L'):
                ret.append(link_arg)
    return ret
def _scan_gir_targets(self, state, girtargets):
    """Build per-target scanner args: --program for executables, --library
    plus -L search paths for shared libraries."""
    ret = []
    for girtarget in girtargets:
        if isinstance(girtarget, build.Executable):
            ret += ['--program', girtarget]
        elif isinstance(girtarget, build.SharedLibrary):
            libname = girtarget.get_basename()
            # Needed for the following binutils bug:
            # https://github.com/mesonbuild/meson/issues/1911
            # However, g-ir-scanner does not understand -Wl,-rpath
            # so we need to use -L instead
            for d in state.backend.determine_rpath_dirs(girtarget):
                d = os.path.join(state.environment.get_build_dir(), d)
                ret.append('-L' + d)
            ret += ['--library', libname]
            # need to put our output directory first as we need to use the
            # generated libraries instead of any possibly installed system/prefix
            # ones.
            ret += ["-L@PRIVATE_OUTDIR_ABS_%s@" % girtarget.get_id()]
    return ret
def _get_girtargets_langs_compilers(self, girtargets):
    """Pick, per target, the first introspectable (lang, compiler) pair."""
    # XXX: Can you use g-i with any other language?
    introspectable = ('c', 'cpp', 'objc', 'objcpp', 'd')
    pairs = []
    for target in girtargets:
        for lang, compiler in target.compilers.items():
            if lang in introspectable:
                pairs.append((lang, compiler))
                break
    return pairs
def _get_gir_targets_deps(self, girtargets):
    """Collect link deps followed by external deps from every gir target."""
    deps = []
    for target in girtargets:
        deps.extend(target.get_all_link_deps())
        deps.extend(target.get_external_deps())
    return deps
def _get_gir_targets_inc_dirs(self, girtargets):
    """Collect the include directories of every gir target, in order."""
    incs = []
    for target in girtargets:
        incs.extend(target.get_include_dirs())
    return incs
def _get_langs_compilers_flags(self, state, langs_compilers):
    """Collect global/project args and sanitizer flags for (lang, compiler) pairs.

    Returns a tuple (cflags, internal_ldflags, external_ldflags);
    external_ldflags is currently never populated here.
    """
    cflags = []
    internal_ldflags = []
    external_ldflags = []
    for lang, compiler in langs_compilers:
        if state.global_args.get(lang):
            cflags += state.global_args[lang]
        if state.project_args.get(lang):
            cflags += state.project_args[lang]
        if 'b_sanitize' in compiler.base_options:
            sanitize = state.environment.coredata.base_options['b_sanitize'].value
            cflags += compilers.sanitizer_compile_args(sanitize)
            if 'address' in sanitize.split(','):
                internal_ldflags += ['-lasan'] # This must be first in ldflags
            # FIXME: Linking directly to libasan is not recommended but g-ir-scanner
            # does not understand -f LDFLAGS. https://bugzilla.gnome.org/show_bug.cgi?id=783892
            # ldflags += compilers.sanitizer_link_args(sanitize)
    return cflags, internal_ldflags, external_ldflags
def _make_gir_filelist(self, state, srcdir, ns, nsversion, girtargets, libsources):
    """Write the list of source files for g-ir-scanner's --filelist option.

    The file is created in the first gir target's private dir.  Custom
    target outputs get build-dir paths, File objects build-relative
    paths, and everything else source-dir paths.  Returns the path of
    the filelist that was written.
    """
    gir_filelist_dir = state.backend.get_target_private_dir_abs(girtargets[0])
    if not os.path.isdir(gir_filelist_dir):
        os.mkdir(gir_filelist_dir)
    gir_filelist_filename = os.path.join(gir_filelist_dir, '%s_%s_gir_filelist' % (ns, nsversion))
    with open(gir_filelist_filename, 'w', encoding='utf-8') as gir_filelist:
        for s in libsources:
            if hasattr(s, 'held_object'):
                s = s.held_object
            if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
                for custom_output in s.get_outputs():
                    gir_filelist.write(os.path.join(state.environment.get_build_dir(),
                                                    state.backend.get_target_dir(s),
                                                    custom_output) + '\n')
            elif isinstance(s, mesonlib.File):
                gir_filelist.write(s.rel_to_builddir(state.build_to_src) + '\n')
            elif isinstance(s, build.GeneratedList):
                for gen_src in s.get_outputs():
                    gir_filelist.write(os.path.join(srcdir, gen_src) + '\n')
            else:
                # Plain string path relative to the source dir.
                gir_filelist.write(os.path.join(srcdir, s) + '\n')
    return gir_filelist_filename
def _make_gir_target(self, state, girfile, scan_command, depends, kwargs):
    """Create the custom target that runs g-ir-scanner to produce *girfile*."""
    target_kwargs = {
        'output': girfile,
        'command': scan_command,
        'depends': depends,
    }
    if 'install' in kwargs:
        default_dir = os.path.join(state.environment.get_datadir(), 'gir-1.0')
        target_kwargs['install'] = kwargs['install']
        target_kwargs['install_dir'] = kwargs.get('install_dir_gir', default_dir)
    if 'build_by_default' in kwargs:
        target_kwargs['build_by_default'] = kwargs['build_by_default']
    return GirTarget(girfile, state.subdir, state.subproject, target_kwargs)
def _make_typelib_target(self, state, typelib_output, typelib_cmd, kwargs):
    """Create the custom target that compiles the .gir into a typelib."""
    target_kwargs = {
        'output': typelib_output,
        'command': typelib_cmd,
    }
    if 'install' in kwargs:
        default_dir = os.path.join(state.environment.get_libdir(), 'girepository-1.0')
        target_kwargs['install'] = kwargs['install']
        target_kwargs['install_dir'] = kwargs.get('install_dir_typelib', default_dir)
    if 'build_by_default' in kwargs:
        target_kwargs['build_by_default'] = kwargs['build_by_default']
    return TypelibTarget(typelib_output, state.subdir, state.subproject, target_kwargs)
# May mutate depends
def _gather_typelib_includes_and_update_depends(self, state, deps, depends):
    """Collect --includedir paths for typelib generation from *deps*.

    GirTargets discovered among the deps are appended to *depends*
    (mutated in place) so the typelib target is ordered after them.
    Returns the de-duplicated list of include directories.
    """
    # Need to recursively add deps on GirTarget sources from our
    # dependencies and also find the include directories needed for the
    # typelib generation custom target below.
    typelib_includes = []
    for dep in deps:
        if hasattr(dep, 'held_object'):
            dep = dep.held_object
        # Add a dependency on each GirTarget listed in dependencies and add
        # the directory where it will be generated to the typelib includes
        if isinstance(dep, InternalDependency):
            for source in dep.sources:
                if hasattr(source, 'held_object'):
                    source = source.held_object
                if isinstance(source, GirTarget) and source not in depends:
                    depends.append(source)
                    subdir = os.path.join(state.environment.get_build_dir(),
                                          source.get_subdir())
                    if subdir not in typelib_includes:
                        typelib_includes.append(subdir)
        # Do the same, but for dependencies of dependencies. These are
        # stored in the list of generated sources for each link dep (from
        # girtarget.get_all_link_deps() above).
        # FIXME: Store this in the original form from declare_dependency()
        # so it can be used here directly.
        elif isinstance(dep, build.SharedLibrary):
            for source in dep.generated:
                if isinstance(source, GirTarget):
                    subdir = os.path.join(state.environment.get_build_dir(),
                                          source.get_subdir())
                    if subdir not in typelib_includes:
                        typelib_includes.append(subdir)
        elif isinstance(dep, PkgConfigDependency):
            # Installed gir files live in the pkg-config girdir.
            girdir = dep.get_pkgconfig_variable("girdir", {'default': ''})
            if girdir and girdir not in typelib_includes:
                typelib_includes.append(girdir)
    return typelib_includes
def _get_external_args_for_langs(self, state, langs):
    """Gather per-language external compile args (cross file or native)."""
    env = state.environment
    args = []
    for lang in langs:
        if env.is_cross_build():
            args.extend(env.cross_info.config["properties"].get(lang + '_args', ""))
        else:
            args.extend(env.coredata.get_external_args(lang))
    return args
@staticmethod
def _get_scanner_cflags(cflags):
    """Yield only the preprocessor flags (-D/-U/-I) that g-ir-scanner accepts."""
    accepted = ('-D', '-U', '-I')
    for flag in cflags:
        if flag.startswith(accepted):
            yield flag
@staticmethod
def _get_scanner_ldflags(ldflags):
    """Yield only the link flags (-L/-l/--extra-library) that g-ir-scanner accepts."""
    accepted = ('-L', '-l', '--extra-library')
    for flag in ldflags:
        if flag.startswith(accepted):
            yield flag
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@permittedKwargs({'sources', 'nsversion', 'namespace', 'symbol_prefix', 'identifier_prefix',
                  'export_packages', 'includes', 'dependencies', 'link_with', 'include_directories',
                  'install', 'install_dir_gir', 'install_dir_typelib', 'extra_args',
                  'packages', 'header', 'build_by_default'})
def generate_gir(self, state, args, kwargs):
    """Generate a GObject-Introspection .gir file and its compiled .typelib.

    Positional args are the executable/shared-library targets to
    introspect (only one allowed if any is an executable).  Assembles a
    g-ir-scanner command from the many supported kwargs, then a
    g-ir-compiler command for the typelib.  Returns both custom targets.
    """
    if not args:
        raise MesonException('generate_gir takes at least one argument')
    if kwargs.get('install_dir'):
        raise MesonException('install_dir is not supported with generate_gir(), see "install_dir_gir" and "install_dir_typelib"')
    giscanner = self.interpreter.find_program_impl('g-ir-scanner')
    gicompiler = self.interpreter.find_program_impl('g-ir-compiler')
    girtargets = [self._unwrap_gir_target(arg) for arg in args]
    if len(girtargets) > 1 and any([isinstance(el, build.Executable) for el in girtargets]):
        raise MesonException('generate_gir only accepts a single argument when one of the arguments is an executable')
    # Cache the dependency so later calls in the same build reuse it.
    self.gir_dep, pkgargs = self._get_gir_dep(state)
    ns = kwargs.pop('namespace')
    nsversion = kwargs.pop('nsversion')
    libsources = mesonlib.extract_as_list(kwargs, 'sources', pop=True)
    girfile = '%s-%s.gir' % (ns, nsversion)
    srcdir = os.path.join(state.environment.get_source_dir(), state.subdir)
    builddir = os.path.join(state.environment.get_build_dir(), state.subdir)
    depends = [] + girtargets
    gir_inc_dirs = []
    langs_compilers = self._get_girtargets_langs_compilers(girtargets)
    cflags, internal_ldflags, external_ldflags = self._get_langs_compilers_flags(state, langs_compilers)
    deps = self._get_gir_targets_deps(girtargets)
    deps += extract_as_list(kwargs, 'dependencies', pop=True, unholder=True)
    typelib_includes = self._gather_typelib_includes_and_update_depends(state, deps, depends)
    # ldflags will be misinterpreted by gir scanner (showing
    # spurious dependencies) but building GStreamer fails if they
    # are not used here.
    dep_cflags, dep_internal_ldflags, dep_external_ldflags, gi_includes = \
        self._get_dependencies_flags(deps, state, depends, use_gir_args=True)
    # Filter down to the flag subsets g-ir-scanner understands.
    cflags += list(self._get_scanner_cflags(dep_cflags))
    cflags += list(self._get_scanner_cflags(self._get_external_args_for_langs(state, [lc[0] for lc in langs_compilers])))
    internal_ldflags += list(self._get_scanner_ldflags(dep_internal_ldflags))
    external_ldflags += list(self._get_scanner_ldflags(dep_external_ldflags))
    girtargets_inc_dirs = self._get_gir_targets_inc_dirs(girtargets)
    inc_dirs = self._scan_inc_dirs(kwargs)
    # Build the scanner command piece by piece; several helpers below
    # also pop their kwargs and may append to depends/gir_inc_dirs.
    scan_command = [giscanner]
    scan_command += pkgargs
    scan_command += ['--no-libtool']
    scan_command += ['--namespace=' + ns, '--nsversion=' + nsversion]
    scan_command += ['--warn-all']
    scan_command += ['--output', '@OUTPUT@']
    scan_command += self._scan_header(kwargs)
    scan_command += self._scan_extra_args(kwargs)
    scan_command += ['-I' + srcdir, '-I' + builddir]
    scan_command += get_include_args(girtargets_inc_dirs)
    scan_command += ['--filelist=' + self._make_gir_filelist(state, srcdir, ns, nsversion, girtargets, libsources)]
    scan_command += self._scan_link_withs(state, depends, kwargs)
    scan_command += self._scan_include(state, depends, gir_inc_dirs, kwargs)
    scan_command += self._scan_symbol_prefix(kwargs)
    scan_command += self._scan_identifier_prefix(kwargs)
    scan_command += self._scan_export_packages(kwargs)
    scan_command += ['--cflags-begin']
    scan_command += cflags
    scan_command += ['--cflags-end']
    scan_command += get_include_args(inc_dirs)
    scan_command += get_include_args(list(gi_includes) + gir_inc_dirs + inc_dirs, prefix='--add-include-path=')
    scan_command += list(internal_ldflags)
    scan_command += self._scan_gir_targets(state, girtargets)
    scan_command += self._scan_langs(state, [lc[0] for lc in langs_compilers])
    scan_command += list(external_ldflags)
    scan_target = self._make_gir_target(state, girfile, scan_command, depends, kwargs)
    typelib_output = '%s-%s.typelib' % (ns, nsversion)
    typelib_cmd = [gicompiler, scan_target, '--output', '@OUTPUT@']
    typelib_cmd += get_include_args(gir_inc_dirs, prefix='--includedir=')
    for incdir in typelib_includes:
        typelib_cmd += ["--includedir=" + incdir]
    typelib_target = self._make_typelib_target(state, typelib_output, typelib_cmd, kwargs)
    rv = [scan_target, typelib_target]
    return ModuleReturnValue(rv, rv)
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@permittedKwargs({'build_by_default', 'depend_files'})
def compile_schemas(self, state, args, kwargs):
    """Create a custom target running glib-compile-schemas on the current subdir.

    Takes no positional arguments.  The remaining kwargs are reused
    (mutated) as the CustomTarget kwargs; output is gschemas.compiled.
    """
    if args:
        raise MesonException('Compile_schemas does not take positional arguments.')
    srcdir = os.path.join(state.build_to_src, state.subdir)
    outdir = state.subdir
    cmd = [self.interpreter.find_program_impl('glib-compile-schemas')]
    cmd += ['--targetdir', outdir, srcdir]
    kwargs['command'] = cmd
    kwargs['input'] = []
    kwargs['output'] = 'gschemas.compiled'
    if state.subdir == '':
        targetname = 'gsettings-compile'
    else:
        # Make the target name unique per subdir.
        targetname = 'gsettings-compile-' + state.subdir.replace('/', '_')
    target_g = build.CustomTarget(targetname, state.subdir, state.subproject, kwargs)
    return ModuleReturnValue(target_g, [target_g])
@permittedKwargs({'sources', 'media', 'symlink_media', 'languages'})
def yelp(self, state, args, kwargs):
    """Install Yelp help files and create pot/update-po maintenance targets.

    args[0] is the project id; sources come from the 'sources' kwarg or,
    failing that, from the remaining positional args.  Returns the
    install-time RunScript plus the two RunTargets.
    """
    if len(args) < 1:
        raise MesonException('Yelp requires a project id')
    project_id = args[0]
    sources = mesonlib.stringlistify(kwargs.pop('sources', []))
    if not sources:
        if len(args) > 1:
            sources = mesonlib.stringlistify(args[1:])
    if not sources:
        raise MesonException('Yelp requires a list of sources')
    source_str = '@@'.join(sources)
    langs = mesonlib.stringlistify(kwargs.pop('languages', []))
    if langs:
        mlog.deprecation('''The "languages" argument of gnome.yelp() is deprecated.
Use a LINGUAS file in the sources directory instead.
This will become a hard error in the future.''')
    media = mesonlib.stringlistify(kwargs.pop('media', []))
    symlinks = kwargs.pop('symlink_media', True)
    if not isinstance(symlinks, bool):
        raise MesonException('symlink_media must be a boolean')
    # All supported kwargs have been popped by now.
    if kwargs:
        raise MesonException('Unknown arguments passed: {}'.format(', '.join(kwargs.keys())))
    script = state.environment.get_build_command()
    args = ['--internal',
            'yelphelper',
            'install',
            '--subdir=' + state.subdir,
            '--id=' + project_id,
            '--installdir=' + os.path.join(state.environment.get_datadir(), 'help'),
            '--sources=' + source_str]
    if symlinks:
        args.append('--symlinks=true')
    if media:
        args.append('--media=' + '@@'.join(media))
    if langs:
        args.append('--langs=' + '@@'.join(langs))
    inscript = build.RunScript(script, args)
    potargs = state.environment.get_build_command() + [
        '--internal', 'yelphelper', 'pot',
        '--subdir=' + state.subdir,
        '--id=' + project_id,
        '--sources=' + source_str,
    ]
    pottarget = build.RunTarget('help-' + project_id + '-pot', potargs[0],
                                potargs[1:], [], state.subdir, state.subproject)
    poargs = state.environment.get_build_command() + [
        '--internal', 'yelphelper', 'update-po',
        '--subdir=' + state.subdir,
        '--id=' + project_id,
        '--sources=' + source_str,
        '--langs=' + '@@'.join(langs),
    ]
    potarget = build.RunTarget('help-' + project_id + '-update-po', poargs[0],
                               poargs[1:], [], state.subdir, state.subproject)
    rv = [inscript, pottarget, potarget]
    return ModuleReturnValue(None, rv)
@FeatureNewKwargs('gnome.gtkdoc', '0.48.0', ['c_args'])
@FeatureNewKwargs('gnome.gtkdoc', '0.48.0', ['module_version'])
@FeatureNewKwargs('gnome.gtkdoc', '0.37.0', ['namespace', 'mode'])
@permittedKwargs({'main_xml', 'main_sgml', 'src_dir', 'dependencies', 'install',
                  'install_dir', 'scan_args', 'scanobjs_args', 'gobject_typesfile',
                  'fixxref_args', 'html_args', 'html_assets', 'content_files',
                  'mkdb_args', 'ignore_headers', 'include_directories',
                  'namespace', 'mode', 'expand_content_files', 'module_version'})
def gtkdoc(self, state, args, kwargs):
    """Set up a gtk-doc documentation build via meson's internal gtkdoc helper.

    args[0] is the module name.  Returns a '<module>[-<version>]-doc'
    RunTarget and, when install is enabled (the default), an
    install-time RunScript running the same helper.
    """
    if len(args) != 1:
        raise MesonException('Gtkdoc must have one positional argument.')
    modulename = args[0]
    if not isinstance(modulename, str):
        raise MesonException('Gtkdoc arg must be string.')
    if 'src_dir' not in kwargs:
        raise MesonException('Keyword argument src_dir missing.')
    main_file = kwargs.get('main_sgml', '')
    if not isinstance(main_file, str):
        raise MesonException('Main sgml keyword argument must be a string.')
    main_xml = kwargs.get('main_xml', '')
    if not isinstance(main_xml, str):
        raise MesonException('Main xml keyword argument must be a string.')
    moduleversion = kwargs.get('module_version', '')
    if not isinstance(moduleversion, str):
        raise MesonException('Module version keyword argument must be a string.')
    # main_xml and main_sgml are aliases; only one may be given.
    if main_xml != '':
        if main_file != '':
            raise MesonException('You can only specify main_xml or main_sgml, not both.')
        main_file = main_xml
    targetname = modulename + ('-' + moduleversion if moduleversion else '') + '-doc'
    command = state.environment.get_build_command()
    namespace = kwargs.get('namespace', '')
    mode = kwargs.get('mode', 'auto')
    VALID_MODES = ('xml', 'sgml', 'none', 'auto')
    if mode not in VALID_MODES:
        raise MesonException('gtkdoc: Mode {} is not a valid mode: {}'.format(mode, VALID_MODES))
    src_dirs = mesonlib.extract_as_list(kwargs, 'src_dir')
    header_dirs = []
    for src_dir in src_dirs:
        if hasattr(src_dir, 'held_object'):
            src_dir = src_dir.held_object
            if not isinstance(src_dir, build.IncludeDirs):
                raise MesonException('Invalid keyword argument for src_dir.')
            # Expand include_directories() into both source- and
            # build-dir header locations.
            for inc_dir in src_dir.get_incdirs():
                header_dirs.append(os.path.join(state.environment.get_source_dir(),
                                                src_dir.get_curdir(), inc_dir))
                header_dirs.append(os.path.join(state.environment.get_build_dir(),
                                                src_dir.get_curdir(), inc_dir))
        else:
            header_dirs.append(src_dir)
    args = ['--internal', 'gtkdoc',
            '--sourcedir=' + state.environment.get_source_dir(),
            '--builddir=' + state.environment.get_build_dir(),
            '--subdir=' + state.subdir,
            '--headerdirs=' + '@@'.join(header_dirs),
            '--mainfile=' + main_file,
            '--modulename=' + modulename,
            '--moduleversion=' + moduleversion,
            '--mode=' + mode]
    if namespace:
        args.append('--namespace=' + namespace)
    args += self._unpack_args('--htmlargs=', 'html_args', kwargs)
    args += self._unpack_args('--scanargs=', 'scan_args', kwargs)
    args += self._unpack_args('--scanobjsargs=', 'scanobjs_args', kwargs)
    args += self._unpack_args('--gobjects-types-file=', 'gobject_typesfile', kwargs, state)
    args += self._unpack_args('--fixxrefargs=', 'fixxref_args', kwargs)
    args += self._unpack_args('--mkdbargs=', 'mkdb_args', kwargs)
    args += self._unpack_args('--html-assets=', 'html_assets', kwargs, state)
    depends = []
    content_files = []
    # Normalize every content_files entry to an absolute path, adding
    # generating targets to depends where needed.
    for s in mesonlib.extract_as_list(kwargs, 'content_files'):
        if hasattr(s, 'held_object'):
            s = s.held_object
        if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
            depends.append(s)
            for o in s.get_outputs():
                content_files.append(os.path.join(state.environment.get_build_dir(),
                                                  state.backend.get_target_dir(s),
                                                  o))
        elif isinstance(s, mesonlib.File):
            content_files.append(s.absolute_path(state.environment.get_source_dir(),
                                                 state.environment.get_build_dir()))
        elif isinstance(s, build.GeneratedList):
            depends.append(s)
            for gen_src in s.get_outputs():
                content_files.append(os.path.join(state.environment.get_source_dir(),
                                                  state.subdir,
                                                  gen_src))
        elif isinstance(s, str):
            content_files.append(os.path.join(state.environment.get_source_dir(),
                                              state.subdir,
                                              s))
        else:
            raise MesonException(
                'Invalid object type: {!r}'.format(s.__class__.__name__))
    args += ['--content-files=' + '@@'.join(content_files)]
    args += self._unpack_args('--expand-content-files=', 'expand_content_files', kwargs, state)
    args += self._unpack_args('--ignore-headers=', 'ignore_headers', kwargs)
    args += self._unpack_args('--installdir=', 'install_dir', kwargs)
    args += self._get_build_args(kwargs, state, depends)
    res = [build.RunTarget(targetname, command[0], command[1:] + args, depends, state.subdir, state.subproject)]
    if kwargs.get('install', True):
        res.append(build.RunScript(command, args))
    return ModuleReturnValue(None, res)
def _get_build_args(self, kwargs, state, depends):
    """Assemble --cc/--ld/--cflags/--ldflags arguments for the gtkdoc helper.

    Pulls flags from the 'dependencies', 'c_args' and
    'include_directories' kwargs plus the configured (cross or native)
    C toolchain.  May append to *depends* via _get_dependencies_flags.
    """
    args = []
    deps = extract_as_list(kwargs, 'dependencies', unholder=True)
    cflags = OrderedSet()
    cflags.update(mesonlib.stringlistify(kwargs.pop('c_args', [])))
    deps_cflags, internal_ldflags, external_ldflags, gi_includes = \
        self._get_dependencies_flags(deps, state, depends, include_rpath=True)
    inc_dirs = mesonlib.extract_as_list(kwargs, 'include_directories')
    for incd in inc_dirs:
        if not isinstance(incd.held_object, (str, build.IncludeDirs)):
            raise MesonException(
                'Gir include dirs should be include_directories().')
    cflags.update(deps_cflags)
    cflags.update(get_include_args(inc_dirs))
    ldflags = OrderedSet()
    ldflags.update(internal_ldflags)
    ldflags.update(external_ldflags)
    if state.environment.is_cross_build():
        cflags.update(state.environment.cross_info.config["properties"].get('c_args', ""))
        ldflags.update(state.environment.cross_info.config["properties"].get('c_link_args', ""))
        compiler = state.environment.coredata.cross_compilers.get('c')
    else:
        cflags.update(state.environment.coredata.get_external_args('c'))
        ldflags.update(state.environment.coredata.get_external_link_args('c'))
        compiler = state.environment.coredata.compilers.get('c')
    compiler_flags = self._get_langs_compilers_flags(state, [('c', compiler)])
    cflags.update(compiler_flags[0])
    ldflags.update(compiler_flags[1])
    ldflags.update(compiler_flags[2])
    # compiler may be None when the project has no C compiler configured.
    if compiler:
        args += ['--cc=%s' % ' '.join(compiler.get_exelist())]
        args += ['--ld=%s' % ' '.join(compiler.get_linker_exelist())]
    if cflags:
        args += ['--cflags=%s' % ' '.join(cflags)]
    if ldflags:
        args += ['--ldflags=%s' % ' '.join(ldflags)]
    return args
@noKwargs
def gtkdoc_html_dir(self, state, args, kwargs):
    """Return the install subdirectory for a module's gtk-doc HTML."""
    if len(args) != 1:
        raise MesonException('Must have exactly one argument.')
    module = args[0]
    if not isinstance(module, str):
        raise MesonException('Argument must be a string')
    html_dir = os.path.join('share/gtk-doc/html', module)
    return ModuleReturnValue(html_dir, [])
@staticmethod
def _unpack_args(arg, kwarg_name, kwargs, expend_file_state=None):
    """Pop *kwarg_name* and join its values into one ``arg + '@@'-list`` flag.

    With *expend_file_state*, File entries and plain strings are
    expanded to absolute paths first.  Returns [] when the kwarg is
    absent or empty; raises MesonException for non-string values when
    no state is given.
    """
    if kwarg_name not in kwargs:
        return []
    values = []
    for item in mesonlib.extract_as_list(kwargs, kwarg_name):
        if expend_file_state and isinstance(item, mesonlib.File):
            env = expend_file_state.environment
            item = item.absolute_path(env.get_source_dir(), env.get_build_dir())
        elif expend_file_state and isinstance(item, str):
            item = os.path.join(expend_file_state.environment.get_source_dir(),
                                expend_file_state.subdir, item)
        elif not isinstance(item, str):
            raise MesonException(kwarg_name + ' values must be strings.')
        values.append(item)
    if not values:
        return []
    return [arg + '@@'.join(values)]
def _get_autocleanup_args(self, kwargs, glib_version):
    """Return gdbus-codegen autocleanup flags, honouring the glib version.

    On glib older than 2.49.1 the feature is unavailable: warn when the
    user asked for it explicitly, otherwise disable it silently.
    """
    if not mesonlib.version_compare(glib_version, '>= 2.49.1'):
        if 'autocleanup' in kwargs:
            mlog.warning('Glib version ({}) is too old to support the \'autocleanup\' '
                         'kwarg, need 2.49.1 or newer'.format(glib_version))
        return []
    valid = ('none', 'objects', 'all')
    choice = kwargs.pop('autocleanup', 'all')
    if choice in valid:
        return ['--c-generate-autocleanup', choice]
    raise MesonException('gdbus_codegen does not support {!r} as an autocleanup value, '
                         'must be one of: {!r}'.format(choice, ', '.join(valid)))
@FeatureNewKwargs('build target', '0.46.0', ['install_header', 'install_dir', 'sources'])
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@FeatureNewKwargs('build target', '0.47.0', ['extra_args', 'autocleanup'])
@permittedKwargs({'interface_prefix', 'namespace', 'extra_args', 'autocleanup', 'object_manager', 'build_by_default',
                  'annotations', 'docbook', 'install_header', 'install_dir', 'sources'})
def gdbus_codegen(self, state, args, kwargs):
    """Generate C source/header (and optional docbook) from D-Bus XML.

    args are (namebase[, xml_file]); further XML files may come from the
    'sources' kwarg.  The command layout depends on the installed glib
    version: >= 2.56.2 can emit body/header/docbook in separate
    invocations, older versions emit everything in one pass.  Returns
    all created custom targets.
    """
    if len(args) not in (1, 2):
        raise MesonException('gdbus_codegen takes at most two arguments, name and xml file.')
    namebase = args[0]
    xml_files = args[1:]
    cmd = [self.interpreter.find_program_impl('gdbus-codegen')]
    extra_args = mesonlib.stringlistify(kwargs.pop('extra_args', []))
    cmd += extra_args
    # Autocleanup supported?
    glib_version = self._get_native_glib_version(state)
    cmd += self._get_autocleanup_args(kwargs, glib_version)
    if 'interface_prefix' in kwargs:
        cmd += ['--interface-prefix', kwargs.pop('interface_prefix')]
    if 'namespace' in kwargs:
        cmd += ['--c-namespace', kwargs.pop('namespace')]
    if kwargs.get('object_manager', False):
        cmd += ['--c-generate-object-manager']
    if 'sources' in kwargs:
        xml_files += mesonlib.listify(kwargs.pop('sources'))
    build_by_default = kwargs.get('build_by_default', False)
    # Annotations are a bit ugly in that they are a list of lists of strings...
    annotations = kwargs.pop('annotations', [])
    if not isinstance(annotations, list):
        raise MesonException('annotations takes a list')
    if annotations and isinstance(annotations, list) and not isinstance(annotations[0], list):
        # A single flat annotation is wrapped into the list-of-lists form.
        annotations = [annotations]
    for annotation in annotations:
        if len(annotation) != 3 or not all(isinstance(i, str) for i in annotation):
            raise MesonException('Annotations must be made up of 3 strings for ELEMENT, KEY, and VALUE')
        cmd += ['--annotate'] + annotation
    targets = []
    install_header = kwargs.get('install_header', False)
    install_dir = kwargs.get('install_dir', state.environment.coredata.get_builtin_option('includedir'))
    output = namebase + '.c'
    # Added in https://gitlab.gnome.org/GNOME/glib/commit/e4d68c7b3e8b01ab1a4231bf6da21d045cb5a816 (2.55.2)
    # Fixed in https://gitlab.gnome.org/GNOME/glib/commit/cd1f82d8fc741a2203582c12cc21b4dacf7e1872 (2.56.2)
    if mesonlib.version_compare(glib_version, '>= 2.56.2'):
        custom_kwargs = {'input': xml_files,
                         'output': output,
                         'command': cmd + ['--body', '--output', '@OUTPUT@', '@INPUT@'],
                         'build_by_default': build_by_default
                         }
    else:
        if 'docbook' in kwargs:
            docbook = kwargs['docbook']
            if not isinstance(docbook, str):
                raise MesonException('docbook value must be a string.')
            cmd += ['--generate-docbook', docbook]
        # https://git.gnome.org/browse/glib/commit/?id=ee09bb704fe9ccb24d92dd86696a0e6bb8f0dc1a
        if mesonlib.version_compare(glib_version, '>= 2.51.3'):
            cmd += ['--output-directory', '@OUTDIR@', '--generate-c-code', namebase, '@INPUT@']
        else:
            self._print_gdbus_warning()
            cmd += ['--generate-c-code', '@OUTDIR@/' + namebase, '@INPUT@']
        custom_kwargs = {'input': xml_files,
                         'output': output,
                         'command': cmd,
                         'build_by_default': build_by_default
                         }
    cfile_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
    targets.append(cfile_custom_target)
    output = namebase + '.h'
    if mesonlib.version_compare(glib_version, '>= 2.56.2'):
        custom_kwargs = {'input': xml_files,
                         'output': output,
                         'command': cmd + ['--header', '--output', '@OUTPUT@', '@INPUT@'],
                         'build_by_default': build_by_default,
                         'install': install_header,
                         'install_dir': install_dir
                         }
    else:
        # Old glib generates everything in one go; depend on the C
        # target which actually produced the header.
        custom_kwargs = {'input': xml_files,
                         'output': output,
                         'command': cmd,
                         'build_by_default': build_by_default,
                         'install': install_header,
                         'install_dir': install_dir,
                         'depends': cfile_custom_target
                         }
    hfile_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
    targets.append(hfile_custom_target)
    if 'docbook' in kwargs:
        docbook = kwargs['docbook']
        if not isinstance(docbook, str):
            raise MesonException('docbook value must be a string.')
        docbook_cmd = cmd + ['--output-directory', '@OUTDIR@', '--generate-docbook', docbook, '@INPUT@']
        # The docbook output is always ${docbook}-${name_of_xml_file}
        output = namebase + '-docbook'
        outputs = []
        for f in xml_files:
            outputs.append('{}-{}'.format(docbook, os.path.basename(str(f))))
        if mesonlib.version_compare(glib_version, '>= 2.56.2'):
            custom_kwargs = {'input': xml_files,
                             'output': outputs,
                             'command': docbook_cmd,
                             'build_by_default': build_by_default
                             }
        else:
            custom_kwargs = {'input': xml_files,
                             'output': outputs,
                             'command': cmd,
                             'build_by_default': build_by_default,
                             'depends': cfile_custom_target
                             }
        docbook_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
        targets.append(docbook_custom_target)
    return ModuleReturnValue(targets, targets)
    @permittedKwargs({'sources', 'c_template', 'h_template', 'install_header', 'install_dir',
                      'comments', 'identifier_prefix', 'symbol_prefix', 'eprod', 'vprod',
                      'fhead', 'fprod', 'ftail', 'vhead', 'vtail', 'depends'})
    def mkenums(self, state, args, kwargs):
        """Wrap glib-mkenums to generate GType enum registration sources.

        args[0] is the base name of the generated file when no template is
        supplied.  'c_template'/'h_template' select template-driven output
        (one CustomTarget each); recognised plain keywords are forwarded as
        --flags to the tool.  Returns the target(s) in a ModuleReturnValue.
        """
        if len(args) != 1:
            raise MesonException('Mkenums requires one positional argument.')
        basename = args[0]
        if 'sources' not in kwargs:
            raise MesonException('Missing keyword argument "sources".')
        # Pop 'sources' so the generic kwarg loop below never sees it.
        sources = kwargs.pop('sources')
        if isinstance(sources, str):
            sources = [sources]
        elif not isinstance(sources, list):
            raise MesonException(
                'Sources keyword argument must be a string or array.')
        cmd = []
        # Keywords translated 1:1 into glib-mkenums command-line options.
        known_kwargs = ['comments', 'eprod', 'fhead', 'fprod', 'ftail',
                        'identifier_prefix', 'symbol_prefix', 'template',
                        'vhead', 'vprod', 'vtail']
        # Keywords passed through to the CustomTarget instead of the tool.
        known_custom_target_kwargs = ['install_dir', 'build_always',
                                      'depends', 'depend_files']
        c_template = h_template = None
        install_header = False
        for arg, value in kwargs.items():
            if arg == 'sources':
                raise AssertionError("sources should've already been handled")
            elif arg == 'c_template':
                c_template = value
                if isinstance(c_template, mesonlib.File):
                    c_template = c_template.absolute_path(state.environment.source_dir, state.environment.build_dir)
                if 'template' in kwargs:
                    raise MesonException('Mkenums does not accept both '
                                         'c_template and template keyword '
                                         'arguments at the same time.')
            elif arg == 'h_template':
                h_template = value
                if isinstance(h_template, mesonlib.File):
                    h_template = h_template.absolute_path(state.environment.source_dir, state.environment.build_dir)
                if 'template' in kwargs:
                    raise MesonException('Mkenums does not accept both '
                                         'h_template and template keyword '
                                         'arguments at the same time.')
            elif arg == 'install_header':
                install_header = value
            elif arg in known_kwargs:
                # e.g. identifier_prefix -> --identifier-prefix <value>
                cmd += ['--' + arg.replace('_', '-'), value]
            elif arg not in known_custom_target_kwargs:
                raise MesonException(
                    'Mkenums does not take a %s keyword argument.' % (arg, ))
        # 'mkenums' is the legacy fallback name of the tool.
        cmd = [self.interpreter.find_program_impl(['glib-mkenums', 'mkenums'])] + cmd
        custom_kwargs = {}
        for arg in known_custom_target_kwargs:
            if arg in kwargs:
                custom_kwargs[arg] = kwargs[arg]
        targets = []
        if h_template is not None:
            h_output = os.path.basename(os.path.splitext(h_template)[0])
            h_cmd = cmd + ['--template', '@INPUT@']
            h_sources = [h_template] + sources
            custom_kwargs['install'] = install_header
            if 'install_dir' not in custom_kwargs:
                custom_kwargs['install_dir'] = \
                    state.environment.coredata.get_builtin_option('includedir')
            h_target = self._make_mkenum_custom_target(state, h_sources,
                                                       h_output, h_cmd,
                                                       custom_kwargs)
            targets.append(h_target)
        if c_template is not None:
            c_output = os.path.basename(os.path.splitext(c_template)[0])
            c_cmd = cmd + ['--template', '@INPUT@']
            c_sources = [c_template] + sources
            # Never install the generated .c; only the header is installable.
            custom_kwargs['install'] = False
            if h_template is not None:
                # The .c output includes the generated header, so make sure
                # the header target is built first.
                if 'depends' in custom_kwargs:
                    custom_kwargs['depends'] += [h_target]
                else:
                    custom_kwargs['depends'] = h_target
            c_target = self._make_mkenum_custom_target(state, c_sources,
                                                       c_output, c_cmd,
                                                       custom_kwargs)
            targets.insert(0, c_target)
        if c_template is None and h_template is None:
            # Template-less mode: a single output named after args[0].
            generic_cmd = cmd + ['@INPUT@']
            custom_kwargs['install'] = install_header
            if 'install_dir' not in custom_kwargs:
                custom_kwargs['install_dir'] = \
                    state.environment.coredata.get_builtin_option('includedir')
            target = self._make_mkenum_custom_target(state, sources, basename,
                                                     generic_cmd, custom_kwargs)
            return ModuleReturnValue(target, [target])
        elif len(targets) == 1:
            return ModuleReturnValue(targets[0], [targets[0]])
        else:
            return ModuleReturnValue(targets, targets)
@FeatureNew('gnome.mkenums_simple', '0.42.0')
def mkenums_simple(self, state, args, kwargs):
hdr_filename = args[0] + '.h'
body_filename = args[0] + '.c'
forbidden_kwargs = ['c_template', 'h_template', 'eprod', 'fhead',
'fprod', 'ftail', 'vhead', 'vtail', 'comments']
for arg in forbidden_kwargs:
if arg in kwargs:
raise MesonException('mkenums_simple() does not take a %s keyword argument' % (arg, ))
shared_kwargs = ['sources', 'install_header', 'install_dir',
'identifier_prefix', 'symbol_prefix']
mkenums_kwargs = {}
for arg in shared_kwargs:
if arg in kwargs:
mkenums_kwargs[arg] = kwargs[arg]
c_file_kwargs = copy.deepcopy(mkenums_kwargs)
if 'sources' not in kwargs:
raise MesonException('Missing keyword argument "sources".')
sources = kwargs['sources']
if isinstance(sources, str):
sources = [sources]
elif not isinstance(sources, list):
raise MesonException(
'Sources keyword argument must be a string or array.')
c_file_kwargs['install_header'] = False
header_prefix = kwargs.get('header_prefix', '')
decl_decorator = kwargs.get('decorator', '')
func_prefix = kwargs.get('function_prefix', '')
body_prefix = kwargs.get('body_prefix', '')
fhead = ''
if body_prefix != '':
fhead += '%s\n' % body_prefix
fhead += '#include "%s"\n' % hdr_filename
for hdr in sources:
fhead += '#include "%s"\n' % os.path.basename(str(hdr))
fhead += '''
#define C_ENUM(v) ((gint) v)
#define C_FLAGS(v) ((guint) v)
'''
c_file_kwargs['fhead'] = fhead
c_file_kwargs['fprod'] = '''
/* enumerations from "@basename@" */
'''
c_file_kwargs['vhead'] = '''
GType
%s@enum_name@_get_type (void)
{
static volatile gsize gtype_id = 0;
static const G@Type@Value values[] = {''' % func_prefix
c_file_kwargs['vprod'] = ' { C_@TYPE@(@VALUENAME@), "@VALUENAME@", "@valuenick@" },'
c_file_kwargs['vtail'] = ''' { 0, NULL, NULL }
};
if (g_once_init_enter (>ype_id)) {
GType new_type = g_@type@_register_static ("@EnumName@", values);
g_once_init_leave (>ype_id, new_type);
}
return (GType) gtype_id;
}'''
rv = self.mkenums(state, [body_filename], c_file_kwargs)
c_file = rv.return_value
h_file_kwargs = copy.deepcopy(mkenums_kwargs)
h_file_kwargs['fhead'] = '''#pragma once
#include <glib-object.h>
{}
G_BEGIN_DECLS
'''.format(header_prefix)
h_file_kwargs['fprod'] = '''
/* enumerations from "@basename@" */
'''
h_file_kwargs['vhead'] = '''
{}
GType {}@enum_name@_get_type (void);
#define @ENUMPREFIX@_TYPE_@ENUMSHORT@ ({}@enum_name@_get_type())'''.format(decl_decorator, func_prefix, func_prefix)
h_file_kwargs['ftail'] = '''
G_END_DECLS'''
rv = self.mkenums(state, [hdr_filename], h_file_kwargs)
h_file = rv.return_value
return ModuleReturnValue([c_file, h_file], [c_file, h_file])
@staticmethod
def _make_mkenum_custom_target(state, sources, output, cmd, kwargs):
custom_kwargs = {
'input': sources,
'output': output,
'capture': True,
'command': cmd
}
custom_kwargs.update(kwargs)
return build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs,
absolute_paths=True)
    @permittedKwargs({'sources', 'prefix', 'install_header', 'install_dir', 'stdinc',
                      'nostdinc', 'internal', 'skip_source', 'valist_marshallers',
                      'extra_args'})
    def genmarshal(self, state, args, kwargs):
        """Wrap glib-genmarshal: generate <output>.c/.h marshaller sources.

        args[0] is the output base name; 'sources' lists the marshaller
        .list files.  Returns [body_target, header_target] wrapped in a
        ModuleReturnValue.
        """
        if len(args) != 1:
            raise MesonException(
                'Genmarshal requires one positional argument.')
        output = args[0]
        if 'sources' not in kwargs:
            raise MesonException('Missing keyword argument "sources".')
        sources = kwargs.pop('sources')
        if isinstance(sources, str):
            sources = [sources]
        elif not isinstance(sources, list):
            raise MesonException(
                'Sources keyword argument must be a string or array.')
        # GLib >= 2.53.3 rewrote glib-genmarshal and added new options
        # (extra args, --pragma-once).
        new_genmarshal = mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.3')
        cmd = [self.interpreter.find_program_impl('glib-genmarshal')]
        # Boolean kwargs forwarded verbatim as --flags when truthy.
        known_kwargs = ['internal', 'nostdinc', 'skip_source', 'stdinc',
                        'valist_marshallers', 'extra_args']
        # Kwargs passed through to the CustomTargets instead of the tool.
        known_custom_target_kwargs = ['build_always', 'depends',
                                      'depend_files', 'install_dir',
                                      'install_header']
        for arg, value in kwargs.items():
            if arg == 'prefix':
                cmd += ['--prefix', value]
            elif arg == 'extra_args':
                if new_genmarshal:
                    cmd += mesonlib.stringlistify(value)
                else:
                    mlog.warning('The current version of GLib does not support extra arguments \n'
                                 'for glib-genmarshal. You need at least GLib 2.53.3. See ',
                                 mlog.bold('https://github.com/mesonbuild/meson/pull/2049'))
            elif arg in known_kwargs and value:
                cmd += ['--' + arg.replace('_', '-')]
            elif arg not in known_custom_target_kwargs:
                raise MesonException(
                    'Genmarshal does not take a %s keyword argument.' % (
                        arg, ))
        install_header = kwargs.pop('install_header', False)
        install_dir = kwargs.pop('install_dir', None)
        custom_kwargs = {
            'input': sources,
        }
        # Newer glib-genmarshal writes to --output; older versions print to
        # stdout, which the CustomTarget must capture instead.
        if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.51.0'):
            cmd += ['--output', '@OUTPUT@']
        else:
            custom_kwargs['capture'] = True
        for arg in known_custom_target_kwargs:
            if arg in kwargs:
                custom_kwargs[arg] = kwargs[arg]
        header_file = output + '.h'
        custom_kwargs['command'] = cmd + ['--body', '@INPUT@']
        if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.4'):
            # Make the generated body #include its own header.
            custom_kwargs['command'] += ['--include-header', header_file]
        custom_kwargs['output'] = output + '.c'
        body = build.CustomTarget(output + '_c', state.subdir, state.subproject, custom_kwargs)
        # custom_kwargs is reused for the header target; only the header is
        # installable.
        custom_kwargs['install'] = install_header
        if install_dir is not None:
            custom_kwargs['install_dir'] = install_dir
        if new_genmarshal:
            cmd += ['--pragma-once']
        custom_kwargs['command'] = cmd + ['--header', '@INPUT@']
        custom_kwargs['output'] = header_file
        header = build.CustomTarget(output + '_h', state.subdir, state.subproject, custom_kwargs)
        rv = [body, header]
        return ModuleReturnValue(rv, rv)
@staticmethod
def _vapi_args_to_command(prefix, variable, kwargs, accept_vapi=False):
arg_list = mesonlib.extract_as_list(kwargs, variable)
ret = []
for arg in arg_list:
if not isinstance(arg, str):
types = 'strings' + ' or InternalDependencys' if accept_vapi else ''
raise MesonException('All {} must be {}'.format(variable, types))
ret.append(prefix + arg)
return ret
    def _extract_vapi_packages(self, state, kwargs):
        """Split the 'packages' kwarg into vapigen arguments and metadata.

        Entries may be plain package-name strings or InternalDependency
        objects carrying VapiTargets.  Returns a 4-tuple:
        (extra vapigen CLI args, targets to depend on, package names,
        source include dirs).  Mutates kwargs['packages'] to contain only
        the plain string entries.
        """
        arg_list = kwargs.get('packages')
        if not arg_list:
            return [], [], [], []
        arg_list = mesonlib.listify(arg_list)
        vapi_depends = []
        vapi_packages = []
        vapi_includes = []
        ret = []
        remaining_args = []
        for arg in arg_list:
            # Unwrap interpreter holder objects to get the raw build object.
            if hasattr(arg, 'held_object'):
                arg = arg.held_object
            if isinstance(arg, InternalDependency):
                targets = [t for t in arg.sources if isinstance(t, VapiTarget)]
                for target in targets:
                    srcdir = os.path.join(state.environment.get_source_dir(),
                                          target.get_subdir())
                    outdir = os.path.join(state.environment.get_build_dir(),
                                          target.get_subdir())
                    # Strip the '.vapi' suffix to get the package name.
                    outfile = target.get_outputs()[0][:-5]
                    ret.append('--vapidir=' + outdir)
                    ret.append('--girdir=' + outdir)
                    ret.append('--pkg=' + outfile)
                    vapi_depends.append(target)
                    vapi_packages.append(outfile)
                    vapi_includes.append(srcdir)
            else:
                vapi_packages.append(arg)
                remaining_args.append(arg)
        # Only plain string packages are left for _vapi_args_to_command.
        kwargs['packages'] = remaining_args
        vapi_args = ret + self._vapi_args_to_command('--pkg=', 'packages', kwargs, accept_vapi=True)
        return vapi_args, vapi_depends, vapi_packages, vapi_includes
def _generate_deps(self, state, library, packages, install_dir):
outdir = state.environment.scratch_dir
fname = os.path.join(outdir, library + '.deps')
with open(fname, 'w') as ofile:
for package in packages:
ofile.write(package + '\n')
return build.Data(mesonlib.File(True, outdir, fname), install_dir)
def _get_vapi_link_with(self, target):
link_with = []
for dep in target.get_target_dependencies():
if isinstance(dep, build.SharedLibrary):
link_with.append(dep)
elif isinstance(dep, GirTarget):
link_with += self._get_vapi_link_with(dep)
return link_with
    @permittedKwargs({'sources', 'packages', 'metadata_dirs', 'gir_dirs',
                      'vapi_dirs', 'install', 'install_dir'})
    def generate_vapi(self, state, args, kwargs):
        """Generate a Vala .vapi binding for `args[0]` using vapigen.

        'sources' may mix .gir file names (strings) and GirTarget objects.
        Returns an InternalDependency exposing the VapiTarget plus, when
        installing, a generated .deps file.
        """
        if len(args) != 1:
            raise MesonException('The library name is required')
        if not isinstance(args[0], str):
            raise MesonException('The first argument must be the name of the library')
        created_values = []
        library = args[0]
        build_dir = os.path.join(state.environment.get_build_dir(), state.subdir)
        source_dir = os.path.join(state.environment.get_source_dir(), state.subdir)
        pkg_cmd, vapi_depends, vapi_packages, vapi_includes = self._extract_vapi_packages(state, kwargs)
        # The VAPIGEN environment variable overrides the tool lookup.
        if 'VAPIGEN' in os.environ:
            cmd = [self.interpreter.find_program_impl(os.environ['VAPIGEN'])]
        else:
            cmd = [self.interpreter.find_program_impl('vapigen')]
        cmd += ['--quiet', '--library=' + library, '--directory=' + build_dir]
        cmd += self._vapi_args_to_command('--vapidir=', 'vapi_dirs', kwargs)
        cmd += self._vapi_args_to_command('--metadatadir=', 'metadata_dirs', kwargs)
        cmd += self._vapi_args_to_command('--girdir=', 'gir_dirs', kwargs)
        cmd += pkg_cmd
        cmd += ['--metadatadir=' + source_dir]
        if 'sources' not in kwargs:
            raise MesonException('sources are required to generate the vapi file')
        inputs = mesonlib.extract_as_list(kwargs, 'sources')
        link_with = []
        for i in inputs:
            if isinstance(i, str):
                cmd.append(os.path.join(source_dir, i))
            elif hasattr(i, 'held_object') and isinstance(i.held_object, GirTarget):
                link_with += self._get_vapi_link_with(i.held_object)
                subdir = os.path.join(state.environment.get_build_dir(),
                                      i.held_object.get_subdir())
                gir_file = os.path.join(subdir, i.held_object.get_outputs()[0])
                cmd.append(gir_file)
            else:
                raise MesonException('Input must be a str or GirTarget')
        vapi_output = library + '.vapi'
        custom_kwargs = {
            'command': cmd,
            'input': inputs,
            'output': vapi_output,
            'depends': vapi_depends,
        }
        install_dir = kwargs.get('install_dir',
                                 os.path.join(state.environment.coredata.get_builtin_option('datadir'),
                                              'vala', 'vapi'))
        if kwargs.get('install'):
            custom_kwargs['install'] = kwargs['install']
            custom_kwargs['install_dir'] = install_dir
            # The .deps file is only needed by installed consumers.
            deps_target = self._generate_deps(state, library, vapi_packages, install_dir)
            created_values.append(deps_target)
        vapi_target = VapiTarget(vapi_output, state.subdir, state.subproject, custom_kwargs)
        # So to try our best to get this to just work we need:
        # - link with with the correct library
        # - include the vapi and dependent vapi files in sources
        # - add relevant directories to include dirs
        incs = [build.IncludeDirs(state.subdir, ['.'] + vapi_includes, False)]
        sources = [vapi_target] + vapi_depends
        rv = InternalDependency(None, incs, [], [], link_with, [], sources, [])
        created_values.append(rv)
        return ModuleReturnValue(rv, created_values)
def initialize(*args, **kwargs):
    """Module entry point: construct and return the GNOME extension module."""
    module = GnomeModule(*args, **kwargs)
    return module
| true | true |
1c2f0461dae88461ed47418747b251fbcdf88be2 | 9,502 | py | Python | src/python/turicreate/toolkits/drawing_classifier/_tf_drawing_classifier.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | [
"BSD-3-Clause"
] | 1 | 2021-04-23T10:51:03.000Z | 2021-04-23T10:51:03.000Z | src/python/turicreate/toolkits/drawing_classifier/_tf_drawing_classifier.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | [
"BSD-3-Clause"
] | null | null | null | src/python/turicreate/toolkits/drawing_classifier/_tf_drawing_classifier.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2019 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import numpy as _np
from .._tf_model import TensorFlowModel
import turicreate.toolkits._tf_utils as _utils
import tensorflow.compat.v1 as _tf
# This toolkit is compatible with TensorFlow V2 behavior.
# However, until all toolkits are compatible, we must call `disable_v2_behavior()`.
_tf.disable_v2_behavior()
class DrawingClassifierTensorFlowModel(TensorFlowModel):
    """TensorFlow (v1-style graph) backend for the drawing classifier:
    a 3x(conv+relu+maxpool) stack followed by two dense layers, with
    weights exchanged with Core ML via the `net_params` dict."""
    def __init__(self, net_params, batch_size, num_classes):
        """
        Defines the TensorFlow model, loss, optimisation and accuracy. Then
        loads the weights into the model.
        """
        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()
        # Convert Core ML shared float arrays into plain numpy arrays that
        # TF variables can be initialized from.
        for key in net_params.keys():
            net_params[key] = _utils.convert_shared_float_array_to_numpy(
                net_params[key]
            )
        self.dc_graph = _tf.Graph()
        self.num_classes = num_classes
        self.batch_size = batch_size
        # NOTE(review): this session is replaced by a second _tf.Session()
        # created at the end of init_drawing_classifier_graph(), leaking
        # this instance -- confirm which session should own the graph.
        self.sess = _tf.Session(graph=self.dc_graph)
        with self.dc_graph.as_default():
            self.init_drawing_classifier_graph(net_params)
    def init_drawing_classifier_graph(self, net_params):
        """Build placeholders, the conv net, the weighted softmax
        cross-entropy loss and the Adam optimizer, seeding every variable
        from the Core ML-format `net_params` dict."""
        # 28x28 single-channel drawings, NHWC layout.
        self.input = _tf.placeholder(_tf.float32, [self.batch_size, 28, 28, 1])
        self.weights = _tf.placeholder(_tf.float32, [self.batch_size, 1])
        self.labels = _tf.placeholder(_tf.int64, [self.batch_size, 1])
        # One hot encoding target
        reshaped_labels = _tf.reshape(self.labels, [self.batch_size])
        one_hot_labels = _tf.one_hot(reshaped_labels, depth=self.num_classes, axis=-1)
        # Reshaping weights
        reshaped_weights = _tf.reshape(self.weights, [self.batch_size])
        self.one_hot_labels = _tf.placeholder(_tf.int32, [None, self.num_classes])
        # Weights
        # Conv kernels arrive in Core ML layout and are converted to TF's.
        weights = {
            name: _tf.Variable(
                _utils.convert_conv2d_coreml_to_tf(net_params[name]), name=name
            )
            for name in (
                "drawing_conv0_weight",
                "drawing_conv1_weight",
                "drawing_conv2_weight",
            )
        }
        weights["drawing_dense1_weight"] = _tf.Variable(
            _utils.convert_dense_coreml_to_tf(net_params["drawing_dense1_weight"]),
            name="drawing_dense1_weight",
        )
        """
        To make output of CoreML pool3 (NCHW) compatible with TF (NHWC).
        Decompose FC weights to NCHW. Transpose to NHWC. Reshape back to FC.
        """
        coreml_128_576 = net_params["drawing_dense0_weight"]
        coreml_128_576 = _np.reshape(coreml_128_576, (128, 64, 3, 3))
        coreml_128_576 = _np.transpose(coreml_128_576, (0, 2, 3, 1))
        coreml_128_576 = _np.reshape(coreml_128_576, (128, 576))
        weights["drawing_dense0_weight"] = _tf.Variable(
            _np.transpose(coreml_128_576, (1, 0)), name="drawing_dense0_weight"
        )
        # Biases
        biases = {
            name: _tf.Variable(net_params[name], name=name)
            for name in (
                "drawing_conv0_bias",
                "drawing_conv1_bias",
                "drawing_conv2_bias",
                "drawing_dense0_bias",
                "drawing_dense1_bias",
            )
        }
        # Stage 1: conv + relu + 2x2 max-pool (28x28 -> 14x14).
        conv_1 = _tf.nn.conv2d(
            self.input, weights["drawing_conv0_weight"], strides=1, padding="SAME"
        )
        conv_1 = _tf.nn.bias_add(conv_1, biases["drawing_conv0_bias"])
        relu_1 = _tf.nn.relu(conv_1)
        pool_1 = _tf.nn.max_pool2d(
            relu_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID"
        )
        # Stage 2 (14x14 -> 7x7).
        conv_2 = _tf.nn.conv2d(
            pool_1, weights["drawing_conv1_weight"], strides=1, padding="SAME"
        )
        conv_2 = _tf.nn.bias_add(conv_2, biases["drawing_conv1_bias"])
        relu_2 = _tf.nn.relu(conv_2)
        pool_2 = _tf.nn.max_pool2d(
            relu_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID"
        )
        # Stage 3 (7x7 -> 3x3, 64 channels -> 576 features after flatten).
        conv_3 = _tf.nn.conv2d(
            pool_2, weights["drawing_conv2_weight"], strides=1, padding="SAME"
        )
        conv_3 = _tf.nn.bias_add(conv_3, biases["drawing_conv2_bias"])
        relu_3 = _tf.nn.relu(conv_3)
        pool_3 = _tf.nn.max_pool2d(
            relu_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID"
        )
        # Flatten the data to a 1-D vector for the fully connected layer
        fc1 = _tf.reshape(pool_3, (-1, 576))
        fc1 = _tf.nn.xw_plus_b(
            fc1,
            weights=weights["drawing_dense0_weight"],
            biases=biases["drawing_dense0_bias"],
        )
        fc1 = _tf.nn.relu(fc1)
        out = _tf.nn.xw_plus_b(
            fc1,
            weights=weights["drawing_dense1_weight"],
            biases=biases["drawing_dense1_bias"],
        )
        self.predictions = _tf.nn.softmax(out)
        # Loss
        # Per-example (Reduction.NONE) weighted cross-entropy.
        self.cost = _tf.losses.softmax_cross_entropy(
            logits=out,
            onehot_labels=one_hot_labels,
            weights=reshaped_weights,
            reduction=_tf.losses.Reduction.NONE,
        )
        # Optimizer
        self.optimizer = _tf.train.AdamOptimizer(learning_rate=0.001).minimize(
            self.cost
        )
        # Called under dc_graph.as_default(), so this session binds to
        # dc_graph; it replaces the session made in __init__ (see NOTE there).
        self.sess = _tf.Session()
        self.sess.run(_tf.global_variables_initializer())
    def __del__(self):
        # Release the TF session and restore the GPU device policy.
        self.sess.close()
        self.gpu_policy.stop()
    def train(self, feed_dict):
        """Run one optimizer step on a batch.

        feed_dict must provide 'input', 'labels' and 'weights' (shared float
        arrays).  Returns {'loss': per-example losses, 'output': softmax
        probabilities} as numpy arrays.
        """
        for key in feed_dict.keys():
            feed_dict[key] = _utils.convert_shared_float_array_to_numpy(feed_dict[key])
        _, final_train_loss, final_train_output = self.sess.run(
            [self.optimizer, self.cost, self.predictions],
            feed_dict={
                self.input: feed_dict["input"],
                self.labels: feed_dict["labels"],
                self.weights: feed_dict["weights"],
            },
        )
        result = {
            "loss": _np.array(final_train_loss),
            "output": _np.array(final_train_output),
        }
        return result
    def predict(self, feed_dict):
        """Run a forward pass over a batch.

        Returns {'output': softmax probabilities}; when 'labels' (and
        'weights') are present in feed_dict the loss is computed too and
        returned under 'loss'.
        """
        is_train = "labels" in feed_dict
        for key in feed_dict.keys():
            feed_dict[key] = _utils.convert_shared_float_array_to_numpy(feed_dict[key])
        feed_dict_for_session = {self.input: feed_dict["input"]}
        if is_train:
            feed_dict_for_session[self.labels] = feed_dict["labels"]
            feed_dict_for_session[self.weights] = feed_dict["weights"]
            pred_probs, loss = self.sess.run(
                [self.predictions, self.cost], feed_dict=feed_dict_for_session
            )
            result = {"loss": _np.array(loss), "output": _np.array(pred_probs)}
        else:
            pred_probs = self.sess.run(
                [self.predictions], feed_dict=feed_dict_for_session
            )
            result = {"output": _np.array(pred_probs)}
        return result
    def export_weights(self):
        """
        Retrieve weights from the TF model, convert to the format Core ML
        expects and store in a dictionary.

        Returns
        -------
        net_params : dict
            Dictionary of weights, where the key is the name of the
            layer (e.g. `drawing_conv0_weight`) and the value is the
            respective weight of type `numpy.ndarray`.
        """
        net_params = {}
        with self.dc_graph.as_default():
            layer_names = _tf.trainable_variables()
        layer_weights = self.sess.run(layer_names)
        for var, val in zip(layer_names, layer_weights):
            # Variable names carry a ':0' tensor suffix; strip it for keys.
            if "bias" in var.name:
                net_params.update({var.name.replace(":0", ""): val})
            else:
                if "dense" in var.name:
                    if "drawing_dense0_weight" in var.name:
                        """
                        To make output of TF pool3 (NHWC) compatible with CoreML (NCHW).
                        Decompose FC weights to NHWC. Transpose to NCHW. Reshape back to FC.
                        """
                        tf_576_128 = val
                        tf_576_128 = _np.reshape(tf_576_128, (3, 3, 64, 128))
                        tf_576_128 = _np.transpose(tf_576_128, (2, 0, 1, 3))
                        tf_576_128 = _np.reshape(tf_576_128, (576, 128))
                        net_params.update(
                            {
                                var.name.replace(":0", ""): _np.transpose(
                                    tf_576_128, (1, 0)
                                )
                            }
                        )
                    else:
                        net_params.update(
                            {var.name.replace(":0", ""): val.transpose(1, 0)}
                        )
                else:
                    # np.transpose won't change the underlying memory layout
                    # but in turicreate we will force it.
                    net_params.update(
                        {
                            var.name.replace(
                                ":0", ""
                            ): _utils.convert_conv2d_tf_to_coreml(val)
                        }
                    )
        return net_params
| 36.40613 | 93 | 0.561461 |
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import numpy as _np
from .._tf_model import TensorFlowModel
import turicreate.toolkits._tf_utils as _utils
import tensorflow.compat.v1 as _tf
_tf.disable_v2_behavior()
class DrawingClassifierTensorFlowModel(TensorFlowModel):
def __init__(self, net_params, batch_size, num_classes):
self.gpu_policy = _utils.TensorFlowGPUPolicy()
self.gpu_policy.start()
for key in net_params.keys():
net_params[key] = _utils.convert_shared_float_array_to_numpy(
net_params[key]
)
self.dc_graph = _tf.Graph()
self.num_classes = num_classes
self.batch_size = batch_size
self.sess = _tf.Session(graph=self.dc_graph)
with self.dc_graph.as_default():
self.init_drawing_classifier_graph(net_params)
def init_drawing_classifier_graph(self, net_params):
self.input = _tf.placeholder(_tf.float32, [self.batch_size, 28, 28, 1])
self.weights = _tf.placeholder(_tf.float32, [self.batch_size, 1])
self.labels = _tf.placeholder(_tf.int64, [self.batch_size, 1])
reshaped_labels = _tf.reshape(self.labels, [self.batch_size])
one_hot_labels = _tf.one_hot(reshaped_labels, depth=self.num_classes, axis=-1)
reshaped_weights = _tf.reshape(self.weights, [self.batch_size])
self.one_hot_labels = _tf.placeholder(_tf.int32, [None, self.num_classes])
weights = {
name: _tf.Variable(
_utils.convert_conv2d_coreml_to_tf(net_params[name]), name=name
)
for name in (
"drawing_conv0_weight",
"drawing_conv1_weight",
"drawing_conv2_weight",
)
}
weights["drawing_dense1_weight"] = _tf.Variable(
_utils.convert_dense_coreml_to_tf(net_params["drawing_dense1_weight"]),
name="drawing_dense1_weight",
)
coreml_128_576 = net_params["drawing_dense0_weight"]
coreml_128_576 = _np.reshape(coreml_128_576, (128, 64, 3, 3))
coreml_128_576 = _np.transpose(coreml_128_576, (0, 2, 3, 1))
coreml_128_576 = _np.reshape(coreml_128_576, (128, 576))
weights["drawing_dense0_weight"] = _tf.Variable(
_np.transpose(coreml_128_576, (1, 0)), name="drawing_dense0_weight"
)
biases = {
name: _tf.Variable(net_params[name], name=name)
for name in (
"drawing_conv0_bias",
"drawing_conv1_bias",
"drawing_conv2_bias",
"drawing_dense0_bias",
"drawing_dense1_bias",
)
}
conv_1 = _tf.nn.conv2d(
self.input, weights["drawing_conv0_weight"], strides=1, padding="SAME"
)
conv_1 = _tf.nn.bias_add(conv_1, biases["drawing_conv0_bias"])
relu_1 = _tf.nn.relu(conv_1)
pool_1 = _tf.nn.max_pool2d(
relu_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID"
)
conv_2 = _tf.nn.conv2d(
pool_1, weights["drawing_conv1_weight"], strides=1, padding="SAME"
)
conv_2 = _tf.nn.bias_add(conv_2, biases["drawing_conv1_bias"])
relu_2 = _tf.nn.relu(conv_2)
pool_2 = _tf.nn.max_pool2d(
relu_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID"
)
conv_3 = _tf.nn.conv2d(
pool_2, weights["drawing_conv2_weight"], strides=1, padding="SAME"
)
conv_3 = _tf.nn.bias_add(conv_3, biases["drawing_conv2_bias"])
relu_3 = _tf.nn.relu(conv_3)
pool_3 = _tf.nn.max_pool2d(
relu_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID"
)
fc1 = _tf.reshape(pool_3, (-1, 576))
fc1 = _tf.nn.xw_plus_b(
fc1,
weights=weights["drawing_dense0_weight"],
biases=biases["drawing_dense0_bias"],
)
fc1 = _tf.nn.relu(fc1)
out = _tf.nn.xw_plus_b(
fc1,
weights=weights["drawing_dense1_weight"],
biases=biases["drawing_dense1_bias"],
)
self.predictions = _tf.nn.softmax(out)
self.cost = _tf.losses.softmax_cross_entropy(
logits=out,
onehot_labels=one_hot_labels,
weights=reshaped_weights,
reduction=_tf.losses.Reduction.NONE,
)
self.optimizer = _tf.train.AdamOptimizer(learning_rate=0.001).minimize(
self.cost
)
self.sess = _tf.Session()
self.sess.run(_tf.global_variables_initializer())
def __del__(self):
self.sess.close()
self.gpu_policy.stop()
def train(self, feed_dict):
for key in feed_dict.keys():
feed_dict[key] = _utils.convert_shared_float_array_to_numpy(feed_dict[key])
_, final_train_loss, final_train_output = self.sess.run(
[self.optimizer, self.cost, self.predictions],
feed_dict={
self.input: feed_dict["input"],
self.labels: feed_dict["labels"],
self.weights: feed_dict["weights"],
},
)
result = {
"loss": _np.array(final_train_loss),
"output": _np.array(final_train_output),
}
return result
def predict(self, feed_dict):
is_train = "labels" in feed_dict
for key in feed_dict.keys():
feed_dict[key] = _utils.convert_shared_float_array_to_numpy(feed_dict[key])
feed_dict_for_session = {self.input: feed_dict["input"]}
if is_train:
feed_dict_for_session[self.labels] = feed_dict["labels"]
feed_dict_for_session[self.weights] = feed_dict["weights"]
pred_probs, loss = self.sess.run(
[self.predictions, self.cost], feed_dict=feed_dict_for_session
)
result = {"loss": _np.array(loss), "output": _np.array(pred_probs)}
else:
pred_probs = self.sess.run(
[self.predictions], feed_dict=feed_dict_for_session
)
result = {"output": _np.array(pred_probs)}
return result
def export_weights(self):
net_params = {}
with self.dc_graph.as_default():
layer_names = _tf.trainable_variables()
layer_weights = self.sess.run(layer_names)
for var, val in zip(layer_names, layer_weights):
if "bias" in var.name:
net_params.update({var.name.replace(":0", ""): val})
else:
if "dense" in var.name:
if "drawing_dense0_weight" in var.name:
"""
To make output of TF pool3 (NHWC) compatible with CoreML (NCHW).
Decompose FC weights to NHWC. Transpose to NCHW. Reshape back to FC.
"""
tf_576_128 = val
tf_576_128 = _np.reshape(tf_576_128, (3, 3, 64, 128))
tf_576_128 = _np.transpose(tf_576_128, (2, 0, 1, 3))
tf_576_128 = _np.reshape(tf_576_128, (576, 128))
net_params.update(
{
var.name.replace(":0", ""): _np.transpose(
tf_576_128, (1, 0)
)
}
)
else:
net_params.update(
{var.name.replace(":0", ""): val.transpose(1, 0)}
)
else:
# but in turicreate we will force it.
net_params.update(
{
var.name.replace(
":0", ""
): _utils.convert_conv2d_tf_to_coreml(val)
}
)
return net_params
| true | true |
1c2f0489ad7d51f38cba2c7890b7280416dbc116 | 1,226 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractArsbltranslationsWordpressCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractArsbltranslationsWordpressCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractArsbltranslationsWordpressCom.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z | def extractArsbltranslationsWordpressCom(item):
'''
Parser for 'arsbltranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Picked up a Strange Knight', 'Picked up a Strange Knight', 'translated'),
('Aloof King and Cold (Acting) Queen', 'Aloof King and Cold (Acting) Queen', 'translated'),
('Moonlight on the Snowfield', 'Moonlight on the Snowfield', 'translated'),
('Brought My Wife Back from Another World', 'Brought My Wife Back from Another World', 'translated'),
('Your Kingdom', 'Your Kingdom', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 49.04 | 130 | 0.533442 | def extractArsbltranslationsWordpressCom(item):
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Picked up a Strange Knight', 'Picked up a Strange Knight', 'translated'),
('Aloof King and Cold (Acting) Queen', 'Aloof King and Cold (Acting) Queen', 'translated'),
('Moonlight on the Snowfield', 'Moonlight on the Snowfield', 'translated'),
('Brought My Wife Back from Another World', 'Brought My Wife Back from Another World', 'translated'),
('Your Kingdom', 'Your Kingdom', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | true | true |
1c2f04d4a3524b401bdca8931931a853e81bf11f | 201 | py | Python | app/common/config.py | hust-sh/hookhub | b6a26f167d4294200e1cce0886a8c89976b687ec | [
"MIT"
] | 1 | 2017-11-30T13:16:43.000Z | 2017-11-30T13:16:43.000Z | app/common/config.py | hust-sh/hookhub | b6a26f167d4294200e1cce0886a8c89976b687ec | [
"MIT"
] | 1 | 2017-12-02T10:10:24.000Z | 2017-12-02T10:59:13.000Z | app/common/config.py | hust-sh/hookhub | b6a26f167d4294200e1cce0886a8c89976b687ec | [
"MIT"
] | null | null | null | # coding: utf-8
# Redis connection settings for the webhook service.
REDIS_URL = 'redis://redis:6379/0'
REDIS_CONF = {
    'url': REDIS_URL,
    'socket_timeout': 0.5,  # seconds; keep hook handling responsive
}
# Webhook payload formats the service knows how to handle.
WEBHOOK_TYPES = ['jira', 'jenkins']
LOG_DIR = '/var/log'
# NOTE(review): hard-coded public IP -- presumably the deployment host;
# confirm, and consider moving this into environment configuration.
HOST = '120.78.197.57'
| 14.357143 | 35 | 0.61194 |
REDIS_URL = 'redis://redis:6379/0'
REDIS_CONF = {
'url': REDIS_URL,
'socket_timeout': 0.5,
}
WEBHOOK_TYPES = ['jira', 'jenkins']
LOG_DIR = '/var/log'
HOST = '120.78.197.57'
| true | true |
1c2f050463c4c33d9a85f97da89b550b5898c0e3 | 8,755 | py | Python | vta/config/vta_config.py | robo-corg/incubator-tvm | 4ddfdb4b15d05a5bf85a984837967d004efee5dd | [
"Apache-2.0"
] | 286 | 2020-06-23T06:40:44.000Z | 2022-03-30T01:27:49.000Z | vta/config/vta_config.py | robo-corg/incubator-tvm | 4ddfdb4b15d05a5bf85a984837967d004efee5dd | [
"Apache-2.0"
] | 10 | 2020-07-31T03:26:59.000Z | 2021-12-27T15:00:54.000Z | vta/config/vta_config.py | robo-corg/incubator-tvm | 4ddfdb4b15d05a5bf85a984837967d004efee5dd | [
"Apache-2.0"
] | 30 | 2020-07-17T01:04:14.000Z | 2021-12-27T14:05:19.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA config tool"""
import os
import sys
import json
import argparse
def get_pkg_config(cfg):
    """Load ``vta/python/vta/pkg_config.py`` and build a ``PkgConfig`` from *cfg*.

    The module is executed with :func:`exec` instead of being imported so the
    tool works before the VTA package is installed on ``sys.path``.

    :param cfg: dict parsed from a ``vta_config.json`` file.
    :return: ``PkgConfig`` instance bound to *cfg* and the project root.
    """
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
    pkg_config_py = os.path.join(proj_root, "vta/python/vta/pkg_config.py")
    libpkg = {"__file__": pkg_config_py}
    # Read via a context manager so the handle is closed deterministically;
    # the original `open(...).read()` leaked the file object.
    with open(pkg_config_py, "rb") as src:
        source = src.read()
    exec(compile(source, pkg_config_py, "exec"), libpkg, libpkg)
    PkgConfig = libpkg["PkgConfig"]
    return PkgConfig(cfg, proj_root)
def _build_parser():
    """Build the CLI parser: one query flag per pkg-config attribute."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--use-cfg", type=str, default="",
                        help="path to the config json")
    parser.add_argument("--cflags", action="store_true",
                        help="print the cflags")
    parser.add_argument("--defs", action="store_true",
                        help="print the macro defs")
    parser.add_argument("--sources", action="store_true",
                        help="print the source file paths")
    # NOTE(review): --update is parsed but never acted on below -- confirm
    # whether it is dead or handled by a caller.
    parser.add_argument("--update", action="store_true",
                        help="Print out the json option.")
    parser.add_argument("--ldflags", action="store_true",
                        help="print the ldflags")
    parser.add_argument("--cfg-json", action="store_true",
                        help="print all the config json")
    parser.add_argument("--save-cfg-json", type=str, default="",
                        help="save config json to file")
    parser.add_argument("--target", action="store_true",
                        help="print the target")
    parser.add_argument("--cfg-str", action="store_true",
                        help="print the configuration string")
    parser.add_argument("--get-inp-mem-banks", action="store_true",
                        help="returns number of input memory banks")
    parser.add_argument("--get-inp-mem-width", action="store_true",
                        help="returns input memory read/write port width")
    parser.add_argument("--get-inp-mem-depth", action="store_true",
                        help="returns input memory depth")
    parser.add_argument("--get-inp-mem-axi-ratio", action="store_true",
                        help="returns ratio between input element width and axi width")
    parser.add_argument("--get-wgt-mem-banks", action="store_true",
                        help="returns number of weight memory banks")
    parser.add_argument("--get-wgt-mem-width", action="store_true",
                        help="returns weight memory read/write port width")
    parser.add_argument("--get-wgt-mem-depth", action="store_true",
                        help="returns weight memory depth")
    parser.add_argument("--get-wgt-mem-axi-ratio", action="store_true",
                        help="returns ratio between weight element width and axi width")
    parser.add_argument("--get-out-mem-banks", action="store_true",
                        help="returns number of output memory banks")
    parser.add_argument("--get-out-mem-width", action="store_true",
                        help="returns output memory read/write port width")
    parser.add_argument("--get-out-mem-depth", action="store_true",
                        help="returns output memory depth")
    parser.add_argument("--get-out-mem-axi-ratio", action="store_true",
                        help="returns ratio between output element width and axi width")
    parser.add_argument("--get-axi-cache-bits", action="store_true",
                        help="returns AXI system ARCACHE/AWCACHE hardcoded bit value")
    parser.add_argument("--get-axi-prot-bits", action="store_true",
                        help="returns AXI system ARPROT/AWPROT hardcoded bit value")
    parser.add_argument("--get-ip-reg-map-range", action="store_true",
                        help="returns ip register map address range")
    parser.add_argument("--get-fetch-base-addr", action="store_true",
                        help="returns fetch module base address")
    parser.add_argument("--get-load-base-addr", action="store_true",
                        help="returns load module base address")
    parser.add_argument("--get-compute-base-addr", action="store_true",
                        help="returns compute module base address")
    parser.add_argument("--get-store-base-addr", action="store_true",
                        help="returns store module base address")
    parser.add_argument("--get-fpga-dev", action="store_true",
                        help="returns FPGA device target")
    parser.add_argument("--get-fpga-family", action="store_true",
                        help="returns FPGA device family")
    parser.add_argument("--get-fpga-freq", action="store_true",
                        help="returns FPGA frequency")
    parser.add_argument("--get-fpga-per", action="store_true",
                        help="returns HLS target clock period")
    return parser


def main():
    """Entry point: load the VTA config json and print the requested values.

    With no command-line arguments the help text is printed and the function
    returns without touching the filesystem.

    :raises RuntimeError: if no config json can be found at the default
        location or at the path given via ``--use-cfg``.
    """
    parser = _build_parser()
    args = parser.parse_args()

    if len(sys.argv) == 1:
        parser.print_help()
        return

    curr_path = os.path.dirname(
        os.path.abspath(os.path.expanduser(__file__)))
    proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
    path_list = [
        os.path.join(proj_root, "vta/config/vta_config.json")
    ]
    # An explicit --use-cfg path overrides the in-tree default.
    if args.use_cfg:
        path_list = [args.use_cfg]
    ok_path_list = [p for p in path_list if os.path.exists(p)]
    if not ok_path_list:
        raise RuntimeError("Cannot find config in %s" % str(path_list))
    # Use a context manager so the handle is closed deterministically;
    # the original `json.load(open(...))` leaked the file object.
    with open(ok_path_list[0]) as cfg_file:
        cfg = json.load(cfg_file)
    pkg = get_pkg_config(cfg)

    # Each requested attribute is printed on its own line, in a fixed order,
    # so multiple flags can be combined in one invocation.
    if args.target:
        print(pkg.TARGET)
    if args.defs:
        print(" ".join(pkg.macro_defs))
    if args.sources:
        print(" ".join(pkg.lib_source))
    if args.cflags:
        cflags_str = " ".join(pkg.cflags)
        # Target-specific macro so the runtime can compile板-specific code paths.
        if pkg.TARGET == "pynq":
            cflags_str += " -DVTA_TARGET_PYNQ"
        elif pkg.TARGET == "de10nano":
            cflags_str += " -DVTA_TARGET_DE10_NANO"
        elif pkg.TARGET == "ultra96":
            cflags_str += " -DVTA_TARGET_ULTRA96"
        print(cflags_str)
    if args.ldflags:
        print(" ".join(pkg.ldflags))
    if args.cfg_json:
        print(pkg.cfg_json)
    if args.save_cfg_json:
        with open(args.save_cfg_json, "w") as fo:
            fo.write(pkg.cfg_json)
    if args.cfg_str:
        print(pkg.TARGET + "_" + pkg.bitstream)
    if args.get_inp_mem_banks:
        print(pkg.inp_mem_banks)
    if args.get_inp_mem_width:
        print(pkg.inp_mem_width)
    if args.get_inp_mem_depth:
        print(pkg.inp_mem_depth)
    if args.get_inp_mem_axi_ratio:
        print(pkg.inp_mem_axi_ratio)
    if args.get_wgt_mem_banks:
        print(pkg.wgt_mem_banks)
    if args.get_wgt_mem_width:
        print(pkg.wgt_mem_width)
    if args.get_wgt_mem_depth:
        print(pkg.wgt_mem_depth)
    if args.get_wgt_mem_axi_ratio:
        print(pkg.wgt_mem_axi_ratio)
    if args.get_out_mem_banks:
        print(pkg.out_mem_banks)
    if args.get_out_mem_width:
        print(pkg.out_mem_width)
    if args.get_out_mem_depth:
        print(pkg.out_mem_depth)
    if args.get_out_mem_axi_ratio:
        print(pkg.out_mem_axi_ratio)
    if args.get_axi_cache_bits:
        print(pkg.axi_cache_bits)
    if args.get_axi_prot_bits:
        print(pkg.axi_prot_bits)
    if args.get_ip_reg_map_range:
        print(pkg.ip_reg_map_range)
    if args.get_fetch_base_addr:
        print(pkg.fetch_base_addr)
    if args.get_load_base_addr:
        print(pkg.load_base_addr)
    if args.get_compute_base_addr:
        print(pkg.compute_base_addr)
    if args.get_store_base_addr:
        print(pkg.store_base_addr)
    if args.get_fpga_dev:
        print(pkg.fpga_device)
    if args.get_fpga_family:
        print(pkg.fpga_family)
    if args.get_fpga_freq:
        print(pkg.fpga_freq)
    if args.get_fpga_per:
        print(pkg.fpga_per)


if __name__ == "__main__":
    main()
| 38.738938 | 90 | 0.632895 |
import os
import sys
import json
import argparse
def get_pkg_config(cfg):
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
pkg_config_py = os.path.join(proj_root, "vta/python/vta/pkg_config.py")
libpkg = {"__file__": pkg_config_py}
exec(compile(open(pkg_config_py, "rb").read(), pkg_config_py, "exec"), libpkg, libpkg)
PkgConfig = libpkg["PkgConfig"]
return PkgConfig(cfg, proj_root)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--use-cfg", type=str, default="",
help="path to the config json")
parser.add_argument("--cflags", action="store_true",
help="print the cflags")
parser.add_argument("--defs", action="store_true",
help="print the macro defs")
parser.add_argument("--sources", action="store_true",
help="print the source file paths")
parser.add_argument("--update", action="store_true",
help="Print out the json option.")
parser.add_argument("--ldflags", action="store_true",
help="print the ldflags")
parser.add_argument("--cfg-json", action="store_true",
help="print all the config json")
parser.add_argument("--save-cfg-json", type=str, default="",
help="save config json to file")
parser.add_argument("--target", action="store_true",
help="print the target")
parser.add_argument("--cfg-str", action="store_true",
help="print the configuration string")
parser.add_argument("--get-inp-mem-banks", action="store_true",
help="returns number of input memory banks")
parser.add_argument("--get-inp-mem-width", action="store_true",
help="returns input memory read/write port width")
parser.add_argument("--get-inp-mem-depth", action="store_true",
help="returns input memory depth")
parser.add_argument("--get-inp-mem-axi-ratio", action="store_true",
help="returns ratio between input element width and axi width")
parser.add_argument("--get-wgt-mem-banks", action="store_true",
help="returns number of weight memory banks")
parser.add_argument("--get-wgt-mem-width", action="store_true",
help="returns weight memory read/write port width")
parser.add_argument("--get-wgt-mem-depth", action="store_true",
help="returns weight memory depth")
parser.add_argument("--get-wgt-mem-axi-ratio", action="store_true",
help="returns ratio between weight element width and axi width")
parser.add_argument("--get-out-mem-banks", action="store_true",
help="returns number of output memory banks")
parser.add_argument("--get-out-mem-width", action="store_true",
help="returns output memory read/write port width")
parser.add_argument("--get-out-mem-depth", action="store_true",
help="returns output memory depth")
parser.add_argument("--get-out-mem-axi-ratio", action="store_true",
help="returns ratio between output element width and axi width")
parser.add_argument("--get-axi-cache-bits", action="store_true",
help="returns AXI system ARCACHE/AWCACHE hardcoded bit value")
parser.add_argument("--get-axi-prot-bits", action="store_true",
help="returns AXI system ARPROT/AWPROT hardcoded bit value")
parser.add_argument("--get-ip-reg-map-range", action="store_true",
help="returns ip register map address range")
parser.add_argument("--get-fetch-base-addr", action="store_true",
help="returns fetch module base address")
parser.add_argument("--get-load-base-addr", action="store_true",
help="returns load module base address")
parser.add_argument("--get-compute-base-addr", action="store_true",
help="returns compute module base address")
parser.add_argument("--get-store-base-addr", action="store_true",
help="returns store module base address")
parser.add_argument("--get-fpga-dev", action="store_true",
help="returns FPGA device target")
parser.add_argument("--get-fpga-family", action="store_true",
help="returns FPGA device family")
parser.add_argument("--get-fpga-freq", action="store_true",
help="returns FPGA frequency")
parser.add_argument("--get-fpga-per", action="store_true",
help="returns HLS target clock period")
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
return
curr_path = os.path.dirname(
os.path.abspath(os.path.expanduser(__file__)))
proj_root = os.path.abspath(os.path.join(curr_path, "../../"))
path_list = [
os.path.join(proj_root, "vta/config/vta_config.json")
]
if args.use_cfg:
path_list = [args.use_cfg]
ok_path_list = [p for p in path_list if os.path.exists(p)]
if not ok_path_list:
raise RuntimeError("Cannot find config in %s" % str(path_list))
cfg = json.load(open(ok_path_list[0]))
pkg = get_pkg_config(cfg)
if args.target:
print(pkg.TARGET)
if args.defs:
print(" ".join(pkg.macro_defs))
if args.sources:
print(" ".join(pkg.lib_source))
if args.cflags:
cflags_str = " ".join(pkg.cflags)
if pkg.TARGET == "pynq":
cflags_str += " -DVTA_TARGET_PYNQ"
elif pkg.TARGET == "de10nano":
cflags_str += " -DVTA_TARGET_DE10_NANO"
elif pkg.TARGET == "ultra96":
cflags_str += " -DVTA_TARGET_ULTRA96"
print(cflags_str)
if args.ldflags:
print(" ".join(pkg.ldflags))
if args.cfg_json:
print(pkg.cfg_json)
if args.save_cfg_json:
with open(args.save_cfg_json, "w") as fo:
fo.write(pkg.cfg_json)
if args.cfg_str:
print(pkg.TARGET + "_" + pkg.bitstream)
if args.get_inp_mem_banks:
print(pkg.inp_mem_banks)
if args.get_inp_mem_width:
print(pkg.inp_mem_width)
if args.get_inp_mem_depth:
print(pkg.inp_mem_depth)
if args.get_inp_mem_axi_ratio:
print(pkg.inp_mem_axi_ratio)
if args.get_wgt_mem_banks:
print(pkg.wgt_mem_banks)
if args.get_wgt_mem_width:
print(pkg.wgt_mem_width)
if args.get_wgt_mem_depth:
print(pkg.wgt_mem_depth)
if args.get_wgt_mem_axi_ratio:
print(pkg.wgt_mem_axi_ratio)
if args.get_out_mem_banks:
print(pkg.out_mem_banks)
if args.get_out_mem_width:
print(pkg.out_mem_width)
if args.get_out_mem_depth:
print(pkg.out_mem_depth)
if args.get_out_mem_axi_ratio:
print(pkg.out_mem_axi_ratio)
if args.get_axi_cache_bits:
print(pkg.axi_cache_bits)
if args.get_axi_prot_bits:
print(pkg.axi_prot_bits)
if args.get_ip_reg_map_range:
print(pkg.ip_reg_map_range)
if args.get_fetch_base_addr:
print(pkg.fetch_base_addr)
if args.get_load_base_addr:
print(pkg.load_base_addr)
if args.get_compute_base_addr:
print(pkg.compute_base_addr)
if args.get_store_base_addr:
print(pkg.store_base_addr)
if args.get_fpga_dev:
print(pkg.fpga_device)
if args.get_fpga_family:
print(pkg.fpga_family)
if args.get_fpga_freq:
print(pkg.fpga_freq)
if args.get_fpga_per:
print(pkg.fpga_per)
if __name__ == "__main__":
main()
| true | true |
1c2f07d075b36f8c68d7d495bde8a0466b55974d | 1,730 | py | Python | import_broadbands.py | alphagov/land-availability-import | 58fd2c698eda18702ae680da3d3b9f3fea2865d1 | [
"MIT"
] | null | null | null | import_broadbands.py | alphagov/land-availability-import | 58fd2c698eda18702ae680da3d3b9f3fea2865d1 | [
"MIT"
] | 9 | 2017-02-20T15:14:42.000Z | 2017-07-10T10:35:45.000Z | import_broadbands.py | alphagov/land-availability-import | 58fd2c698eda18702ae680da3d3b9f3fea2865d1 | [
"MIT"
] | 2 | 2019-08-29T11:51:53.000Z | 2021-04-10T19:55:55.000Z | from importers import CSVImportCommand
import requests
import click
class BroadbandImportCommand(CSVImportCommand):
    """CSV importer that posts broadband-speed rows to the broadbands API."""

    def clean_column(self, column):
        """Normalise a raw CSV cell: drop '<' markers and 'N/A', default '0'."""
        value = column.replace('<', '').replace('N/A', '')
        return value if value else '0'

    def process_row(self, row):
        """POST one CSV row as a broadband record and log the API outcome."""
        payload = {
            "postcode": row[0],
            "speed_30_mb_percentage": float(self.clean_column(row[2])),
            "avg_download_speed": float(self.clean_column(row[7])),
            "min_download_speed": float(self.clean_column(row[9])),
            "max_download_speed": float(self.clean_column(row[10])),
            "avg_upload_speed": float(self.clean_column(row[15])),
            "min_upload_speed": float(self.clean_column(row[17])),
            "max_upload_speed": float(self.clean_column(row[18]))
        }
        auth_headers = {'Authorization': 'Token {0}'.format(self.token)}
        response = requests.post(
            self.api_url,
            json=payload,
            headers=auth_headers)
        # 201 Created means the record was accepted by the API.
        if response.status_code == 201:
            print('{0} imported correctly'.format(row[0]))
            return
        print(
            'ERROR: could not import {0} because of {1}'.format(
                row[0], response.text))
@click.command()
@click.argument('filenames', nargs=-1, type=click.Path())
@click.option(
    '--apiurl',
    default='http://localhost:8000/api/broadbands/', help='API url')
@click.option('--apitoken', help='API authentication token')
def import_broadbands(filenames, apiurl, apitoken):
    """Import one or more broadband CSV files into the API.

    :param filenames: CSV file paths to import.
    :param apiurl: base URL of the broadbands endpoint.
    :param apitoken: token sent in the Authorization header.
    """
    # NOTE(review): the trailing True is a positional flag on
    # CSVImportCommand -- presumably "skip header row"; confirm against the
    # importers module.
    command = BroadbandImportCommand(filenames, apiurl, apitoken, True)
    command.run()
if __name__ == '__main__':
    import_broadbands()
| 31.454545 | 71 | 0.602312 | from importers import CSVImportCommand
import requests
import click
class BroadbandImportCommand(CSVImportCommand):
def clean_column(self, column):
clean = column.replace('<', '').replace('N/A', '')
if clean == '':
return '0'
else:
return clean
def process_row(self, row):
data = {
"postcode": row[0],
"speed_30_mb_percentage": float(self.clean_column(row[2])),
"avg_download_speed": float(self.clean_column(row[7])),
"min_download_speed": float(self.clean_column(row[9])),
"max_download_speed": float(self.clean_column(row[10])),
"avg_upload_speed": float(self.clean_column(row[15])),
"min_upload_speed": float(self.clean_column(row[17])),
"max_upload_speed": float(self.clean_column(row[18]))
}
headers = {'Authorization': 'Token {0}'.format(self.token)}
response = requests.post(
self.api_url,
json=data,
headers=headers)
if response.status_code == 201:
print('{0} imported correctly'.format(row[0]))
else:
print(
'ERROR: could not import {0} because of {1}'.format(
row[0], response.text))
@click.command()
@click.argument('filenames', nargs=-1, type=click.Path())
@click.option(
'--apiurl',
default='http://localhost:8000/api/broadbands/', help='API url')
@click.option('--apitoken', help='API authentication token')
def import_broadbands(filenames, apiurl, apitoken):
command = BroadbandImportCommand(filenames, apiurl, apitoken, True)
command.run()
if __name__ == '__main__':
import_broadbands()
| true | true |
1c2f07fc053ad9c2110a8a00d9b348350b3331c4 | 1,168 | py | Python | packages/athena/setup.py | sebastianvillarroel/soda-sql | d672d94945ad5200cb47e05fe1b04706c2e84cc5 | [
"Apache-2.0"
] | null | null | null | packages/athena/setup.py | sebastianvillarroel/soda-sql | d672d94945ad5200cb47e05fe1b04706c2e84cc5 | [
"Apache-2.0"
] | null | null | null | packages/athena/setup.py | sebastianvillarroel/soda-sql | d672d94945ad5200cb47e05fe1b04706c2e84cc5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
from setuptools import setup, find_namespace_packages
# Enforce the minimum supported interpreter before attempting installation.
if sys.version_info < (3, 7):
    print('Error: Soda SQL requires at least Python 3.7')
    print('Error: Please upgrade your Python version to 3.7 or later')
    sys.exit(1)
package_name = "soda-sql-athena"
package_version = '2.1.1'
# TODO Add proper description
# NOTE(review): `description` is defined but never passed to setup() below;
# likely intended as description=description.
description = "Soda SQL Amazon Athena"
# Pin the core package to the exact same release so the two stay in lock-step.
requires = [
    f'soda-sql-core=={package_version}',
    'PyAthena>=2.2.0, <3.0'
]
# TODO Fix the params
# TODO Add a warning that installing core doesn't give any warehouse functionality
setup(
    name=package_name,
    version=package_version,
    install_requires=requires,
    # Ship only the sodasql namespace packages from this directory.
    packages=find_namespace_packages(include=["sodasql*"]),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ]
)
| 31.567568 | 82 | 0.666096 |
import sys
from setuptools import setup, find_namespace_packages
if sys.version_info < (3, 7):
print('Error: Soda SQL requires at least Python 3.7')
print('Error: Please upgrade your Python version to 3.7 or later')
sys.exit(1)
package_name = "soda-sql-athena"
package_version = '2.1.1'
description = "Soda SQL Amazon Athena"
requires = [
f'soda-sql-core=={package_version}',
'PyAthena>=2.2.0, <3.0'
]
setup(
name=package_name,
version=package_version,
install_requires=requires,
packages=find_namespace_packages(include=["sodasql*"]),
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
]
)
| true | true |
1c2f0938a84af2f6d37e5903711c69fdc485b9d9 | 2,492 | py | Python | scripts/env/set-eth.py | DryptoBZX/contractsV2 | 3ee0b7669902ff6b9422440289ddc52f679e636b | [
"Apache-2.0"
] | null | null | null | scripts/env/set-eth.py | DryptoBZX/contractsV2 | 3ee0b7669902ff6b9422440289ddc52f679e636b | [
"Apache-2.0"
] | null | null | null | scripts/env/set-eth.py | DryptoBZX/contractsV2 | 3ee0b7669902ff6b9422440289ddc52f679e636b | [
"Apache-2.0"
] | null | null | null | from brownie import *
BZX = Contract.from_abi("BZX", "0xD8Ee69652E4e4838f2531732a46d1f7F584F0b7f", interface.IBZx.abi)
TOKEN_REGISTRY = Contract.from_abi("TOKEN_REGISTRY", "0xf0E474592B455579Fe580D610b846BdBb529C6F7", TokenRegistry.abi)
list = TOKEN_REGISTRY.getTokens(0, 50)
for l in list:
iTokenTemp = Contract.from_abi("iTokenTemp", l[0], LoanTokenLogicStandard.abi)
globals()[iTokenTemp.symbol()] = iTokenTemp
underlyingTemp = Contract.from_abi("underlyingTemp", l[1], TestToken.abi)
if (l[1] == "0x9f8F72aA9304c8B593d555F12eF6589cC3A579A2"):
globals()["MKR"] = underlyingTemp # MRK has some fun symbol()
else:
globals()[underlyingTemp.symbol()] = underlyingTemp
CHI = Contract.from_abi("CHI", "0x0000000000004946c0e9F43F4Dee607b0eF1fA1c", TestToken.abi)
STAKING = Contract.from_abi("STAKING", "0xe95Ebce2B02Ee07dEF5Ed6B53289801F7Fc137A4", StakingV1_1.abi)
vBZRX = Contract.from_abi("vBZRX", "0xB72B31907C1C95F3650b64b2469e08EdACeE5e8F", BZRXVestingToken.abi)
POOL3 = Contract.from_abi("CURVE3CRV", "0x6c3F90f043a72FA612cbac8115EE7e52BDe6E490", TestToken.abi)
BPT = Contract.from_abi("BPT", "0xe26A220a341EAca116bDa64cF9D5638A935ae629", TestToken.abi)
SLP = Contract.from_abi("SLP", "0xa30911e072A0C88D55B5D0A0984B66b0D04569d0", TestToken.abi)
HELPER = Contract.from_abi("HELPER", "0x3B55369bfeA51822eb3E85868c299E8127E13c56", HelperImpl.abi)
PRICE_FEED = Contract.from_abi("PRICE_FEED", BZX.priceFeeds(), abi = PriceFeeds.abi)
STAKING = Contract.from_abi("STAKING", "0xe95Ebce2B02Ee07dEF5Ed6B53289801F7Fc137A4", StakingV1_1.abi)
SUSHI_ROUTER = Contract.from_abi("router", "0xd9e1cE17f2641f24aE83637ab66a2cca9C378B9F", interface.IPancakeRouter02.abi)
SUSHI = Contract.from_abi("SUSHI", "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2", TestToken.abi)
FEE_EXTRACTOR = Contract.from_abi("FEE_EXTRACTOR", BZX.feesController(), FeeExtractAndDistribute_ETH.abi)
DAO = Contract.from_abi("governorBravoDelegator", address="0x9da41f7810c2548572f4Fa414D06eD9772cA9e6E", abi=GovernorBravoDelegate.abi)
TIMELOCK = Contract.from_abi("TIMELOCK", address="0xfedC4dD5247B93feb41e899A09C44cFaBec29Cbc", abi=Timelock.abi)
CRV = Contract.from_abi("CRV", "0xD533a949740bb3306d119CC777fa900bA034cd52", TestToken.abi)
CRV_MINER = Contract.from_abi("ICurveMinter", "0xd061D61a4d941c39E5453435B6345Dc261C2fcE0", interface.ICurveMinter.abi)
POOL3Gauge = Contract.from_abi("3POOLGauge", "0xbFcF63294aD7105dEa65aA58F8AE5BE2D9d0952A", interface.ICurve3PoolGauge.abi)
| 55.377778 | 134 | 0.808989 | from brownie import *
BZX = Contract.from_abi("BZX", "0xD8Ee69652E4e4838f2531732a46d1f7F584F0b7f", interface.IBZx.abi)
TOKEN_REGISTRY = Contract.from_abi("TOKEN_REGISTRY", "0xf0E474592B455579Fe580D610b846BdBb529C6F7", TokenRegistry.abi)
list = TOKEN_REGISTRY.getTokens(0, 50)
for l in list:
iTokenTemp = Contract.from_abi("iTokenTemp", l[0], LoanTokenLogicStandard.abi)
globals()[iTokenTemp.symbol()] = iTokenTemp
underlyingTemp = Contract.from_abi("underlyingTemp", l[1], TestToken.abi)
if (l[1] == "0x9f8F72aA9304c8B593d555F12eF6589cC3A579A2"):
globals()["MKR"] = underlyingTemp
else:
globals()[underlyingTemp.symbol()] = underlyingTemp
CHI = Contract.from_abi("CHI", "0x0000000000004946c0e9F43F4Dee607b0eF1fA1c", TestToken.abi)
STAKING = Contract.from_abi("STAKING", "0xe95Ebce2B02Ee07dEF5Ed6B53289801F7Fc137A4", StakingV1_1.abi)
vBZRX = Contract.from_abi("vBZRX", "0xB72B31907C1C95F3650b64b2469e08EdACeE5e8F", BZRXVestingToken.abi)
POOL3 = Contract.from_abi("CURVE3CRV", "0x6c3F90f043a72FA612cbac8115EE7e52BDe6E490", TestToken.abi)
BPT = Contract.from_abi("BPT", "0xe26A220a341EAca116bDa64cF9D5638A935ae629", TestToken.abi)
SLP = Contract.from_abi("SLP", "0xa30911e072A0C88D55B5D0A0984B66b0D04569d0", TestToken.abi)
HELPER = Contract.from_abi("HELPER", "0x3B55369bfeA51822eb3E85868c299E8127E13c56", HelperImpl.abi)
PRICE_FEED = Contract.from_abi("PRICE_FEED", BZX.priceFeeds(), abi = PriceFeeds.abi)
STAKING = Contract.from_abi("STAKING", "0xe95Ebce2B02Ee07dEF5Ed6B53289801F7Fc137A4", StakingV1_1.abi)
SUSHI_ROUTER = Contract.from_abi("router", "0xd9e1cE17f2641f24aE83637ab66a2cca9C378B9F", interface.IPancakeRouter02.abi)
SUSHI = Contract.from_abi("SUSHI", "0x6b3595068778dd592e39a122f4f5a5cf09c90fe2", TestToken.abi)
FEE_EXTRACTOR = Contract.from_abi("FEE_EXTRACTOR", BZX.feesController(), FeeExtractAndDistribute_ETH.abi)
DAO = Contract.from_abi("governorBravoDelegator", address="0x9da41f7810c2548572f4Fa414D06eD9772cA9e6E", abi=GovernorBravoDelegate.abi)
TIMELOCK = Contract.from_abi("TIMELOCK", address="0xfedC4dD5247B93feb41e899A09C44cFaBec29Cbc", abi=Timelock.abi)
CRV = Contract.from_abi("CRV", "0xD533a949740bb3306d119CC777fa900bA034cd52", TestToken.abi)
CRV_MINER = Contract.from_abi("ICurveMinter", "0xd061D61a4d941c39E5453435B6345Dc261C2fcE0", interface.ICurveMinter.abi)
POOL3Gauge = Contract.from_abi("3POOLGauge", "0xbFcF63294aD7105dEa65aA58F8AE5BE2D9d0952A", interface.ICurve3PoolGauge.abi)
| true | true |
1c2f09974650f574525787d83f9085cdfdfec390 | 22,929 | py | Python | src/ensae_teaching_cs/helpers/colorsdef.py | Jerome-maker/ensae_teaching_cs | 43ea044361ee60c00c85aea354a7b25c21c0fd07 | [
"MIT"
] | 73 | 2015-05-12T13:12:11.000Z | 2021-12-21T11:44:29.000Z | src/ensae_teaching_cs/helpers/colorsdef.py | Jerome-maker/ensae_teaching_cs | 43ea044361ee60c00c85aea354a7b25c21c0fd07 | [
"MIT"
] | 90 | 2015-06-23T11:11:35.000Z | 2021-03-31T22:09:15.000Z | src/ensae_teaching_cs/helpers/colorsdef.py | Jerome-maker/ensae_teaching_cs | 43ea044361ee60c00c85aea354a7b25c21c0fd07 | [
"MIT"
] | 65 | 2015-01-13T08:23:55.000Z | 2022-02-11T22:42:07.000Z | """
@file
@brief Definition of colors
"""
colors_definition = [
('Alice blue', '#f0f8ff'),
('Alizarin crimson', '#e32636'),
('Almond', '#efdecd'),
('Amaranth', '#e52b50'),
('Amber', '#ffbf00'),
('American rose', '#ff033e'),
('Amethyst', '#9966cc'),
('Green', '#a4c639'),
('Antique brass', '#cd9575'),
('Antique fuchsia', '#915c83'),
('Antique white', '#faebd7'),
('Ao', '#008000'),
('Apple green', '#8db600'),
('Apricot', '#fbceb1'),
('Aqua', '#00ffff'),
('Aquamarine', '#7fffd4'),
('Army green', '#4b5320'),
('Arylide yellow', '#e9d66b'),
('Ash grey', '#b2beb5'),
('Asparagus', '#87a96b'),
('Atomic tangerine', '#ff9966'),
('Auburn', '#a52a2a'),
('Aureolin', '#fdee00'),
('Saurus', '#6e7f80'),
('Awesome', '#ff2052'),
('Azure', '#007fff'),
('Baby blue', '#89cff0'),
('Baby blue eyes', '#a1caf1'),
('Baby pink', '#f4c2c2'),
('Blue', '#21abcd'),
('Mania', '#fae7b5'),
('Banana yellow', '#ffe135'),
('Battleship grey', '#848482'),
('Bazaar', '#98777b'),
('Beau blue', '#bcd4e6'),
('Beaver', '#9f8170'),
('Beige', '#f5f5dc'),
('Bisque', '#ffe4c4'),
('Bistre', '#3d2b1f'),
('Bittersweet', '#fe6f5e'),
('Black', '#000000'),
('Almond', '#ffebcd'),
('France', '#318ce7'),
('Blue', '#ace5ee'),
('Blond', '#faf0be'),
('Blue', '#0000ff'),
('Bell', '#a2a2d0'),
('Gray', '#6699cc'),
('Blue green', '#0d98ba'),
('Blue purple', '#8a2be2'),
('Blue violet', '#8a2be2'),
('Blush', '#de5d83'),
('Bole', '#79443b'),
('Bondi blue', '#0095b6'),
('Bone', '#e3dac9'),
('Red', '#cc0000'),
('Bottle green', '#006a4e'),
('Boysenberry', '#873260'),
('Brandeis blue', '#0070ff'),
('Brass', '#b5a642'),
('Brick red', '#cb4154'),
('Bright cerulean', '#1dacd6'),
('Bright green', '#66ff00'),
('Bright lavender', '#bf94e4'),
('Bright maroon', '#c32148'),
('Bright pink', '#ff007f'),
('Bright turquoise', '#08e8de'),
('Bright ube', '#d19fe8'),
('Brilliant lavender', '#f4bbff'),
('Brilliant rose', '#ff55a3'),
('Brink pink', '#fb607f'),
('British racing green', '#004225'),
('Bronze', '#cd7f32'),
('Brown', '#a52a2a'),
('Bubble gum', '#ffc1cc'),
('Bubbles', '#e7feff'),
('Buff', '#f0dc82'),
('Bulgarian rose', '#480607'),
('Burgundy', '#800020'),
('Burlywood', '#deb887'),
('Burnt orange', '#cc5500'),
('Burnt sienna', '#e97451'),
('Burnt umber', '#8a3324'),
('Byzantine', '#bd33a4'),
('Byzantium', '#702963'),
('Blue', '#007aa5'),
('Red', '#e03c31'),
('Cadet', '#536872'),
('Cadet blue', '#5f9ea0'),
('Cadet grey', '#91a3b0'),
('Cadmium green', '#006b3c'),
('Cadmium orange', '#ed872d'),
('Cadmium red', '#e30022'),
('Cadmium yellow', '#fff600'),
('Pomona green', '#1e4d2b'),
('Blue', '#a3c1ad'),
('Camel', '#c19a6b'),
('Camouflage green', '#78866b'),
('Canary', '#ffff99'),
('Canary yellow', '#ffef00'),
('Candy apple red', '#ff0800'),
('Candy pink', '#e4717a'),
('Capri', '#00bfff'),
('Caput mortuum', '#592720'),
('Cardinal', '#c41e3a'),
('Caribbean green', '#00cc99'),
('Carmine', '#ff0040'),
('Carmine pink', '#eb4c42'),
('Carmine red', '#ff0038'),
('Carnation pink', '#ffa6c9'),
('Carnelian', '#b31b1b'),
('Carolina blue', '#99badd'),
('Carrot orange', '#ed9121'),
('Celadon', '#ace1af'),
('Celeste', '#b2ffff'),
('Celestial blue', '#4997d0'),
('Cerise', '#de3163'),
('Cerise pink', '#ec3b83'),
('Cerulean', '#007ba7'),
('Cerulean blue', '#2a52be'),
('Chamoisee', '#a0785a'),
('Champagne', '#fad6a5'),
('Charcoal', '#36454f'),
('Chartreuse', '#7fff00'),
('Cherry', '#de3163'),
('Cherry blossom pink', '#ffb7c5'),
('Chestnut', '#cd5c5c'),
('Chocolate', '#d2691e'),
('Chrome yellow', '#ffa700'),
('Cinereous', '#98817b'),
('Cinnabar', '#e34234'),
('Cinnamon', '#d2691e'),
('Citrine', '#e4d00a'),
('Classic rose', '#fbcce7'),
('Cobalt', '#0047ab'),
('Cocoa brown', '#d2691e'),
('Coffee', '#6f4e37'),
('Columbia blue', '#9bddff'),
('Cool black', '#002e63'),
('Cool grey', '#8c92ac'),
('Copper', '#b87333'),
('Copper rose', '#996666'),
('Coquelicot', '#ff3800'),
('Coral', '#ff7f50'),
('Coral pink', '#f88379'),
('Coral red', '#ff4040'),
('Cordovan', '#893f45'),
('Corn', '#fbec5d'),
('Red', '#b31b1b'),
('Cornflower', '#9aceeb'),
('Cornflower blue', '#6495ed'),
('Cornsilk', '#fff8dc'),
('Cosmic latte', '#fff8e7'),
('Cotton candy', '#ffbcd9'),
('Cream', '#fffdd0'),
('Crimson', '#dc143c'),
('Red', '#990000'),
('Crimson glory', '#be0032'),
('Cyan', '#00ffff'),
('Daffodil', '#ffff31'),
('Dandelion', '#f0e130'),
('Dark blue', '#00008b'),
('Dark brown', '#654321'),
('Dark byzantium', '#5d3954'),
('Dark candy apple red', '#a40000'),
('Dark cerulean', '#08457e'),
('Dark chestnut', '#986960'),
('Dark coral', '#cd5b45'),
('Dark cyan', '#008b8b'),
('Dark electric blue', '#536878'),
('Dark goldenrod', '#b8860b'),
('Dark gray', '#a9a9a9'),
('Dark green', '#013220'),
('Dark jungle green', '#1a2421'),
('Dark khaki', '#bdb76b'),
('Dark lava', '#483c32'),
('Dark lavender', '#734f96'),
('Dark magenta', '#8b008b'),
('Dark midnight blue', '#003366'),
('Dark olive green', '#556b2f'),
('Dark orange', '#ff8c00'),
('Dark orchid', '#9932cc'),
('Dark pastel blue', '#779ecb'),
('Dark pastel green', '#03c03c'),
('Dark pastel purple', '#966fd6'),
('Dark pastel red', '#c23b22'),
('Dark pink', '#e75480'),
('Dark powder blue', '#003399'),
('Dark raspberry', '#872657'),
('Dark red', '#8b0000'),
('Dark salmon', '#e9967a'),
('Dark scarlet', '#560319'),
('Dark sea green', '#8fbc8f'),
('Dark sienna', '#3c1414'),
('Dark slate blue', '#483d8b'),
('Dark slate gray', '#2f4f4f'),
('Dark spring green', '#177245'),
('Dark tan', '#918151'),
('Dark tangerine', '#ffa812'),
('Dark taupe', '#483c32'),
('Dark terra cotta', '#cc4e5c'),
('Dark turquoise', '#00ced1'),
('Dark violet', '#9400d3'),
('Dartmouth green', '#00693e'),
('Davy grey', '#555555'),
('Debian red', '#d70a53'),
('Deep carmine', '#a9203e'),
('Deep carmine pink', '#ef3038'),
('Deep carrot orange', '#e9692c'),
('Deep cerise', '#da3287'),
('Deep champagne', '#fad6a5'),
('Deep chestnut', '#b94e48'),
('Deep coffee', '#704241'),
('Deep fuchsia', '#c154c1'),
('Deep jungle green', '#004b49'),
('Deep lilac', '#9955bb'),
('Deep magenta', '#cc00cc'),
('Deep peach', '#ffcba4'),
('Deep pink', '#ff1493'),
('Deep saffron', '#ff9933'),
('Deep sky blue', '#00bfff'),
('Denim', '#1560bd'),
('Desert', '#c19a6b'),
('Desert sand', '#edc9af'),
('Dim gray', '#696969'),
('Dodger blue', '#1e90ff'),
('Dogwood rose', '#d71868'),
('Dollar bill', '#85bb65'),
('Drab', '#967117'),
('Duke blue', '#00009c'),
('Earth yellow', '#e1a95f'),
('Ecru', '#c2b280'),
('Eggplant', '#614051'),
('Eggshell', '#f0ead6'),
('Egyptian blue', '#1034a6'),
('Electric blue', '#7df9ff'),
('Electric crimson', '#ff003f'),
('Electric cyan', '#00ffff'),
('Electric green', '#00ff00'),
('Electric indigo', '#6f00ff'),
('Electric lavender', '#f4bbff'),
('Electric lime', '#ccff00'),
('Electric purple', '#bf00ff'),
('Electric ultramarine', '#3f00ff'),
('Electric violet', '#8f00ff'),
('Electric yellow', '#ffff00'),
('Emerald', '#50c878'),
('Eton blue', '#96c8a2'),
('Fallow', '#c19a6b'),
('Falu red', '#801818'),
('Famous', '#ff00ff'),
('Fandango', '#b53389'),
('Fashion fuchsia', '#f400a1'),
('Fawn', '#e5aa70'),
('Feldgrau', '#4d5d53'),
('Fern', '#71bc78'),
('Fern green', '#4f7942'),
('Red', '#ff2800'),
('Field drab', '#6c541e'),
('Fire engine red', '#ce2029'),
('Firebrick', '#b22222'),
('Flame', '#e25822'),
('Flamingo pink', '#fc8eac'),
('Flavescent', '#f7e98e'),
('Flax', '#eedc82'),
('Floral white', '#fffaf0'),
('Fluorescent orange', '#ffbf00'),
('Fluorescent pink', '#ff1493'),
('Fluorescent yellow', '#ccff00'),
('Folly', '#ff004f'),
('Forest green', '#228b22'),
('French beige', '#a67b5b'),
('French blue', '#0072bb'),
('French lilac', '#86608e'),
('French rose', '#f64a8a'),
('Fuchsia', '#ff00ff'),
('Fuchsia pink', '#ff77ff'),
('Fulvous', '#e48400'),
('Wuzzy', '#cc6666'),
('Gainsboro', '#dcdcdc'),
('Gamboge', '#e49b0f'),
('Ghost white', '#f8f8ff'),
('Ginger', '#b06500'),
('Glaucous', '#6082b6'),
('Glitter', '#e6e8fa'),
('Gold', '#ffd700'),
('Golden brown', '#996515'),
('Golden poppy', '#fcc200'),
('Golden yellow', '#ffdf00'),
('Goldenrod', '#daa520'),
('Apple', '#a8e4a0'),
('Gray', '#808080'),
('Gray asparagus', '#465945'),
('Green', '#00ff00'),
('Blue', '#1164b4'),
('Green yellow', '#adff2f'),
('Grullo', '#a99a86'),
('Guppie green', '#00ff7f'),
('Han blue', '#446ccf'),
('Han purple', '#5218fa'),
('Hansa yellow', '#e9d66b'),
('Harlequin', '#3fff00'),
('Harvard crimson', '#c90016'),
('Gold', '#da9100'),
('Gold', '#808000'),
('Heliotrope', '#df73ff'),
('Hollywood cerise', '#f400a1'),
('Honeydew', '#f0fff0'),
('Hooker green', '#49796b'),
('Hot magenta', '#ff1dce'),
('Hot pink', '#ff69b4'),
('Hunter green', '#355e3b'),
('Icterine', '#fcf75e'),
('Inchworm', '#b2ec5d'),
('India green', '#138808'),
('Indian red', '#cd5c5c'),
('Indian yellow', '#e3a857'),
('Indigo', '#4b0082'),
('Blue', '#002fa7'),
('International orange', '#ff4f00'),
('Iris', '#5a4fcf'),
('Isabelline', '#f4f0ec'),
('Islamic green', '#009000'),
('Ivory', '#fffff0'),
('Jade', '#00a86b'),
('Jasmine', '#f8de7e'),
('Jasper', '#d73b3e'),
('Jazzberry jam', '#a50b5e'),
('Jonquil', '#fada5e'),
('June bud', '#bdda57'),
('Jungle green', '#29ab87'),
('Crimson', '#e8000d'),
('Kelly green', '#4cbb17'),
('Khaki', '#c3b091'),
('Green', '#087830'),
('Languid lavender', '#d6cadd'),
('Lapis lazuli', '#26619c'),
('Lemon', '#fefe22'),
('Laurel green', '#a9ba9d'),
('Lava', '#cf1020'),
('Lavender', '#e6e6fa'),
('Lavender blue', '#ccccff'),
('Lavender blush', '#fff0f5'),
('Lavender gray', '#c4c3d0'),
('Lavender indigo', '#9457eb'),
('Lavender magenta', '#ee82ee'),
('Lavender mist', '#e6e6fa'),
('Lavender pink', '#fbaed2'),
('Lavender purple', '#967bb6'),
('Lavender rose', '#fba0e3'),
('Lawn green', '#7cfc00'),
('Lemon', '#fff700'),
('Yellow', '#fff44f'),
('Lemon chiffon', '#fffacd'),
('Lemon lime', '#bfff00'),
('Crimson', '#f56991'),
('Thulian pink', '#e68fac'),
('Light apricot', '#fdd5b1'),
('Light blue', '#add8e6'),
('Light brown', '#b5651d'),
('Light carmine pink', '#e66771'),
('Light coral', '#f08080'),
('Light cornflower blue', '#93ccea'),
('Light cyan', '#e0ffff'),
('Light fuchsia pink', '#f984ef'),
('Light goldenrod yellow', '#fafad2'),
('Light gray', '#d3d3d3'),
('Light green', '#90ee90'),
('Light khaki', '#f0e68c'),
('Light pastel purple', '#b19cd9'),
('Light pink', '#ffb6c1'),
('Light salmon', '#ffa07a'),
('Light salmon pink', '#ff9999'),
('Light sea green', '#20b2aa'),
('Light sky blue', '#87cefa'),
('Light slate gray', '#778899'),
('Light taupe', '#b38b6d'),
('Light yellow', '#ffffed'),
('Lilac', '#c8a2c8'),
('Lime', '#bfff00'),
('Lime green', '#32cd32'),
('Lincoln green', '#195905'),
('Linen', '#faf0e6'),
('Lion', '#c19a6b'),
('Liver', '#534b4f'),
('Lust', '#e62020'),
('Green', '#18453b'),
('Cheese', '#ffbd88'),
('Magenta', '#ff00ff'),
('Magic mint', '#aaf0d1'),
('Magnolia', '#f8f4ff'),
('Mahogany', '#c04000'),
('Maize', '#fbec5d'),
('Blue', '#6050dc'),
('Malachite', '#0bda51'),
('Manatee', '#979aaa'),
('Tango', '#ff8243'),
('Mantis', '#74c365'),
('Maroon', '#800000'),
('Mauve', '#e0b0ff'),
('Mauve taupe', '#915f6d'),
('Mauvelous', '#ef98aa'),
('Maya blue', '#73c2fb'),
('Meat brown', '#e5b73b'),
('Persian blue', '#0067a5'),
('Medium aquamarine', '#66ddaa'),
('Medium blue', '#0000cd'),
('Medium candy apple red', '#e2062c'),
('Medium carmine', '#af4035'),
('Medium champagne', '#f3e5ab'),
('Medium electric blue', '#035096'),
('Medium jungle green', '#1c352d'),
('Medium lavender magenta', '#dda0dd'),
('Medium orchid', '#ba55d3'),
('Medium purple', '#9370db'),
('Medium red violet', '#bb3385'),
('Medium sea green', '#3cb371'),
('Medium slate blue', '#7b68ee'),
('Medium spring bud', '#c9dc87'),
('Medium spring green', '#00fa9a'),
('Medium taupe', '#674c47'),
('Medium teal blue', '#0054b4'),
('Medium turquoise', '#48d1cc'),
('Medium violet red', '#c71585'),
('Melon', '#fdbcb4'),
('Midnight blue', '#191970'),
('Midnight green', '#004953'),
('Mikado yellow', '#ffc40c'),
('Mint', '#3eb489'),
('Mint cream', '#f5fffa'),
('Mint green', '#98ff98'),
('Misty rose', '#ffe4e1'),
('Moccasin', '#faebd7'),
('Mode beige', '#967117'),
('Moonstone blue', '#73a9c2'),
('Moss green', '#addfad'),
('Meadow', '#30ba8f'),
('Mountbatten pink', '#997a8d'),
('Mulberry', '#c54b8c'),
('Munsell', '#f2f3f4'),
('Mustard', '#ffdb58'),
('Myrtle', '#21421e'),
('Nadeshiko pink', '#f6adc6'),
('Napier green', '#2a8000'),
('Naples yellow', '#fada5e'),
('Navajo white', '#ffdead'),
('Navy blue', '#000080'),
('Carrot', '#ffa343'),
('Neon fuchsia', '#fe59c2'),
('Neon green', '#39ff14'),
('Green', '#059033'),
('Blue', '#0077be'),
('Ochre', '#cc7722'),
('Office green', '#008000'),
('Old gold', '#cfb53b'),
('Old lace', '#fdf5e6'),
('Old lavender', '#796878'),
('Old mauve', '#673147'),
('Old rose', '#c08081'),
('Olive', '#808000'),
('Drab', '#6b8e23'),
('Green', '#bab86c'),
('Olivine', '#9ab973'),
('Onyx', '#0f0f0f'),
('Opera mauve', '#b784a7'),
('Orange', '#ffa500'),
('Yellow', '#f8d568'),
('Orange peel', '#ff9f00'),
('Orange red', '#ff4500'),
('Orchid', '#da70d6'),
('Otter brown', '#654321'),
('Space', '#414a4c'),
('Orange', '#ff6e4a'),
('Blue', '#002147'),
('Blue', '#1ca9c9'),
('Pakistan green', '#006600'),
('Palatinate blue', '#273be2'),
('Palatinate purple', '#682860'),
('Pale aqua', '#bcd4e6'),
('Pale blue', '#afeeee'),
('Pale brown', '#987654'),
('Pale carmine', '#af4035'),
('Pale cerulean', '#9bc4e2'),
('Pale chestnut', '#ddadaf'),
('Pale copper', '#da8a67'),
('Pale cornflower blue', '#abcdef'),
('Pale gold', '#e6be8a'),
('Pale goldenrod', '#eee8aa'),
('Pale green', '#98fb98'),
('Pale lavender', '#dcd0ff'),
('Pale magenta', '#f984e5'),
('Pale pink', '#fadadd'),
('Pale plum', '#dda0dd'),
('Pale red violet', '#db7093'),
('Pale robin egg blue', '#96ded1'),
('Pale silver', '#c9c0bb'),
('Pale spring bud', '#ecebbd'),
('Pale taupe', '#bc987e'),
('Pale violet red', '#db7093'),
('Pansy purple', '#78184a'),
('Papaya whip', '#ffefd5'),
('Green', '#50c878'),
('Pastel blue', '#aec6cf'),
('Pastel brown', '#836953'),
('Pastel gray', '#cfcfc4'),
('Pastel green', '#77dd77'),
('Pastel magenta', '#f49ac2'),
('Pastel orange', '#ffb347'),
('Pastel pink', '#ffd1dc'),
('Pastel purple', '#b39eb5'),
('Pastel red', '#ff6961'),
('Pastel violet', '#cb99c9'),
('Pastel yellow', '#fdfd96'),
('Patriarch', '#800080'),
('Payne grey', '#536878'),
('Peach', '#ffe5b4'),
('Peach puff', '#ffdab9'),
('Peach yellow', '#fadfad'),
('Pear', '#d1e231'),
('Pearl', '#eae0c8'),
('Aqua', '#88d8c0'),
('Peridot', '#e6e200'),
('Periwinkle', '#ccccff'),
('Persian blue', '#1c39bb'),
('Persian indigo', '#32127a'),
('Persian orange', '#d99058'),
('Persian pink', '#f77fbe'),
('Persian plum', '#701c1c'),
('Persian red', '#cc3333'),
('Persian rose', '#fe28a2'),
('Phlox', '#df00ff'),
('Phthalo blue', '#000f89'),
('Phthalo green', '#123524'),
('Piggy pink', '#fddde6'),
('Pine green', '#01796f'),
('Pink', '#ffc0cb'),
('Flamingo', '#fc74fd'),
('Sherbet', '#f78fa7'),
('Pink pearl', '#e7accf'),
('Pistachio', '#93c572'),
('Platinum', '#e5e4e2'),
('Plum', '#dda0dd'),
('Orange', '#ff5a36'),
('Powder blue', '#b0e0e6'),
('Princeton orange', '#ff8f00'),
('Prussian blue', '#003153'),
('Psychedelic purple', '#df00ff'),
('Puce', '#cc8899'),
('Pumpkin', '#ff7518'),
('Purple', '#800080'),
('Heart', '#69359c'),
('Majesty', '#9d81ba'),
('Purple mountain majesty', '#9678b6'),
('Purple pizzazz', '#fe4eda'),
('Purple taupe', '#50404d'),
('Rackley', '#5d8aa8'),
('Red', '#ff355e'),
('Raspberry', '#e30b5d'),
('Raspberry glace', '#915f6d'),
('Raspberry pink', '#e25098'),
('Raspberry rose', '#b3446c'),
('Sienna', '#d68a59'),
('Razzle dazzle rose', '#ff33cc'),
('Razzmatazz', '#e3256b'),
('Red', '#ff0000'),
('Orange', '#ff5349'),
('Red brown', '#a52a2a'),
('Red violet', '#c71585'),
('Rich black', '#004040'),
('Rich carmine', '#d70040'),
('Rich electric blue', '#0892d0'),
('Rich lilac', '#b666d2'),
('Rich maroon', '#b03060'),
('Rifle green', '#414833'),
('Blue', '#1fcecb'),
('Rose', '#ff007f'),
('Rose bonbon', '#f9429e'),
('Rose ebony', '#674846'),
('Rose gold', '#b76e79'),
('Rose madder', '#e32636'),
('Rose pink', '#ff66cc'),
('Rose quartz', '#aa98a9'),
('Rose taupe', '#905d5d'),
('Rose vale', '#ab4e52'),
('Rosewood', '#65000b'),
('Rosso corsa', '#d40000'),
('Rosy brown', '#bc8f8f'),
('Royal azure', '#0038a8'),
('Royal blue', '#4169e1'),
('Royal fuchsia', '#ca2c92'),
('Royal purple', '#7851a9'),
('Ruby', '#e0115f'),
('Ruddy', '#ff0028'),
('Ruddy brown', '#bb6528'),
('Ruddy pink', '#e18e96'),
('Rufous', '#a81c07'),
('Russet', '#80461b'),
('Rust', '#b7410e'),
('State green', '#00563f'),
('Saddle brown', '#8b4513'),
('Safety orange', '#ff6700'),
('Saffron', '#f4c430'),
('Blue', '#23297a'),
('Salmon', '#ff8c69'),
('Salmon pink', '#ff91a4'),
('Sand', '#c2b280'),
('Sand dune', '#967117'),
('Sandstorm', '#ecd540'),
('Sandy brown', '#f4a460'),
('Sandy taupe', '#967117'),
('Sap green', '#507d2a'),
('Sapphire', '#0f52ba'),
('Satin sheen gold', '#cba135'),
('Scarlet', '#ff2400'),
('School bus yellow', '#ffd800'),
('Green', '#76ff7a'),
('Sea blue', '#006994'),
('Sea green', '#2e8b57'),
('Seal brown', '#321414'),
('Seashell', '#fff5ee'),
('Selective yellow', '#ffba00'),
('Sepia', '#704214'),
('Shadow', '#8a795d'),
('Shamrock', '#45cea2'),
('Shamrock green', '#009e60'),
('Shocking pink', '#fc0fc0'),
('Sienna', '#882d17'),
('Silver', '#c0c0c0'),
('Sinopia', '#cb410b'),
('Skobeloff', '#007474'),
('Sky blue', '#87ceeb'),
('Sky magenta', '#cf71af'),
('Slate blue', '#6a5acd'),
('Slate gray', '#708090'),
('Smalt', '#003399'),
('Smokey topaz', '#933d41'),
('Smoky black', '#100c08'),
('Snow', '#fffafa'),
('Ball', '#0fc0fc'),
('Spring bud', '#a7fc00'),
('Spring green', '#00ff7f'),
('Steel blue', '#4682b4'),
('Stil de grain yellow', '#fada5e'),
('Stizza', '#990000'),
('Stormcloud', '#008080'),
('Straw', '#e4d96f'),
('Sunglow', '#ffcc33'),
('Sunset', '#fad6a5'),
('Orange', '#fd5e53'),
('Tan', '#d2b48c'),
('Tangelo', '#f94d00'),
('Tangerine', '#f28500'),
('Tangerine yellow', '#ffcc00'),
('Taupe', '#483c32'),
('Taupe gray', '#8b8589'),
('Tawny', '#cd5700'),
('Tea green', '#d0f0c0'),
('Tea rose', '#f4c2c2'),
('Teal', '#008080'),
('Teal blue', '#367588'),
('Teal green', '#006d5b'),
('Terra cotta', '#e2725b'),
('Thistle', '#d8bfd8'),
('Thulian pink', '#de6fa1'),
('Pink', '#fc89ac'),
('Blue', '#0abab5'),
('Tiger eye', '#e08d3c'),
('Timberwolf', '#dbd7d2'),
('Titanium yellow', '#eee600'),
('Tomato', '#ff6347'),
('Toolbox', '#746cc0'),
('Topaz', '#ffc87c'),
('Tractor red', '#fd0e35'),
('Grey', '#808080'),
('Tropical rain forest', '#00755e'),
('Blue', '#0073cf'),
('Blue', '#417dc1'),
('Tumbleweed', '#deaa88'),
('Turkish rose', '#b57281'),
('Turquoise', '#30d5c8'),
('Turquoise blue', '#00ffef'),
('Turquoise green', '#a0d6b4'),
('Tuscan red', '#66424d'),
('Twilight lavender', '#8a496b'),
('Tyrian purple', '#66023c'),
('A blue', '#0033aa'),
('A red', '#d9004c'),
('Blue', '#536895'),
('Gold', '#ffb300'),
('Green', '#3cd070'),
('Forest green', '#014421'),
('Maroon', '#7b1113'),
('Cardinal', '#990000'),
('Gold', '#ffcc00'),
('Ube', '#8878c3'),
('Ultra pink', '#ff6fff'),
('Ultramarine', '#120a8f'),
('Ultramarine blue', '#4166f5'),
('Umber', '#635147'),
('Nations blue', '#5b92e5'),
('Gold', '#b78727'),
('Yellow', '#ffff66'),
('Upsdell red', '#ae2029'),
('Urobilin', '#e1ad21'),
('Crimson', '#d3003f'),
('Vanilla', '#f3e5ab'),
('Vegas gold', '#c5b358'),
('Venetian red', '#c80815'),
('Verdigris', '#43b3ae'),
('Vermilion', '#e34234'),
('Veronica', '#a020f0'),
('Violet', '#ee82ee'),
('Blue', '#324ab2'),
('Red', '#f75394'),
('Viridian', '#40826d'),
('Vivid auburn', '#922724'),
('Vivid burgundy', '#9f1d35'),
('Vivid cerise', '#da1d81'),
('Vivid tangerine', '#ffa089'),
('Vivid violet', '#9f00ff'),
('Warm black', '#004242'),
('Waterspout', '#00ffff'),
('Wenge', '#645452'),
('Wheat', '#f5deb3'),
('White', '#ffffff'),
('White smoke', '#f5f5f5'),
('Strawberry', '#ff43a4'),
('Watermelon', '#fc6c85'),
('Wild blue yonder', '#a2add0'),
('Wine', '#722f37'),
('Wisteria', '#c9a0dc'),
('Xanadu', '#738678'),
('Blue', '#0f4d92'),
('Yellow', '#ffff00'),
('Orange', '#ffae42'),
('Yellow green', '#9acd32'),
('Zaffre', '#0014a8'),
('Zinnwaldite brown', '#2c1608'),
('Force blue', '#5d8aa8'),
]
| 30.694779 | 43 | 0.493829 |
colors_definition = [
('Alice blue', '#f0f8ff'),
('Alizarin crimson', '#e32636'),
('Almond', '#efdecd'),
('Amaranth', '#e52b50'),
('Amber', '#ffbf00'),
('American rose', '#ff033e'),
('Amethyst', '#9966cc'),
('Green', '#a4c639'),
('Antique brass', '#cd9575'),
('Antique fuchsia', '#915c83'),
('Antique white', '#faebd7'),
('Ao', '#008000'),
('Apple green', '#8db600'),
('Apricot', '#fbceb1'),
('Aqua', '#00ffff'),
('Aquamarine', '#7fffd4'),
('Army green', '#4b5320'),
('Arylide yellow', '#e9d66b'),
('Ash grey', '#b2beb5'),
('Asparagus', '#87a96b'),
('Atomic tangerine', '#ff9966'),
('Auburn', '#a52a2a'),
('Aureolin', '#fdee00'),
('Saurus', '#6e7f80'),
('Awesome', '#ff2052'),
('Azure', '#007fff'),
('Baby blue', '#89cff0'),
('Baby blue eyes', '#a1caf1'),
('Baby pink', '#f4c2c2'),
('Blue', '#21abcd'),
('Mania', '#fae7b5'),
('Banana yellow', '#ffe135'),
('Battleship grey', '#848482'),
('Bazaar', '#98777b'),
('Beau blue', '#bcd4e6'),
('Beaver', '#9f8170'),
('Beige', '#f5f5dc'),
('Bisque', '#ffe4c4'),
('Bistre', '#3d2b1f'),
('Bittersweet', '#fe6f5e'),
('Black', '#000000'),
('Almond', '#ffebcd'),
('France', '#318ce7'),
('Blue', '#ace5ee'),
('Blond', '#faf0be'),
('Blue', '#0000ff'),
('Bell', '#a2a2d0'),
('Gray', '#6699cc'),
('Blue green', '#0d98ba'),
('Blue purple', '#8a2be2'),
('Blue violet', '#8a2be2'),
('Blush', '#de5d83'),
('Bole', '#79443b'),
('Bondi blue', '#0095b6'),
('Bone', '#e3dac9'),
('Red', '#cc0000'),
('Bottle green', '#006a4e'),
('Boysenberry', '#873260'),
('Brandeis blue', '#0070ff'),
('Brass', '#b5a642'),
('Brick red', '#cb4154'),
('Bright cerulean', '#1dacd6'),
('Bright green', '#66ff00'),
('Bright lavender', '#bf94e4'),
('Bright maroon', '#c32148'),
('Bright pink', '#ff007f'),
('Bright turquoise', '#08e8de'),
('Bright ube', '#d19fe8'),
('Brilliant lavender', '#f4bbff'),
('Brilliant rose', '#ff55a3'),
('Brink pink', '#fb607f'),
('British racing green', '#004225'),
('Bronze', '#cd7f32'),
('Brown', '#a52a2a'),
('Bubble gum', '#ffc1cc'),
('Bubbles', '#e7feff'),
('Buff', '#f0dc82'),
('Bulgarian rose', '#480607'),
('Burgundy', '#800020'),
('Burlywood', '#deb887'),
('Burnt orange', '#cc5500'),
('Burnt sienna', '#e97451'),
('Burnt umber', '#8a3324'),
('Byzantine', '#bd33a4'),
('Byzantium', '#702963'),
('Blue', '#007aa5'),
('Red', '#e03c31'),
('Cadet', '#536872'),
('Cadet blue', '#5f9ea0'),
('Cadet grey', '#91a3b0'),
('Cadmium green', '#006b3c'),
('Cadmium orange', '#ed872d'),
('Cadmium red', '#e30022'),
('Cadmium yellow', '#fff600'),
('Pomona green', '#1e4d2b'),
('Blue', '#a3c1ad'),
('Camel', '#c19a6b'),
('Camouflage green', '#78866b'),
('Canary', '#ffff99'),
('Canary yellow', '#ffef00'),
('Candy apple red', '#ff0800'),
('Candy pink', '#e4717a'),
('Capri', '#00bfff'),
('Caput mortuum', '#592720'),
('Cardinal', '#c41e3a'),
('Caribbean green', '#00cc99'),
('Carmine', '#ff0040'),
('Carmine pink', '#eb4c42'),
('Carmine red', '#ff0038'),
('Carnation pink', '#ffa6c9'),
('Carnelian', '#b31b1b'),
('Carolina blue', '#99badd'),
('Carrot orange', '#ed9121'),
('Celadon', '#ace1af'),
('Celeste', '#b2ffff'),
('Celestial blue', '#4997d0'),
('Cerise', '#de3163'),
('Cerise pink', '#ec3b83'),
('Cerulean', '#007ba7'),
('Cerulean blue', '#2a52be'),
('Chamoisee', '#a0785a'),
('Champagne', '#fad6a5'),
('Charcoal', '#36454f'),
('Chartreuse', '#7fff00'),
('Cherry', '#de3163'),
('Cherry blossom pink', '#ffb7c5'),
('Chestnut', '#cd5c5c'),
('Chocolate', '#d2691e'),
('Chrome yellow', '#ffa700'),
('Cinereous', '#98817b'),
('Cinnabar', '#e34234'),
('Cinnamon', '#d2691e'),
('Citrine', '#e4d00a'),
('Classic rose', '#fbcce7'),
('Cobalt', '#0047ab'),
('Cocoa brown', '#d2691e'),
('Coffee', '#6f4e37'),
('Columbia blue', '#9bddff'),
('Cool black', '#002e63'),
('Cool grey', '#8c92ac'),
('Copper', '#b87333'),
('Copper rose', '#996666'),
('Coquelicot', '#ff3800'),
('Coral', '#ff7f50'),
('Coral pink', '#f88379'),
('Coral red', '#ff4040'),
('Cordovan', '#893f45'),
('Corn', '#fbec5d'),
('Red', '#b31b1b'),
('Cornflower', '#9aceeb'),
('Cornflower blue', '#6495ed'),
('Cornsilk', '#fff8dc'),
('Cosmic latte', '#fff8e7'),
('Cotton candy', '#ffbcd9'),
('Cream', '#fffdd0'),
('Crimson', '#dc143c'),
('Red', '#990000'),
('Crimson glory', '#be0032'),
('Cyan', '#00ffff'),
('Daffodil', '#ffff31'),
('Dandelion', '#f0e130'),
('Dark blue', '#00008b'),
('Dark brown', '#654321'),
('Dark byzantium', '#5d3954'),
('Dark candy apple red', '#a40000'),
('Dark cerulean', '#08457e'),
('Dark chestnut', '#986960'),
('Dark coral', '#cd5b45'),
('Dark cyan', '#008b8b'),
('Dark electric blue', '#536878'),
('Dark goldenrod', '#b8860b'),
('Dark gray', '#a9a9a9'),
('Dark green', '#013220'),
('Dark jungle green', '#1a2421'),
('Dark khaki', '#bdb76b'),
('Dark lava', '#483c32'),
('Dark lavender', '#734f96'),
('Dark magenta', '#8b008b'),
('Dark midnight blue', '#003366'),
('Dark olive green', '#556b2f'),
('Dark orange', '#ff8c00'),
('Dark orchid', '#9932cc'),
('Dark pastel blue', '#779ecb'),
('Dark pastel green', '#03c03c'),
('Dark pastel purple', '#966fd6'),
('Dark pastel red', '#c23b22'),
('Dark pink', '#e75480'),
('Dark powder blue', '#003399'),
('Dark raspberry', '#872657'),
('Dark red', '#8b0000'),
('Dark salmon', '#e9967a'),
('Dark scarlet', '#560319'),
('Dark sea green', '#8fbc8f'),
('Dark sienna', '#3c1414'),
('Dark slate blue', '#483d8b'),
('Dark slate gray', '#2f4f4f'),
('Dark spring green', '#177245'),
('Dark tan', '#918151'),
('Dark tangerine', '#ffa812'),
('Dark taupe', '#483c32'),
('Dark terra cotta', '#cc4e5c'),
('Dark turquoise', '#00ced1'),
('Dark violet', '#9400d3'),
('Dartmouth green', '#00693e'),
('Davy grey', '#555555'),
('Debian red', '#d70a53'),
('Deep carmine', '#a9203e'),
('Deep carmine pink', '#ef3038'),
('Deep carrot orange', '#e9692c'),
('Deep cerise', '#da3287'),
('Deep champagne', '#fad6a5'),
('Deep chestnut', '#b94e48'),
('Deep coffee', '#704241'),
('Deep fuchsia', '#c154c1'),
('Deep jungle green', '#004b49'),
('Deep lilac', '#9955bb'),
('Deep magenta', '#cc00cc'),
('Deep peach', '#ffcba4'),
('Deep pink', '#ff1493'),
('Deep saffron', '#ff9933'),
('Deep sky blue', '#00bfff'),
('Denim', '#1560bd'),
('Desert', '#c19a6b'),
('Desert sand', '#edc9af'),
('Dim gray', '#696969'),
('Dodger blue', '#1e90ff'),
('Dogwood rose', '#d71868'),
('Dollar bill', '#85bb65'),
('Drab', '#967117'),
('Duke blue', '#00009c'),
('Earth yellow', '#e1a95f'),
('Ecru', '#c2b280'),
('Eggplant', '#614051'),
('Eggshell', '#f0ead6'),
('Egyptian blue', '#1034a6'),
('Electric blue', '#7df9ff'),
('Electric crimson', '#ff003f'),
('Electric cyan', '#00ffff'),
('Electric green', '#00ff00'),
('Electric indigo', '#6f00ff'),
('Electric lavender', '#f4bbff'),
('Electric lime', '#ccff00'),
('Electric purple', '#bf00ff'),
('Electric ultramarine', '#3f00ff'),
('Electric violet', '#8f00ff'),
('Electric yellow', '#ffff00'),
('Emerald', '#50c878'),
('Eton blue', '#96c8a2'),
('Fallow', '#c19a6b'),
('Falu red', '#801818'),
('Famous', '#ff00ff'),
('Fandango', '#b53389'),
('Fashion fuchsia', '#f400a1'),
('Fawn', '#e5aa70'),
('Feldgrau', '#4d5d53'),
('Fern', '#71bc78'),
('Fern green', '#4f7942'),
('Red', '#ff2800'),
('Field drab', '#6c541e'),
('Fire engine red', '#ce2029'),
('Firebrick', '#b22222'),
('Flame', '#e25822'),
('Flamingo pink', '#fc8eac'),
('Flavescent', '#f7e98e'),
('Flax', '#eedc82'),
('Floral white', '#fffaf0'),
('Fluorescent orange', '#ffbf00'),
('Fluorescent pink', '#ff1493'),
('Fluorescent yellow', '#ccff00'),
('Folly', '#ff004f'),
('Forest green', '#228b22'),
('French beige', '#a67b5b'),
('French blue', '#0072bb'),
('French lilac', '#86608e'),
('French rose', '#f64a8a'),
('Fuchsia', '#ff00ff'),
('Fuchsia pink', '#ff77ff'),
('Fulvous', '#e48400'),
('Wuzzy', '#cc6666'),
('Gainsboro', '#dcdcdc'),
('Gamboge', '#e49b0f'),
('Ghost white', '#f8f8ff'),
('Ginger', '#b06500'),
('Glaucous', '#6082b6'),
('Glitter', '#e6e8fa'),
('Gold', '#ffd700'),
('Golden brown', '#996515'),
('Golden poppy', '#fcc200'),
('Golden yellow', '#ffdf00'),
('Goldenrod', '#daa520'),
('Apple', '#a8e4a0'),
('Gray', '#808080'),
('Gray asparagus', '#465945'),
('Green', '#00ff00'),
('Blue', '#1164b4'),
('Green yellow', '#adff2f'),
('Grullo', '#a99a86'),
('Guppie green', '#00ff7f'),
('Han blue', '#446ccf'),
('Han purple', '#5218fa'),
('Hansa yellow', '#e9d66b'),
('Harlequin', '#3fff00'),
('Harvard crimson', '#c90016'),
('Gold', '#da9100'),
('Gold', '#808000'),
('Heliotrope', '#df73ff'),
('Hollywood cerise', '#f400a1'),
('Honeydew', '#f0fff0'),
('Hooker green', '#49796b'),
('Hot magenta', '#ff1dce'),
('Hot pink', '#ff69b4'),
('Hunter green', '#355e3b'),
('Icterine', '#fcf75e'),
('Inchworm', '#b2ec5d'),
('India green', '#138808'),
('Indian red', '#cd5c5c'),
('Indian yellow', '#e3a857'),
('Indigo', '#4b0082'),
('Blue', '#002fa7'),
('International orange', '#ff4f00'),
('Iris', '#5a4fcf'),
('Isabelline', '#f4f0ec'),
('Islamic green', '#009000'),
('Ivory', '#fffff0'),
('Jade', '#00a86b'),
('Jasmine', '#f8de7e'),
('Jasper', '#d73b3e'),
('Jazzberry jam', '#a50b5e'),
('Jonquil', '#fada5e'),
('June bud', '#bdda57'),
('Jungle green', '#29ab87'),
('Crimson', '#e8000d'),
('Kelly green', '#4cbb17'),
('Khaki', '#c3b091'),
('Green', '#087830'),
('Languid lavender', '#d6cadd'),
('Lapis lazuli', '#26619c'),
('Lemon', '#fefe22'),
('Laurel green', '#a9ba9d'),
('Lava', '#cf1020'),
('Lavender', '#e6e6fa'),
('Lavender blue', '#ccccff'),
('Lavender blush', '#fff0f5'),
('Lavender gray', '#c4c3d0'),
('Lavender indigo', '#9457eb'),
('Lavender magenta', '#ee82ee'),
('Lavender mist', '#e6e6fa'),
('Lavender pink', '#fbaed2'),
('Lavender purple', '#967bb6'),
('Lavender rose', '#fba0e3'),
('Lawn green', '#7cfc00'),
('Lemon', '#fff700'),
('Yellow', '#fff44f'),
('Lemon chiffon', '#fffacd'),
('Lemon lime', '#bfff00'),
('Crimson', '#f56991'),
('Thulian pink', '#e68fac'),
('Light apricot', '#fdd5b1'),
('Light blue', '#add8e6'),
('Light brown', '#b5651d'),
('Light carmine pink', '#e66771'),
('Light coral', '#f08080'),
('Light cornflower blue', '#93ccea'),
('Light cyan', '#e0ffff'),
('Light fuchsia pink', '#f984ef'),
('Light goldenrod yellow', '#fafad2'),
('Light gray', '#d3d3d3'),
('Light green', '#90ee90'),
('Light khaki', '#f0e68c'),
('Light pastel purple', '#b19cd9'),
('Light pink', '#ffb6c1'),
('Light salmon', '#ffa07a'),
('Light salmon pink', '#ff9999'),
('Light sea green', '#20b2aa'),
('Light sky blue', '#87cefa'),
('Light slate gray', '#778899'),
('Light taupe', '#b38b6d'),
('Light yellow', '#ffffed'),
('Lilac', '#c8a2c8'),
('Lime', '#bfff00'),
('Lime green', '#32cd32'),
('Lincoln green', '#195905'),
('Linen', '#faf0e6'),
('Lion', '#c19a6b'),
('Liver', '#534b4f'),
('Lust', '#e62020'),
('Green', '#18453b'),
('Cheese', '#ffbd88'),
('Magenta', '#ff00ff'),
('Magic mint', '#aaf0d1'),
('Magnolia', '#f8f4ff'),
('Mahogany', '#c04000'),
('Maize', '#fbec5d'),
('Blue', '#6050dc'),
('Malachite', '#0bda51'),
('Manatee', '#979aaa'),
('Tango', '#ff8243'),
('Mantis', '#74c365'),
('Maroon', '#800000'),
('Mauve', '#e0b0ff'),
('Mauve taupe', '#915f6d'),
('Mauvelous', '#ef98aa'),
('Maya blue', '#73c2fb'),
('Meat brown', '#e5b73b'),
('Persian blue', '#0067a5'),
('Medium aquamarine', '#66ddaa'),
('Medium blue', '#0000cd'),
('Medium candy apple red', '#e2062c'),
('Medium carmine', '#af4035'),
('Medium champagne', '#f3e5ab'),
('Medium electric blue', '#035096'),
('Medium jungle green', '#1c352d'),
('Medium lavender magenta', '#dda0dd'),
('Medium orchid', '#ba55d3'),
('Medium purple', '#9370db'),
('Medium red violet', '#bb3385'),
('Medium sea green', '#3cb371'),
('Medium slate blue', '#7b68ee'),
('Medium spring bud', '#c9dc87'),
('Medium spring green', '#00fa9a'),
('Medium taupe', '#674c47'),
('Medium teal blue', '#0054b4'),
('Medium turquoise', '#48d1cc'),
('Medium violet red', '#c71585'),
('Melon', '#fdbcb4'),
('Midnight blue', '#191970'),
('Midnight green', '#004953'),
('Mikado yellow', '#ffc40c'),
('Mint', '#3eb489'),
('Mint cream', '#f5fffa'),
('Mint green', '#98ff98'),
('Misty rose', '#ffe4e1'),
('Moccasin', '#faebd7'),
('Mode beige', '#967117'),
('Moonstone blue', '#73a9c2'),
('Moss green', '#addfad'),
('Meadow', '#30ba8f'),
('Mountbatten pink', '#997a8d'),
('Mulberry', '#c54b8c'),
('Munsell', '#f2f3f4'),
('Mustard', '#ffdb58'),
('Myrtle', '#21421e'),
('Nadeshiko pink', '#f6adc6'),
('Napier green', '#2a8000'),
('Naples yellow', '#fada5e'),
('Navajo white', '#ffdead'),
('Navy blue', '#000080'),
('Carrot', '#ffa343'),
('Neon fuchsia', '#fe59c2'),
('Neon green', '#39ff14'),
('Green', '#059033'),
('Blue', '#0077be'),
('Ochre', '#cc7722'),
('Office green', '#008000'),
('Old gold', '#cfb53b'),
('Old lace', '#fdf5e6'),
('Old lavender', '#796878'),
('Old mauve', '#673147'),
('Old rose', '#c08081'),
('Olive', '#808000'),
('Drab', '#6b8e23'),
('Green', '#bab86c'),
('Olivine', '#9ab973'),
('Onyx', '#0f0f0f'),
('Opera mauve', '#b784a7'),
('Orange', '#ffa500'),
('Yellow', '#f8d568'),
('Orange peel', '#ff9f00'),
('Orange red', '#ff4500'),
('Orchid', '#da70d6'),
('Otter brown', '#654321'),
('Space', '#414a4c'),
('Orange', '#ff6e4a'),
('Blue', '#002147'),
('Blue', '#1ca9c9'),
('Pakistan green', '#006600'),
('Palatinate blue', '#273be2'),
('Palatinate purple', '#682860'),
('Pale aqua', '#bcd4e6'),
('Pale blue', '#afeeee'),
('Pale brown', '#987654'),
('Pale carmine', '#af4035'),
('Pale cerulean', '#9bc4e2'),
('Pale chestnut', '#ddadaf'),
('Pale copper', '#da8a67'),
('Pale cornflower blue', '#abcdef'),
('Pale gold', '#e6be8a'),
('Pale goldenrod', '#eee8aa'),
('Pale green', '#98fb98'),
('Pale lavender', '#dcd0ff'),
('Pale magenta', '#f984e5'),
('Pale pink', '#fadadd'),
('Pale plum', '#dda0dd'),
('Pale red violet', '#db7093'),
('Pale robin egg blue', '#96ded1'),
('Pale silver', '#c9c0bb'),
('Pale spring bud', '#ecebbd'),
('Pale taupe', '#bc987e'),
('Pale violet red', '#db7093'),
('Pansy purple', '#78184a'),
('Papaya whip', '#ffefd5'),
('Green', '#50c878'),
('Pastel blue', '#aec6cf'),
('Pastel brown', '#836953'),
('Pastel gray', '#cfcfc4'),
('Pastel green', '#77dd77'),
('Pastel magenta', '#f49ac2'),
('Pastel orange', '#ffb347'),
('Pastel pink', '#ffd1dc'),
('Pastel purple', '#b39eb5'),
('Pastel red', '#ff6961'),
('Pastel violet', '#cb99c9'),
('Pastel yellow', '#fdfd96'),
('Patriarch', '#800080'),
('Payne grey', '#536878'),
('Peach', '#ffe5b4'),
('Peach puff', '#ffdab9'),
('Peach yellow', '#fadfad'),
('Pear', '#d1e231'),
('Pearl', '#eae0c8'),
('Aqua', '#88d8c0'),
('Peridot', '#e6e200'),
('Periwinkle', '#ccccff'),
('Persian blue', '#1c39bb'),
('Persian indigo', '#32127a'),
('Persian orange', '#d99058'),
('Persian pink', '#f77fbe'),
('Persian plum', '#701c1c'),
('Persian red', '#cc3333'),
('Persian rose', '#fe28a2'),
('Phlox', '#df00ff'),
('Phthalo blue', '#000f89'),
('Phthalo green', '#123524'),
('Piggy pink', '#fddde6'),
('Pine green', '#01796f'),
('Pink', '#ffc0cb'),
('Flamingo', '#fc74fd'),
('Sherbet', '#f78fa7'),
('Pink pearl', '#e7accf'),
('Pistachio', '#93c572'),
('Platinum', '#e5e4e2'),
('Plum', '#dda0dd'),
('Orange', '#ff5a36'),
('Powder blue', '#b0e0e6'),
('Princeton orange', '#ff8f00'),
('Prussian blue', '#003153'),
('Psychedelic purple', '#df00ff'),
('Puce', '#cc8899'),
('Pumpkin', '#ff7518'),
('Purple', '#800080'),
('Heart', '#69359c'),
('Majesty', '#9d81ba'),
('Purple mountain majesty', '#9678b6'),
('Purple pizzazz', '#fe4eda'),
('Purple taupe', '#50404d'),
('Rackley', '#5d8aa8'),
('Red', '#ff355e'),
('Raspberry', '#e30b5d'),
('Raspberry glace', '#915f6d'),
('Raspberry pink', '#e25098'),
('Raspberry rose', '#b3446c'),
('Sienna', '#d68a59'),
('Razzle dazzle rose', '#ff33cc'),
('Razzmatazz', '#e3256b'),
('Red', '#ff0000'),
('Orange', '#ff5349'),
('Red brown', '#a52a2a'),
('Red violet', '#c71585'),
('Rich black', '#004040'),
('Rich carmine', '#d70040'),
('Rich electric blue', '#0892d0'),
('Rich lilac', '#b666d2'),
('Rich maroon', '#b03060'),
('Rifle green', '#414833'),
('Blue', '#1fcecb'),
('Rose', '#ff007f'),
('Rose bonbon', '#f9429e'),
('Rose ebony', '#674846'),
('Rose gold', '#b76e79'),
('Rose madder', '#e32636'),
('Rose pink', '#ff66cc'),
('Rose quartz', '#aa98a9'),
('Rose taupe', '#905d5d'),
('Rose vale', '#ab4e52'),
('Rosewood', '#65000b'),
('Rosso corsa', '#d40000'),
('Rosy brown', '#bc8f8f'),
('Royal azure', '#0038a8'),
('Royal blue', '#4169e1'),
('Royal fuchsia', '#ca2c92'),
('Royal purple', '#7851a9'),
('Ruby', '#e0115f'),
('Ruddy', '#ff0028'),
('Ruddy brown', '#bb6528'),
('Ruddy pink', '#e18e96'),
('Rufous', '#a81c07'),
('Russet', '#80461b'),
('Rust', '#b7410e'),
('State green', '#00563f'),
('Saddle brown', '#8b4513'),
('Safety orange', '#ff6700'),
('Saffron', '#f4c430'),
('Blue', '#23297a'),
('Salmon', '#ff8c69'),
('Salmon pink', '#ff91a4'),
('Sand', '#c2b280'),
('Sand dune', '#967117'),
('Sandstorm', '#ecd540'),
('Sandy brown', '#f4a460'),
('Sandy taupe', '#967117'),
('Sap green', '#507d2a'),
('Sapphire', '#0f52ba'),
('Satin sheen gold', '#cba135'),
('Scarlet', '#ff2400'),
('School bus yellow', '#ffd800'),
('Green', '#76ff7a'),
('Sea blue', '#006994'),
('Sea green', '#2e8b57'),
('Seal brown', '#321414'),
('Seashell', '#fff5ee'),
('Selective yellow', '#ffba00'),
('Sepia', '#704214'),
('Shadow', '#8a795d'),
('Shamrock', '#45cea2'),
('Shamrock green', '#009e60'),
('Shocking pink', '#fc0fc0'),
('Sienna', '#882d17'),
('Silver', '#c0c0c0'),
('Sinopia', '#cb410b'),
('Skobeloff', '#007474'),
('Sky blue', '#87ceeb'),
('Sky magenta', '#cf71af'),
('Slate blue', '#6a5acd'),
('Slate gray', '#708090'),
('Smalt', '#003399'),
('Smokey topaz', '#933d41'),
('Smoky black', '#100c08'),
('Snow', '#fffafa'),
('Ball', '#0fc0fc'),
('Spring bud', '#a7fc00'),
('Spring green', '#00ff7f'),
('Steel blue', '#4682b4'),
('Stil de grain yellow', '#fada5e'),
('Stizza', '#990000'),
('Stormcloud', '#008080'),
('Straw', '#e4d96f'),
('Sunglow', '#ffcc33'),
('Sunset', '#fad6a5'),
('Orange', '#fd5e53'),
('Tan', '#d2b48c'),
('Tangelo', '#f94d00'),
('Tangerine', '#f28500'),
('Tangerine yellow', '#ffcc00'),
('Taupe', '#483c32'),
('Taupe gray', '#8b8589'),
('Tawny', '#cd5700'),
('Tea green', '#d0f0c0'),
('Tea rose', '#f4c2c2'),
('Teal', '#008080'),
('Teal blue', '#367588'),
('Teal green', '#006d5b'),
('Terra cotta', '#e2725b'),
('Thistle', '#d8bfd8'),
('Thulian pink', '#de6fa1'),
('Pink', '#fc89ac'),
('Blue', '#0abab5'),
('Tiger eye', '#e08d3c'),
('Timberwolf', '#dbd7d2'),
('Titanium yellow', '#eee600'),
('Tomato', '#ff6347'),
('Toolbox', '#746cc0'),
('Topaz', '#ffc87c'),
('Tractor red', '#fd0e35'),
('Grey', '#808080'),
('Tropical rain forest', '#00755e'),
('Blue', '#0073cf'),
('Blue', '#417dc1'),
('Tumbleweed', '#deaa88'),
('Turkish rose', '#b57281'),
('Turquoise', '#30d5c8'),
('Turquoise blue', '#00ffef'),
('Turquoise green', '#a0d6b4'),
('Tuscan red', '#66424d'),
('Twilight lavender', '#8a496b'),
('Tyrian purple', '#66023c'),
('A blue', '#0033aa'),
('A red', '#d9004c'),
('Blue', '#536895'),
('Gold', '#ffb300'),
('Green', '#3cd070'),
('Forest green', '#014421'),
('Maroon', '#7b1113'),
('Cardinal', '#990000'),
('Gold', '#ffcc00'),
('Ube', '#8878c3'),
('Ultra pink', '#ff6fff'),
('Ultramarine', '#120a8f'),
('Ultramarine blue', '#4166f5'),
('Umber', '#635147'),
('Nations blue', '#5b92e5'),
('Gold', '#b78727'),
('Yellow', '#ffff66'),
('Upsdell red', '#ae2029'),
('Urobilin', '#e1ad21'),
('Crimson', '#d3003f'),
('Vanilla', '#f3e5ab'),
('Vegas gold', '#c5b358'),
('Venetian red', '#c80815'),
('Verdigris', '#43b3ae'),
('Vermilion', '#e34234'),
('Veronica', '#a020f0'),
('Violet', '#ee82ee'),
('Blue', '#324ab2'),
('Red', '#f75394'),
('Viridian', '#40826d'),
('Vivid auburn', '#922724'),
('Vivid burgundy', '#9f1d35'),
('Vivid cerise', '#da1d81'),
('Vivid tangerine', '#ffa089'),
('Vivid violet', '#9f00ff'),
('Warm black', '#004242'),
('Waterspout', '#00ffff'),
('Wenge', '#645452'),
('Wheat', '#f5deb3'),
('White', '#ffffff'),
('White smoke', '#f5f5f5'),
('Strawberry', '#ff43a4'),
('Watermelon', '#fc6c85'),
('Wild blue yonder', '#a2add0'),
('Wine', '#722f37'),
('Wisteria', '#c9a0dc'),
('Xanadu', '#738678'),
('Blue', '#0f4d92'),
('Yellow', '#ffff00'),
('Orange', '#ffae42'),
('Yellow green', '#9acd32'),
('Zaffre', '#0014a8'),
('Zinnwaldite brown', '#2c1608'),
('Force blue', '#5d8aa8'),
]
| true | true |
1c2f0a29fb656dcac8d987417053888e961ea568 | 801 | py | Python | evora/server/ftp_server.py | ejgl/ScienceCamera | c81542bb0605423961110fa6d79d64fa69356a98 | [
"0BSD"
] | 4 | 2017-08-29T22:41:00.000Z | 2021-01-21T00:22:35.000Z | evora/server/ftp_server.py | ejgl/ScienceCamera | c81542bb0605423961110fa6d79d64fa69356a98 | [
"0BSD"
] | 40 | 2016-04-11T23:47:24.000Z | 2021-09-26T15:34:17.000Z | evora/server/ftp_server.py | ejgl/ScienceCamera | c81542bb0605423961110fa6d79d64fa69356a98 | [
"0BSD"
] | 6 | 2016-05-27T22:49:17.000Z | 2021-08-19T22:46:11.000Z | #!/usr/bin/env python2
from __future__ import absolute_import, division, print_function
from os.path import isdir
from twisted.cred.checkers import AllowAnonymousAccess
from twisted.cred.portal import Portal
from twisted.internet import reactor
# ftp server imports
from twisted.protocols.ftp import FTPFactory, FTPRealm
from evora.common import netconsts
# TODO: this needs to be specified some other way
# Does not exist on non-observatory computers
data_path = "/home/mro/storage/evora_data/"
if isdir(data_path):
p = Portal(FTPRealm(data_path), [AllowAnonymousAccess()])
f = FTPFactory(p)
f.timeOut = None
reactor.listenTCP(netconsts.FTP_TRANSFER_PORT, f)
else:
print("[ftp_server.py] Directory at '" + data_path + "' does not exist, exiting...")
quit()
reactor.run()
| 29.666667 | 88 | 0.761548 |
from __future__ import absolute_import, division, print_function
from os.path import isdir
from twisted.cred.checkers import AllowAnonymousAccess
from twisted.cred.portal import Portal
from twisted.internet import reactor
from twisted.protocols.ftp import FTPFactory, FTPRealm
from evora.common import netconsts
data_path = "/home/mro/storage/evora_data/"
if isdir(data_path):
p = Portal(FTPRealm(data_path), [AllowAnonymousAccess()])
f = FTPFactory(p)
f.timeOut = None
reactor.listenTCP(netconsts.FTP_TRANSFER_PORT, f)
else:
print("[ftp_server.py] Directory at '" + data_path + "' does not exist, exiting...")
quit()
reactor.run()
| true | true |
1c2f0b44319be3c44e399c947d7a1c55056cb317 | 529 | py | Python | django_dropbox_csv_export/satisfaction_ratings/tests/test_models.py | zkan/django-dropbox-csv-export | 5e77c539d84acf59d6f1dc1ffe3515b13fc34565 | [
"MIT"
] | null | null | null | django_dropbox_csv_export/satisfaction_ratings/tests/test_models.py | zkan/django-dropbox-csv-export | 5e77c539d84acf59d6f1dc1ffe3515b13fc34565 | [
"MIT"
] | null | null | null | django_dropbox_csv_export/satisfaction_ratings/tests/test_models.py | zkan/django-dropbox-csv-export | 5e77c539d84acf59d6f1dc1ffe3515b13fc34565 | [
"MIT"
] | null | null | null | from django.test import TestCase
from ..models import SatisfactionRating
class SatisfactionRatingTest(TestCase):
    """Persistence tests for the SatisfactionRating model."""

    def test_save_satisfaction_rating(self):
        """A saved rating round-trips its field values through the database."""
        # Populate the instance field-by-field, then persist it.
        new_rating = SatisfactionRating()
        new_rating.customer_name = 'Pronto'
        new_rating.score = 9
        new_rating.save()
        # Re-fetch the most recently stored row and check the values survived.
        stored = SatisfactionRating.objects.last()
        self.assertEqual(stored.customer_name, 'Pronto')
        self.assertEqual(stored.score, 9)
| 31.117647 | 69 | 0.750473 | from django.test import TestCase
from ..models import SatisfactionRating
class SatisfactionRatingTest(TestCase):
def test_save_satisfaction_rating(self):
satisfaction_rating = SatisfactionRating()
satisfaction_rating.customer_name = 'Pronto'
satisfaction_rating.score = 9
satisfaction_rating.save()
satisfaction_rating = SatisfactionRating.objects.last()
self.assertEqual(satisfaction_rating.customer_name, 'Pronto')
self.assertEqual(satisfaction_rating.score, 9)
| true | true |
1c2f0b4fcfb1e4565793074e7760ec4d5fe26d08 | 3,678 | py | Python | model/graph_models/object_descriptor.py | Nik-V9/AirObject | 5937e64531f08449e81d2c90e3c6643727efbaf0 | [
"BSD-3-Clause"
] | 9 | 2022-03-15T17:28:48.000Z | 2022-03-29T12:32:28.000Z | model/graph_models/object_descriptor.py | Nik-V9/AirObject | 5937e64531f08449e81d2c90e3c6643727efbaf0 | [
"BSD-3-Clause"
] | 1 | 2022-03-29T06:03:14.000Z | 2022-03-29T13:38:29.000Z | model/graph_models/object_descriptor.py | Nik-V9/AirObject | 5937e64531f08449e81d2c90e3c6643727efbaf0 | [
"BSD-3-Clause"
] | 1 | 2022-03-15T19:34:06.000Z | 2022-03-15T19:34:06.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from model.graph_models.attention import GraphAtten
class ObjectDescriptor(nn.Module):
    """Produce one L2-normalised descriptor per object from its keypoint graph.

    Each object's node features are its local descriptors concatenated with an
    MLP encoding of its normalized 2-D points; a graph-attention network (GCN)
    then pools them into a single feature vector plus per-node locations.
    """
    def __init__(self, config):
        super(ObjectDescriptor, self).__init__()
        enc_dims = config['points_encoder_dims']
        desc_dim = config['descriptor_dim']
        hidden_dim = config['hidden_dim']
        alpha = config['alpha']
        nheads = config['nheads']
        nout = config['nout']
        # Node feature size = local descriptor dim + point-encoder output dim.
        node_dim = desc_dim + enc_dims[-1]
        self.points_encoder = PointsEncoder(enc_dims)
        self.gcn = GCN(node_dim, hidden_dim, nout, alpha, nheads)

    def forward(self, batch_points, batch_descs, batch_adj):
        """Encode a batch of objects.

        inputs:
            batch_points: List[Tensor], normalized points, one tensor per object
            batch_descs: List[Tensor], local feature descriptors, one tensor per object
            batch_adj: List[Tensor], adjacency matrix of the triangulation-based
                object points graph, one per object
        returns:
            (stacked L2-normalised object descriptors, list of per-node locations)
        """
        per_object_features = []
        per_object_locations = []
        for points, descs, adj in zip(batch_points, batch_descs, batch_adj):
            node_features = torch.cat((descs, self.points_encoder(points)), dim=1)
            object_feature, attention = self.gcn(node_features, adj)
            per_object_features.append(object_feature)
            per_object_locations.append(attention)
        stacked = torch.stack(per_object_features)
        stacked = nn.functional.normalize(stacked, p=2, dim=-1)
        return stacked, per_object_locations
class PointsEncoder(nn.Module):
    """MLP that embeds normalized 2-D keypoints; output is L2-normalised.

    Hidden stages are Linear -> BatchNorm1d -> ReLU; the last stage is a bare
    Linear. Submodules are registered under the names 'point_encoder{i}' so the
    parameter naming scheme matches the original registration order.
    """
    def __init__(self, dims):
        super(PointsEncoder, self).__init__()
        stages = []
        last = len(dims) - 2  # index of the final (activation-free) Linear
        for i in range(len(dims) - 1):
            stages.append(nn.Linear(dims[i], dims[i + 1]))
            if i != last:
                stages.append(nn.BatchNorm1d((dims[i + 1])))
                stages.append(nn.ReLU())
        self.layers = stages
        # Register each stage explicitly so its parameters are tracked.
        for idx, stage in enumerate(stages):
            self.add_module('point_encoder{}'.format(idx), stage)

    def forward(self, x):
        for stage in self.layers:
            x = stage(x)
        return nn.functional.normalize(x, p=2, dim=-1)
class GCN(nn.Module):
    """Two graph-attention stages followed by sparsified pooling.

    Returns a single pooled feature for the whole graph plus the per-node
    weight map produced by Sparsification. Attribute names are kept so the
    parameter naming scheme is unchanged.
    """
    def __init__(self, nfeat, nhid, nout, alpha=0.2, nheads=8):
        super(GCN, self).__init__()
        self.atten1 = GraphAtten(nfeat, nhid, nfeat, alpha, nheads)
        self.atten2 = GraphAtten(nfeat, nhid, nfeat, alpha, nheads)
        self.tran1 = nn.Linear(nfeat, nfeat)
        self.relu = nn.ReLU()
        self.sparsification = Sparsification(nfeat, nout)

    def forward(self, x, adj):
        # Two attention passes over the same adjacency, then a ReLU projection.
        hidden = self.atten2(self.atten1(x, adj), adj)
        hidden = self.relu(self.tran1(hidden))
        # Pool node features into one descriptor + per-node weights.
        pooled, weights = self.sparsification(hidden)
        return pooled, weights
class Sparsification(nn.Module):
    """Pool per-node features into one descriptor via learned weights.

    Two branches encode the input: a feature branch and a weight ("location")
    branch. The descriptor is the weight-scaled feature sum over nodes (dim 0)
    followed by a final projection; the L2-normalised weights are also returned.
    """
    def __init__(self, input_dim, output_dim):
        super(Sparsification, self).__init__()
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=-1)  # currently unused in forward
        self.location_encoder1 = nn.Linear(input_dim, input_dim)
        self.location_encoder2 = nn.Linear(input_dim, output_dim)
        self.feature_encoder1 = nn.Linear(input_dim, input_dim)
        self.feature_encoder2 = nn.Linear(input_dim, output_dim)
        self.feature_encoder3 = nn.Linear(output_dim, output_dim)

    def forward(self, x):
        # Per-node feature branch.
        feats = self.relu(self.feature_encoder1(x))
        feats = self.relu(self.feature_encoder2(feats))
        # Per-node weight branch (unnormalised weights are used for pooling).
        weights = self.relu(self.location_encoder1(x))
        weights = self.relu(self.location_encoder2(weights))
        norm_weights = nn.functional.normalize(weights, p=2, dim=-1)
        # Weighted sum over nodes, then the final projection.
        pooled = self.feature_encoder3((weights * feats).sum(0))
        return pooled, norm_weights
import torch
import torch.nn as nn
from model.graph_models.attention import GraphAtten
class ObjectDescriptor(nn.Module):
def __init__(self, config):
super(ObjectDescriptor, self).__init__()
points_encoder_dims = config['points_encoder_dims']
descriptor_dim = config['descriptor_dim']
nhid = config['hidden_dim']
alpha = config['alpha']
nheads = config['nheads']
nout = config['nout']
nfeat = descriptor_dim + points_encoder_dims[-1]
self.points_encoder = PointsEncoder(points_encoder_dims)
self.gcn = GCN(nfeat, nhid, nout, alpha, nheads)
def forward(self, batch_points, batch_descs, batch_adj):
batch_features, locations = [], []
for points, descs, adj in zip(batch_points, batch_descs, batch_adj):
encoded_points = self.points_encoder(points)
features = torch.cat((descs, encoded_points), dim=1)
features, w = self.gcn(features, adj)
batch_features.append(features)
locations.append(w)
batch_features = torch.stack(batch_features)
batch_features = nn.functional.normalize(batch_features, p=2, dim=-1)
return batch_features, locations
class PointsEncoder(nn.Module):
def __init__(self, dims):
super(PointsEncoder, self).__init__()
layers = []
for i in range(len(dims)-1):
layers.append(nn.Linear(dims[i], dims[i+1]))
if i != len(dims)-2:
layers.append(nn.BatchNorm1d((dims[i+1])))
layers.append(nn.ReLU())
self.layers = layers
for i, layer in enumerate(self.layers):
self.add_module('point_encoder{}'.format(i), layer)
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = nn.functional.normalize(x, p=2, dim=-1)
return x
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nout, alpha=0.2, nheads=8):
super(GCN, self).__init__()
self.atten1 = GraphAtten(nfeat, nhid, nfeat, alpha, nheads)
self.atten2 = GraphAtten(nfeat, nhid, nfeat, alpha, nheads)
self.tran1 = nn.Linear(nfeat, nfeat)
self.relu = nn.ReLU()
self.sparsification = Sparsification(nfeat, nout)
def forward(self, x, adj):
x = self.atten1(x, adj)
x = self.atten2(x, adj)
x = self.relu(self.tran1(x))
x, w = self.sparsification(x)
return x, w
class Sparsification(nn.Module):
def __init__(self, input_dim, output_dim):
super(Sparsification, self).__init__()
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=-1)
self.location_encoder1 = nn.Linear(input_dim, input_dim)
self.location_encoder2 = nn.Linear(input_dim, output_dim)
self.feature_encoder1 = nn.Linear(input_dim, input_dim)
self.feature_encoder2 = nn.Linear(input_dim, output_dim)
self.feature_encoder3 = nn.Linear(output_dim, output_dim)
def forward(self, x):
descriptor = self.relu(self.feature_encoder1(x))
descriptor = self.relu(self.feature_encoder2(descriptor))
locations = self.relu(self.location_encoder1(x))
locations = self.relu(self.location_encoder2(locations))
norm_locations = nn.functional.normalize(locations, p=2, dim=-1)
descriptor = locations * descriptor
descriptor = torch.sum(descriptor, 0)
descriptor = self.feature_encoder3(descriptor)
return descriptor, norm_locations | true | true |
1c2f0d28c1df042f6c7b367df5e9614a27ecf277 | 702 | py | Python | game/tichu/team.py | lukaspestalozzi/Master_Semester_Project | 4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e | [
"MIT"
] | null | null | null | game/tichu/team.py | lukaspestalozzi/Master_Semester_Project | 4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e | [
"MIT"
] | null | null | null | game/tichu/team.py | lukaspestalozzi/Master_Semester_Project | 4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e | [
"MIT"
] | null | null | null | from collections import namedtuple
from game.tichu.tichuplayers import TichuPlayer
from game.utils import check_isinstance
class Team(namedtuple("T", ["player1", "player2"])):
    """Immutable pair of TichuPlayers forming one team."""

    def __init__(self, player1, player2):
        # The tuple itself is populated by __new__; here we only validate types.
        check_isinstance(player1, TichuPlayer)
        check_isinstance(player2, TichuPlayer)
        super(Team, self).__init__()

    @property
    def first_player(self):
        """First team member."""
        return self.player1

    @property
    def second_player(self):
        """Second team member."""
        return self.player2

    def __contains__(self, player):
        # Membership is decided purely by equality with either member.
        return any(player == member for member in (self.player1, self.player2))

    def __str__(self):
        return "Team(player1:{}, player2:{})".format(self.player1, self.player2)
from game.tichu.tichuplayers import TichuPlayer
from game.utils import check_isinstance
class Team(namedtuple("T", ["player1", "player2"])):
def __init__(self, player1, player2):
check_isinstance(player1, TichuPlayer)
check_isinstance(player2, TichuPlayer)
super(Team, self).__init__()
@property
def second_player(self):
return self.player2
@property
def first_player(self):
return self.player1
def __contains__(self, player):
return player == self.player1 or player == self.player2
def __str__(self):
return "Team(player1:{}, player2:{})".format(self.player1, self.player2)
| true | true |
1c2f0d7ec6b403d64e71162ea6a400ae30342e6d | 23,524 | py | Python | tests/test_app_routers_share_tokens_GET.py | BoostryJP/ibet-Prime | 924e7f8da4f8feea0a572e8b5532e09bcdf2dc99 | [
"Apache-2.0"
] | 2 | 2021-08-19T12:35:25.000Z | 2022-02-16T04:13:38.000Z | tests/test_app_routers_share_tokens_GET.py | BoostryJP/ibet-Prime | 924e7f8da4f8feea0a572e8b5532e09bcdf2dc99 | [
"Apache-2.0"
] | 46 | 2021-09-02T03:22:05.000Z | 2022-03-31T09:20:00.000Z | tests/test_app_routers_share_tokens_GET.py | BoostryJP/ibet-Prime | 924e7f8da4f8feea0a572e8b5532e09bcdf2dc99 | [
"Apache-2.0"
] | 1 | 2021-11-17T23:18:27.000Z | 2021-11-17T23:18:27.000Z | """
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from unittest import mock
from unittest.mock import call
from pytz import timezone
from config import TZ
from app.model.blockchain import IbetShareContract
from app.model.db import (
Token,
TokenType,
AdditionalTokenInfo
)
from tests.account_config import config_eth_account
class TestAppRoutersShareTokensGET:
    """Tests for GET /share/tokens: list share tokens, optionally filtered by issuer-address header."""
    # target API endpoint
    apiurl = "/share/tokens"
    local_tz = timezone(TZ)
    ###########################################################################
    # Normal Case
    ###########################################################################
    # <Normal_1>
    # parameter unset address, 0 Record
    def test_normal_1(self, client, db):
        """No issuer-address header and no registered tokens -> empty list."""
        resp = client.get(self.apiurl)
        assert resp.status_code == 200
        assert resp.json() == []
    # <Normal_2>
    # parameter unset address, 1 Record
    @mock.patch("app.model.blockchain.token.IbetShareContract.get")
    def test_normal_2(self, mock_get, client, db):
        """No issuer-address header, one token -> its contract attributes are returned."""
        user_1 = config_eth_account("user1")
        issuer_address_1 = user_1["address"]
        token = Token()
        token.type = TokenType.IBET_SHARE
        token.tx_hash = "tx_hash_test1"
        token.issuer_address = issuer_address_1
        token.token_address = "token_address_test1"
        token.abi = "abi_test1"
        db.add(token)
        db.commit()
        _issue_datetime = timezone("UTC").localize(token.created).astimezone(self.local_tz).isoformat()
        # request target API
        mock_token = IbetShareContract()
        mock_token.issuer_address = issuer_address_1
        mock_token.token_address = "token_address_test1"
        mock_token.name = "testtoken1"
        mock_token.symbol = "test1"
        mock_token.total_supply = 10000
        mock_token.contact_information = "contactInformation_test1"
        mock_token.privacy_policy = "privacyPolicy_test1"
        mock_token.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
        mock_token.status = True
        mock_token.issue_price = 1000
        mock_token.dividends = 123.45
        mock_token.dividend_record_date = "20211231"
        mock_token.dividend_payment_date = "20211231"
        mock_token.cancellation_date = "20221231"
        mock_token.transferable = True
        mock_token.is_offering = True
        mock_token.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
        mock_token.principal_value = 1000
        mock_token.transfer_approval_required = False
        mock_token.is_canceled = False
        mock_token.memo = "memo_test1"
        mock_get.side_effect = [mock_token]
        resp = client.get(self.apiurl)
        # assertion mock call arguments
        mock_get.assert_any_call(contract_address=token.token_address)
        assumed_response = [
            {
                "issuer_address": issuer_address_1,
                "token_address": "token_address_test1",
                "name": "testtoken1",
                "symbol": "test1",
                "total_supply": 10000,
                "contact_information": "contactInformation_test1",
                "privacy_policy": "privacyPolicy_test1",
                "tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
                "status": True,
                "issue_price": 1000,
                "principal_value": 1000,
                "dividends": 123.45,
                "dividend_record_date": "20211231",
                "dividend_payment_date": "20211231",
                "cancellation_date": "20221231",
                "transferable": True,
                "transfer_approval_required": False,
                "is_manual_transfer_approval": False,
                "is_offering": True,
                "personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
                "is_canceled": False,
                "issue_datetime": _issue_datetime,
                "token_status": 1,
                "memo": "memo_test1",
            }
        ]
        assert resp.status_code == 200
        assert resp.json() == assumed_response
    # <Normal Case 3>
    # parameter unset address, Multi Record
    @mock.patch("app.model.blockchain.token.IbetShareContract.get")
    def test_normal_3(self, mock_get, client, db):
        """No issuer filter, two tokens; AdditionalTokenInfo drives is_manual_transfer_approval."""
        user_1 = config_eth_account("user1")
        issuer_address_1 = user_1["address"]
        user_2 = config_eth_account("user2")
        issuer_address_2 = user_2["address"]
        # 1st Data
        token_1 = Token()
        token_1.type = TokenType.IBET_SHARE
        token_1.tx_hash = "tx_hash_test1"
        token_1.issuer_address = issuer_address_1
        token_1.token_address = "token_address_test1"
        token_1.abi = "abi_test1"
        db.add(token_1)
        db.commit()
        _issue_datetime_1 = timezone("UTC").localize(token_1.created).astimezone(self.local_tz).isoformat()
        additional_info_1 = AdditionalTokenInfo()
        additional_info_1.token_address = "token_address_test1"
        additional_info_1.is_manual_transfer_approval = True
        db.add(additional_info_1)
        db.commit()
        mock_token_1 = IbetShareContract()
        mock_token_1.issuer_address = issuer_address_1
        mock_token_1.token_address = "token_address_test1"
        mock_token_1.name = "testtoken1"
        mock_token_1.symbol = "test1"
        mock_token_1.total_supply = 10000
        mock_token_1.contact_information = "contactInformation_test1"
        mock_token_1.privacy_policy = "privacyPolicy_test1"
        mock_token_1.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
        mock_token_1.status = True
        mock_token_1.issue_price = 1000
        mock_token_1.dividends = 123.45
        mock_token_1.dividend_record_date = "20211231"
        mock_token_1.dividend_payment_date = "20211231"
        mock_token_1.cancellation_date = "20221231"
        mock_token_1.transferable = True
        mock_token_1.is_offering = True
        mock_token_1.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
        mock_token_1.principal_value = 1000
        mock_token_1.transfer_approval_required = False
        mock_token_1.is_canceled = False
        mock_token_1.memo = "memo_test1"
        # 2nd Data
        token_2 = Token()
        token_2.type = TokenType.IBET_SHARE
        token_2.tx_hash = "tx_hash_test2"
        token_2.issuer_address = issuer_address_2
        token_2.token_address = "token_address_test2"
        token_2.abi = "abi_test2"
        token_2.token_status = 0
        db.add(token_2)
        db.commit()
        _issue_datetime_2 = timezone("UTC").localize(token_2.created).astimezone(self.local_tz).isoformat()
        additional_info_2 = AdditionalTokenInfo()
        additional_info_2.token_address = "token_address_test2"
        additional_info_2.is_manual_transfer_approval = None  # not target
        db.add(additional_info_2)
        db.commit()
        mock_token_2 = IbetShareContract()
        mock_token_2.issuer_address = issuer_address_2
        mock_token_2.token_address = "token_address_test2"
        mock_token_2.name = "testtoken2"
        mock_token_2.symbol = "test2"
        mock_token_2.total_supply = 10000
        mock_token_2.contact_information = "contactInformation_test2"
        mock_token_2.privacy_policy = "privacyPolicy_test2"
        mock_token_2.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
        mock_token_2.status = True
        mock_token_2.issue_price = 1000
        mock_token_2.dividends = 123.45
        mock_token_2.dividend_record_date = "20211231"
        mock_token_2.dividend_payment_date = "20211231"
        mock_token_2.cancellation_date = "20221231"
        mock_token_2.transferable = True
        mock_token_2.is_offering = True
        mock_token_2.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
        mock_token_2.principal_value = 1000
        mock_token_2.transfer_approval_required = False
        mock_token_2.is_canceled = False
        mock_token_2.memo = "memo_test2"
        mock_get.side_effect = [
            mock_token_1, mock_token_2
        ]
        resp = client.get(self.apiurl)
        # assertion mock call arguments
        mock_get.assert_has_calls([
            call(contract_address=token_1.token_address),
            call(contract_address=token_2.token_address)
        ])
        assumed_response = [
            {
                "issuer_address": issuer_address_1,
                "token_address": "token_address_test1",
                "name": "testtoken1",
                "symbol": "test1",
                "total_supply": 10000,
                "contact_information": "contactInformation_test1",
                "privacy_policy": "privacyPolicy_test1",
                "tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
                "status": True,
                "issue_price": 1000,
                "principal_value": 1000,
                "dividends": 123.45,
                "dividend_record_date": "20211231",
                "dividend_payment_date": "20211231",
                "cancellation_date": "20221231",
                "transferable": True,
                "transfer_approval_required": False,
                "is_manual_transfer_approval": True,
                "is_offering": True,
                "personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
                "is_canceled": False,
                "issue_datetime": _issue_datetime_1,
                "token_status": 1,
                "memo": "memo_test1",
            },
            {
                "issuer_address": issuer_address_2,
                "token_address": "token_address_test2",
                "name": "testtoken2",
                "symbol": "test2",
                "total_supply": 10000,
                "contact_information": "contactInformation_test2",
                "privacy_policy": "privacyPolicy_test2",
                "tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
                "status": True,
                "issue_price": 1000,
                "principal_value": 1000,
                "dividends": 123.45,
                "dividend_record_date": "20211231",
                "dividend_payment_date": "20211231",
                "cancellation_date": "20221231",
                "transferable": True,
                "transfer_approval_required": False,
                "is_manual_transfer_approval": False,
                "is_offering": True,
                "personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
                "is_canceled": False,
                "issue_datetime": _issue_datetime_2,
                "token_status": 0,
                "memo": "memo_test2",
            }
        ]
        assert resp.status_code == 200
        assert resp.json() == assumed_response
    # <Normal Case 4>
    # parameter set address, 0 Record
    def test_normal_4(self, client, db):
        """Issuer filter set, only another issuer's token exists -> empty list."""
        user_1 = config_eth_account("user1")
        issuer_address_1 = user_1["address"]
        # No Target Data
        token = Token()
        token.type = TokenType.IBET_SHARE
        token.tx_hash = "tx_hash_test1"
        token.issuer_address = "issuer_address_test1"
        token.token_address = "token_address_test1"
        token.abi = "abi_test1"
        db.add(token)
        resp = client.get(self.apiurl, headers={"issuer-address": issuer_address_1})
        assert resp.status_code == 200
        assert resp.json() == []
    # <Normal Case 5>
    # parameter set address, 1 Record
    @mock.patch("app.model.blockchain.token.IbetShareContract.get")
    def test_normal_5(self, mock_get, client, db):
        """Issuer filter set -> only that issuer's single token is returned."""
        user_1 = config_eth_account("user1")
        issuer_address_1 = user_1["address"]
        user_2 = config_eth_account("user2")
        issuer_address_2 = user_2["address"]
        token_1 = Token()
        token_1.type = TokenType.IBET_SHARE
        token_1.tx_hash = "tx_hash_test1"
        token_1.issuer_address = issuer_address_1
        token_1.token_address = "token_address_test1"
        token_1.abi = "abi_test1"
        db.add(token_1)
        db.commit()
        _issue_datetime = timezone("UTC").localize(token_1.created).astimezone(self.local_tz).isoformat()
        mock_token = IbetShareContract()
        mock_token.issuer_address = issuer_address_1
        mock_token.token_address = "token_address_test1"
        mock_token.name = "testtoken1"
        mock_token.symbol = "test1"
        mock_token.total_supply = 10000
        mock_token.contact_information = "contactInformation_test1"
        mock_token.privacy_policy = "privacyPolicy_test1"
        mock_token.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
        mock_token.status = True
        mock_token.issue_price = 1000
        mock_token.dividends = 123.45
        mock_token.dividend_record_date = "20211231"
        mock_token.dividend_payment_date = "20211231"
        mock_token.cancellation_date = "20221231"
        mock_token.transferable = True
        mock_token.is_offering = True
        mock_token.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
        mock_token.principal_value = 1000
        mock_token.transfer_approval_required = False
        mock_token.is_canceled = False
        mock_token.memo = "memo_test1"
        mock_get.side_effect = [mock_token]
        # No Target Data
        token_2 = Token()
        token_2.type = TokenType.IBET_SHARE
        token_2.tx_hash = "tx_hash_test1"
        token_2.issuer_address = issuer_address_2
        token_2.token_address = "token_address_test1"
        token_2.abi = "abi_test1"
        db.add(token_2)
        resp = client.get(self.apiurl, headers={"issuer-address": issuer_address_1})
        # assertion mock call arguments
        mock_get.assert_any_call(contract_address=token_1.token_address)
        assumed_response = [
            {
                "issuer_address": issuer_address_1,
                "token_address": "token_address_test1",
                "name": "testtoken1",
                "symbol": "test1",
                "total_supply": 10000,
                "contact_information": "contactInformation_test1",
                "privacy_policy": "privacyPolicy_test1",
                "tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
                "status": True,
                "issue_price": 1000,
                "principal_value": 1000,
                "dividends": 123.45,
                "dividend_record_date": "20211231",
                "dividend_payment_date": "20211231",
                "cancellation_date": "20221231",
                "transferable": True,
                "transfer_approval_required": False,
                "is_manual_transfer_approval": False,
                "is_offering": True,
                "personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
                "is_canceled": False,
                "issue_datetime": _issue_datetime,
                "token_status": 1,
                "memo": "memo_test1",
            }
        ]
        assert resp.status_code == 200
        assert resp.json() == assumed_response
    # <Normal Case 6>
    # parameter set address, Multi Record
    @mock.patch("app.model.blockchain.token.IbetShareContract.get")
    def test_normal_6(self, mock_get, client, db):
        """Issuer filter set -> all of that issuer's tokens, other issuers excluded."""
        user_1 = config_eth_account("user1")
        issuer_address_1 = user_1["address"]
        user_2 = config_eth_account("user2")
        issuer_address_2 = user_2["address"]
        # 1st Data
        token_1 = Token()
        token_1.type = TokenType.IBET_SHARE
        token_1.tx_hash = "tx_hash_test1"
        token_1.issuer_address = issuer_address_1
        token_1.token_address = "token_address_test1"
        token_1.abi = "abi_test1"
        db.add(token_1)
        db.commit()
        _issue_datetime_1 = timezone("UTC").localize(token_1.created).astimezone(self.local_tz).isoformat()
        mock_token_1 = IbetShareContract()
        mock_token_1.issuer_address = issuer_address_1
        mock_token_1.token_address = "token_address_test1"
        mock_token_1.name = "testtoken1"
        mock_token_1.symbol = "test1"
        mock_token_1.total_supply = 10000
        mock_token_1.contact_information = "contactInformation_test1"
        mock_token_1.privacy_policy = "privacyPolicy_test1"
        mock_token_1.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
        mock_token_1.status = True
        mock_token_1.issue_price = 1000
        mock_token_1.dividends = 123.45
        mock_token_1.dividend_record_date = "20211231"
        mock_token_1.dividend_payment_date = "20211231"
        mock_token_1.cancellation_date = "20221231"
        mock_token_1.transferable = True
        mock_token_1.is_offering = True
        mock_token_1.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
        mock_token_1.principal_value = 1000
        mock_token_1.transfer_approval_required = False
        mock_token_1.is_canceled = False
        mock_token_1.memo = "memo_test1"
        # 2nd Data
        token_2 = Token()
        token_2.type = TokenType.IBET_SHARE
        token_2.tx_hash = "tx_hash_test2"
        token_2.issuer_address = issuer_address_1
        token_2.token_address = "token_address_test2"
        token_2.abi = "abi_test2"
        token_2.token_status = 0
        db.add(token_2)
        db.commit()
        _issue_datetime_2 = timezone("UTC").localize(token_2.created).astimezone(self.local_tz).isoformat()
        mock_token_2 = IbetShareContract()
        mock_token_2.issuer_address = issuer_address_1
        mock_token_2.token_address = "token_address_test2"
        mock_token_2.name = "testtoken2"
        mock_token_2.symbol = "test2"
        mock_token_2.total_supply = 10000
        mock_token_2.contact_information = "contactInformation_test2"
        mock_token_2.privacy_policy = "privacyPolicy_test2"
        mock_token_2.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
        mock_token_2.status = True
        mock_token_2.issue_price = 1000
        mock_token_2.dividends = 123.45
        mock_token_2.dividend_record_date = "20211231"
        mock_token_2.dividend_payment_date = "20211231"
        mock_token_2.cancellation_date = "20221231"
        mock_token_2.transferable = True
        mock_token_2.is_offering = True
        mock_token_2.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
        mock_token_2.principal_value = 1000
        mock_token_2.transfer_approval_required = False
        mock_token_2.is_canceled = False
        mock_token_2.memo = "memo_test2"
        mock_get.side_effect = [
            mock_token_1, mock_token_2
        ]
        # No Target Data
        token_3 = Token()
        token_3.type = TokenType.IBET_SHARE
        token_3.tx_hash = "tx_hash_test1"
        token_3.issuer_address = issuer_address_2
        token_3.token_address = "token_address_test1"
        token_3.abi = "abi_test1"
        db.add(token_3)
        resp = client.get(self.apiurl, headers={"issuer-address": issuer_address_1})
        # assertion mock call arguments
        mock_get.assert_has_calls([
            call(contract_address=token_1.token_address),
            call(contract_address=token_2.token_address)
        ])
        assumed_response = [
            {
                "issuer_address": issuer_address_1,
                "token_address": "token_address_test1",
                "name": "testtoken1",
                "symbol": "test1",
                "total_supply": 10000,
                "contact_information": "contactInformation_test1",
                "privacy_policy": "privacyPolicy_test1",
                "tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
                "status": True,
                "issue_price": 1000,
                "principal_value": 1000,
                "dividends": 123.45,
                "dividend_record_date": "20211231",
                "dividend_payment_date": "20211231",
                "cancellation_date": "20221231",
                "transferable": True,
                "transfer_approval_required": False,
                "is_manual_transfer_approval": False,
                "is_offering": True,
                "personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
                "is_canceled": False,
                "issue_datetime": _issue_datetime_1,
                "token_status": 1,
                "memo": "memo_test1",
            },
            {
                "issuer_address": issuer_address_1,
                "token_address": "token_address_test2",
                "name": "testtoken2",
                "symbol": "test2",
                "total_supply": 10000,
                "principal_value": 1000,
                "contact_information": "contactInformation_test2",
                "privacy_policy": "privacyPolicy_test2",
                "tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
                "status": True,
                "issue_price": 1000,
                "dividends": 123.45,
                "dividend_record_date": "20211231",
                "dividend_payment_date": "20211231",
                "cancellation_date": "20221231",
                "transferable": True,
                "transfer_approval_required": False,
                "is_manual_transfer_approval": False,
                "is_offering": True,
                "personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
                "is_canceled": False,
                "issue_datetime": _issue_datetime_2,
                "token_status": 0,
                "memo": "memo_test2",
            }
        ]
        assert resp.status_code == 200
        assert resp.json() == assumed_response
    ###########################################################################
    # Error Case
    ###########################################################################
    # <Error_1>
    # parameter error
    def test_error_1(self, client, db):
        """Malformed issuer-address header -> 422 RequestValidationError."""
        resp = client.get(self.apiurl, headers={"issuer-address": "issuer_address"})
        assert resp.status_code == 422
        assert resp.json() == {
            "meta": {
                "code": 1,
                "title": "RequestValidationError"
            },
            "detail": [{
                "loc": ["header", "issuer-address"],
                "msg": "issuer-address is not a valid address",
                "type": "value_error"
            }]
        }
| 41.054101 | 107 | 0.622641 | from unittest import mock
from unittest.mock import call
from pytz import timezone
from config import TZ
from app.model.blockchain import IbetShareContract
from app.model.db import (
Token,
TokenType,
AdditionalTokenInfo
)
from tests.account_config import config_eth_account
class TestAppRoutersShareTokensGET:
apiurl = "/share/tokens"
local_tz = timezone(TZ)
oken_2.type = TokenType.IBET_SHARE
token_2.tx_hash = "tx_hash_test2"
token_2.issuer_address = issuer_address_2
token_2.token_address = "token_address_test2"
token_2.abi = "abi_test2"
token_2.token_status = 0
db.add(token_2)
db.commit()
_issue_datetime_2 = timezone("UTC").localize(token_2.created).astimezone(self.local_tz).isoformat()
additional_info_2 = AdditionalTokenInfo()
additional_info_2.token_address = "token_address_test2"
additional_info_2.is_manual_transfer_approval = None
db.add(additional_info_2)
db.commit()
mock_token_2 = IbetShareContract()
mock_token_2.issuer_address = issuer_address_2
mock_token_2.token_address = "token_address_test2"
mock_token_2.name = "testtoken2"
mock_token_2.symbol = "test2"
mock_token_2.total_supply = 10000
mock_token_2.contact_information = "contactInformation_test2"
mock_token_2.privacy_policy = "privacyPolicy_test2"
mock_token_2.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
mock_token_2.status = True
mock_token_2.issue_price = 1000
mock_token_2.dividends = 123.45
mock_token_2.dividend_record_date = "20211231"
mock_token_2.dividend_payment_date = "20211231"
mock_token_2.cancellation_date = "20221231"
mock_token_2.transferable = True
mock_token_2.is_offering = True
mock_token_2.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
mock_token_2.principal_value = 1000
mock_token_2.transfer_approval_required = False
mock_token_2.is_canceled = False
mock_token_2.memo = "memo_test2"
mock_get.side_effect = [
mock_token_1, mock_token_2
]
resp = client.get(self.apiurl)
mock_get.assert_has_calls([
call(contract_address=token_1.token_address),
call(contract_address=token_2.token_address)
])
assumed_response = [
{
"issuer_address": issuer_address_1,
"token_address": "token_address_test1",
"name": "testtoken1",
"symbol": "test1",
"total_supply": 10000,
"contact_information": "contactInformation_test1",
"privacy_policy": "privacyPolicy_test1",
"tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
"status": True,
"issue_price": 1000,
"principal_value": 1000,
"dividends": 123.45,
"dividend_record_date": "20211231",
"dividend_payment_date": "20211231",
"cancellation_date": "20221231",
"transferable": True,
"transfer_approval_required": False,
"is_manual_transfer_approval": True,
"is_offering": True,
"personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
"is_canceled": False,
"issue_datetime": _issue_datetime_1,
"token_status": 1,
"memo": "memo_test1",
},
{
"issuer_address": issuer_address_2,
"token_address": "token_address_test2",
"name": "testtoken2",
"symbol": "test2",
"total_supply": 10000,
"contact_information": "contactInformation_test2",
"privacy_policy": "privacyPolicy_test2",
"tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
"status": True,
"issue_price": 1000,
"principal_value": 1000,
"dividends": 123.45,
"dividend_record_date": "20211231",
"dividend_payment_date": "20211231",
"cancellation_date": "20221231",
"transferable": True,
"transfer_approval_required": False,
"is_manual_transfer_approval": False,
"is_offering": True,
"personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
"is_canceled": False,
"issue_datetime": _issue_datetime_2,
"token_status": 0,
"memo": "memo_test2",
}
]
assert resp.status_code == 200
assert resp.json() == assumed_response
def test_normal_4(self, client, db):
user_1 = config_eth_account("user1")
issuer_address_1 = user_1["address"]
token = Token()
token.type = TokenType.IBET_SHARE
token.tx_hash = "tx_hash_test1"
token.issuer_address = "issuer_address_test1"
token.token_address = "token_address_test1"
token.abi = "abi_test1"
db.add(token)
resp = client.get(self.apiurl, headers={"issuer-address": issuer_address_1})
assert resp.status_code == 200
assert resp.json() == []
    @mock.patch("app.model.blockchain.token.IbetShareContract.get")
    def test_normal_5(self, mock_get, client, db):
        """With an issuer-address header, only that issuer's tokens are returned.

        Seeds one token per issuer, mocks the on-chain contract read for the
        matching token only, and checks the response contains exactly the
        requesting issuer's token.
        """
        user_1 = config_eth_account("user1")
        issuer_address_1 = user_1["address"]
        user_2 = config_eth_account("user2")
        issuer_address_2 = user_2["address"]
        # Token owned by the requesting issuer (should appear in the response).
        token_1 = Token()
        token_1.type = TokenType.IBET_SHARE
        token_1.tx_hash = "tx_hash_test1"
        token_1.issuer_address = issuer_address_1
        token_1.token_address = "token_address_test1"
        token_1.abi = "abi_test1"
        db.add(token_1)
        db.commit()
        # Expected issue_datetime: DB `created` (UTC) converted to the API's local TZ.
        _issue_datetime = timezone("UTC").localize(token_1.created).astimezone(self.local_tz).isoformat()
        # On-chain attributes the mocked IbetShareContract.get will return for token_1.
        mock_token = IbetShareContract()
        mock_token.issuer_address = issuer_address_1
        mock_token.token_address = "token_address_test1"
        mock_token.name = "testtoken1"
        mock_token.symbol = "test1"
        mock_token.total_supply = 10000
        mock_token.contact_information = "contactInformation_test1"
        mock_token.privacy_policy = "privacyPolicy_test1"
        mock_token.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
        mock_token.status = True
        mock_token.issue_price = 1000
        mock_token.dividends = 123.45
        mock_token.dividend_record_date = "20211231"
        mock_token.dividend_payment_date = "20211231"
        mock_token.cancellation_date = "20221231"
        mock_token.transferable = True
        mock_token.is_offering = True
        mock_token.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
        mock_token.principal_value = 1000
        mock_token.transfer_approval_required = False
        mock_token.is_canceled = False
        mock_token.memo = "memo_test1"
        # Only one contract read is expected (token_2 belongs to another issuer).
        mock_get.side_effect = [mock_token]
        # Token owned by a different issuer (must be filtered out).
        token_2 = Token()
        token_2.type = TokenType.IBET_SHARE
        token_2.tx_hash = "tx_hash_test1"
        token_2.issuer_address = issuer_address_2
        token_2.token_address = "token_address_test1"
        token_2.abi = "abi_test1"
        db.add(token_2)
        resp = client.get(self.apiurl, headers={"issuer-address": issuer_address_1})
        # The endpoint must have fetched on-chain data for the issuer's token.
        mock_get.assert_any_call(contract_address=token_1.token_address)
        assumed_response = [
            {
                "issuer_address": issuer_address_1,
                "token_address": "token_address_test1",
                "name": "testtoken1",
                "symbol": "test1",
                "total_supply": 10000,
                "contact_information": "contactInformation_test1",
                "privacy_policy": "privacyPolicy_test1",
                "tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
                "status": True,
                "issue_price": 1000,
                "principal_value": 1000,
                "dividends": 123.45,
                "dividend_record_date": "20211231",
                "dividend_payment_date": "20211231",
                "cancellation_date": "20221231",
                "transferable": True,
                "transfer_approval_required": False,
                "is_manual_transfer_approval": False,
                "is_offering": True,
                "personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
                "is_canceled": False,
                "issue_datetime": _issue_datetime,
                "token_status": 1,
                "memo": "memo_test1",
            }
        ]
        assert resp.status_code == 200
        assert resp.json() == assumed_response
    @mock.patch("app.model.blockchain.token.IbetShareContract.get")
    def test_normal_6(self, mock_get, client, db):
        """With an issuer-address header, all of that issuer's tokens are returned.

        Seeds two tokens for the requesting issuer (one with token_status=0,
        i.e. a pending/processing token, which is still listed) plus one token
        for another issuer that must be excluded. The mocked contract reads
        are consumed in DB insertion order, so side_effect ordering matters.
        """
        user_1 = config_eth_account("user1")
        issuer_address_1 = user_1["address"]
        user_2 = config_eth_account("user2")
        issuer_address_2 = user_2["address"]
        # First token of the requesting issuer.
        token_1 = Token()
        token_1.type = TokenType.IBET_SHARE
        token_1.tx_hash = "tx_hash_test1"
        token_1.issuer_address = issuer_address_1
        token_1.token_address = "token_address_test1"
        token_1.abi = "abi_test1"
        db.add(token_1)
        db.commit()
        # Expected issue_datetime values: DB `created` (UTC) in the API's local TZ.
        _issue_datetime_1 = timezone("UTC").localize(token_1.created).astimezone(self.local_tz).isoformat()
        # Mocked on-chain attributes for token_1.
        mock_token_1 = IbetShareContract()
        mock_token_1.issuer_address = issuer_address_1
        mock_token_1.token_address = "token_address_test1"
        mock_token_1.name = "testtoken1"
        mock_token_1.symbol = "test1"
        mock_token_1.total_supply = 10000
        mock_token_1.contact_information = "contactInformation_test1"
        mock_token_1.privacy_policy = "privacyPolicy_test1"
        mock_token_1.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
        mock_token_1.status = True
        mock_token_1.issue_price = 1000
        mock_token_1.dividends = 123.45
        mock_token_1.dividend_record_date = "20211231"
        mock_token_1.dividend_payment_date = "20211231"
        mock_token_1.cancellation_date = "20221231"
        mock_token_1.transferable = True
        mock_token_1.is_offering = True
        mock_token_1.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
        mock_token_1.principal_value = 1000
        mock_token_1.transfer_approval_required = False
        mock_token_1.is_canceled = False
        mock_token_1.memo = "memo_test1"
        # Second token of the requesting issuer; token_status=0 (still processing).
        token_2 = Token()
        token_2.type = TokenType.IBET_SHARE
        token_2.tx_hash = "tx_hash_test2"
        token_2.issuer_address = issuer_address_1
        token_2.token_address = "token_address_test2"
        token_2.abi = "abi_test2"
        token_2.token_status = 0
        db.add(token_2)
        db.commit()
        _issue_datetime_2 = timezone("UTC").localize(token_2.created).astimezone(self.local_tz).isoformat()
        # Mocked on-chain attributes for token_2.
        mock_token_2 = IbetShareContract()
        mock_token_2.issuer_address = issuer_address_1
        mock_token_2.token_address = "token_address_test2"
        mock_token_2.name = "testtoken2"
        mock_token_2.symbol = "test2"
        mock_token_2.total_supply = 10000
        mock_token_2.contact_information = "contactInformation_test2"
        mock_token_2.privacy_policy = "privacyPolicy_test2"
        mock_token_2.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
        mock_token_2.status = True
        mock_token_2.issue_price = 1000
        mock_token_2.dividends = 123.45
        mock_token_2.dividend_record_date = "20211231"
        mock_token_2.dividend_payment_date = "20211231"
        mock_token_2.cancellation_date = "20221231"
        mock_token_2.transferable = True
        mock_token_2.is_offering = True
        mock_token_2.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
        mock_token_2.principal_value = 1000
        mock_token_2.transfer_approval_required = False
        mock_token_2.is_canceled = False
        mock_token_2.memo = "memo_test2"
        # Contract reads are consumed in insertion order: token_1 then token_2.
        mock_get.side_effect = [
            mock_token_1, mock_token_2
        ]
        # Token of a different issuer; the header filter must exclude it.
        token_3 = Token()
        token_3.type = TokenType.IBET_SHARE
        token_3.tx_hash = "tx_hash_test1"
        token_3.issuer_address = issuer_address_2
        token_3.token_address = "token_address_test1"
        token_3.abi = "abi_test1"
        db.add(token_3)
        resp = client.get(self.apiurl, headers={"issuer-address": issuer_address_1})
        # Both of the issuer's tokens must have been read on-chain, in order.
        mock_get.assert_has_calls([
            call(contract_address=token_1.token_address),
            call(contract_address=token_2.token_address)
        ])
        assumed_response = [
            {
                "issuer_address": issuer_address_1,
                "token_address": "token_address_test1",
                "name": "testtoken1",
                "symbol": "test1",
                "total_supply": 10000,
                "contact_information": "contactInformation_test1",
                "privacy_policy": "privacyPolicy_test1",
                "tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
                "status": True,
                "issue_price": 1000,
                "principal_value": 1000,
                "dividends": 123.45,
                "dividend_record_date": "20211231",
                "dividend_payment_date": "20211231",
                "cancellation_date": "20221231",
                "transferable": True,
                "transfer_approval_required": False,
                "is_manual_transfer_approval": False,
                "is_offering": True,
                "personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
                "is_canceled": False,
                "issue_datetime": _issue_datetime_1,
                "token_status": 1,
                "memo": "memo_test1",
            },
            {
                "issuer_address": issuer_address_1,
                "token_address": "token_address_test2",
                "name": "testtoken2",
                "symbol": "test2",
                "total_supply": 10000,
                "principal_value": 1000,
                "contact_information": "contactInformation_test2",
                "privacy_policy": "privacyPolicy_test2",
                "tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
                "status": True,
                "issue_price": 1000,
                "dividends": 123.45,
                "dividend_record_date": "20211231",
                "dividend_payment_date": "20211231",
                "cancellation_date": "20221231",
                "transferable": True,
                "transfer_approval_required": False,
                "is_manual_transfer_approval": False,
                "is_offering": True,
                "personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
                "is_canceled": False,
                "issue_datetime": _issue_datetime_2,
                "token_status": 0,
                "memo": "memo_test2",
            }
        ]
        assert resp.status_code == 200
        assert resp.json() == assumed_response
| true | true |
1c2f0dbf99f70aa7e0c4f0b9b609c5c57eaed13a | 97 | py | Python | bloggingapp/apps.py | mr-shubhamsinghal/blog | 1dc24e0d52ce7432f10faad5a2823190d3f924d8 | [
"MIT"
] | null | null | null | bloggingapp/apps.py | mr-shubhamsinghal/blog | 1dc24e0d52ce7432f10faad5a2823190d3f924d8 | [
"MIT"
] | null | null | null | bloggingapp/apps.py | mr-shubhamsinghal/blog | 1dc24e0d52ce7432f10faad5a2823190d3f924d8 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class BloggingappConfig(AppConfig):
    """Django application configuration for the ``bloggingapp`` app."""
    # Full Python path label used by Django to register the app in INSTALLED_APPS.
    name = 'bloggingapp'
| 16.166667 | 35 | 0.773196 | from django.apps import AppConfig
class BloggingappConfig(AppConfig):
    """Django application configuration for the ``bloggingapp`` app."""
    # Full Python path label used by Django to register the app in INSTALLED_APPS.
    name = 'bloggingapp'
| true | true |
1c2f0dbffd87af73f94a2a3f241c3730a7a594e1 | 78,244 | py | Python | ISAFlaserResults/code/csvClean.py | dgbirm/elo_sailor | 0978eac23e9334eee8cab3225840f82fbc153194 | [
"MIT"
] | 2 | 2020-08-12T17:34:53.000Z | 2021-02-19T15:13:06.000Z | ISAFlaserResults/code/csvClean.py | dgbirm/elo_sailor | 0978eac23e9334eee8cab3225840f82fbc153194 | [
"MIT"
] | null | null | null | ISAFlaserResults/code/csvClean.py | dgbirm/elo_sailor | 0978eac23e9334eee8cab3225840f82fbc153194 | [
"MIT"
] | null | null | null | #Scrap code for cleaning the CSV files so that we can read them easier
import csv
import sys
import pandas as pd
import numpy as np
import os
import re
from selenium import webdriver
from time import sleep
from text_unidecode import unidecode
from tabulate import tabulate
sys.path.append('/home/daniel/Desktop/elo_sailor/Glicko2approach')
from SailingGlicko2 import *
from Scrape import *
os.chdir("..")
def getHiddenHTML(currentRegatta):
    """Fetch a page with Selenium and return its JavaScript-rendered HTML.

    Opens *currentRegatta* in a fresh Firefox instance, waits briefly so
    client-side scripts can populate the page, and returns the innerHTML
    of the <html> element.

    Parameters
    ----------
    currentRegatta : str
        URL of the regatta results page to fetch.

    Returns
    -------
    str
        The rendered innerHTML of the page's <html> element.
    """
    browser = webdriver.Firefox()
    try:
        browser.get(currentRegatta)
        # Give client-side scripts time to render the results tables.
        sleep(2)
        innerHTML = browser.execute_script(
            "return document.getElementsByTagName('html')[0].innerHTML")
    finally:
        # Bug fix: previously the browser was only closed on success, so a
        # failed page load or script call leaked a Firefox process. Always
        # close the window, even when an exception propagates.
        browser.close()
    return innerHTML
colorlst=[('0000ff', '2'), ('0000ff', '21'), ('bbbb00', '4'), ('bbbb00', '4'), ('bbbb00', '3'), ('bbbb00', '4'), ('999900', '8'), ('999900', '2'), ('999900', '5'), ('999900', '13'), ('999900', '14'), ('999900', '51'), ('0000ff', '2'), ('0000ff', '21'), ('bbbb00', '4'), ('bbbb00', '4'), ('bbbb00', '3'), ('bbbb00', '4'), ('999900', '8'), ('999900', '2'), ('999900', '5'), ('999900', '13'), ('999900', '14'), ('999900', '51'), ('ff0000', '13'), ('ff0000', '7'), ('ff0000', '3'), ('ff0000', '1'), ('0000ff', '2'), ('0000ff', '1'), ('999900', '19'), ('999900', '18'), ('999900', '30'), ('999900', '1'), ('999900', '6'), ('999900', '5'), ('ff0000', '13'), ('ff0000', '7'), ('ff0000', '3'), ('ff0000', '1'), ('0000ff', '2'), ('0000ff', '1'), ('999900', '19'), ('999900', '18'), ('999900', '30'), ('999900', '1'), ('999900', '6'), ('999900', '5'), ('ff0000', '2'), ('ff0000', '2'), ('0000ff', '3'), ('0000ff', '7'), ('ff0000', '4'), ('ff0000', '53 UFD'), ('999900', '4'), ('999900', '38'), ('999900', '13'), ('999900', '23'), ('999900', '2'), ('999900', '9'), ('ff0000', '2'), ('ff0000', '2'), ('0000ff', '3'), ('0000ff', '7'), ('ff0000', '4'), ('ff0000', '53 UFD'), ('999900', '4'), ('999900', '38'), ('999900', '13'), ('999900', '23'), ('999900', '2'), ('999900', '9'), ('0000ff', '12'), ('0000ff', '7'), ('bbbb00', '2'), ('bbbb00', '2'), ('ff0000', '1'), ('ff0000', '1'), ('999900', '17'), ('999900', '8'), ('999900', '39'), ('999900', '21'), ('999900', '1'), ('999900', '12'), ('0000ff', '12'), ('0000ff', '7'), ('bbbb00', '2'), ('bbbb00', '2'), ('ff0000', '1'), ('ff0000', '1'), ('999900', '17'), ('999900', '8'), ('999900', '39'), ('999900', '21'), ('999900', '1'), ('999900', '12'), ('ff0000', '17'), ('ff0000', '4'), ('0000ff', '9'), ('0000ff', '9'), ('bbbb00', '2'), ('bbbb00', '5'), ('999900', '23'), ('999900', '7'), ('999900', '7'), ('999900', '22'), ('999900', '4'), ('999900', '4'), ('ff0000', '17'), ('ff0000', '4'), ('0000ff', '9'), ('0000ff', '9'), ('bbbb00', '2'), ('bbbb00', '5'), 
('999900', '23'), ('999900', '7'), ('999900', '7'), ('999900', '22'), ('999900', '4'), ('999900', '4'), ('0000ff', '3'), ('0000ff', '2'), ('ff0000', '7'), ('ff0000', '2'), ('0000ff', '1'), ('0000ff', '7'), ('999900', '22'), ('999900', '22'), ('999900', '2'), ('999900', '11'), ('999900', '8'), ('999900', '20'), ('0000ff', '3'), ('0000ff', '2'), ('ff0000', '7'), ('ff0000', '2'), ('0000ff', '1'), ('0000ff', '7'), ('999900', '22'), ('999900', '22'), ('999900', '2'), ('999900', '11'), ('999900', '8'), ('999900', '20'), ('bbbb00', '5'), ('bbbb00', '13'), ('0000ff', '5'), ('0000ff', '4'), ('bbbb00', '1'), ('bbbb00', '1'), ('999900', '24'), ('999900', '40'), ('999900', '29'), ('999900', '4'), ('999900', '9'), ('999900', '10'), ('bbbb00', '5'), ('bbbb00', '13'), ('0000ff', '5'), ('0000ff', '4'), ('bbbb00', '1'), ('bbbb00', '1'), ('999900', '24'), ('999900', '40'), ('999900', '29'), ('999900', '4'), ('999900', '9'), ('999900', '10'), ('ff0000', '5'), ('ff0000', '6'), ('bbbb00', '12'), ('bbbb00', '14'), ('ff0000', '15'), ('ff0000', '7'), ('999900', '16'), ('999900', '6'), ('999900', '12'), ('999900', '7'), ('999900', '45'), ('999900', '7'), ('ff0000', '5'), ('ff0000', '6'), ('bbbb00', '12'), ('bbbb00', '14'), ('ff0000', '15'), ('ff0000', '7'), ('999900', '16'), ('999900', '6'), ('999900', '12'), ('999900', '7'), ('999900', '45'), ('999900', '7'), ('bbbb00', '6'), ('bbbb00', '10'), ('ff0000', '2'), ('ff0000', '5'), ('0000ff', '7'), ('0000ff', '3'), ('999900', '7'), ('999900', '14'), ('999900', '31'), ('999900', '10'), ('999900', '53 BFD'), ('999900', '11'), ('bbbb00', '6'), ('bbbb00', '10'), ('ff0000', '2'), ('ff0000', '5'), ('0000ff', '7'), ('0000ff', '3'), ('999900', '7'), ('999900', '14'), ('999900', '31'), ('999900', '10'), ('999900', '53 BFD'), ('999900', '11'), ('bbbb00', '21'), ('bbbb00', '5'), ('ff0000', '10'), ('ff0000', '7'), ('0000ff', '3'), ('0000ff', '4'), ('999900', '15'), ('999900', '11'), ('999900', '18'), ('999900', '6'), ('999900', '32'), ('999900', '17'), 
('bbbb00', '21'), ('bbbb00', '5'), ('ff0000', '10'), ('ff0000', '7'), ('0000ff', '3'), ('0000ff', '4'), ('999900', '15'), ('999900', '11'), ('999900', '18'), ('999900', '6'), ('999900', '32'), ('999900', '17'), ('ff0000', '15'), ('ff0000', '18'), ('ff0000', '6'), ('ff0000', '6'), ('ff0000', '2'), ('ff0000', '4'), ('999900', '35'), ('999900', '15'), ('999900', '3'), ('999900', '2'), ('999900', '18'), ('999900', '34'), ('ff0000', '15'), ('ff0000', '18'), ('ff0000', '6'), ('ff0000', '6'), ('ff0000', '2'), ('ff0000', '4'), ('999900', '35'), ('999900', '15'), ('999900', '3'), ('999900', '2'), ('999900', '18'), ('999900', '34'), ('0000ff', '27'), ('0000ff', '3'), ('0000ff', '4'), ('0000ff', '3'), ('bbbb00', '5'), ('bbbb00', '2'), ('999900', '14'), ('999900', '30'), ('999900', '15'), ('999900', '25'), ('999900', '21'), ('999900', '32'), ('0000ff', '27'), ('0000ff', '3'), ('0000ff', '4'), ('0000ff', '3'), ('bbbb00', '5'), ('bbbb00', '2'), ('999900', '14'), ('999900', '30'), ('999900', '15'), ('999900', '25'), ('999900', '21'), ('999900', '32'), ('ff0000', '8'), ('ff0000', '3'), ('bbbb00', '3'), ('bbbb00', '1'), ('bbbb00', '8'), ('bbbb00', '8'), ('999900', '22 SCP'), ('999900', '27'), ('999900', '28'), ('999900', '12'), ('999900', '33'), ('999900', '13'), ('ff0000', '8'), ('ff0000', '3'), ('bbbb00', '3'), ('bbbb00', '1'), ('bbbb00', '8'), ('bbbb00', '8'), ('999900', '22 SCP'), ('999900', '27'), ('999900', '28'), ('999900', '12'), ('999900', '33'), ('999900', '13'), ('0000ff', '6'), ('0000ff', '17'), ('bbbb00', '15'), ('bbbb00', '15'), ('0000ff', '14'), ('0000ff', '17'), ('999900', '3'), ('999900', '3'), ('999900', '20'), ('999900', '37'), ('999900', '17'), ('999900', '19'), ('0000ff', '6'), ('0000ff', '17'), ('bbbb00', '15'), ('bbbb00', '15'), ('0000ff', '14'), ('0000ff', '17'), ('999900', '3'), ('999900', '3'), ('999900', '20'), ('999900', '37'), ('999900', '17'), ('999900', '19'), ('bbbb00', '4'), ('bbbb00', '4'), ('ff0000', '1'), ('ff0000', '8'), ('0000ff', '13'), 
('0000ff', '10'), ('999900', '49'), ('999900', '45'), ('999900', '35'), ('999900', '9'), ('999900', '15'), ('999900', '2'), ('bbbb00', '4'), ('bbbb00', '4'), ('ff0000', '1'), ('ff0000', '8'), ('0000ff', '13'), ('0000ff', '10'), ('999900', '49'), ('999900', '45'), ('999900', '35'), ('999900', '9'), ('999900', '15'), ('999900', '2'), ('0000ff', '16'), ('0000ff', '12'), ('0000ff', '2'), ('0000ff', '1'), ('bbbb00', '4'), ('bbbb00', '3'), ('999900', '27'), ('999900', '21'), ('999900', '33'), ('999900', '29'), ('999900', '29'), ('999900', '6'), ('0000ff', '16'), ('0000ff', '12'), ('0000ff', '2'), ('0000ff', '1'), ('bbbb00', '4'), ('bbbb00', '3'), ('999900', '27'), ('999900', '21'), ('999900', '33'), ('999900', '29'), ('999900', '29'), ('999900', '6'), ('ff0000', '14'), ('ff0000', '1'), ('bbbb00', '8'), ('bbbb00', '3'), ('ff0000', '9'), ('ff0000', '9'), ('999900', '1'), ('999900', '46'), ('999900', '41'), ('999900', '33'), ('999900', '30'), ('999900', '3'), ('ff0000', '14'), ('ff0000', '1'), ('bbbb00', '8'), ('bbbb00', '3'), ('ff0000', '9'), ('ff0000', '9'), ('999900', '1'), ('999900', '46'), ('999900', '41'), ('999900', '33'), ('999900', '30'), ('999900', '3'), ('ff0000', '18'), ('ff0000', '53 BFD'), ('ff0000', '4'), ('ff0000', '9'), ('bbbb00', '6'), ('bbbb00', '7'), ('999900', '53 UFD'), ('999900', '34'), ('999900', '10'), ('999900', '15'), ('999900', '22'), ('999900', '15'), ('ff0000', '18'), ('ff0000', '53 BFD'), ('ff0000', '4'), ('ff0000', '9'), ('bbbb00', '6'), ('bbbb00', '7'), ('999900', '53 UFD'), ('999900', '34'), ('999900', '10'), ('999900', '15'), ('999900', '22'), ('999900', '15'), ('0000ff', '5'), ('0000ff', '20'), ('0000ff', '1'), ('0000ff', '16'), ('bbbb00', '15'), ('bbbb00', '10'), ('999900', '36'), ('999900', '9'), ('999900', '4'), ('999900', '3'), ('999900', '42'), ('999900', '43'), ('0000ff', '5'), ('0000ff', '20'), ('0000ff', '1'), ('0000ff', '16'), ('bbbb00', '15'), ('bbbb00', '10'), ('999900', '36'), ('999900', '9'), ('999900', '4'), ('999900', '3'), 
('999900', '42'), ('999900', '43'), ('bbbb00', '9'), ('bbbb00', '2'), ('0000ff', '10'), ('0000ff', '8'), ('ff0000', '23'), ('ff0000', '13'), ('999900', '11'), ('999900', '44'), ('999900', '19'), ('999900', '14'), ('999900', '20'), ('999900', '39'), ('bbbb00', '9'), ('bbbb00', '2'), ('0000ff', '10'), ('0000ff', '8'), ('ff0000', '23'), ('ff0000', '13'), ('999900', '11'), ('999900', '44'), ('999900', '19'), ('999900', '14'), ('999900', '20'), ('999900', '39'), ('bbbb00', '37'), ('bbbb00', '53 BFD'), ('bbbb00', '1'), ('bbbb00', '5'), ('0000ff', '6'), ('0000ff', '5'), ('999900', '26'), ('999900', '26'), ('999900', '32'), ('999900', '20'), ('999900', '12'), ('999900', '16'), ('bbbb00', '37'), ('bbbb00', '53 BFD'), ('bbbb00', '1'), ('bbbb00', '5'), ('0000ff', '6'), ('0000ff', '5'), ('999900', '26'), ('999900', '26'), ('999900', '32'), ('999900', '20'), ('999900', '12'), ('999900', '16'), ('bbbb00', '7'), ('bbbb00', '7'), ('0000ff', '53 BFD'), ('0000ff', '6'), ('0000ff', '11'), ('0000ff', '11'), ('999900', '30'), ('999900', '29'), ('999900', '9'), ('999900', '26'), ('999900', '16'), ('999900', '31'), ('bbbb00', '7'), ('bbbb00', '7'), ('0000ff', '53 BFD'), ('0000ff', '6'), ('0000ff', '11'), ('0000ff', '11'), ('999900', '30'), ('999900', '29'), ('999900', '9'), ('999900', '26'), ('999900', '16'), ('999900', '31'), ('0000ff', '14'), ('0000ff', '16'), ('ff0000', '16'), ('ff0000', '10'), ('0000ff', '15'), ('0000ff', '2'), ('999900', '18'), ('999900', '42'), ('999900', '47'), ('999900', '17'), ('999900', '13'), ('999900', '8'), ('0000ff', '14'), ('0000ff', '16'), ('ff0000', '16'), ('ff0000', '10'), ('0000ff', '15'), ('0000ff', '2'), ('999900', '18'), ('999900', '42'), ('999900', '47'), ('999900', '17'), ('999900', '13'), ('999900', '8'), ('bbbb00', '1'), ('bbbb00', '3'), ('bbbb00', '17'), ('bbbb00', '12'), ('0000ff', '9'), ('0000ff', '6'), ('999900', '9'), ('999900', '37'), ('999900', '8'), ('999900', '34'), ('999900', '49'), ('999900', '40'), ('bbbb00', '1'), ('bbbb00', '3'), 
('bbbb00', '17'), ('bbbb00', '12'), ('0000ff', '9'), ('0000ff', '6'), ('999900', '9'), ('999900', '37'), ('999900', '8'), ('999900', '34'), ('999900', '49'), ('999900', '40'), ('0000ff', '30'), ('0000ff', '26'), ('0000ff', '7'), ('0000ff', '15'), ('ff0000', '3'), ('ff0000', '6'), ('999900', '32'), ('999900', '43'), ('999900', '42'), ('999900', '18'), ('999900', '10'), ('999900', '1'), ('0000ff', '30'), ('0000ff', '26'), ('0000ff', '7'), ('0000ff', '15'), ('ff0000', '3'), ('ff0000', '6'), ('999900', '32'), ('999900', '43'), ('999900', '42'), ('999900', '18'), ('999900', '10'), ('999900', '1'), ('ff0000', '20'), ('ff0000', '17'), ('0000ff', '11'), ('0000ff', '19'), ('bbbb00', '17'), ('bbbb00', '11'), ('999900', '10'), ('999900', '23'), ('999900', '22'), ('999900', '31'), ('999900', '3'), ('999900', '33'), ('ff0000', '20'), ('ff0000', '17'), ('0000ff', '11'), ('0000ff', '19'), ('bbbb00', '17'), ('bbbb00', '11'), ('999900', '10'), ('999900', '23'), ('999900', '22'), ('999900', '31'), ('999900', '3'), ('999900', '33'), ('ff0000', '9'), ('ff0000', '25'), ('0000ff', '14'), ('0000ff', '5'), ('0000ff', '21'), ('0000ff', '19'), ('999900', '29'), ('999900', '10'), ('999900', '21'), ('999900', '8'), ('999900', '37'), ('999900', '38'), ('ff0000', '9'), ('ff0000', '25'), ('0000ff', '14'), ('0000ff', '5'), ('0000ff', '21'), ('0000ff', '19'), ('999900', '29'), ('999900', '10'), ('999900', '21'), ('999900', '8'), ('999900', '37'), ('999900', '38'), ('ff0000', '32'), ('ff0000', '16'), ('0000ff', '16'), ('0000ff', '17'), ('0000ff', '5'), ('0000ff', '8'), ('999900', '5'), ('999900', '48'), ('999900', '1'), ('999900', '40'), ('999900', '53 BFD'), ('999900', '18'), ('ff0000', '32'), ('ff0000', '16'), ('0000ff', '16'), ('0000ff', '17'), ('0000ff', '5'), ('0000ff', '8'), ('999900', '5'), ('999900', '48'), ('999900', '1'), ('999900', '40'), ('999900', '53 BFD'), ('999900', '18'), ('0000ff', '8'), ('0000ff', '6'), ('ff0000', '22'), ('ff0000', '14'), ('bbbb00', '12'), ('bbbb00', '21'), 
('999900', '37'), ('999900', '33'), ('999900', '14'), ('999900', '5'), ('999900', '27'), ('999900', '47'), ('0000ff', '8'), ('0000ff', '6'), ('ff0000', '22'), ('ff0000', '14'), ('bbbb00', '12'), ('bbbb00', '21'), ('999900', '37'), ('999900', '33'), ('999900', '14'), ('999900', '5'), ('999900', '27'), ('999900', '47'), ('bbbb00', '2'), ('bbbb00', '28'), ('bbbb00', '13'), ('bbbb00', '9'), ('0000ff', '18'), ('0000ff', '18'), ('999900', '34'), ('999900', '13'), ('999900', '50'), ('999900', '32'), ('999900', '34'), ('999900', '14'), ('bbbb00', '2'), ('bbbb00', '28'), ('bbbb00', '13'), ('bbbb00', '9'), ('0000ff', '18'), ('0000ff', '18'), ('999900', '34'), ('999900', '13'), ('999900', '50'), ('999900', '32'), ('999900', '34'), ('999900', '14'), ('ff0000', '35'), ('ff0000', '5'), ('0000ff', '17'), ('0000ff', '11'), ('ff0000', '7'), ('ff0000', '23'), ('999900', '13'), ('999900', '35'), ('999900', '17'), ('999900', '35 SCP'), ('999900', '26'), ('999900', '36'), ('ff0000', '35'), ('ff0000', '5'), ('0000ff', '17'), ('0000ff', '11'), ('ff0000', '7'), ('ff0000', '23'), ('999900', '13'), ('999900', '35'), ('999900', '17'), ('999900', '35 SCP'), ('999900', '26'), ('999900', '36'), ('ff0000', '19'), ('ff0000', '14'), ('ff0000', '13'), ('ff0000', '21'), ('bbbb00', '21'), ('bbbb00', '6'), ('999900', '44'), ('999900', '28'), ('999900', '6'), ('999900', '16'), ('999900', '28'), ('999900', '45'), ('ff0000', '19'), ('ff0000', '14'), ('ff0000', '13'), ('ff0000', '21'), ('bbbb00', '21'), ('bbbb00', '6'), ('999900', '44'), ('999900', '28'), ('999900', '6'), ('999900', '16'), ('999900', '28'), ('999900', '45'), ('ff0000', '23'), ('ff0000', '24'), ('0000ff', '12'), ('0000ff', '12'), ('0000ff', '8'), ('0000ff', '20'), ('999900', '12'), ('999900', '24'), ('999900', '37'), ('999900', '42'), ('999900', '7'), ('999900', '41'), ('ff0000', '23'), ('ff0000', '24'), ('0000ff', '12'), ('0000ff', '12'), ('0000ff', '8'), ('0000ff', '20'), ('999900', '12'), ('999900', '24'), ('999900', '37'), ('999900', 
'42'), ('999900', '7'), ('999900', '41'), ('ff0000', '26'), ('ff0000', '12'), ('bbbb00', '11'), ('bbbb00', '10'), ('ff0000', '12'), ('ff0000', '8'), ('999900', '21'), ('999900', '21 SCP'), ('999900', '44'), ('999900', '50'), ('999900', '31'), ('999900', '26'), ('ff0000', '26'), ('ff0000', '12'), ('bbbb00', '11'), ('bbbb00', '10'), ('ff0000', '12'), ('ff0000', '8'), ('999900', '21'), ('999900', '21 SCP'), ('999900', '44'), ('999900', '50'), ('999900', '31'), ('999900', '26'), ('0000ff', '4'), ('0000ff', '15'), ('bbbb00', '7'), ('bbbb00', '7'), ('ff0000', '17'), ('ff0000', '3'), ('999900', '2'), ('999900', '52'), ('999900', '34'), ('999900', '48'), ('999900', '40'), ('999900', '37'), ('0000ff', '4'), ('0000ff', '15'), ('bbbb00', '7'), ('bbbb00', '7'), ('ff0000', '17'), ('ff0000', '3'), ('999900', '2'), ('999900', '52'), ('999900', '34'), ('999900', '48'), ('999900', '40'), ('999900', '37'), ('0000ff', '9'), ('0000ff', '22'), ('ff0000', '11'), ('ff0000', '4'), ('bbbb00', '7'), ('bbbb00', '9'), ('999900', '40'), ('999900', '19'), ('999900', '38'), ('999900', '41'), ('999900', '38'), ('999900', '24'), ('0000ff', '9'), ('0000ff', '22'), ('ff0000', '11'), ('ff0000', '4'), ('bbbb00', '7'), ('bbbb00', '9'), ('999900', '40'), ('999900', '19'), ('999900', '38'), ('999900', '41'), ('999900', '38'), ('999900', '24'), ('0000ff', '36'), ('0000ff', '9'), ('0000ff', '13'), ('0000ff', '21'), ('ff0000', '20'), ('ff0000', '5'), ('999900', '39'), ('999900', '32'), ('999900', '16'), ('999900', '35'), ('999900', '11'), ('999900', '50'), ('0000ff', '36'), ('0000ff', '9'), ('0000ff', '13'), ('0000ff', '21'), ('ff0000', '20'), ('ff0000', '5'), ('999900', '39'), ('999900', '32'), ('999900', '16'), ('999900', '35'), ('999900', '11'), ('999900', '50'), ('ff0000', '29'), ('ff0000', '13'), ('0000ff', '53 BFD'), ('0000ff', '10'), ('0000ff', '12'), ('0000ff', '16'), ('999900', '48'), ('999900', '31'), ('999900', '25'), ('999900', '30'), ('999900', '5'), ('999900', '30'), ('ff0000', '29'), 
('ff0000', '13'), ('0000ff', '53 BFD'), ('0000ff', '10'), ('0000ff', '12'), ('0000ff', '16'), ('999900', '48'), ('999900', '31'), ('999900', '25'), ('999900', '30'), ('999900', '5'), ('999900', '30'), ('0000ff', '1'), ('0000ff', '19'), ('0000ff', '6'), ('0000ff', '25'), ('0000ff', '20'), ('0000ff', '35'), ('999900', '45'), ('999900', '12'), ('999900', '11'), ('999900', '28'), ('999900', '47'), ('999900', '35'), ('0000ff', '1'), ('0000ff', '19'), ('0000ff', '6'), ('0000ff', '25'), ('0000ff', '20'), ('0000ff', '35'), ('999900', '45'), ('999900', '12'), ('999900', '11'), ('999900', '28'), ('999900', '47'), ('999900', '35'), ('0000ff', '24'), ('0000ff', '30'), ('bbbb00', '10'), ('bbbb00', '11'), ('ff0000', '18'), ('ff0000', '17'), ('999900', '42'), ('999900', '1'), ('999900', '27'), ('999900', '38'), ('999900', '41'), ('999900', '21'), ('0000ff', '24'), ('0000ff', '30'), ('bbbb00', '10'), ('bbbb00', '11'), ('ff0000', '18'), ('ff0000', '17'), ('999900', '42'), ('999900', '1'), ('999900', '27'), ('999900', '38'), ('999900', '41'), ('999900', '21'), ('0000ff', '34'), ('0000ff', '25'), ('0000ff', '8'), ('0000ff', '20'), ('ff0000', '10'), ('ff0000', '11'), ('999900', '28'), ('999900', '20'), ('999900', '43'), ('999900', '44'), ('999900', '24'), ('999900', '28'), ('0000ff', '34'), ('0000ff', '25'), ('0000ff', '8'), ('0000ff', '20'), ('ff0000', '10'), ('ff0000', '11'), ('999900', '28'), ('999900', '20'), ('999900', '43'), ('999900', '44'), ('999900', '24'), ('999900', '28'), ('bbbb00', '13'), ('bbbb00', '1'), ('ff0000', '5'), ('ff0000', '3'), ('ff0000', '8'), ('ff0000', '2'), ('999900', '53 UFD'), ('999900', '36'), ('999900', '23'), ('999900', '52'), ('999900', '44'), ('999900', '44'), ('bbbb00', '13'), ('bbbb00', '1'), ('ff0000', '5'), ('ff0000', '3'), ('ff0000', '8'), ('ff0000', '2'), ('999900', '53 UFD'), ('999900', '36'), ('999900', '23'), ('999900', '52'), ('999900', '44'), ('999900', '44'), ('0000ff', '28'), ('0000ff', '1'), ('bbbb00', '6'), ('bbbb00', '16'), ('ff0000', 
'29'), ('ff0000', '10'), ('999900', '41'), ('999900', '25'), ('999900', '40'), ('999900', '39'), ('999900', '25'), ('999900', '29'), ('0000ff', '28'), ('0000ff', '1'), ('bbbb00', '6'), ('bbbb00', '16'), ('ff0000', '29'), ('ff0000', '10'), ('999900', '41'), ('999900', '25'), ('999900', '40'), ('999900', '39'), ('999900', '25'), ('999900', '29'), ('bbbb00', '12'), ('bbbb00', '53 BFD'), ('ff0000', '14'), ('ff0000', '19'), ('0000ff', '4'), ('0000ff', '14'), ('999900', '20'), ('999900', '41'), ('999900', '24'), ('999900', '43'), ('999900', '36'), ('999900', '53'), ('bbbb00', '12'), ('bbbb00', '53 BFD'), ('ff0000', '14'), ('ff0000', '19'), ('0000ff', '4'), ('0000ff', '14'), ('999900', '20'), ('999900', '41'), ('999900', '24'), ('999900', '43'), ('999900', '36'), ('999900', '53'), ('ff0000', '1'), ('ff0000', '11'), ('0000ff', '15'), ('0000ff', '24'), ('ff0000', '26'), ('ff0000', '22'), ('999900', '33'), ('999900', '4'), ('999900', '51'), ('999900', '45'), ('999900', '53 BFD'), ('999900', '25'), ('ff0000', '1'), ('ff0000', '11'), ('0000ff', '15'), ('0000ff', '24'), ('ff0000', '26'), ('ff0000', '22'), ('999900', '33'), ('999900', '4'), ('999900', '51'), ('999900', '45'), ('999900', '53 BFD'), ('999900', '25'), ('0000ff', '11'), ('0000ff', '4'), ('bbbb00', '25'), ('bbbb00', '17'), ('0000ff', '25'), ('0000ff', '27'), ('999900', '43'), ('999900', '16'), ('999900', '26'), ('999900', '46'), ('999900', '23'), ('999900', '42'), ('0000ff', '11'), ('0000ff', '4'), ('bbbb00', '25'), ('bbbb00', '17'), ('0000ff', '25'), ('0000ff', '27'), ('999900', '43'), ('999900', '16'), ('999900', '26'), ('999900', '46'), ('999900', '23'), ('999900', '42'), ('bbbb00', '27'), ('bbbb00', '14'), ('ff0000', '9'), ('ff0000', '17'), ('ff0000', '13'), ('ff0000', '15'), ('999900', '25'), ('999900', '51'), ('999900', '48'), ('999900', '27'), ('999900', '48'), ('999900', '27'), ('bbbb00', '27'), ('bbbb00', '14'), ('ff0000', '9'), ('ff0000', '17'), ('ff0000', '13'), ('ff0000', '15'), ('999900', '25'), 
('999900', '51'), ('999900', '48'), ('999900', '27'), ('999900', '48'), ('999900', '27'), ('ff0000', '6'), ('ff0000', '9'), ('0000ff', '27'), ('0000ff', '22'), ('bbbb00', '36'), ('bbbb00', '16'), ('999900', '50'), ('999900', '50'), ('999900', '36'), ('999900', '24'), ('999900', '35'), ('999900', '23'), ('ff0000', '6'), ('ff0000', '9'), ('0000ff', '27'), ('0000ff', '22'), ('bbbb00', '36'), ('bbbb00', '16'), ('999900', '50'), ('999900', '50'), ('999900', '36'), ('999900', '24'), ('999900', '35'), ('999900', '23'), ('ff0000', '30'), ('ff0000', '10'), ('bbbb00', '9'), ('bbbb00', '18'), ('bbbb00', '23'), ('bbbb00', '15'), ('999900', '47'), ('999900', '17'), ('999900', '49'), ('999900', '49'), ('999900', '19'), ('999900', '49'), ('ff0000', '30'), ('ff0000', '10'), ('bbbb00', '9'), ('bbbb00', '18'), ('bbbb00', '23'), ('bbbb00', '15'), ('999900', '47'), ('999900', '17'), ('999900', '49'), ('999900', '49'), ('999900', '19'), ('999900', '49'), ('ff0000', '12'), ('ff0000', '28'), ('bbbb00', '5'), ('bbbb00', '8'), ('bbbb00', '25'), ('bbbb00', '12'), ('999900', '38'), ('999900', '39'), ('999900', '45'), ('999900', '36'), ('999900', '43'), ('999900', '46'), ('ff0000', '12'), ('ff0000', '28'), ('bbbb00', '5'), ('bbbb00', '8'), ('bbbb00', '25'), ('bbbb00', '12'), ('999900', '38'), ('999900', '39'), ('999900', '45'), ('999900', '36'), ('999900', '43'), ('999900', '46'), ('ff0000', '3'), ('ff0000', '53 BFD'), ('ff0000', '17'), ('ff0000', '22'), ('bbbb00', '14'), ('bbbb00', '13'), ('999900', '31'), ('999900', '49'), ('999900', '53 UFD'), ('999900', '51'), ('999900', '46'), ('999900', '22'), ('ff0000', '3'), ('ff0000', '53 BFD'), ('ff0000', '17'), ('ff0000', '22'), ('bbbb00', '14'), ('bbbb00', '13'), ('999900', '31'), ('999900', '49'), ('999900', '53 UFD'), ('999900', '51'), ('999900', '46'), ('999900', '22'), ('bbbb00', '20'), ('bbbb00', '12'), ('bbbb00', '20'), ('bbbb00', '6'), ('0000ff', '27'), ('0000ff', '9'), ('999900', '46'), ('999900', '47'), ('999900', '46'), ('999900', '47'), 
('999900', '39'), ('999900', '48'), ('bbbb00', '20'), ('bbbb00', '12'), ('bbbb00', '20'), ('bbbb00', '6'), ('0000ff', '27'), ('0000ff', '9'), ('999900', '46'), ('999900', '47'), ('999900', '46'), ('999900', '47'), ('999900', '39'), ('999900', '48'), ('0000ff', '19'), ('0000ff', '8'), ('ff0000', '27'), ('ff0000', '15'), ('bbbb00', '30'), ('bbbb00', '18'), ('999999', '1'), ('999999', '2'), ('999999', '15'), ('999999', '6'), ('999999', '15'), ('999999', '13'), ('0000ff', '19'), ('0000ff', '8'), ('ff0000', '27'), ('ff0000', '15'), ('bbbb00', '30'), ('bbbb00', '18'), ('999999', '1'), ('999999', '2'), ('999999', '15'), ('999999', '6'), ('999999', '15'), ('999999', '13'), ('bbbb00', '14'), ('bbbb00', '24'), ('0000ff', '23'), ('0000ff', '35'), ('bbbb00', '11'), ('bbbb00', '30'), ('999999', '3'), ('999999', '3'), ('999999', '13'), ('999999', '1'), ('999999', '7'), ('999999', '16'), ('bbbb00', '14'), ('bbbb00', '24'), ('0000ff', '23'), ('0000ff', '35'), ('bbbb00', '11'), ('bbbb00', '30'), ('999999', '3'), ('999999', '3'), ('999999', '13'), ('999999', '1'), ('999999', '7'), ('999999', '16'), ('bbbb00', '17'), ('bbbb00', '53 BFD'), ('bbbb00', '24'), ('bbbb00', '30'), ('0000ff', '19'), ('0000ff', '22'), ('999999', '25'), ('999999', '7'), ('999999', '6'), ('999999', '8'), ('999999', '9'), ('999999', '17'), ('bbbb00', '17'), ('bbbb00', '53 BFD'), ('bbbb00', '24'), ('bbbb00', '30'), ('0000ff', '19'), ('0000ff', '22'), ('999999', '25'), ('999999', '7'), ('999999', '6'), ('999999', '8'), ('999999', '9'), ('999999', '17'), ('0000ff', '44'), ('0000ff', '18'), ('ff0000', '12'), ('ff0000', '53 UFD'), ('ff0000', '6'), ('ff0000', '30'), ('999999', '19'), ('999999', '19'), ('999999', '2'), ('999999', '7'), ('999999', '5'), ('999999', '44'), ('0000ff', '44'), ('0000ff', '18'), ('ff0000', '12'), ('ff0000', '53 UFD'), ('ff0000', '6'), ('ff0000', '30'), ('999999', '19'), ('999999', '19'), ('999999', '2'), ('999999', '7'), ('999999', '5'), ('999999', '44'), ('ff0000', '46'), ('ff0000', '8'), 
('ff0000', '32'), ('ff0000', '11'), ('bbbb00', '9'), ('bbbb00', '24'), ('999999', '13'), ('999999', '28'), ('999999', '37'), ('999999', '24'), ('999999', '13'), ('999999', '1'), ('ff0000', '46'), ('ff0000', '8'), ('ff0000', '32'), ('ff0000', '11'), ('bbbb00', '9'), ('bbbb00', '24'), ('999999', '13'), ('999999', '28'), ('999999', '37'), ('999999', '24'), ('999999', '13'), ('999999', '1'), ('ff0000', '7'), ('ff0000', '27'), ('bbbb00', '21'), ('bbbb00', '22'), ('bbbb00', '10'), ('bbbb00', '29'), ('999999', '53 BFD'), ('999999', '33'), ('999999', '1'), ('999999', '37'), ('999999', '11'), ('999999', '3'), ('ff0000', '7'), ('ff0000', '27'), ('bbbb00', '21'), ('bbbb00', '22'), ('bbbb00', '10'), ('bbbb00', '29'), ('999999', '53 BFD'), ('999999', '33'), ('999999', '1'), ('999999', '37'), ('999999', '11'), ('999999', '3'), ('bbbb00', '8'), ('bbbb00', '29'), ('ff0000', '19'), ('ff0000', '32'), ('ff0000', '28'), ('ff0000', '21'), ('999999', '21'), ('999999', '6'), ('999999', '30'), ('999999', '9'), ('999999', '12'), ('999999', '20'), ('bbbb00', '8'), ('bbbb00', '29'), ('ff0000', '19'), ('ff0000', '32'), ('ff0000', '28'), ('ff0000', '21'), ('999999', '21'), ('999999', '6'), ('999999', '30'), ('999999', '9'), ('999999', '12'), ('999999', '20'), ('0000ff', '40'), ('0000ff', '34'), ('0000ff', '53 BFD'), ('0000ff', '2'), ('bbbb00', '18'), ('bbbb00', '32'), ('999999', '6'), ('999999', '30'), ('999999', '7'), ('999999', '14'), ('999999', '20'), ('999999', '11'), ('0000ff', '40'), ('0000ff', '34'), ('0000ff', '53 BFD'), ('0000ff', '2'), ('bbbb00', '18'), ('bbbb00', '32'), ('999999', '6'), ('999999', '30'), ('999999', '7'), ('999999', '14'), ('999999', '20'), ('999999', '11'), ('0000ff', '43'), ('0000ff', '13'), ('ff0000', '21'), ('ff0000', '23'), ('bbbb00', '22'), ('bbbb00', '25'), ('999999', '9'), ('999999', '22'), ('999999', '12'), ('999999', '16'), ('999999', '17'), ('999999', '21'), ('0000ff', '43'), ('0000ff', '13'), ('ff0000', '21'), ('ff0000', '23'), ('bbbb00', '22'), 
('bbbb00', '25'), ('999999', '9'), ('999999', '22'), ('999999', '12'), ('999999', '16'), ('999999', '17'), ('999999', '21'), ('0000ff', '7'), ('0000ff', '32'), ('ff0000', '30'), ('ff0000', '18'), ('bbbb00', '13'), ('bbbb00', '33'), ('999999', '16'), ('999999', '10'), ('999999', '5'), ('999999', '42'), ('999999', '53 BFD'), ('999999', '8'), ('0000ff', '7'), ('0000ff', '32'), ('ff0000', '30'), ('ff0000', '18'), ('bbbb00', '13'), ('bbbb00', '33'), ('999999', '16'), ('999999', '10'), ('999999', '5'), ('999999', '42'), ('999999', '53 BFD'), ('999999', '8'), ('bbbb00', '29'), ('bbbb00', '31'), ('0000ff', '31'), ('0000ff', '29'), ('ff0000', '24'), ('ff0000', '19'), ('999999', '36'), ('999999', '4'), ('999999', '25'), ('999999', '29'), ('999999', '1'), ('999999', '4'), ('bbbb00', '29'), ('bbbb00', '31'), ('0000ff', '31'), ('0000ff', '29'), ('ff0000', '24'), ('ff0000', '19'), ('999999', '36'), ('999999', '4'), ('999999', '25'), ('999999', '29'), ('999999', '1'), ('999999', '4'), ('bbbb00', '28'), ('bbbb00', '53 BFD'), ('ff0000', '8'), ('ff0000', '27'), ('ff0000', '19'), ('ff0000', '31'), ('999999', '18'), ('999999', '24'), ('999999', '33'), ('999999', '26'), ('999999', '2'), ('999999', '15'), ('bbbb00', '28'), ('bbbb00', '53 BFD'), ('ff0000', '8'), ('ff0000', '27'), ('ff0000', '19'), ('ff0000', '31'), ('999999', '18'), ('999999', '24'), ('999999', '33'), ('999999', '26'), ('999999', '2'), ('999999', '15'), ('ff0000', '40'), ('ff0000', '19'), ('ff0000', '20'), ('ff0000', '29'), ('0000ff', '35'), ('0000ff', '42'), ('999999', '20'), ('999999', '13'), ('999999', '10'), ('999999', '22'), ('999999', '4'), ('999999', '26'), ('ff0000', '40'), ('ff0000', '19'), ('ff0000', '20'), ('ff0000', '29'), ('0000ff', '35'), ('0000ff', '42'), ('999999', '20'), ('999999', '13'), ('999999', '10'), ('999999', '22'), ('999999', '4'), ('999999', '26'), ('ff0000', '10'), ('ff0000', '23'), ('0000ff', '22'), ('0000ff', '34'), ('bbbb00', '37'), ('bbbb00', '17'), ('999999', '14'), ('999999', '17'), 
('999999', '18'), ('999999', '15'), ('999999', '32'), ('999999', '30'), ('ff0000', '10'), ('ff0000', '23'), ('0000ff', '22'), ('0000ff', '34'), ('bbbb00', '37'), ('bbbb00', '17'), ('999999', '14'), ('999999', '17'), ('999999', '18'), ('999999', '15'), ('999999', '32'), ('999999', '30'), ('0000ff', '37'), ('0000ff', '11'), ('bbbb00', '26'), ('bbbb00', '29'), ('ff0000', '36'), ('ff0000', '27'), ('999999', '44'), ('999999', '18'), ('999999', '3'), ('999999', '19'), ('999999', '3'), ('999999', '27'), ('0000ff', '37'), ('0000ff', '11'), ('bbbb00', '26'), ('bbbb00', '29'), ('ff0000', '36'), ('ff0000', '27'), ('999999', '44'), ('999999', '18'), ('999999', '3'), ('999999', '19'), ('999999', '3'), ('999999', '27'), ('ff0000', '24'), ('ff0000', '15'), ('ff0000', '26'), ('ff0000', '34'), ('bbbb00', '19'), ('bbbb00', '31'), ('999999', '22'), ('999999', '39'), ('999999', '35'), ('999999', '2'), ('999999', '14'), ('999999', '12'), ('ff0000', '24'), ('ff0000', '15'), ('ff0000', '26'), ('ff0000', '34'), ('bbbb00', '19'), ('bbbb00', '31'), ('999999', '22'), ('999999', '39'), ('999999', '35'), ('999999', '2'), ('999999', '14'), ('999999', '12'), ('0000ff', '32'), ('0000ff', '31'), ('0000ff', '53 BFD'), ('0000ff', '23'), ('0000ff', '17'), ('0000ff', '21'), ('999999', '31'), ('999999', '8'), ('999999', '4'), ('999999', '27'), ('999999', '22'), ('999999', '18'), ('0000ff', '32'), ('0000ff', '31'), ('0000ff', '53 BFD'), ('0000ff', '23'), ('0000ff', '17'), ('0000ff', '21'), ('999999', '31'), ('999999', '8'), ('999999', '4'), ('999999', '27'), ('999999', '22'), ('999999', '18'), ('bbbb00', '11'), ('bbbb00', '6'), ('ff0000', '23'), ('ff0000', '33'), ('ff0000', '25'), ('ff0000', '20'), ('999999', '2'), ('999999', '43'), ('999999', '34'), ('999999', '36'), ('999999', '8'), ('999999', '40'), ('bbbb00', '11'), ('bbbb00', '6'), ('ff0000', '23'), ('ff0000', '33'), ('ff0000', '25'), ('ff0000', '20'), ('999999', '2'), ('999999', '43'), ('999999', '34'), ('999999', '36'), ('999999', '8'), 
('999999', '40'), ('bbbb00', '24'), ('bbbb00', '32'), ('bbbb00', '27'), ('bbbb00', '19'), ('0000ff', '26'), ('0000ff', '30'), ('999999', '29'), ('999999', '15'), ('999999', '19'), ('999999', '12'), ('999999', '33'), ('999999', '5'), ('bbbb00', '24'), ('bbbb00', '32'), ('bbbb00', '27'), ('bbbb00', '19'), ('0000ff', '26'), ('0000ff', '30'), ('999999', '29'), ('999999', '15'), ('999999', '19'), ('999999', '12'), ('999999', '33'), ('999999', '5'), ('0000ff', '29'), ('0000ff', '5'), ('bbbb00', '32'), ('bbbb00', '34'), ('0000ff', '44'), ('0000ff', '32'), ('999999', '5'), ('999999', '1'), ('999999', '11'), ('999999', '32'), ('999999', '26'), ('999999', '38'), ('0000ff', '29'), ('0000ff', '5'), ('bbbb00', '32'), ('bbbb00', '34'), ('0000ff', '44'), ('0000ff', '32'), ('999999', '5'), ('999999', '1'), ('999999', '11'), ('999999', '32'), ('999999', '26'), ('999999', '38'), ('0000ff', '25'), ('0000ff', '38'), ('bbbb00', '28'), ('bbbb00', '24'), ('0000ff', '31'), ('0000ff', '28'), ('999999', '15'), ('999999', '20'), ('999999', '21'), ('999999', '39'), ('999999', '6'), ('999999', '10'), ('0000ff', '25'), ('0000ff', '38'), ('bbbb00', '28'), ('bbbb00', '24'), ('0000ff', '31'), ('0000ff', '28'), ('999999', '15'), ('999999', '20'), ('999999', '21'), ('999999', '39'), ('999999', '6'), ('999999', '10'), ('0000ff', '22'), ('0000ff', '28'), ('bbbb00', '14'), ('bbbb00', '20'), ('ff0000', '11'), ('ff0000', '16'), ('999999', '10'), ('999999', '27'), ('999999', '26'), ('999999', '44'), ('999999', '53 BFD'), ('999999', '19'), ('0000ff', '22'), ('0000ff', '28'), ('bbbb00', '14'), ('bbbb00', '20'), ('ff0000', '11'), ('ff0000', '16'), ('999999', '10'), ('999999', '27'), ('999999', '26'), ('999999', '44'), ('999999', '53 BFD'), ('999999', '19'), ('bbbb00', '34'), ('bbbb00', '20'), ('0000ff', '24'), ('0000ff', '26'), ('ff0000', '27'), ('ff0000', '26'), ('999999', '38'), ('999999', '5'), ('999999', '14'), ('999999', '13'), ('999999', '21'), ('999999', '34'), ('bbbb00', '34'), ('bbbb00', '20'), 
('0000ff', '24'), ('0000ff', '26'), ('ff0000', '27'), ('ff0000', '26'), ('999999', '38'), ('999999', '5'), ('999999', '14'), ('999999', '13'), ('999999', '21'), ('999999', '34'), ('ff0000', '36'), ('ff0000', '21'), ('ff0000', '15'), ('ff0000', '13'), ('ff0000', '5'), ('ff0000', '28'), ('999999', '4'), ('999999', '40'), ('999999', '48'), ('999999', '5'), ('999999', '53 RET'), ('999999', '32'), ('ff0000', '36'), ('ff0000', '21'), ('ff0000', '15'), ('ff0000', '13'), ('ff0000', '5'), ('ff0000', '28'), ('999999', '4'), ('999999', '40'), ('999999', '48'), ('999999', '5'), ('999999', '53 RET'), ('999999', '32'), ('ff0000', '11'), ('ff0000', '26'), ('ff0000', '46'), ('ff0000', '16'), ('ff0000', '35'), ('ff0000', '53 UFD'), ('999999', '12'), ('999999', '46'), ('999999', '31'), ('999999', '18'), ('999999', '16'), ('999999', '2'), ('ff0000', '11'), ('ff0000', '26'), ('ff0000', '46'), ('ff0000', '16'), ('ff0000', '35'), ('ff0000', '53 UFD'), ('999999', '12'), ('999999', '46'), ('999999', '31'), ('999999', '18'), ('999999', '16'), ('999999', '2'), ('bbbb00', '41'), ('bbbb00', '8'), ('ff0000', '31'), ('ff0000', '38.6 DPI'), ('0000ff', '24'), ('0000ff', '12'), ('999999', '17'), ('999999', '16'), ('999999', '17'), ('999999', '28'), ('999999', '53 RET'), ('999999', '22'), ('bbbb00', '41'), ('bbbb00', '8'), ('ff0000', '31'), ('ff0000', '38.6 DPI'), ('0000ff', '24'), ('0000ff', '12'), ('999999', '17'), ('999999', '16'), ('999999', '17'), ('999999', '28'), ('999999', '53 RET'), ('999999', '22'), ('ff0000', '31'), ('ff0000', '29'), ('bbbb00', '18'), ('bbbb00', '21'), ('ff0000', '31'), ('ff0000', '25'), ('999999', '27'), ('999999', '25'), ('999999', '16'), ('999999', '33'), ('999999', '23'), ('999999', '7'), ('ff0000', '31'), ('ff0000', '29'), ('bbbb00', '18'), ('bbbb00', '21'), ('ff0000', '31'), ('ff0000', '25'), ('999999', '27'), ('999999', '25'), ('999999', '16'), ('999999', '33'), ('999999', '23'), ('999999', '7'), ('0000ff', '41'), ('0000ff', '43'), ('0000ff', '19'), ('0000ff', 
'13'), ('ff0000', '14'), ('ff0000', '14'), ('999999', '39'), ('999999', '14'), ('999999', '9'), ('999999', '50'), ('999999', '28'), ('999999', '33'), ('0000ff', '41'), ('0000ff', '43'), ('0000ff', '19'), ('0000ff', '13'), ('ff0000', '14'), ('ff0000', '14'), ('999999', '39'), ('999999', '14'), ('999999', '9'), ('999999', '50'), ('999999', '28'), ('999999', '33'), ('0000ff', '20'), ('0000ff', '35'), ('bbbb00', '16'), ('bbbb00', '25'), ('0000ff', '22'), ('0000ff', '29'), ('999999', '11'), ('999999', '12'), ('999999', '38'), ('999999', '17'), ('999999', '53 BFD'), ('999999', '35'), ('0000ff', '20'), ('0000ff', '35'), ('bbbb00', '16'), ('bbbb00', '25'), ('0000ff', '22'), ('0000ff', '29'), ('999999', '11'), ('999999', '12'), ('999999', '38'), ('999999', '17'), ('999999', '53 BFD'), ('999999', '35'), ('0000ff', '26'), ('0000ff', '29'), ('0000ff', '53 BFD'), ('0000ff', '14'), ('bbbb00', '24'), ('bbbb00', '20'), ('999999', '33'), ('999999', '35'), ('999999', '8'), ('999999', '35'), ('999999', '19'), ('999999', '24'), ('0000ff', '26'), ('0000ff', '29'), ('0000ff', '53 BFD'), ('0000ff', '14'), ('bbbb00', '24'), ('bbbb00', '20'), ('999999', '33'), ('999999', '35'), ('999999', '8'), ('999999', '35'), ('999999', '19'), ('999999', '24'), ('0000ff', '38'), ('0000ff', '27'), ('ff0000', '18'), ('ff0000', '26'), ('bbbb00', '32'), ('bbbb00', '22'), ('999999', '32'), ('999999', '21'), ('999999', '24'), ('999999', '23'), ('999999', '10'), ('999999', '39'), ('0000ff', '38'), ('0000ff', '27'), ('ff0000', '18'), ('ff0000', '26'), ('bbbb00', '32'), ('bbbb00', '22'), ('999999', '32'), ('999999', '21'), ('999999', '24'), ('999999', '23'), ('999999', '10'), ('999999', '39'), ('0000ff', '33'), ('0000ff', '23'), ('bbbb00', '29'), ('bbbb00', '38'), ('bbbb00', '28'), ('bbbb00', '26'), ('999999', '28'), ('999999', '26'), ('999999', '29'), ('999999', '4'), ('999999', '25'), ('999999', '31'), ('0000ff', '33'), ('0000ff', '23'), ('bbbb00', '29'), ('bbbb00', '38'), ('bbbb00', '28'), ('bbbb00', '26'), 
('999999', '28'), ('999999', '26'), ('999999', '29'), ('999999', '4'), ('999999', '25'), ('999999', '31'), ('bbbb00', '23'), ('bbbb00', '21'), ('bbbb00', '19'), ('bbbb00', '28'), ('bbbb00', '26'), ('bbbb00', '14'), ('999999', '53 BFD'), ('999999', '53 BFD'), ('999999', '23'), ('999999', '11'), ('999999', '18'), ('999999', '43'), ('bbbb00', '23'), ('bbbb00', '21'), ('bbbb00', '19'), ('bbbb00', '28'), ('bbbb00', '26'), ('bbbb00', '14'), ('999999', '53 BFD'), ('999999', '53 BFD'), ('999999', '23'), ('999999', '11'), ('999999', '18'), ('999999', '43'), ('bbbb00', '16'), ('bbbb00', '53 BFD'), ('ff0000', '25'), ('ff0000', '40'), ('ff0000', '21'), ('ff0000', '24'), ('999999', '8'), ('999999', '37'), ('999999', '46'), ('999999', '34'), ('999999', '53 BFD'), ('999999', '6'), ('bbbb00', '16'), ('bbbb00', '53 BFD'), ('ff0000', '25'), ('ff0000', '40'), ('ff0000', '21'), ('ff0000', '24'), ('999999', '8'), ('999999', '37'), ('999999', '46'), ('999999', '34'), ('999999', '53 BFD'), ('999999', '6'), ('ff0000', '27'), ('ff0000', '20'), ('ff0000', '33'), ('ff0000', '24'), ('bbbb00', '16'), ('bbbb00', '23'), ('999999', '26'), ('999999', '32'), ('999999', '41'), ('999999', '46'), ('999999', '30'), ('999999', '29'), ('ff0000', '27'), ('ff0000', '20'), ('ff0000', '33'), ('ff0000', '24'), ('bbbb00', '16'), ('bbbb00', '23'), ('999999', '26'), ('999999', '32'), ('999999', '41'), ('999999', '46'), ('999999', '30'), ('999999', '29'), ('bbbb00', '3'), ('bbbb00', '18'), ('ff0000', '43'), ('ff0000', '37.6 DPI'), ('0000ff', '40'), ('0000ff', '31'), ('999999', '37'), ('999999', '11'), ('999999', '28'), ('999999', '41'), ('999999', '29'), ('999999', '36'), ('bbbb00', '3'), ('bbbb00', '18'), ('ff0000', '43'), ('ff0000', '37.6 DPI'), ('0000ff', '40'), ('0000ff', '31'), ('999999', '37'), ('999999', '11'), ('999999', '28'), ('999999', '41'), ('999999', '29'), ('999999', '36'), ('0000ff', '18'), ('0000ff', '14'), ('bbbb00', '30'), ('bbbb00', '32'), ('ff0000', '40'), ('ff0000', '38'), ('999999', '7'), 
('999999', '34'), ('999999', '22'), ('999999', '40'), ('999999', '37'), ('999999', '50'), ('0000ff', '18'), ('0000ff', '14'), ('bbbb00', '30'), ('bbbb00', '32'), ('ff0000', '40'), ('ff0000', '38'), ('999999', '7'), ('999999', '34'), ('999999', '22'), ('999999', '40'), ('999999', '37'), ('999999', '50'), ('ff0000', '47'), ('ff0000', '53 BFD'), ('ff0000', '28'), ('ff0000', '28'), ('0000ff', '29'), ('0000ff', '13'), ('999999', '24'), ('999999', '41'), ('999999', '47'), ('999999', '30'), ('999999', '40'), ('999999', '9'), ('ff0000', '47'), ('ff0000', '53 BFD'), ('ff0000', '28'), ('ff0000', '28'), ('0000ff', '29'), ('0000ff', '13'), ('999999', '24'), ('999999', '41'), ('999999', '47'), ('999999', '30'), ('999999', '40'), ('999999', '9'), ('bbbb00', '35'), ('bbbb00', '15'), ('0000ff', '20'), ('0000ff', '18'), ('0000ff', '36'), ('0000ff', '25'), ('999999', '53 RET'), ('999999', '45'), ('999999', '42'), ('999999', '3'), ('999999', '53 BFD'), ('999999', '41'), ('bbbb00', '35'), ('bbbb00', '15'), ('0000ff', '20'), ('0000ff', '18'), ('0000ff', '36'), ('0000ff', '25'), ('999999', '53 RET'), ('999999', '45'), ('999999', '42'), ('999999', '3'), ('999999', '53 BFD'), ('999999', '41'), ('ff0000', '51'), ('ff0000', '22'), ('0000ff', '25'), ('0000ff', '30'), ('ff0000', '16'), ('ff0000', '12'), ('999999', '43'), ('999999', '44'), ('999999', '32'), ('999999', '43'), ('999999', '36'), ('999999', '46'), ('ff0000', '51'), ('ff0000', '22'), ('0000ff', '25'), ('0000ff', '30'), ('ff0000', '16'), ('ff0000', '12'), ('999999', '43'), ('999999', '44'), ('999999', '32'), ('999999', '43'), ('999999', '36'), ('999999', '46'), ('bbbb00', '45'), ('bbbb00', '23'), ('ff0000', '24'), ('ff0000', '30'), ('ff0000', '30'), ('ff0000', '18'), ('999999', '41'), ('999999', '38'), ('999999', '39'), ('999999', '47'), ('999999', '53 BFD'), ('999999', '14'), ('bbbb00', '45'), ('bbbb00', '23'), ('ff0000', '24'), ('ff0000', '30'), ('ff0000', '30'), ('ff0000', '18'), ('999999', '41'), ('999999', '38'), ('999999', 
'39'), ('999999', '47'), ('999999', '53 BFD'), ('999999', '14'), ('0000ff', '15'), ('0000ff', '53 BFD'), ('0000ff', '35'), ('0000ff', '27'), ('0000ff', '34'), ('0000ff', '37'), ('999999', '23'), ('999999', '53 BFD'), ('999999', '27'), ('999999', '31'), ('999999', '53 BFD'), ('999999', '25'), ('0000ff', '15'), ('0000ff', '53 BFD'), ('0000ff', '35'), ('0000ff', '27'), ('0000ff', '34'), ('0000ff', '37'), ('999999', '23'), ('999999', '53 BFD'), ('999999', '27'), ('999999', '31'), ('999999', '53 BFD'), ('999999', '25'), ('0000ff', '21'), ('0000ff', '10'), ('0000ff', '26'), ('0000ff', '28'), ('0000ff', '38'), ('0000ff', '40'), ('999999', '34'), ('999999', '53 BFD'), ('999999', '49'), ('999999', '25'), ('999999', '35'), ('999999', '49'), ('0000ff', '21'), ('0000ff', '10'), ('0000ff', '26'), ('0000ff', '28'), ('0000ff', '38'), ('0000ff', '40'), ('999999', '34'), ('999999', '53 BFD'), ('999999', '49'), ('999999', '25'), ('999999', '35'), ('999999', '49'), ('0000ff', '23'), ('0000ff', '24'), ('bbbb00', '23'), ('bbbb00', '39'), ('ff0000', '37'), ('ff0000', '40'), ('999999', '40'), ('999999', '53 BFD'), ('999999', '36'), ('999999', '21'), ('999999', '24'), ('999999', '48'), ('0000ff', '23'), ('0000ff', '24'), ('bbbb00', '23'), ('bbbb00', '39'), ('ff0000', '37'), ('ff0000', '40'), ('999999', '40'), ('999999', '53 BFD'), ('999999', '36'), ('999999', '21'), ('999999', '24'), ('999999', '48'), ('bbbb00', '19'), ('bbbb00', '25'), ('bbbb00', '36'), ('bbbb00', '37'), ('0000ff', '30'), ('0000ff', '44'), ('999999', '30'), ('999999', '31'), ('999999', '40'), ('999999', '45'), ('999999', '31'), ('999999', '37'), ('bbbb00', '19'), ('bbbb00', '25'), ('bbbb00', '36'), ('bbbb00', '37'), ('0000ff', '30'), ('0000ff', '44'), ('999999', '30'), ('999999', '31'), ('999999', '40'), ('999999', '45'), ('999999', '31'), ('999999', '37'), ('bbbb00', '30'), ('bbbb00', '11'), ('0000ff', '53 BFD'), ('0000ff', '36'), ('bbbb00', '31'), ('bbbb00', '39'), ('999999', '53 BFD'), ('999999', '29'), ('999999', '53 
RET'), ('999999', '20'), ('999999', '27'), ('999999', '42'), ('bbbb00', '30'), ('bbbb00', '11'), ('0000ff', '53 BFD'), ('0000ff', '36'), ('bbbb00', '31'), ('bbbb00', '39'), ('999999', '53 BFD'), ('999999', '29'), ('999999', '53 RET'), ('999999', '20'), ('999999', '27'), ('999999', '42'), ('ff0000', '21'), ('ff0000', '53 BFD'), ('bbbb00', '35'), ('bbbb00', '23'), ('bbbb00', '42'), ('bbbb00', '27'), ('999999', '42'), ('999999', '36'), ('999999', '45'), ('999999', '10'), ('999999', '39'), ('999999', '47'), ('ff0000', '21'), ('ff0000', '53 BFD'), ('bbbb00', '35'), ('bbbb00', '23'), ('bbbb00', '42'), ('bbbb00', '27'), ('999999', '42'), ('999999', '36'), ('999999', '45'), ('999999', '10'), ('999999', '39'), ('999999', '47'), ('bbbb00', '18'), ('bbbb00', '19'), ('bbbb00', '34'), ('bbbb00', '33'), ('bbbb00', '29'), ('bbbb00', '37'), ('999999', '53 RET'), ('999999', '53 BFD'), ('999999', '20'), ('999999', '49'), ('999999', '38'), ('999999', '28'), ('bbbb00', '18'), ('bbbb00', '19'), ('bbbb00', '34'), ('bbbb00', '33'), ('bbbb00', '29'), ('bbbb00', '37'), ('999999', '53 RET'), ('999999', '53 BFD'), ('999999', '20'), ('999999', '49'), ('999999', '38'), ('999999', '28'), ('bbbb00', '48'), ('bbbb00', '37'), ('ff0000', '40'), ('ff0000', '12'), ('bbbb00', '20'), ('bbbb00', '35'), ('999999', '53 BFD'), ('999999', '23'), ('999999', '43'), ('999999', '48'), ('999999', '53 BFD'), ('999999', '23'), ('bbbb00', '48'), ('bbbb00', '37'), ('ff0000', '40'), ('ff0000', '12'), ('bbbb00', '20'), ('bbbb00', '35'), ('999999', '53 BFD'), ('999999', '23'), ('999999', '43'), ('999999', '48'), ('999999', '53 BFD'), ('999999', '23'), ('bbbb00', '26'), ('bbbb00', '27'), ('ff0000', '38'), ('ff0000', '42'), ('bbbb00', '27'), ('bbbb00', '28'), ('999999', '53 BFD'), ('999999', '9'), ('999999', '44'), ('999999', '51'), ('999999', '34'), ('999999', '51'), ('bbbb00', '26'), ('bbbb00', '27'), ('ff0000', '38'), ('ff0000', '42'), ('bbbb00', '27'), ('bbbb00', '28'), ('999999', '53 BFD'), ('999999', '9'), 
('999999', '44'), ('999999', '51'), ('999999', '34'), ('999999', '51'), ('0000ff', '17'), ('0000ff', '33'), ('bbbb00', '41'), ('bbbb00', '13'), ('0000ff', '10'), ('0000ff', '15'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53'), ('0000ff', '17'), ('0000ff', '33'), ('bbbb00', '41'), ('bbbb00', '13'), ('0000ff', '10'), ('0000ff', '15'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53'), ('bbbb00', '31'), ('bbbb00', '35'), ('bbbb00', '33'), ('bbbb00', '26'), ('0000ff', '28'), ('0000ff', '23'), ('999999', '35'), ('999999', '42'), ('999999', '53 UFD'), ('999999', '38'), ('999999', '53 RET'), ('999999', '45'), ('bbbb00', '31'), ('bbbb00', '35'), ('bbbb00', '33'), ('bbbb00', '26'), ('0000ff', '28'), ('0000ff', '23'), ('999999', '35'), ('999999', '42'), ('999999', '53 UFD'), ('999999', '38'), ('999999', '53 RET'), ('999999', '45'), ('ff0000', '33'), ('ff0000', '53 BFD'), ('bbbb00', '22'), ('bbbb00', '31'), ('ff0000', '34'), ('ff0000', '29'), ('8b4513', '9'), ('8b4513', '21'), ('8b4513', '3'), ('8b4513', '9'), ('8b4513', '1'), ('8b4513', '2'), ('ff0000', '33'), ('ff0000', '53 BFD'), ('bbbb00', '22'), ('bbbb00', '31'), ('ff0000', '34'), ('ff0000', '29'), ('8b4513', '9'), ('8b4513', '21'), ('8b4513', '3'), ('8b4513', '9'), ('8b4513', '1'), ('8b4513', '2'), ('bbbb00', '38'), ('bbbb00', '9'), ('ff0000', '42'), ('ff0000', '37'), ('bbbb00', '38'), ('bbbb00', '41'), ('8b4513', '7'), ('8b4513', '28'), ('8b4513', '1'), ('8b4513', '3'), ('8b4513', '2'), ('8b4513', '8'), ('bbbb00', '38'), ('bbbb00', '9'), ('ff0000', '42'), ('ff0000', '37'), ('bbbb00', '38'), ('bbbb00', '41'), ('8b4513', '7'), ('8b4513', '28'), ('8b4513', '1'), ('8b4513', '3'), ('8b4513', '2'), ('8b4513', '8'), ('ff0000', '16'), ('ff0000', '53 BFD'), ('0000ff', '34'), ('0000ff', '31'), ('ff0000', '32'), ('ff0000', '42'), ('8b4513', '8'), ('8b4513', '6'), ('8b4513', 
'4'), ('8b4513', '25'), ('8b4513', '12'), ('8b4513', '16'), ('ff0000', '16'), ('ff0000', '53 BFD'), ('0000ff', '34'), ('0000ff', '31'), ('ff0000', '32'), ('ff0000', '42'), ('8b4513', '8'), ('8b4513', '6'), ('8b4513', '4'), ('8b4513', '25'), ('8b4513', '12'), ('8b4513', '16'), ('ff0000', '22'), ('ff0000', '53 BFD'), ('ff0000', '36'), ('ff0000', '31'), ('ff0000', '33'), ('ff0000', '32'), ('8b4513', '17'), ('8b4513', '4'), ('8b4513', '2'), ('8b4513', '26'), ('8b4513', '16'), ('8b4513', '7'), ('ff0000', '22'), ('ff0000', '53 BFD'), ('ff0000', '36'), ('ff0000', '31'), ('ff0000', '33'), ('ff0000', '32'), ('8b4513', '17'), ('8b4513', '4'), ('8b4513', '2'), ('8b4513', '26'), ('8b4513', '16'), ('8b4513', '7'), ('bbbb00', '36'), ('bbbb00', '53 BFD'), ('0000ff', '18'), ('0000ff', '32'), ('ff0000', '39'), ('ff0000', '34'), ('8b4513', '3'), ('8b4513', '49'), ('8b4513', '26'), ('8b4513', '6'), ('8b4513', '6'), ('8b4513', '1'), ('bbbb00', '36'), ('bbbb00', '53 BFD'), ('0000ff', '18'), ('0000ff', '32'), ('ff0000', '39'), ('ff0000', '34'), ('8b4513', '3'), ('8b4513', '49'), ('8b4513', '26'), ('8b4513', '6'), ('8b4513', '6'), ('8b4513', '1'), ('bbbb00', '25'), ('bbbb00', '34'), ('bbbb00', '31'), ('bbbb00', '27'), ('0000ff', '33'), ('0000ff', '33'), ('8b4513', '53 UFD'), ('8b4513', '3'), ('8b4513', '17'), ('8b4513', '4'), ('8b4513', '19'), ('8b4513', '18'), ('bbbb00', '25'), ('bbbb00', '34'), ('bbbb00', '31'), ('bbbb00', '27'), ('0000ff', '33'), ('0000ff', '33'), ('8b4513', '53 UFD'), ('8b4513', '3'), ('8b4513', '17'), ('8b4513', '4'), ('8b4513', '19'), ('8b4513', '18'), ('ff0000', '25'), ('ff0000', '53 DSQ'), ('0000ff', '29'), ('0000ff', '42'), ('0000ff', '32'), ('0000ff', '24'), ('8b4513', '10'), ('8b4513', '14'), ('8b4513', '11'), ('8b4513', '17'), ('8b4513', '9'), ('8b4513', '53'), ('ff0000', '25'), ('ff0000', '53 DSQ'), ('0000ff', '29'), ('0000ff', '42'), ('0000ff', '32'), ('0000ff', '24'), ('8b4513', '10'), ('8b4513', '14'), ('8b4513', '11'), ('8b4513', '17'), ('8b4513', '9'), 
('8b4513', '53'), ('0000ff', '13'), ('0000ff', '37'), ('ff0000', '29'), ('ff0000', '41'), ('bbbb00', '43'), ('bbbb00', '34'), ('8b4513', '1'), ('8b4513', '33'), ('8b4513', '7'), ('8b4513', '21'), ('8b4513', '28'), ('8b4513', '11'), ('0000ff', '13'), ('0000ff', '37'), ('ff0000', '29'), ('ff0000', '41'), ('bbbb00', '43'), ('bbbb00', '34'), ('8b4513', '1'), ('8b4513', '33'), ('8b4513', '7'), ('8b4513', '21'), ('8b4513', '28'), ('8b4513', '11'), ('bbbb00', '22'), ('bbbb00', '39'), ('ff0000', '41'), ('ff0000', '43'), ('0000ff', '23'), ('0000ff', '38'), ('8b4513', '11'), ('8b4513', '11'), ('8b4513', '21'), ('8b4513', '36'), ('8b4513', '3'), ('8b4513', '12'), ('bbbb00', '22'), ('bbbb00', '39'), ('ff0000', '41'), ('ff0000', '43'), ('0000ff', '23'), ('0000ff', '38'), ('8b4513', '11'), ('8b4513', '11'), ('8b4513', '21'), ('8b4513', '36'), ('8b4513', '3'), ('8b4513', '12'), ('ff0000', '38'), ('ff0000', '53 BFD'), ('ff0000', '37'), ('ff0000', '25'), ('bbbb00', '34'), ('bbbb00', '19'), ('8b4513', '24'), ('8b4513', '22'), ('8b4513', '14'), ('8b4513', '31'), ('8b4513', '14'), ('8b4513', '6'), ('ff0000', '38'), ('ff0000', '53 BFD'), ('ff0000', '37'), ('ff0000', '25'), ('bbbb00', '34'), ('bbbb00', '19'), ('8b4513', '24'), ('8b4513', '22'), ('8b4513', '14'), ('8b4513', '31'), ('8b4513', '14'), ('8b4513', '6'), ('bbbb00', '32'), ('bbbb00', '53 BFD'), ('ff0000', '39'), ('ff0000', '20'), ('bbbb00', '45'), ('bbbb00', '45'), ('8b4513', '15'), ('8b4513', '1'), ('8b4513', '10'), ('8b4513', '16'), ('8b4513', '35'), ('8b4513', '15'), ('bbbb00', '32'), ('bbbb00', '53 BFD'), ('ff0000', '39'), ('ff0000', '20'), ('bbbb00', '45'), ('bbbb00', '45'), ('8b4513', '15'), ('8b4513', '1'), ('8b4513', '10'), ('8b4513', '16'), ('8b4513', '35'), ('8b4513', '15'), ('bbbb00', '44'), ('bbbb00', '22'), ('0000ff', '37'), ('0000ff', '45'), ('ff0000', '41'), ('ff0000', '41'), ('8b4513', '6'), ('8b4513', '20'), ('8b4513', '53 RET'), ('8b4513', '2'), ('8b4513', '5'), ('8b4513', '14'), ('bbbb00', '44'), ('bbbb00', 
'22'), ('0000ff', '37'), ('0000ff', '45'), ('ff0000', '41'), ('ff0000', '41'), ('8b4513', '6'), ('8b4513', '20'), ('8b4513', '53 RET'), ('8b4513', '2'), ('8b4513', '5'), ('8b4513', '14'), ('0000ff', '45'), ('0000ff', '40'), ('0000ff', '30'), ('0000ff', '37'), ('0000ff', '16'), ('0000ff', '26'), ('8b4513', '28'), ('8b4513', '25'), ('8b4513', '13'), ('8b4513', '20'), ('8b4513', '22'), ('8b4513', '22'), ('0000ff', '45'), ('0000ff', '40'), ('0000ff', '30'), ('0000ff', '37'), ('0000ff', '16'), ('0000ff', '26'), ('8b4513', '28'), ('8b4513', '25'), ('8b4513', '13'), ('8b4513', '20'), ('8b4513', '22'), ('8b4513', '22'), ('0000ff', '50'), ('0000ff', '36'), ('bbbb00', '38'), ('bbbb00', '41'), ('bbbb00', '41'), ('bbbb00', '44'), ('8b4513', '27'), ('8b4513', '9'), ('8b4513', '28'), ('8b4513', '10'), ('8b4513', '7'), ('8b4513', '3'), ('0000ff', '50'), ('0000ff', '36'), ('bbbb00', '38'), ('bbbb00', '41'), ('bbbb00', '41'), ('bbbb00', '44'), ('8b4513', '27'), ('8b4513', '9'), ('8b4513', '28'), ('8b4513', '10'), ('8b4513', '7'), ('8b4513', '3'), ('ff0000', '42'), ('ff0000', '53 BFD'), ('0000ff', '39'), ('0000ff', '47'), ('0000ff', '39'), ('0000ff', '41'), ('8b4513', '5'), ('8b4513', '18'), ('8b4513', '8'), ('8b4513', '22'), ('8b4513', '13'), ('8b4513', '13'), ('ff0000', '42'), ('ff0000', '53 BFD'), ('0000ff', '39'), ('0000ff', '47'), ('0000ff', '39'), ('0000ff', '41'), ('8b4513', '5'), ('8b4513', '18'), ('8b4513', '8'), ('8b4513', '22'), ('8b4513', '13'), ('8b4513', '13'), ('0000ff', '31'), ('0000ff', '39'), ('0000ff', '28'), ('0000ff', '43'), ('0000ff', '37'), ('0000ff', '36'), ('8b4513', '32'), ('8b4513', '7'), ('8b4513', '23'), ('8b4513', '18'), ('8b4513', '24'), ('8b4513', '5'), ('0000ff', '31'), ('0000ff', '39'), ('0000ff', '28'), ('0000ff', '43'), ('0000ff', '37'), ('0000ff', '36'), ('8b4513', '32'), ('8b4513', '7'), ('8b4513', '23'), ('8b4513', '18'), ('8b4513', '24'), ('8b4513', '5'), ('bbbb00', '42'), ('bbbb00', '30'), ('ff0000', '34'), ('ff0000', '39'), ('bbbb00', '33'), 
('bbbb00', '43'), ('8b4513', '4'), ('8b4513', '24'), ('8b4513', '27'), ('8b4513', '34'), ('8b4513', '8'), ('8b4513', '9'), ('bbbb00', '42'), ('bbbb00', '30'), ('ff0000', '34'), ('ff0000', '39'), ('bbbb00', '33'), ('bbbb00', '43'), ('8b4513', '4'), ('8b4513', '24'), ('8b4513', '27'), ('8b4513', '34'), ('8b4513', '8'), ('8b4513', '9'), ('ff0000', '28'), ('ff0000', '53 BFD'), ('ff0000', '35'), ('ff0000', '44'), ('bbbb00', '35'), ('bbbb00', '38'), ('8b4513', '26'), ('8b4513', '38'), ('8b4513', '16'), ('8b4513', '19'), ('8b4513', '10'), ('8b4513', '4'), ('ff0000', '28'), ('ff0000', '53 BFD'), ('ff0000', '35'), ('ff0000', '44'), ('bbbb00', '35'), ('bbbb00', '38'), ('8b4513', '26'), ('8b4513', '38'), ('8b4513', '16'), ('8b4513', '19'), ('8b4513', '10'), ('8b4513', '4'), ('bbbb00', '46'), ('bbbb00', '17'), ('bbbb00', '37'), ('bbbb00', '35'), ('0000ff', '42'), ('0000ff', '34'), ('8b4513', '13'), ('8b4513', '23'), ('8b4513', '53 DSQ'), ('8b4513', '15'), ('8b4513', '26'), ('8b4513', '10'), ('bbbb00', '46'), ('bbbb00', '17'), ('bbbb00', '37'), ('bbbb00', '35'), ('0000ff', '42'), ('0000ff', '34'), ('8b4513', '13'), ('8b4513', '23'), ('8b4513', '53 DSQ'), ('8b4513', '15'), ('8b4513', '26'), ('8b4513', '10'), ('ff0000', '4'), ('ff0000', '30'), ('0000ff', '40'), ('0000ff', '39'), ('0000ff', '45'), ('0000ff', '46'), ('8b4513', '12'), ('8b4513', '43'), ('8b4513', '15'), ('8b4513', '12'), ('8b4513', '32'), ('8b4513', '26'), ('ff0000', '4'), ('ff0000', '30'), ('0000ff', '40'), ('0000ff', '39'), ('0000ff', '45'), ('0000ff', '46'), ('8b4513', '12'), ('8b4513', '43'), ('8b4513', '15'), ('8b4513', '12'), ('8b4513', '32'), ('8b4513', '26'), ('0000ff', '39'), ('0000ff', '42'), ('0000ff', '33'), ('0000ff', '38'), ('ff0000', '42'), ('ff0000', '37'), ('8b4513', '23'), ('8b4513', '37'), ('8b4513', '22'), ('8b4513', '1'), ('8b4513', '4'), ('8b4513', '23'), ('0000ff', '39'), ('0000ff', '42'), ('0000ff', '33'), ('0000ff', '38'), ('ff0000', '42'), ('ff0000', '37'), ('8b4513', '23'), ('8b4513', 
'37'), ('8b4513', '22'), ('8b4513', '1'), ('8b4513', '4'), ('8b4513', '23'), ('bbbb00', '33'), ('bbbb00', '33'), ('bbbb00', '39'), ('bbbb00', '36'), ('ff0000', '44'), ('ff0000', '33'), ('8b4513', '19'), ('8b4513', '8'), ('8b4513', '33'), ('8b4513', '27'), ('8b4513', '18'), ('8b4513', '17'), ('bbbb00', '33'), ('bbbb00', '33'), ('bbbb00', '39'), ('bbbb00', '36'), ('ff0000', '44'), ('ff0000', '33'), ('8b4513', '19'), ('8b4513', '8'), ('8b4513', '33'), ('8b4513', '27'), ('8b4513', '18'), ('8b4513', '17'), ('ff0000', '37'), ('ff0000', '53 BFD'), ('0000ff', '32'), ('0000ff', '46'), ('bbbb00', '40'), ('bbbb00', '42'), ('8b4513', '16'), ('8b4513', '19'), ('8b4513', '9'), ('8b4513', '7'), ('8b4513', '33'), ('8b4513', '25'), ('ff0000', '37'), ('ff0000', '53 BFD'), ('0000ff', '32'), ('0000ff', '46'), ('bbbb00', '40'), ('bbbb00', '42'), ('8b4513', '16'), ('8b4513', '19'), ('8b4513', '9'), ('8b4513', '7'), ('8b4513', '33'), ('8b4513', '25'), ('bbbb00', '10'), ('bbbb00', '53 BFD'), ('0000ff', '42'), ('0000ff', '51.6 DPI'), ('bbbb00', '48'), ('bbbb00', '46'), ('8b4513', '18'), ('8b4513', '30'), ('8b4513', '19'), ('8b4513', '5'), ('8b4513', '29'), ('8b4513', '19'), ('bbbb00', '10'), ('bbbb00', '53 BFD'), ('0000ff', '42'), ('0000ff', '51.6 DPI'), ('bbbb00', '48'), ('bbbb00', '46'), ('8b4513', '18'), ('8b4513', '30'), ('8b4513', '19'), ('8b4513', '5'), ('8b4513', '29'), ('8b4513', '19'), ('0000ff', '35'), ('0000ff', '53 BFD'), ('ff0000', '45'), ('ff0000', '38'), ('ff0000', '38'), ('ff0000', '39'), ('8b4513', '20'), ('8b4513', '5'), ('8b4513', '20'), ('8b4513', '11'), ('8b4513', '15'), ('8b4513', '53'), ('0000ff', '35'), ('0000ff', '53 BFD'), ('ff0000', '45'), ('ff0000', '38'), ('ff0000', '38'), ('ff0000', '39'), ('8b4513', '20'), ('8b4513', '5'), ('8b4513', '20'), ('8b4513', '11'), ('8b4513', '15'), ('8b4513', '53'), ('bbbb00', '43'), ('bbbb00', '36'), ('0000ff', '21'), ('0000ff', '40'), ('ff0000', '22'), ('ff0000', '36'), ('8b4513', '25'), ('8b4513', '10'), ('8b4513', '31'), 
('8b4513', '38'), ('8b4513', '27'), ('8b4513', '24'), ('bbbb00', '43'), ('bbbb00', '36'), ('0000ff', '21'), ('0000ff', '40'), ('ff0000', '22'), ('ff0000', '36'), ('8b4513', '25'), ('8b4513', '10'), ('8b4513', '31'), ('8b4513', '38'), ('8b4513', '27'), ('8b4513', '24'), ('ff0000', '44'), ('ff0000', '53 BFD'), ('bbbb00', '44'), ('bbbb00', '44'), ('bbbb00', '47'), ('bbbb00', '40'), ('8b4513', '14'), ('8b4513', '13'), ('8b4513', '6'), ('8b4513', '8'), ('8b4513', '30'), ('8b4513', '29'), ('ff0000', '44'), ('ff0000', '53 BFD'), ('bbbb00', '44'), ('bbbb00', '44'), ('bbbb00', '47'), ('bbbb00', '40'), ('8b4513', '14'), ('8b4513', '13'), ('8b4513', '6'), ('8b4513', '8'), ('8b4513', '30'), ('8b4513', '29'), ('ff0000', '43'), ('ff0000', '31'), ('bbbb00', '45'), ('bbbb00', '43'), ('0000ff', '48'), ('0000ff', '45'), ('8b4513', '2'), ('8b4513', '12'), ('8b4513', '34'), ('8b4513', '13'), ('8b4513', '20'), ('8b4513', '30'), ('ff0000', '43'), ('ff0000', '31'), ('bbbb00', '45'), ('bbbb00', '43'), ('0000ff', '48'), ('0000ff', '45'), ('8b4513', '2'), ('8b4513', '12'), ('8b4513', '34'), ('8b4513', '13'), ('8b4513', '20'), ('8b4513', '30'), ('bbbb00', '15'), ('bbbb00', '26'), ('ff0000', '53 DNF'), ('ff0000', '45'), ('0000ff', '43'), ('0000ff', '39'), ('8b4513', '22'), ('8b4513', '45'), ('8b4513', '32'), ('8b4513', '28'), ('8b4513', '11'), ('8b4513', '20'), ('bbbb00', '15'), ('bbbb00', '26'), ('ff0000', '53 DNF'), ('ff0000', '45'), ('0000ff', '43'), ('0000ff', '39'), ('8b4513', '22'), ('8b4513', '45'), ('8b4513', '32'), ('8b4513', '28'), ('8b4513', '11'), ('8b4513', '20'), ('0000ff', '10'), ('0000ff', '41'), ('0000ff', '38'), ('0000ff', '44'), ('bbbb00', '39'), ('bbbb00', '36'), ('8b4513', '53 RET'), ('8b4513', '32'), ('8b4513', '18'), ('8b4513', '24'), ('8b4513', '21'), ('8b4513', '28'), ('0000ff', '10'), ('0000ff', '41'), ('0000ff', '38'), ('0000ff', '44'), ('bbbb00', '39'), ('bbbb00', '36'), ('8b4513', '53 RET'), ('8b4513', '32'), ('8b4513', '18'), ('8b4513', '24'), ('8b4513', '21'), 
('8b4513', '28'), ('ff0000', '41'), ('ff0000', '37'), ('bbbb00', '42'), ('bbbb00', '40'), ('ff0000', '45'), ('ff0000', '43'), ('8b4513', '36'), ('8b4513', '31'), ('8b4513', '24'), ('8b4513', '23'), ('8b4513', '23'), ('8b4513', '27'), ('ff0000', '41'), ('ff0000', '37'), ('bbbb00', '42'), ('bbbb00', '40'), ('ff0000', '45'), ('ff0000', '43'), ('8b4513', '36'), ('8b4513', '31'), ('8b4513', '24'), ('8b4513', '23'), ('8b4513', '23'), ('8b4513', '27'), ('ff0000', '39'), ('ff0000', '53 BFD'), ('bbbb00', '40'), ('bbbb00', '45'), ('bbbb00', '46'), ('bbbb00', '47'), ('8b4513', '31'), ('8b4513', '2'), ('8b4513', '53 RET'), ('8b4513', '14'), ('8b4513', '31'), ('8b4513', '33'), ('ff0000', '39'), ('ff0000', '53 BFD'), ('bbbb00', '40'), ('bbbb00', '45'), ('bbbb00', '46'), ('bbbb00', '47'), ('8b4513', '31'), ('8b4513', '2'), ('8b4513', '53 RET'), ('8b4513', '14'), ('8b4513', '31'), ('8b4513', '33'), ('bbbb00', '39'), ('bbbb00', '38'), ('ff0000', '44'), ('ff0000', '46'), ('0000ff', '47'), ('0000ff', '49'), ('8b4513', '30'), ('8b4513', '15'), ('8b4513', '37'), ('8b4513', '32'), ('8b4513', '34'), ('8b4513', '21'), ('bbbb00', '39'), ('bbbb00', '38'), ('ff0000', '44'), ('ff0000', '46'), ('0000ff', '47'), ('0000ff', '49'), ('8b4513', '30'), ('8b4513', '15'), ('8b4513', '37'), ('8b4513', '32'), ('8b4513', '34'), ('8b4513', '21'), ('ff0000', '34'), ('ff0000', '35'), ('bbbb00', '47'), ('bbbb00', '42'), ('0000ff', '49'), ('0000ff', '47'), ('8b4513', '21'), ('8b4513', '27'), ('8b4513', '38'), ('8b4513', '40'), ('8b4513', '37'), ('8b4513', '34'), ('ff0000', '34'), ('ff0000', '35'), ('bbbb00', '47'), ('bbbb00', '42'), ('0000ff', '49'), ('0000ff', '47'), ('8b4513', '21'), ('8b4513', '27'), ('8b4513', '38'), ('8b4513', '40'), ('8b4513', '37'), ('8b4513', '34'), ('0000ff', '47'), ('0000ff', '44'), ('0000ff', '41'), ('0000ff', '48'), ('0000ff', '46'), ('0000ff', '50'), ('8b4513', '33'), ('8b4513', '17'), ('8b4513', '30'), ('8b4513', '42'), ('8b4513', '25'), ('8b4513', '32'), ('0000ff', '47'), 
('0000ff', '44'), ('0000ff', '41'), ('0000ff', '48'), ('0000ff', '46'), ('0000ff', '50'), ('8b4513', '33'), ('8b4513', '17'), ('8b4513', '30'), ('8b4513', '42'), ('8b4513', '25'), ('8b4513', '32'), ('0000ff', '48'), ('0000ff', '45'), ('ff0000', '47'), ('ff0000', '47'), ('0000ff', '41'), ('0000ff', '43'), ('8b4513', '41'), ('8b4513', '35'), ('8b4513', '29'), ('8b4513', '30'), ('8b4513', '17'), ('8b4513', '31'), ('0000ff', '48'), ('0000ff', '45'), ('ff0000', '47'), ('ff0000', '47'), ('0000ff', '41'), ('0000ff', '43'), ('8b4513', '41'), ('8b4513', '35'), ('8b4513', '29'), ('8b4513', '30'), ('8b4513', '17'), ('8b4513', '31'), ('ff0000', '49'), ('ff0000', '33'), ('bbbb00', '49'), ('bbbb00', '48'), ('ff0000', '46'), ('ff0000', '44'), ('8b4513', '40'), ('8b4513', '29'), ('8b4513', '5'), ('8b4513', '43'), ('8b4513', '36'), ('8b4513', '36'), ('ff0000', '49'), ('ff0000', '33'), ('bbbb00', '49'), ('bbbb00', '48'), ('ff0000', '46'), ('ff0000', '44'), ('8b4513', '40'), ('8b4513', '29'), ('8b4513', '5'), ('8b4513', '43'), ('8b4513', '36'), ('8b4513', '36'), ('0000ff', '42'), ('0000ff', '47'), ('bbbb00', '46'), ('bbbb00', '47'), ('0000ff', '50'), ('0000ff', '50.6 DPI'), ('8b4513', '34'), ('8b4513', '16'), ('8b4513', '35'), ('8b4513', '29'), ('8b4513', '38'), ('8b4513', '35'), ('0000ff', '42'), ('0000ff', '47'), ('bbbb00', '46'), ('bbbb00', '47'), ('0000ff', '50'), ('0000ff', '50.6 DPI'), ('8b4513', '34'), ('8b4513', '16'), ('8b4513', '35'), ('8b4513', '29'), ('8b4513', '38'), ('8b4513', '35'), ('bbbb00', '47'), ('bbbb00', '40'), ('0000ff', '36'), ('0000ff', '33'), ('ff0000', '43'), ('ff0000', '35'), ('8b4513', '37'), ('8b4513', '50'), ('8b4513', '36'), ('8b4513', '35'), ('8b4513', '39'), ('8b4513', '37'), ('bbbb00', '47'), ('bbbb00', '40'), ('0000ff', '36'), ('0000ff', '33'), ('ff0000', '43'), ('ff0000', '35'), ('8b4513', '37'), ('8b4513', '50'), ('8b4513', '36'), ('8b4513', '35'), ('8b4513', '39'), ('8b4513', '37'), ('bbbb00', '51'), ('bbbb00', '43'), ('ff0000', '48'), 
('ff0000', '48'), ('ff0000', '47'), ('ff0000', '45'), ('8b4513', '35'), ('8b4513', '36'), ('8b4513', '25'), ('8b4513', '33'), ('8b4513', '41'), ('8b4513', '39'), ('bbbb00', '51'), ('bbbb00', '43'), ('ff0000', '48'), ('ff0000', '48'), ('ff0000', '47'), ('ff0000', '45'), ('8b4513', '35'), ('8b4513', '36'), ('8b4513', '25'), ('8b4513', '33'), ('8b4513', '41'), ('8b4513', '39'), ('ff0000', '45'), ('ff0000', '34'), ('bbbb00', '43'), ('bbbb00', '48.6 DPI'), ('bbbb00', '44'), ('bbbb00', '48'), ('8b4513', '39'), ('8b4513', '39'), ('8b4513', '41'), ('8b4513', '37'), ('8b4513', '40'), ('8b4513', '38'), ('ff0000', '45'), ('ff0000', '34'), ('bbbb00', '43'), ('bbbb00', '48.6 DPI'), ('bbbb00', '44'), ('bbbb00', '48'), ('8b4513', '39'), ('8b4513', '39'), ('8b4513', '41'), ('8b4513', '37'), ('8b4513', '40'), ('8b4513', '38'), ('0000ff', '49'), ('0000ff', '48'), ('0000ff', '53 BFD'), ('0000ff', '50'), ('ff0000', '48'), ('ff0000', '46'), ('8b4513', '29'), ('8b4513', '41'), ('8b4513', '39'), ('8b4513', '39'), ('8b4513', '47'), ('8b4513', '41'), ('0000ff', '49'), ('0000ff', '48'), ('0000ff', '53 BFD'), ('0000ff', '50'), ('ff0000', '48'), ('ff0000', '46'), ('8b4513', '29'), ('8b4513', '41'), ('8b4513', '39'), ('8b4513', '39'), ('8b4513', '47'), ('8b4513', '41'), ('ff0000', '52'), ('ff0000', '36'), ('ff0000', '49'), ('ff0000', '49'), ('bbbb00', '50'), ('bbbb00', '51'), ('8b4513', '38'), ('8b4513', '26'), ('8b4513', '46'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '43'), ('ff0000', '52'), ('ff0000', '36'), ('ff0000', '49'), ('ff0000', '49'), ('bbbb00', '50'), ('bbbb00', '51'), ('8b4513', '38'), ('8b4513', '26'), ('8b4513', '46'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '43'), ('ff0000', '50'), ('ff0000', '32'), ('bbbb00', '50'), ('bbbb00', '51'), ('ff0000', '49'), ('ff0000', '47'), ('8b4513', '42'), ('8b4513', '34'), ('8b4513', '45'), ('8b4513', '44'), ('8b4513', '49'), ('8b4513', '42'), ('ff0000', '50'), ('ff0000', '32'), ('bbbb00', '50'), ('bbbb00', '51'), ('ff0000', '49'), 
('ff0000', '47'), ('8b4513', '42'), ('8b4513', '34'), ('8b4513', '45'), ('8b4513', '44'), ('8b4513', '49'), ('8b4513', '42'), ('bbbb00', '50'), ('bbbb00', '42'), ('bbbb00', '52'), ('bbbb00', '52'), ('ff0000', '50'), ('ff0000', '49'), ('8b4513', '43'), ('8b4513', '42'), ('8b4513', '42'), ('8b4513', '41'), ('8b4513', '45'), ('8b4513', '48'), ('bbbb00', '50'), ('bbbb00', '42'), ('bbbb00', '52'), ('bbbb00', '52'), ('ff0000', '50'), ('ff0000', '49'), ('8b4513', '43'), ('8b4513', '42'), ('8b4513', '42'), ('8b4513', '41'), ('8b4513', '45'), ('8b4513', '48'), ('0000ff', '46'), ('0000ff', '46'), ('0000ff', '44'), ('0000ff', '52'), ('ff0000', '51'), ('ff0000', '48'), ('8b4513', '45'), ('8b4513', '40'), ('8b4513', '40'), ('8b4513', '49'), ('8b4513', '51'), ('8b4513', '45'), ('0000ff', '46'), ('0000ff', '46'), ('0000ff', '44'), ('0000ff', '52'), ('ff0000', '51'), ('ff0000', '48'), ('8b4513', '45'), ('8b4513', '40'), ('8b4513', '40'), ('8b4513', '49'), ('8b4513', '51'), ('8b4513', '45'), ('bbbb00', '52'), ('bbbb00', '44'), ('bbbb00', '48'), ('bbbb00', '49'), ('bbbb00', '49'), ('bbbb00', '49'), ('8b4513', '49'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '45'), ('8b4513', '42'), ('8b4513', '40'), ('bbbb00', '52'), ('bbbb00', '44'), ('bbbb00', '48'), ('bbbb00', '49'), ('bbbb00', '49'), ('bbbb00', '49'), ('8b4513', '49'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '45'), ('8b4513', '42'), ('8b4513', '40'), ('bbbb00', '49'), ('bbbb00', '41'), ('ff0000', '50'), ('ff0000', '51'), ('bbbb00', '51'), ('bbbb00', '50'), ('8b4513', '47'), ('8b4513', '46'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '43'), ('8b4513', '44'), ('bbbb00', '49'), ('bbbb00', '41'), ('ff0000', '50'), ('ff0000', '51'), ('bbbb00', '51'), ('bbbb00', '50'), ('8b4513', '47'), ('8b4513', '46'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '43'), ('8b4513', '44'), ('bbbb00', '40'), ('bbbb00', '16'), ('0000ff', '53 BFD'), ('0000ff', '41'), ('ff0000', '53 DNC'), ('ff0000', '53 DNC'), ('8b4513', '53 DNC'), 
('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53'), ('bbbb00', '40'), ('bbbb00', '16'), ('0000ff', '53 BFD'), ('0000ff', '41'), ('ff0000', '53 DNC'), ('ff0000', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53'), ('0000ff', '51'), ('0000ff', '50'), ('bbbb00', '51'), ('bbbb00', '50'), ('bbbb00', '52'), ('bbbb00', '52'), ('8b4513', '46'), ('8b4513', '44'), ('8b4513', '43'), ('8b4513', '46'), ('8b4513', '44'), ('8b4513', '53'), ('0000ff', '51'), ('0000ff', '50'), ('bbbb00', '51'), ('bbbb00', '50'), ('bbbb00', '52'), ('bbbb00', '52'), ('8b4513', '46'), ('8b4513', '44'), ('8b4513', '43'), ('8b4513', '46'), ('8b4513', '44'), ('8b4513', '53'), ('ff0000', '48'), ('ff0000', '53 BFD'), ('ff0000', '51'), ('ff0000', '50'), ('0000ff', '51'), ('0000ff', '52'), ('8b4513', '48'), ('8b4513', '48'), ('8b4513', '44'), ('8b4513', '50'), ('8b4513', '46'), ('8b4513', '47'), ('ff0000', '48'), ('ff0000', '53 BFD'), ('ff0000', '51'), ('ff0000', '50'), ('0000ff', '51'), ('0000ff', '52'), ('8b4513', '48'), ('8b4513', '48'), ('8b4513', '44'), ('8b4513', '50'), ('8b4513', '46'), ('8b4513', '47'), ('0000ff', '52'), ('0000ff', '49'), ('0000ff', '43'), ('0000ff', '51'), ('0000ff', '52'), ('0000ff', '51'), ('8b4513', '44'), ('8b4513', '51'), ('8b4513', '49'), ('8b4513', '51'), ('8b4513', '50'), ('8b4513', '46'), ('0000ff', '52'), ('0000ff', '49'), ('0000ff', '43'), ('0000ff', '51'), ('0000ff', '52'), ('0000ff', '51'), ('8b4513', '44'), ('8b4513', '51'), ('8b4513', '49'), ('8b4513', '51'), ('8b4513', '50'), ('8b4513', '46')]
def blah():
    """Rebuild per-race fleet columns for the 2019 World Championship.

    Reads 'worldChamps2019.csv', buckets each sailor's qualifying results
    (QR1-QR6) by fleet colour and final results (FR7-FR12) by medal fleet,
    using the scraped font-colour data in the module-level ``colorlst``,
    then writes each result into a new 'QR<n>_<colour>' / 'FR<n>_<fleet>'
    column of the frame and saves it as 'MODworldChamps2019.csv'.

    NOTE(review): assumes ``colorlst`` holds exactly 24 (colour, score)
    tuples per CSV row, in row order, with each race pair sharing one
    colour — TODO confirm against the scraper that produced ``colorlst``.
    """
    tstname='worldChamps2019.csv'
    df = pd.read_csv(tstname)
    # Earlier one-off clean-up passes over the raw scrape, kept for reference:
    # for index, row in df.iterrows():
    #     print (index, row[5])
    # df.columns = df.iloc[1]
    # print (df.columns, '\n')
    # print(df.columns[0]==1,'\n')
    # df.drop(columns=[1, 'Pos','Bow#'], inplace=True)
    # df.dropna(axis = 1,inplace=True)
    # df.drop_duplicates(inplace=True)
    # for index, row in df.iterrows():
    #     df['Name'][index] = ' '.join(reversed(row['Name'].split(' ')))
    # One [name, result] bucket per qualifying race x fleet colour.
    QR1_blue = []
    QR1_red = []
    QR1_yellow = []
    QR2_blue = []
    QR2_red = []
    QR2_yellow = []
    QR3_blue = []
    QR3_red = []
    QR3_yellow = []
    QR4_blue = []
    QR4_red = []
    QR4_yellow = []
    QR5_blue = []
    QR5_red = []
    QR5_yellow = []
    QR6_blue = []
    QR6_red = []
    QR6_yellow = []
    # One [name, result] bucket per final race x medal fleet.
    FR7_gold = []
    FR7_silver = []
    FR7_bronze = []
    FR8_gold = []
    FR8_silver = []
    FR8_bronze = []
    FR9_gold = []
    FR9_silver = []
    FR9_bronze = []
    FR10_gold = []
    FR10_silver = []
    FR10_bronze = []
    FR11_gold = []
    FR11_silver = []
    FR11_bronze = []
    FR12_gold = []
    FR12_silver = []
    FR12_bronze = []
    # Ordered so that index i maps to race int(i/3)+1 and colour i%3
    # (blue/red/yellow) — the write-back loops below rely on this layout.
    lstQRs=[ QR1_blue , \
        QR1_red , \
        QR1_yellow , \
        QR2_blue , \
        QR2_red , \
        QR2_yellow , \
        QR3_blue , \
        QR3_red , \
        QR3_yellow , \
        QR4_blue , \
        QR4_red , \
        QR4_yellow , \
        QR5_blue , \
        QR5_red , \
        QR5_yellow , \
        QR6_blue , \
        QR6_red , \
        QR6_yellow]
    # Same layout for the finals: index i -> race int(i/3)+7, fleet i%3
    # (gold/silver/bronze).
    lstFRs=[ FR7_gold , \
        FR7_silver , \
        FR7_bronze , \
        FR8_gold , \
        FR8_silver , \
        FR8_bronze , \
        FR9_gold , \
        FR9_silver , \
        FR9_bronze , \
        FR10_gold , \
        FR10_silver , \
        FR10_bronze , \
        FR11_gold , \
        FR11_silver , \
        FR11_bronze , \
        FR12_gold , \
        FR12_silver , \
        FR12_bronze ]
    # HTML font colour hex -> fleet name.
    colors = {'0000ff': 'blue', 'bbbb00': 'yellow', '999900': 'gold',
              'ff0000': 'red', '999999':'silver', '8b4513': 'bronze'}
    for index, row in df.iterrows():
        # Each sailor owns a 24-entry span of colorlst; races come in
        # same-coloured pairs, so only offsets 0, 2, 4 and 6 are sampled.
        lstIndex = index*24
        # if int(colorlst[lstIndex][1]) == int(row['QR1']):
        #     print(('{:39} scored '+str(row['QR1']) + '\t in QR1'\
        #     ' with color code ' + colors.get(colorlst[lstIndex][0])).format(row['Name']) )
        # Offset 0: fleet colour shared by QR1 and QR2.
        colorKey = colorlst[lstIndex][0]
        if colors.get(colorKey) == 'blue':
            QR1_blue.append([row['Name'],row['QR1']])
            QR2_blue.append([row['Name'],row['QR2']])
        elif colors.get(colorKey) == 'red':
            QR1_red.append([row['Name'],row['QR1']])
            QR2_red.append([row['Name'],row['QR2']])
        elif colors.get(colorKey) == 'yellow':
            QR1_yellow.append([row['Name'],row['QR1']])
            QR2_yellow.append([row['Name'],row['QR2']])
        lstIndex += 2
        # Offset 2: fleet colour shared by QR3 and QR4.
        colorKey = colorlst[lstIndex][0]
        if colors.get(colorKey) == 'blue':
            QR3_blue.append([row['Name'],row['QR3']])
            QR4_blue.append([row['Name'],row['QR4']])
        elif colors.get(colorKey) == 'red':
            QR3_red.append([row['Name'],row['QR3']])
            QR4_red.append([row['Name'],row['QR4']])
        elif colors.get(colorKey) == 'yellow':
            QR3_yellow.append([row['Name'],row['QR3']])
            QR4_yellow.append([row['Name'],row['QR4']])
        lstIndex += 2
        # Offset 4: fleet colour shared by QR5 and QR6.
        colorKey = colorlst[lstIndex][0]
        if colors.get(colorKey) == 'blue':
            QR5_blue.append([row['Name'],row['QR5']])
            QR6_blue.append([row['Name'],row['QR6']])
        elif colors.get(colorKey) == 'red':
            QR5_red.append([row['Name'],row['QR5']])
            QR6_red.append([row['Name'],row['QR6']])
        elif colors.get(colorKey) == 'yellow':
            QR5_yellow.append([row['Name'],row['QR5']])
            QR6_yellow.append([row['Name'],row['QR6']])
        lstIndex += 2
        # Offset 6: medal fleet shared by all six final races FR7-FR12.
        colorKey = colorlst[lstIndex][0]
        if colors.get(colorKey) == 'gold':
            FR7_gold.append([row['Name'],row['FR7']])
            FR8_gold.append([row['Name'],row['FR8']])
            FR9_gold.append([row['Name'],row['FR9']])
            FR10_gold.append([row['Name'],row['FR10']])
            FR11_gold.append([row['Name'],row['FR11']])
            FR12_gold.append([row['Name'],row['FR12']])
        elif colors.get(colorKey) == 'silver':
            FR7_silver.append([row['Name'],row['FR7']])
            FR8_silver.append([row['Name'],row['FR8']])
            FR9_silver.append([row['Name'],row['FR9']])
            FR10_silver.append([row['Name'],row['FR10']])
            FR11_silver.append([row['Name'],row['FR11']])
            FR12_silver.append([row['Name'],row['FR12']])
        elif colors.get(colorKey) == 'bronze':
            FR7_bronze.append([row['Name'],row['FR7']])
            FR8_bronze.append([row['Name'],row['FR8']])
            FR9_bronze.append([row['Name'],row['FR9']])
            FR10_bronze.append([row['Name'],row['FR10']])
            FR11_bronze.append([row['Name'],row['FR11']])
            FR12_bronze.append([row['Name'],row['FR12']])
    # Write each qualifying bucket back into a 'QR<n>_<colour>' column.
    indexColors = {0:'blue', 1:'red', 2:'yellow'}
    for i in range(18):
        qr = lstQRs[i]
        # NOTE(review): if the CSV results are strings (e.g. '53 DNC'),
        # this sort is lexicographic, not numeric — TODO confirm dtype.
        qr.sort(key = lambda sailor:sailor[1])
        currentColor = indexColors.get(i % 3)
        QRnum = int(i / 3) + 1
        print(qr)
        for j in range(len(qr)):
            result = qr[j]
            sailor = result[0]
            rr = result[1]
            # Index of the sailor's row in df, looked up by exact name match.
            sailorIndex = df.loc[df['Name']==sailor].index
            colName = 'QR{}_{}'.format(str(QRnum),str(currentColor))
            print ("{:39} had result {:3} in race {}".format(\
            sailor,rr,colName))
            try:
                df.at[sailorIndex,colName] = rr
            except Exception as e:
                # First sailor of each new column: create it, then retry.
                df[colName] = np.nan
                df.at[sailorIndex,colName] = rr
    # Same write-back for the final races into 'FR<n>_<fleet>' columns.
    indexColors = {0:'gold', 1:'silver', 2:'bronze'}
    for i in range(18):
        fr = lstFRs[i]
        # NOTE(review): same lexicographic-sort caveat as above.
        fr.sort(key = lambda sailor:sailor[1])
        currentColor = indexColors.get(i % 3)
        FRnum = int(i / 3) + 1
        print(fr)
        for j in range(len(fr)):
            result = fr[j]
            sailor = result[0]
            rr = result[1]
            sailorIndex = df.loc[df['Name']==sailor].index
            # FRnum runs 1-6 here, so +6 yields the FR7-FR12 column names.
            colName = 'FR{}_{}'.format(str(FRnum + 6),str(currentColor))
            print ("{:39} had result {:3} in race {}".format(\
            sailor,rr,colName))
            try:
                df.at[sailorIndex,colName] = rr
            except Exception as e:
                # First sailor of each new column: create it, then retry.
                df[colName] = np.nan
                df.at[sailorIndex,colName] = rr
    df.to_csv('MOD' + tstname, index=False)
#rawHTML=""""""  # paste the raw results-page HTML here; consumed by blah2() below
def blah2():
    """Extract (hex colour, score) tuples from the raw results HTML and print them.

    NOTE(review): reads the module-level ``rawHTML`` string, which is
    currently commented out above — calling this raises NameError until
    it is defined.
    """
    pattern = re.compile(r'color=" #([0-9a-f]{6})">(?:<s>)?(\d{1,2}|\d{1,2}\.?\d?\s[A-Z]{3})\s?(?:<\/s>)?<\/font><\/td>')
    matches = pattern.findall(rawHTML)
    print(matches)
def wc2020():
    """Round-trip the 2020 World Championship standings sheet.

    Reads 'WorldChamps2020.csv' and writes it straight back without the
    index column. The per-race merging, Total/Net scoring (two discards)
    and gold/silver/bronze fleet-tagging passes that used to run here were
    one-off steps and are deliberately disabled; see version history for
    the full scaffolding.
    """
    results_file = 'WorldChamps2020.csv'
    df = pd.read_csv(results_file)
    # Disabled one-off passes (summarised):
    #   * merge WorldChamps2020R{race}{fleet}.csv files into QR*/FR* columns,
    #     normalising non-breaking spaces in sailor names;
    #   * compute df['Total'] and df['Net'] (total minus worst QR and worst FR);
    #   * tag each sailor's fleet from which FR12 column is populated;
    #   * sort by (fleet, Net) and tabulate.
    df.to_csv(results_file, index=False)
def hempelWCmiami2019():
    """Merge per-race result files for the 2019 Hempel World Cup (Miami).

    Reads 'HempelWCMiami2019Overall.csv' plus one
    'HempelWCMiami2019<race>.csv' file per race listed below, copies each
    sailor's 'Race Points' into a new per-race column (keyed by name), and
    writes the merged frame to 'HempelWCMiami2019.csv'.

    Fixes over the previous version:
      * the race column is created once up front instead of via a per-row
        try/except around a failing `.at` assignment;
      * assignment uses `.loc` with the matched index (a no-op when the
        name is absent) rather than `.at` with an Index object;
      * the output is written with index=False, consistent with the other
        exporters in this file (the old call emitted a spurious unnamed
        index column).
    """
    df = pd.read_csv('HempelWCMiami2019Overall.csv')
    races = ['QR1_yellow','QR2_yellow','QR3_blue','QR4_blue','FR5_gold','FR6_gold','FR7_gold','FR8_gold','FR9_gold','FR10_gold','FR11_gold','FR_medal','QR3_yellow','QR4_yellow','QR1_blue','QR2_blue','FR5_silver','FR6_silver','FR7_silver','FR8_silver','FR9_silver','FR10_silver']
    for race in races:
        input_file = 'HempelWCMiami2019{}.csv'.format(race)
        df_race = pd.read_csv(input_file)
        # Create the destination column once, before filling it row by row.
        if race not in df.columns:
            df[race] = np.nan
        for _, row in df_race.iterrows():
            # NOTE(review): assumes 'Crew' in the race file matches 'Name'
            # in the overall sheet exactly (no nbsp normalisation here,
            # unlike the 2020 merge) — unmatched names are silently skipped.
            sailor = row['Crew']
            points = row['Race Points']
            match_index = df.loc[df['Name'] == sailor].index
            df.loc[match_index, race] = points
    df.to_csv('HempelWCMiami2019.csv', index=False)
##### main ########
if __name__ == '__main__':
    # Guard the entry point so importing this module (e.g. for the other
    # merge helpers) no longer triggers the Miami merge as a side effect.
    hempelWCmiami2019()
import csv
import sys
import pandas as pd
import numpy as np
import os
import re
from selenium import webdriver
from time import sleep
from text_unidecode import unidecode
from tabulate import tabulate
sys.path.append('/home/daniel/Desktop/elo_sailor/Glicko2approach')
from SailingGlicko2 import *
from Scrape import *
os.chdir("..")
def getHiddenHTML(currentRegatta):
    """Open *currentRegatta* in Firefox, wait briefly for client-side
    rendering, and return the page's full inner HTML as a string.

    The browser window is now closed in a ``finally`` block, so the
    Firefox process is no longer leaked when ``get`` or
    ``execute_script`` raises (the original leaked it on any error).
    """
    browser = webdriver.Firefox()
    try:
        browser.get(currentRegatta)
        # Fixed 2 s wait for JavaScript-generated content to appear;
        # NOTE(review): an explicit WebDriverWait would be more reliable.
        sleep(2)
        return browser.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
    finally:
        browser.close()
colorlst=[('0000ff', '2'), ('0000ff', '21'), ('bbbb00', '4'), ('bbbb00', '4'), ('bbbb00', '3'), ('bbbb00', '4'), ('999900', '8'), ('999900', '2'), ('999900', '5'), ('999900', '13'), ('999900', '14'), ('999900', '51'), ('0000ff', '2'), ('0000ff', '21'), ('bbbb00', '4'), ('bbbb00', '4'), ('bbbb00', '3'), ('bbbb00', '4'), ('999900', '8'), ('999900', '2'), ('999900', '5'), ('999900', '13'), ('999900', '14'), ('999900', '51'), ('ff0000', '13'), ('ff0000', '7'), ('ff0000', '3'), ('ff0000', '1'), ('0000ff', '2'), ('0000ff', '1'), ('999900', '19'), ('999900', '18'), ('999900', '30'), ('999900', '1'), ('999900', '6'), ('999900', '5'), ('ff0000', '13'), ('ff0000', '7'), ('ff0000', '3'), ('ff0000', '1'), ('0000ff', '2'), ('0000ff', '1'), ('999900', '19'), ('999900', '18'), ('999900', '30'), ('999900', '1'), ('999900', '6'), ('999900', '5'), ('ff0000', '2'), ('ff0000', '2'), ('0000ff', '3'), ('0000ff', '7'), ('ff0000', '4'), ('ff0000', '53 UFD'), ('999900', '4'), ('999900', '38'), ('999900', '13'), ('999900', '23'), ('999900', '2'), ('999900', '9'), ('ff0000', '2'), ('ff0000', '2'), ('0000ff', '3'), ('0000ff', '7'), ('ff0000', '4'), ('ff0000', '53 UFD'), ('999900', '4'), ('999900', '38'), ('999900', '13'), ('999900', '23'), ('999900', '2'), ('999900', '9'), ('0000ff', '12'), ('0000ff', '7'), ('bbbb00', '2'), ('bbbb00', '2'), ('ff0000', '1'), ('ff0000', '1'), ('999900', '17'), ('999900', '8'), ('999900', '39'), ('999900', '21'), ('999900', '1'), ('999900', '12'), ('0000ff', '12'), ('0000ff', '7'), ('bbbb00', '2'), ('bbbb00', '2'), ('ff0000', '1'), ('ff0000', '1'), ('999900', '17'), ('999900', '8'), ('999900', '39'), ('999900', '21'), ('999900', '1'), ('999900', '12'), ('ff0000', '17'), ('ff0000', '4'), ('0000ff', '9'), ('0000ff', '9'), ('bbbb00', '2'), ('bbbb00', '5'), ('999900', '23'), ('999900', '7'), ('999900', '7'), ('999900', '22'), ('999900', '4'), ('999900', '4'), ('ff0000', '17'), ('ff0000', '4'), ('0000ff', '9'), ('0000ff', '9'), ('bbbb00', '2'), ('bbbb00', '5'), 
('999900', '23'), ('999900', '7'), ('999900', '7'), ('999900', '22'), ('999900', '4'), ('999900', '4'), ('0000ff', '3'), ('0000ff', '2'), ('ff0000', '7'), ('ff0000', '2'), ('0000ff', '1'), ('0000ff', '7'), ('999900', '22'), ('999900', '22'), ('999900', '2'), ('999900', '11'), ('999900', '8'), ('999900', '20'), ('0000ff', '3'), ('0000ff', '2'), ('ff0000', '7'), ('ff0000', '2'), ('0000ff', '1'), ('0000ff', '7'), ('999900', '22'), ('999900', '22'), ('999900', '2'), ('999900', '11'), ('999900', '8'), ('999900', '20'), ('bbbb00', '5'), ('bbbb00', '13'), ('0000ff', '5'), ('0000ff', '4'), ('bbbb00', '1'), ('bbbb00', '1'), ('999900', '24'), ('999900', '40'), ('999900', '29'), ('999900', '4'), ('999900', '9'), ('999900', '10'), ('bbbb00', '5'), ('bbbb00', '13'), ('0000ff', '5'), ('0000ff', '4'), ('bbbb00', '1'), ('bbbb00', '1'), ('999900', '24'), ('999900', '40'), ('999900', '29'), ('999900', '4'), ('999900', '9'), ('999900', '10'), ('ff0000', '5'), ('ff0000', '6'), ('bbbb00', '12'), ('bbbb00', '14'), ('ff0000', '15'), ('ff0000', '7'), ('999900', '16'), ('999900', '6'), ('999900', '12'), ('999900', '7'), ('999900', '45'), ('999900', '7'), ('ff0000', '5'), ('ff0000', '6'), ('bbbb00', '12'), ('bbbb00', '14'), ('ff0000', '15'), ('ff0000', '7'), ('999900', '16'), ('999900', '6'), ('999900', '12'), ('999900', '7'), ('999900', '45'), ('999900', '7'), ('bbbb00', '6'), ('bbbb00', '10'), ('ff0000', '2'), ('ff0000', '5'), ('0000ff', '7'), ('0000ff', '3'), ('999900', '7'), ('999900', '14'), ('999900', '31'), ('999900', '10'), ('999900', '53 BFD'), ('999900', '11'), ('bbbb00', '6'), ('bbbb00', '10'), ('ff0000', '2'), ('ff0000', '5'), ('0000ff', '7'), ('0000ff', '3'), ('999900', '7'), ('999900', '14'), ('999900', '31'), ('999900', '10'), ('999900', '53 BFD'), ('999900', '11'), ('bbbb00', '21'), ('bbbb00', '5'), ('ff0000', '10'), ('ff0000', '7'), ('0000ff', '3'), ('0000ff', '4'), ('999900', '15'), ('999900', '11'), ('999900', '18'), ('999900', '6'), ('999900', '32'), ('999900', '17'), 
('bbbb00', '21'), ('bbbb00', '5'), ('ff0000', '10'), ('ff0000', '7'), ('0000ff', '3'), ('0000ff', '4'), ('999900', '15'), ('999900', '11'), ('999900', '18'), ('999900', '6'), ('999900', '32'), ('999900', '17'), ('ff0000', '15'), ('ff0000', '18'), ('ff0000', '6'), ('ff0000', '6'), ('ff0000', '2'), ('ff0000', '4'), ('999900', '35'), ('999900', '15'), ('999900', '3'), ('999900', '2'), ('999900', '18'), ('999900', '34'), ('ff0000', '15'), ('ff0000', '18'), ('ff0000', '6'), ('ff0000', '6'), ('ff0000', '2'), ('ff0000', '4'), ('999900', '35'), ('999900', '15'), ('999900', '3'), ('999900', '2'), ('999900', '18'), ('999900', '34'), ('0000ff', '27'), ('0000ff', '3'), ('0000ff', '4'), ('0000ff', '3'), ('bbbb00', '5'), ('bbbb00', '2'), ('999900', '14'), ('999900', '30'), ('999900', '15'), ('999900', '25'), ('999900', '21'), ('999900', '32'), ('0000ff', '27'), ('0000ff', '3'), ('0000ff', '4'), ('0000ff', '3'), ('bbbb00', '5'), ('bbbb00', '2'), ('999900', '14'), ('999900', '30'), ('999900', '15'), ('999900', '25'), ('999900', '21'), ('999900', '32'), ('ff0000', '8'), ('ff0000', '3'), ('bbbb00', '3'), ('bbbb00', '1'), ('bbbb00', '8'), ('bbbb00', '8'), ('999900', '22 SCP'), ('999900', '27'), ('999900', '28'), ('999900', '12'), ('999900', '33'), ('999900', '13'), ('ff0000', '8'), ('ff0000', '3'), ('bbbb00', '3'), ('bbbb00', '1'), ('bbbb00', '8'), ('bbbb00', '8'), ('999900', '22 SCP'), ('999900', '27'), ('999900', '28'), ('999900', '12'), ('999900', '33'), ('999900', '13'), ('0000ff', '6'), ('0000ff', '17'), ('bbbb00', '15'), ('bbbb00', '15'), ('0000ff', '14'), ('0000ff', '17'), ('999900', '3'), ('999900', '3'), ('999900', '20'), ('999900', '37'), ('999900', '17'), ('999900', '19'), ('0000ff', '6'), ('0000ff', '17'), ('bbbb00', '15'), ('bbbb00', '15'), ('0000ff', '14'), ('0000ff', '17'), ('999900', '3'), ('999900', '3'), ('999900', '20'), ('999900', '37'), ('999900', '17'), ('999900', '19'), ('bbbb00', '4'), ('bbbb00', '4'), ('ff0000', '1'), ('ff0000', '8'), ('0000ff', '13'), 
('0000ff', '10'), ('999900', '49'), ('999900', '45'), ('999900', '35'), ('999900', '9'), ('999900', '15'), ('999900', '2'), ('bbbb00', '4'), ('bbbb00', '4'), ('ff0000', '1'), ('ff0000', '8'), ('0000ff', '13'), ('0000ff', '10'), ('999900', '49'), ('999900', '45'), ('999900', '35'), ('999900', '9'), ('999900', '15'), ('999900', '2'), ('0000ff', '16'), ('0000ff', '12'), ('0000ff', '2'), ('0000ff', '1'), ('bbbb00', '4'), ('bbbb00', '3'), ('999900', '27'), ('999900', '21'), ('999900', '33'), ('999900', '29'), ('999900', '29'), ('999900', '6'), ('0000ff', '16'), ('0000ff', '12'), ('0000ff', '2'), ('0000ff', '1'), ('bbbb00', '4'), ('bbbb00', '3'), ('999900', '27'), ('999900', '21'), ('999900', '33'), ('999900', '29'), ('999900', '29'), ('999900', '6'), ('ff0000', '14'), ('ff0000', '1'), ('bbbb00', '8'), ('bbbb00', '3'), ('ff0000', '9'), ('ff0000', '9'), ('999900', '1'), ('999900', '46'), ('999900', '41'), ('999900', '33'), ('999900', '30'), ('999900', '3'), ('ff0000', '14'), ('ff0000', '1'), ('bbbb00', '8'), ('bbbb00', '3'), ('ff0000', '9'), ('ff0000', '9'), ('999900', '1'), ('999900', '46'), ('999900', '41'), ('999900', '33'), ('999900', '30'), ('999900', '3'), ('ff0000', '18'), ('ff0000', '53 BFD'), ('ff0000', '4'), ('ff0000', '9'), ('bbbb00', '6'), ('bbbb00', '7'), ('999900', '53 UFD'), ('999900', '34'), ('999900', '10'), ('999900', '15'), ('999900', '22'), ('999900', '15'), ('ff0000', '18'), ('ff0000', '53 BFD'), ('ff0000', '4'), ('ff0000', '9'), ('bbbb00', '6'), ('bbbb00', '7'), ('999900', '53 UFD'), ('999900', '34'), ('999900', '10'), ('999900', '15'), ('999900', '22'), ('999900', '15'), ('0000ff', '5'), ('0000ff', '20'), ('0000ff', '1'), ('0000ff', '16'), ('bbbb00', '15'), ('bbbb00', '10'), ('999900', '36'), ('999900', '9'), ('999900', '4'), ('999900', '3'), ('999900', '42'), ('999900', '43'), ('0000ff', '5'), ('0000ff', '20'), ('0000ff', '1'), ('0000ff', '16'), ('bbbb00', '15'), ('bbbb00', '10'), ('999900', '36'), ('999900', '9'), ('999900', '4'), ('999900', '3'), 
('999900', '42'), ('999900', '43'), ('bbbb00', '9'), ('bbbb00', '2'), ('0000ff', '10'), ('0000ff', '8'), ('ff0000', '23'), ('ff0000', '13'), ('999900', '11'), ('999900', '44'), ('999900', '19'), ('999900', '14'), ('999900', '20'), ('999900', '39'), ('bbbb00', '9'), ('bbbb00', '2'), ('0000ff', '10'), ('0000ff', '8'), ('ff0000', '23'), ('ff0000', '13'), ('999900', '11'), ('999900', '44'), ('999900', '19'), ('999900', '14'), ('999900', '20'), ('999900', '39'), ('bbbb00', '37'), ('bbbb00', '53 BFD'), ('bbbb00', '1'), ('bbbb00', '5'), ('0000ff', '6'), ('0000ff', '5'), ('999900', '26'), ('999900', '26'), ('999900', '32'), ('999900', '20'), ('999900', '12'), ('999900', '16'), ('bbbb00', '37'), ('bbbb00', '53 BFD'), ('bbbb00', '1'), ('bbbb00', '5'), ('0000ff', '6'), ('0000ff', '5'), ('999900', '26'), ('999900', '26'), ('999900', '32'), ('999900', '20'), ('999900', '12'), ('999900', '16'), ('bbbb00', '7'), ('bbbb00', '7'), ('0000ff', '53 BFD'), ('0000ff', '6'), ('0000ff', '11'), ('0000ff', '11'), ('999900', '30'), ('999900', '29'), ('999900', '9'), ('999900', '26'), ('999900', '16'), ('999900', '31'), ('bbbb00', '7'), ('bbbb00', '7'), ('0000ff', '53 BFD'), ('0000ff', '6'), ('0000ff', '11'), ('0000ff', '11'), ('999900', '30'), ('999900', '29'), ('999900', '9'), ('999900', '26'), ('999900', '16'), ('999900', '31'), ('0000ff', '14'), ('0000ff', '16'), ('ff0000', '16'), ('ff0000', '10'), ('0000ff', '15'), ('0000ff', '2'), ('999900', '18'), ('999900', '42'), ('999900', '47'), ('999900', '17'), ('999900', '13'), ('999900', '8'), ('0000ff', '14'), ('0000ff', '16'), ('ff0000', '16'), ('ff0000', '10'), ('0000ff', '15'), ('0000ff', '2'), ('999900', '18'), ('999900', '42'), ('999900', '47'), ('999900', '17'), ('999900', '13'), ('999900', '8'), ('bbbb00', '1'), ('bbbb00', '3'), ('bbbb00', '17'), ('bbbb00', '12'), ('0000ff', '9'), ('0000ff', '6'), ('999900', '9'), ('999900', '37'), ('999900', '8'), ('999900', '34'), ('999900', '49'), ('999900', '40'), ('bbbb00', '1'), ('bbbb00', '3'), 
('bbbb00', '17'), ('bbbb00', '12'), ('0000ff', '9'), ('0000ff', '6'), ('999900', '9'), ('999900', '37'), ('999900', '8'), ('999900', '34'), ('999900', '49'), ('999900', '40'), ('0000ff', '30'), ('0000ff', '26'), ('0000ff', '7'), ('0000ff', '15'), ('ff0000', '3'), ('ff0000', '6'), ('999900', '32'), ('999900', '43'), ('999900', '42'), ('999900', '18'), ('999900', '10'), ('999900', '1'), ('0000ff', '30'), ('0000ff', '26'), ('0000ff', '7'), ('0000ff', '15'), ('ff0000', '3'), ('ff0000', '6'), ('999900', '32'), ('999900', '43'), ('999900', '42'), ('999900', '18'), ('999900', '10'), ('999900', '1'), ('ff0000', '20'), ('ff0000', '17'), ('0000ff', '11'), ('0000ff', '19'), ('bbbb00', '17'), ('bbbb00', '11'), ('999900', '10'), ('999900', '23'), ('999900', '22'), ('999900', '31'), ('999900', '3'), ('999900', '33'), ('ff0000', '20'), ('ff0000', '17'), ('0000ff', '11'), ('0000ff', '19'), ('bbbb00', '17'), ('bbbb00', '11'), ('999900', '10'), ('999900', '23'), ('999900', '22'), ('999900', '31'), ('999900', '3'), ('999900', '33'), ('ff0000', '9'), ('ff0000', '25'), ('0000ff', '14'), ('0000ff', '5'), ('0000ff', '21'), ('0000ff', '19'), ('999900', '29'), ('999900', '10'), ('999900', '21'), ('999900', '8'), ('999900', '37'), ('999900', '38'), ('ff0000', '9'), ('ff0000', '25'), ('0000ff', '14'), ('0000ff', '5'), ('0000ff', '21'), ('0000ff', '19'), ('999900', '29'), ('999900', '10'), ('999900', '21'), ('999900', '8'), ('999900', '37'), ('999900', '38'), ('ff0000', '32'), ('ff0000', '16'), ('0000ff', '16'), ('0000ff', '17'), ('0000ff', '5'), ('0000ff', '8'), ('999900', '5'), ('999900', '48'), ('999900', '1'), ('999900', '40'), ('999900', '53 BFD'), ('999900', '18'), ('ff0000', '32'), ('ff0000', '16'), ('0000ff', '16'), ('0000ff', '17'), ('0000ff', '5'), ('0000ff', '8'), ('999900', '5'), ('999900', '48'), ('999900', '1'), ('999900', '40'), ('999900', '53 BFD'), ('999900', '18'), ('0000ff', '8'), ('0000ff', '6'), ('ff0000', '22'), ('ff0000', '14'), ('bbbb00', '12'), ('bbbb00', '21'), 
('999900', '37'), ('999900', '33'), ('999900', '14'), ('999900', '5'), ('999900', '27'), ('999900', '47'), ('0000ff', '8'), ('0000ff', '6'), ('ff0000', '22'), ('ff0000', '14'), ('bbbb00', '12'), ('bbbb00', '21'), ('999900', '37'), ('999900', '33'), ('999900', '14'), ('999900', '5'), ('999900', '27'), ('999900', '47'), ('bbbb00', '2'), ('bbbb00', '28'), ('bbbb00', '13'), ('bbbb00', '9'), ('0000ff', '18'), ('0000ff', '18'), ('999900', '34'), ('999900', '13'), ('999900', '50'), ('999900', '32'), ('999900', '34'), ('999900', '14'), ('bbbb00', '2'), ('bbbb00', '28'), ('bbbb00', '13'), ('bbbb00', '9'), ('0000ff', '18'), ('0000ff', '18'), ('999900', '34'), ('999900', '13'), ('999900', '50'), ('999900', '32'), ('999900', '34'), ('999900', '14'), ('ff0000', '35'), ('ff0000', '5'), ('0000ff', '17'), ('0000ff', '11'), ('ff0000', '7'), ('ff0000', '23'), ('999900', '13'), ('999900', '35'), ('999900', '17'), ('999900', '35 SCP'), ('999900', '26'), ('999900', '36'), ('ff0000', '35'), ('ff0000', '5'), ('0000ff', '17'), ('0000ff', '11'), ('ff0000', '7'), ('ff0000', '23'), ('999900', '13'), ('999900', '35'), ('999900', '17'), ('999900', '35 SCP'), ('999900', '26'), ('999900', '36'), ('ff0000', '19'), ('ff0000', '14'), ('ff0000', '13'), ('ff0000', '21'), ('bbbb00', '21'), ('bbbb00', '6'), ('999900', '44'), ('999900', '28'), ('999900', '6'), ('999900', '16'), ('999900', '28'), ('999900', '45'), ('ff0000', '19'), ('ff0000', '14'), ('ff0000', '13'), ('ff0000', '21'), ('bbbb00', '21'), ('bbbb00', '6'), ('999900', '44'), ('999900', '28'), ('999900', '6'), ('999900', '16'), ('999900', '28'), ('999900', '45'), ('ff0000', '23'), ('ff0000', '24'), ('0000ff', '12'), ('0000ff', '12'), ('0000ff', '8'), ('0000ff', '20'), ('999900', '12'), ('999900', '24'), ('999900', '37'), ('999900', '42'), ('999900', '7'), ('999900', '41'), ('ff0000', '23'), ('ff0000', '24'), ('0000ff', '12'), ('0000ff', '12'), ('0000ff', '8'), ('0000ff', '20'), ('999900', '12'), ('999900', '24'), ('999900', '37'), ('999900', 
'42'), ('999900', '7'), ('999900', '41'), ('ff0000', '26'), ('ff0000', '12'), ('bbbb00', '11'), ('bbbb00', '10'), ('ff0000', '12'), ('ff0000', '8'), ('999900', '21'), ('999900', '21 SCP'), ('999900', '44'), ('999900', '50'), ('999900', '31'), ('999900', '26'), ('ff0000', '26'), ('ff0000', '12'), ('bbbb00', '11'), ('bbbb00', '10'), ('ff0000', '12'), ('ff0000', '8'), ('999900', '21'), ('999900', '21 SCP'), ('999900', '44'), ('999900', '50'), ('999900', '31'), ('999900', '26'), ('0000ff', '4'), ('0000ff', '15'), ('bbbb00', '7'), ('bbbb00', '7'), ('ff0000', '17'), ('ff0000', '3'), ('999900', '2'), ('999900', '52'), ('999900', '34'), ('999900', '48'), ('999900', '40'), ('999900', '37'), ('0000ff', '4'), ('0000ff', '15'), ('bbbb00', '7'), ('bbbb00', '7'), ('ff0000', '17'), ('ff0000', '3'), ('999900', '2'), ('999900', '52'), ('999900', '34'), ('999900', '48'), ('999900', '40'), ('999900', '37'), ('0000ff', '9'), ('0000ff', '22'), ('ff0000', '11'), ('ff0000', '4'), ('bbbb00', '7'), ('bbbb00', '9'), ('999900', '40'), ('999900', '19'), ('999900', '38'), ('999900', '41'), ('999900', '38'), ('999900', '24'), ('0000ff', '9'), ('0000ff', '22'), ('ff0000', '11'), ('ff0000', '4'), ('bbbb00', '7'), ('bbbb00', '9'), ('999900', '40'), ('999900', '19'), ('999900', '38'), ('999900', '41'), ('999900', '38'), ('999900', '24'), ('0000ff', '36'), ('0000ff', '9'), ('0000ff', '13'), ('0000ff', '21'), ('ff0000', '20'), ('ff0000', '5'), ('999900', '39'), ('999900', '32'), ('999900', '16'), ('999900', '35'), ('999900', '11'), ('999900', '50'), ('0000ff', '36'), ('0000ff', '9'), ('0000ff', '13'), ('0000ff', '21'), ('ff0000', '20'), ('ff0000', '5'), ('999900', '39'), ('999900', '32'), ('999900', '16'), ('999900', '35'), ('999900', '11'), ('999900', '50'), ('ff0000', '29'), ('ff0000', '13'), ('0000ff', '53 BFD'), ('0000ff', '10'), ('0000ff', '12'), ('0000ff', '16'), ('999900', '48'), ('999900', '31'), ('999900', '25'), ('999900', '30'), ('999900', '5'), ('999900', '30'), ('ff0000', '29'), 
('ff0000', '13'), ('0000ff', '53 BFD'), ('0000ff', '10'), ('0000ff', '12'), ('0000ff', '16'), ('999900', '48'), ('999900', '31'), ('999900', '25'), ('999900', '30'), ('999900', '5'), ('999900', '30'), ('0000ff', '1'), ('0000ff', '19'), ('0000ff', '6'), ('0000ff', '25'), ('0000ff', '20'), ('0000ff', '35'), ('999900', '45'), ('999900', '12'), ('999900', '11'), ('999900', '28'), ('999900', '47'), ('999900', '35'), ('0000ff', '1'), ('0000ff', '19'), ('0000ff', '6'), ('0000ff', '25'), ('0000ff', '20'), ('0000ff', '35'), ('999900', '45'), ('999900', '12'), ('999900', '11'), ('999900', '28'), ('999900', '47'), ('999900', '35'), ('0000ff', '24'), ('0000ff', '30'), ('bbbb00', '10'), ('bbbb00', '11'), ('ff0000', '18'), ('ff0000', '17'), ('999900', '42'), ('999900', '1'), ('999900', '27'), ('999900', '38'), ('999900', '41'), ('999900', '21'), ('0000ff', '24'), ('0000ff', '30'), ('bbbb00', '10'), ('bbbb00', '11'), ('ff0000', '18'), ('ff0000', '17'), ('999900', '42'), ('999900', '1'), ('999900', '27'), ('999900', '38'), ('999900', '41'), ('999900', '21'), ('0000ff', '34'), ('0000ff', '25'), ('0000ff', '8'), ('0000ff', '20'), ('ff0000', '10'), ('ff0000', '11'), ('999900', '28'), ('999900', '20'), ('999900', '43'), ('999900', '44'), ('999900', '24'), ('999900', '28'), ('0000ff', '34'), ('0000ff', '25'), ('0000ff', '8'), ('0000ff', '20'), ('ff0000', '10'), ('ff0000', '11'), ('999900', '28'), ('999900', '20'), ('999900', '43'), ('999900', '44'), ('999900', '24'), ('999900', '28'), ('bbbb00', '13'), ('bbbb00', '1'), ('ff0000', '5'), ('ff0000', '3'), ('ff0000', '8'), ('ff0000', '2'), ('999900', '53 UFD'), ('999900', '36'), ('999900', '23'), ('999900', '52'), ('999900', '44'), ('999900', '44'), ('bbbb00', '13'), ('bbbb00', '1'), ('ff0000', '5'), ('ff0000', '3'), ('ff0000', '8'), ('ff0000', '2'), ('999900', '53 UFD'), ('999900', '36'), ('999900', '23'), ('999900', '52'), ('999900', '44'), ('999900', '44'), ('0000ff', '28'), ('0000ff', '1'), ('bbbb00', '6'), ('bbbb00', '16'), ('ff0000', 
'29'), ('ff0000', '10'), ('999900', '41'), ('999900', '25'), ('999900', '40'), ('999900', '39'), ('999900', '25'), ('999900', '29'), ('0000ff', '28'), ('0000ff', '1'), ('bbbb00', '6'), ('bbbb00', '16'), ('ff0000', '29'), ('ff0000', '10'), ('999900', '41'), ('999900', '25'), ('999900', '40'), ('999900', '39'), ('999900', '25'), ('999900', '29'), ('bbbb00', '12'), ('bbbb00', '53 BFD'), ('ff0000', '14'), ('ff0000', '19'), ('0000ff', '4'), ('0000ff', '14'), ('999900', '20'), ('999900', '41'), ('999900', '24'), ('999900', '43'), ('999900', '36'), ('999900', '53'), ('bbbb00', '12'), ('bbbb00', '53 BFD'), ('ff0000', '14'), ('ff0000', '19'), ('0000ff', '4'), ('0000ff', '14'), ('999900', '20'), ('999900', '41'), ('999900', '24'), ('999900', '43'), ('999900', '36'), ('999900', '53'), ('ff0000', '1'), ('ff0000', '11'), ('0000ff', '15'), ('0000ff', '24'), ('ff0000', '26'), ('ff0000', '22'), ('999900', '33'), ('999900', '4'), ('999900', '51'), ('999900', '45'), ('999900', '53 BFD'), ('999900', '25'), ('ff0000', '1'), ('ff0000', '11'), ('0000ff', '15'), ('0000ff', '24'), ('ff0000', '26'), ('ff0000', '22'), ('999900', '33'), ('999900', '4'), ('999900', '51'), ('999900', '45'), ('999900', '53 BFD'), ('999900', '25'), ('0000ff', '11'), ('0000ff', '4'), ('bbbb00', '25'), ('bbbb00', '17'), ('0000ff', '25'), ('0000ff', '27'), ('999900', '43'), ('999900', '16'), ('999900', '26'), ('999900', '46'), ('999900', '23'), ('999900', '42'), ('0000ff', '11'), ('0000ff', '4'), ('bbbb00', '25'), ('bbbb00', '17'), ('0000ff', '25'), ('0000ff', '27'), ('999900', '43'), ('999900', '16'), ('999900', '26'), ('999900', '46'), ('999900', '23'), ('999900', '42'), ('bbbb00', '27'), ('bbbb00', '14'), ('ff0000', '9'), ('ff0000', '17'), ('ff0000', '13'), ('ff0000', '15'), ('999900', '25'), ('999900', '51'), ('999900', '48'), ('999900', '27'), ('999900', '48'), ('999900', '27'), ('bbbb00', '27'), ('bbbb00', '14'), ('ff0000', '9'), ('ff0000', '17'), ('ff0000', '13'), ('ff0000', '15'), ('999900', '25'), 
('999900', '51'), ('999900', '48'), ('999900', '27'), ('999900', '48'), ('999900', '27'), ('ff0000', '6'), ('ff0000', '9'), ('0000ff', '27'), ('0000ff', '22'), ('bbbb00', '36'), ('bbbb00', '16'), ('999900', '50'), ('999900', '50'), ('999900', '36'), ('999900', '24'), ('999900', '35'), ('999900', '23'), ('ff0000', '6'), ('ff0000', '9'), ('0000ff', '27'), ('0000ff', '22'), ('bbbb00', '36'), ('bbbb00', '16'), ('999900', '50'), ('999900', '50'), ('999900', '36'), ('999900', '24'), ('999900', '35'), ('999900', '23'), ('ff0000', '30'), ('ff0000', '10'), ('bbbb00', '9'), ('bbbb00', '18'), ('bbbb00', '23'), ('bbbb00', '15'), ('999900', '47'), ('999900', '17'), ('999900', '49'), ('999900', '49'), ('999900', '19'), ('999900', '49'), ('ff0000', '30'), ('ff0000', '10'), ('bbbb00', '9'), ('bbbb00', '18'), ('bbbb00', '23'), ('bbbb00', '15'), ('999900', '47'), ('999900', '17'), ('999900', '49'), ('999900', '49'), ('999900', '19'), ('999900', '49'), ('ff0000', '12'), ('ff0000', '28'), ('bbbb00', '5'), ('bbbb00', '8'), ('bbbb00', '25'), ('bbbb00', '12'), ('999900', '38'), ('999900', '39'), ('999900', '45'), ('999900', '36'), ('999900', '43'), ('999900', '46'), ('ff0000', '12'), ('ff0000', '28'), ('bbbb00', '5'), ('bbbb00', '8'), ('bbbb00', '25'), ('bbbb00', '12'), ('999900', '38'), ('999900', '39'), ('999900', '45'), ('999900', '36'), ('999900', '43'), ('999900', '46'), ('ff0000', '3'), ('ff0000', '53 BFD'), ('ff0000', '17'), ('ff0000', '22'), ('bbbb00', '14'), ('bbbb00', '13'), ('999900', '31'), ('999900', '49'), ('999900', '53 UFD'), ('999900', '51'), ('999900', '46'), ('999900', '22'), ('ff0000', '3'), ('ff0000', '53 BFD'), ('ff0000', '17'), ('ff0000', '22'), ('bbbb00', '14'), ('bbbb00', '13'), ('999900', '31'), ('999900', '49'), ('999900', '53 UFD'), ('999900', '51'), ('999900', '46'), ('999900', '22'), ('bbbb00', '20'), ('bbbb00', '12'), ('bbbb00', '20'), ('bbbb00', '6'), ('0000ff', '27'), ('0000ff', '9'), ('999900', '46'), ('999900', '47'), ('999900', '46'), ('999900', '47'), 
('999900', '39'), ('999900', '48'), ('bbbb00', '20'), ('bbbb00', '12'), ('bbbb00', '20'), ('bbbb00', '6'), ('0000ff', '27'), ('0000ff', '9'), ('999900', '46'), ('999900', '47'), ('999900', '46'), ('999900', '47'), ('999900', '39'), ('999900', '48'), ('0000ff', '19'), ('0000ff', '8'), ('ff0000', '27'), ('ff0000', '15'), ('bbbb00', '30'), ('bbbb00', '18'), ('999999', '1'), ('999999', '2'), ('999999', '15'), ('999999', '6'), ('999999', '15'), ('999999', '13'), ('0000ff', '19'), ('0000ff', '8'), ('ff0000', '27'), ('ff0000', '15'), ('bbbb00', '30'), ('bbbb00', '18'), ('999999', '1'), ('999999', '2'), ('999999', '15'), ('999999', '6'), ('999999', '15'), ('999999', '13'), ('bbbb00', '14'), ('bbbb00', '24'), ('0000ff', '23'), ('0000ff', '35'), ('bbbb00', '11'), ('bbbb00', '30'), ('999999', '3'), ('999999', '3'), ('999999', '13'), ('999999', '1'), ('999999', '7'), ('999999', '16'), ('bbbb00', '14'), ('bbbb00', '24'), ('0000ff', '23'), ('0000ff', '35'), ('bbbb00', '11'), ('bbbb00', '30'), ('999999', '3'), ('999999', '3'), ('999999', '13'), ('999999', '1'), ('999999', '7'), ('999999', '16'), ('bbbb00', '17'), ('bbbb00', '53 BFD'), ('bbbb00', '24'), ('bbbb00', '30'), ('0000ff', '19'), ('0000ff', '22'), ('999999', '25'), ('999999', '7'), ('999999', '6'), ('999999', '8'), ('999999', '9'), ('999999', '17'), ('bbbb00', '17'), ('bbbb00', '53 BFD'), ('bbbb00', '24'), ('bbbb00', '30'), ('0000ff', '19'), ('0000ff', '22'), ('999999', '25'), ('999999', '7'), ('999999', '6'), ('999999', '8'), ('999999', '9'), ('999999', '17'), ('0000ff', '44'), ('0000ff', '18'), ('ff0000', '12'), ('ff0000', '53 UFD'), ('ff0000', '6'), ('ff0000', '30'), ('999999', '19'), ('999999', '19'), ('999999', '2'), ('999999', '7'), ('999999', '5'), ('999999', '44'), ('0000ff', '44'), ('0000ff', '18'), ('ff0000', '12'), ('ff0000', '53 UFD'), ('ff0000', '6'), ('ff0000', '30'), ('999999', '19'), ('999999', '19'), ('999999', '2'), ('999999', '7'), ('999999', '5'), ('999999', '44'), ('ff0000', '46'), ('ff0000', '8'), 
('ff0000', '32'), ('ff0000', '11'), ('bbbb00', '9'), ('bbbb00', '24'), ('999999', '13'), ('999999', '28'), ('999999', '37'), ('999999', '24'), ('999999', '13'), ('999999', '1'), ('ff0000', '46'), ('ff0000', '8'), ('ff0000', '32'), ('ff0000', '11'), ('bbbb00', '9'), ('bbbb00', '24'), ('999999', '13'), ('999999', '28'), ('999999', '37'), ('999999', '24'), ('999999', '13'), ('999999', '1'), ('ff0000', '7'), ('ff0000', '27'), ('bbbb00', '21'), ('bbbb00', '22'), ('bbbb00', '10'), ('bbbb00', '29'), ('999999', '53 BFD'), ('999999', '33'), ('999999', '1'), ('999999', '37'), ('999999', '11'), ('999999', '3'), ('ff0000', '7'), ('ff0000', '27'), ('bbbb00', '21'), ('bbbb00', '22'), ('bbbb00', '10'), ('bbbb00', '29'), ('999999', '53 BFD'), ('999999', '33'), ('999999', '1'), ('999999', '37'), ('999999', '11'), ('999999', '3'), ('bbbb00', '8'), ('bbbb00', '29'), ('ff0000', '19'), ('ff0000', '32'), ('ff0000', '28'), ('ff0000', '21'), ('999999', '21'), ('999999', '6'), ('999999', '30'), ('999999', '9'), ('999999', '12'), ('999999', '20'), ('bbbb00', '8'), ('bbbb00', '29'), ('ff0000', '19'), ('ff0000', '32'), ('ff0000', '28'), ('ff0000', '21'), ('999999', '21'), ('999999', '6'), ('999999', '30'), ('999999', '9'), ('999999', '12'), ('999999', '20'), ('0000ff', '40'), ('0000ff', '34'), ('0000ff', '53 BFD'), ('0000ff', '2'), ('bbbb00', '18'), ('bbbb00', '32'), ('999999', '6'), ('999999', '30'), ('999999', '7'), ('999999', '14'), ('999999', '20'), ('999999', '11'), ('0000ff', '40'), ('0000ff', '34'), ('0000ff', '53 BFD'), ('0000ff', '2'), ('bbbb00', '18'), ('bbbb00', '32'), ('999999', '6'), ('999999', '30'), ('999999', '7'), ('999999', '14'), ('999999', '20'), ('999999', '11'), ('0000ff', '43'), ('0000ff', '13'), ('ff0000', '21'), ('ff0000', '23'), ('bbbb00', '22'), ('bbbb00', '25'), ('999999', '9'), ('999999', '22'), ('999999', '12'), ('999999', '16'), ('999999', '17'), ('999999', '21'), ('0000ff', '43'), ('0000ff', '13'), ('ff0000', '21'), ('ff0000', '23'), ('bbbb00', '22'), 
('bbbb00', '25'), ('999999', '9'), ('999999', '22'), ('999999', '12'), ('999999', '16'), ('999999', '17'), ('999999', '21'), ('0000ff', '7'), ('0000ff', '32'), ('ff0000', '30'), ('ff0000', '18'), ('bbbb00', '13'), ('bbbb00', '33'), ('999999', '16'), ('999999', '10'), ('999999', '5'), ('999999', '42'), ('999999', '53 BFD'), ('999999', '8'), ('0000ff', '7'), ('0000ff', '32'), ('ff0000', '30'), ('ff0000', '18'), ('bbbb00', '13'), ('bbbb00', '33'), ('999999', '16'), ('999999', '10'), ('999999', '5'), ('999999', '42'), ('999999', '53 BFD'), ('999999', '8'), ('bbbb00', '29'), ('bbbb00', '31'), ('0000ff', '31'), ('0000ff', '29'), ('ff0000', '24'), ('ff0000', '19'), ('999999', '36'), ('999999', '4'), ('999999', '25'), ('999999', '29'), ('999999', '1'), ('999999', '4'), ('bbbb00', '29'), ('bbbb00', '31'), ('0000ff', '31'), ('0000ff', '29'), ('ff0000', '24'), ('ff0000', '19'), ('999999', '36'), ('999999', '4'), ('999999', '25'), ('999999', '29'), ('999999', '1'), ('999999', '4'), ('bbbb00', '28'), ('bbbb00', '53 BFD'), ('ff0000', '8'), ('ff0000', '27'), ('ff0000', '19'), ('ff0000', '31'), ('999999', '18'), ('999999', '24'), ('999999', '33'), ('999999', '26'), ('999999', '2'), ('999999', '15'), ('bbbb00', '28'), ('bbbb00', '53 BFD'), ('ff0000', '8'), ('ff0000', '27'), ('ff0000', '19'), ('ff0000', '31'), ('999999', '18'), ('999999', '24'), ('999999', '33'), ('999999', '26'), ('999999', '2'), ('999999', '15'), ('ff0000', '40'), ('ff0000', '19'), ('ff0000', '20'), ('ff0000', '29'), ('0000ff', '35'), ('0000ff', '42'), ('999999', '20'), ('999999', '13'), ('999999', '10'), ('999999', '22'), ('999999', '4'), ('999999', '26'), ('ff0000', '40'), ('ff0000', '19'), ('ff0000', '20'), ('ff0000', '29'), ('0000ff', '35'), ('0000ff', '42'), ('999999', '20'), ('999999', '13'), ('999999', '10'), ('999999', '22'), ('999999', '4'), ('999999', '26'), ('ff0000', '10'), ('ff0000', '23'), ('0000ff', '22'), ('0000ff', '34'), ('bbbb00', '37'), ('bbbb00', '17'), ('999999', '14'), ('999999', '17'), 
('999999', '18'), ('999999', '15'), ('999999', '32'), ('999999', '30'), ('ff0000', '10'), ('ff0000', '23'), ('0000ff', '22'), ('0000ff', '34'), ('bbbb00', '37'), ('bbbb00', '17'), ('999999', '14'), ('999999', '17'), ('999999', '18'), ('999999', '15'), ('999999', '32'), ('999999', '30'), ('0000ff', '37'), ('0000ff', '11'), ('bbbb00', '26'), ('bbbb00', '29'), ('ff0000', '36'), ('ff0000', '27'), ('999999', '44'), ('999999', '18'), ('999999', '3'), ('999999', '19'), ('999999', '3'), ('999999', '27'), ('0000ff', '37'), ('0000ff', '11'), ('bbbb00', '26'), ('bbbb00', '29'), ('ff0000', '36'), ('ff0000', '27'), ('999999', '44'), ('999999', '18'), ('999999', '3'), ('999999', '19'), ('999999', '3'), ('999999', '27'), ('ff0000', '24'), ('ff0000', '15'), ('ff0000', '26'), ('ff0000', '34'), ('bbbb00', '19'), ('bbbb00', '31'), ('999999', '22'), ('999999', '39'), ('999999', '35'), ('999999', '2'), ('999999', '14'), ('999999', '12'), ('ff0000', '24'), ('ff0000', '15'), ('ff0000', '26'), ('ff0000', '34'), ('bbbb00', '19'), ('bbbb00', '31'), ('999999', '22'), ('999999', '39'), ('999999', '35'), ('999999', '2'), ('999999', '14'), ('999999', '12'), ('0000ff', '32'), ('0000ff', '31'), ('0000ff', '53 BFD'), ('0000ff', '23'), ('0000ff', '17'), ('0000ff', '21'), ('999999', '31'), ('999999', '8'), ('999999', '4'), ('999999', '27'), ('999999', '22'), ('999999', '18'), ('0000ff', '32'), ('0000ff', '31'), ('0000ff', '53 BFD'), ('0000ff', '23'), ('0000ff', '17'), ('0000ff', '21'), ('999999', '31'), ('999999', '8'), ('999999', '4'), ('999999', '27'), ('999999', '22'), ('999999', '18'), ('bbbb00', '11'), ('bbbb00', '6'), ('ff0000', '23'), ('ff0000', '33'), ('ff0000', '25'), ('ff0000', '20'), ('999999', '2'), ('999999', '43'), ('999999', '34'), ('999999', '36'), ('999999', '8'), ('999999', '40'), ('bbbb00', '11'), ('bbbb00', '6'), ('ff0000', '23'), ('ff0000', '33'), ('ff0000', '25'), ('ff0000', '20'), ('999999', '2'), ('999999', '43'), ('999999', '34'), ('999999', '36'), ('999999', '8'), 
('999999', '40'), ('bbbb00', '24'), ('bbbb00', '32'), ('bbbb00', '27'), ('bbbb00', '19'), ('0000ff', '26'), ('0000ff', '30'), ('999999', '29'), ('999999', '15'), ('999999', '19'), ('999999', '12'), ('999999', '33'), ('999999', '5'), ('bbbb00', '24'), ('bbbb00', '32'), ('bbbb00', '27'), ('bbbb00', '19'), ('0000ff', '26'), ('0000ff', '30'), ('999999', '29'), ('999999', '15'), ('999999', '19'), ('999999', '12'), ('999999', '33'), ('999999', '5'), ('0000ff', '29'), ('0000ff', '5'), ('bbbb00', '32'), ('bbbb00', '34'), ('0000ff', '44'), ('0000ff', '32'), ('999999', '5'), ('999999', '1'), ('999999', '11'), ('999999', '32'), ('999999', '26'), ('999999', '38'), ('0000ff', '29'), ('0000ff', '5'), ('bbbb00', '32'), ('bbbb00', '34'), ('0000ff', '44'), ('0000ff', '32'), ('999999', '5'), ('999999', '1'), ('999999', '11'), ('999999', '32'), ('999999', '26'), ('999999', '38'), ('0000ff', '25'), ('0000ff', '38'), ('bbbb00', '28'), ('bbbb00', '24'), ('0000ff', '31'), ('0000ff', '28'), ('999999', '15'), ('999999', '20'), ('999999', '21'), ('999999', '39'), ('999999', '6'), ('999999', '10'), ('0000ff', '25'), ('0000ff', '38'), ('bbbb00', '28'), ('bbbb00', '24'), ('0000ff', '31'), ('0000ff', '28'), ('999999', '15'), ('999999', '20'), ('999999', '21'), ('999999', '39'), ('999999', '6'), ('999999', '10'), ('0000ff', '22'), ('0000ff', '28'), ('bbbb00', '14'), ('bbbb00', '20'), ('ff0000', '11'), ('ff0000', '16'), ('999999', '10'), ('999999', '27'), ('999999', '26'), ('999999', '44'), ('999999', '53 BFD'), ('999999', '19'), ('0000ff', '22'), ('0000ff', '28'), ('bbbb00', '14'), ('bbbb00', '20'), ('ff0000', '11'), ('ff0000', '16'), ('999999', '10'), ('999999', '27'), ('999999', '26'), ('999999', '44'), ('999999', '53 BFD'), ('999999', '19'), ('bbbb00', '34'), ('bbbb00', '20'), ('0000ff', '24'), ('0000ff', '26'), ('ff0000', '27'), ('ff0000', '26'), ('999999', '38'), ('999999', '5'), ('999999', '14'), ('999999', '13'), ('999999', '21'), ('999999', '34'), ('bbbb00', '34'), ('bbbb00', '20'), 
('0000ff', '24'), ('0000ff', '26'), ('ff0000', '27'), ('ff0000', '26'), ('999999', '38'), ('999999', '5'), ('999999', '14'), ('999999', '13'), ('999999', '21'), ('999999', '34'), ('ff0000', '36'), ('ff0000', '21'), ('ff0000', '15'), ('ff0000', '13'), ('ff0000', '5'), ('ff0000', '28'), ('999999', '4'), ('999999', '40'), ('999999', '48'), ('999999', '5'), ('999999', '53 RET'), ('999999', '32'), ('ff0000', '36'), ('ff0000', '21'), ('ff0000', '15'), ('ff0000', '13'), ('ff0000', '5'), ('ff0000', '28'), ('999999', '4'), ('999999', '40'), ('999999', '48'), ('999999', '5'), ('999999', '53 RET'), ('999999', '32'), ('ff0000', '11'), ('ff0000', '26'), ('ff0000', '46'), ('ff0000', '16'), ('ff0000', '35'), ('ff0000', '53 UFD'), ('999999', '12'), ('999999', '46'), ('999999', '31'), ('999999', '18'), ('999999', '16'), ('999999', '2'), ('ff0000', '11'), ('ff0000', '26'), ('ff0000', '46'), ('ff0000', '16'), ('ff0000', '35'), ('ff0000', '53 UFD'), ('999999', '12'), ('999999', '46'), ('999999', '31'), ('999999', '18'), ('999999', '16'), ('999999', '2'), ('bbbb00', '41'), ('bbbb00', '8'), ('ff0000', '31'), ('ff0000', '38.6 DPI'), ('0000ff', '24'), ('0000ff', '12'), ('999999', '17'), ('999999', '16'), ('999999', '17'), ('999999', '28'), ('999999', '53 RET'), ('999999', '22'), ('bbbb00', '41'), ('bbbb00', '8'), ('ff0000', '31'), ('ff0000', '38.6 DPI'), ('0000ff', '24'), ('0000ff', '12'), ('999999', '17'), ('999999', '16'), ('999999', '17'), ('999999', '28'), ('999999', '53 RET'), ('999999', '22'), ('ff0000', '31'), ('ff0000', '29'), ('bbbb00', '18'), ('bbbb00', '21'), ('ff0000', '31'), ('ff0000', '25'), ('999999', '27'), ('999999', '25'), ('999999', '16'), ('999999', '33'), ('999999', '23'), ('999999', '7'), ('ff0000', '31'), ('ff0000', '29'), ('bbbb00', '18'), ('bbbb00', '21'), ('ff0000', '31'), ('ff0000', '25'), ('999999', '27'), ('999999', '25'), ('999999', '16'), ('999999', '33'), ('999999', '23'), ('999999', '7'), ('0000ff', '41'), ('0000ff', '43'), ('0000ff', '19'), ('0000ff', 
'13'), ('ff0000', '14'), ('ff0000', '14'), ('999999', '39'), ('999999', '14'), ('999999', '9'), ('999999', '50'), ('999999', '28'), ('999999', '33'), ('0000ff', '41'), ('0000ff', '43'), ('0000ff', '19'), ('0000ff', '13'), ('ff0000', '14'), ('ff0000', '14'), ('999999', '39'), ('999999', '14'), ('999999', '9'), ('999999', '50'), ('999999', '28'), ('999999', '33'), ('0000ff', '20'), ('0000ff', '35'), ('bbbb00', '16'), ('bbbb00', '25'), ('0000ff', '22'), ('0000ff', '29'), ('999999', '11'), ('999999', '12'), ('999999', '38'), ('999999', '17'), ('999999', '53 BFD'), ('999999', '35'), ('0000ff', '20'), ('0000ff', '35'), ('bbbb00', '16'), ('bbbb00', '25'), ('0000ff', '22'), ('0000ff', '29'), ('999999', '11'), ('999999', '12'), ('999999', '38'), ('999999', '17'), ('999999', '53 BFD'), ('999999', '35'), ('0000ff', '26'), ('0000ff', '29'), ('0000ff', '53 BFD'), ('0000ff', '14'), ('bbbb00', '24'), ('bbbb00', '20'), ('999999', '33'), ('999999', '35'), ('999999', '8'), ('999999', '35'), ('999999', '19'), ('999999', '24'), ('0000ff', '26'), ('0000ff', '29'), ('0000ff', '53 BFD'), ('0000ff', '14'), ('bbbb00', '24'), ('bbbb00', '20'), ('999999', '33'), ('999999', '35'), ('999999', '8'), ('999999', '35'), ('999999', '19'), ('999999', '24'), ('0000ff', '38'), ('0000ff', '27'), ('ff0000', '18'), ('ff0000', '26'), ('bbbb00', '32'), ('bbbb00', '22'), ('999999', '32'), ('999999', '21'), ('999999', '24'), ('999999', '23'), ('999999', '10'), ('999999', '39'), ('0000ff', '38'), ('0000ff', '27'), ('ff0000', '18'), ('ff0000', '26'), ('bbbb00', '32'), ('bbbb00', '22'), ('999999', '32'), ('999999', '21'), ('999999', '24'), ('999999', '23'), ('999999', '10'), ('999999', '39'), ('0000ff', '33'), ('0000ff', '23'), ('bbbb00', '29'), ('bbbb00', '38'), ('bbbb00', '28'), ('bbbb00', '26'), ('999999', '28'), ('999999', '26'), ('999999', '29'), ('999999', '4'), ('999999', '25'), ('999999', '31'), ('0000ff', '33'), ('0000ff', '23'), ('bbbb00', '29'), ('bbbb00', '38'), ('bbbb00', '28'), ('bbbb00', '26'), 
('999999', '28'), ('999999', '26'), ('999999', '29'), ('999999', '4'), ('999999', '25'), ('999999', '31'), ('bbbb00', '23'), ('bbbb00', '21'), ('bbbb00', '19'), ('bbbb00', '28'), ('bbbb00', '26'), ('bbbb00', '14'), ('999999', '53 BFD'), ('999999', '53 BFD'), ('999999', '23'), ('999999', '11'), ('999999', '18'), ('999999', '43'), ('bbbb00', '23'), ('bbbb00', '21'), ('bbbb00', '19'), ('bbbb00', '28'), ('bbbb00', '26'), ('bbbb00', '14'), ('999999', '53 BFD'), ('999999', '53 BFD'), ('999999', '23'), ('999999', '11'), ('999999', '18'), ('999999', '43'), ('bbbb00', '16'), ('bbbb00', '53 BFD'), ('ff0000', '25'), ('ff0000', '40'), ('ff0000', '21'), ('ff0000', '24'), ('999999', '8'), ('999999', '37'), ('999999', '46'), ('999999', '34'), ('999999', '53 BFD'), ('999999', '6'), ('bbbb00', '16'), ('bbbb00', '53 BFD'), ('ff0000', '25'), ('ff0000', '40'), ('ff0000', '21'), ('ff0000', '24'), ('999999', '8'), ('999999', '37'), ('999999', '46'), ('999999', '34'), ('999999', '53 BFD'), ('999999', '6'), ('ff0000', '27'), ('ff0000', '20'), ('ff0000', '33'), ('ff0000', '24'), ('bbbb00', '16'), ('bbbb00', '23'), ('999999', '26'), ('999999', '32'), ('999999', '41'), ('999999', '46'), ('999999', '30'), ('999999', '29'), ('ff0000', '27'), ('ff0000', '20'), ('ff0000', '33'), ('ff0000', '24'), ('bbbb00', '16'), ('bbbb00', '23'), ('999999', '26'), ('999999', '32'), ('999999', '41'), ('999999', '46'), ('999999', '30'), ('999999', '29'), ('bbbb00', '3'), ('bbbb00', '18'), ('ff0000', '43'), ('ff0000', '37.6 DPI'), ('0000ff', '40'), ('0000ff', '31'), ('999999', '37'), ('999999', '11'), ('999999', '28'), ('999999', '41'), ('999999', '29'), ('999999', '36'), ('bbbb00', '3'), ('bbbb00', '18'), ('ff0000', '43'), ('ff0000', '37.6 DPI'), ('0000ff', '40'), ('0000ff', '31'), ('999999', '37'), ('999999', '11'), ('999999', '28'), ('999999', '41'), ('999999', '29'), ('999999', '36'), ('0000ff', '18'), ('0000ff', '14'), ('bbbb00', '30'), ('bbbb00', '32'), ('ff0000', '40'), ('ff0000', '38'), ('999999', '7'), 
('999999', '34'), ('999999', '22'), ('999999', '40'), ('999999', '37'), ('999999', '50'), ('0000ff', '18'), ('0000ff', '14'), ('bbbb00', '30'), ('bbbb00', '32'), ('ff0000', '40'), ('ff0000', '38'), ('999999', '7'), ('999999', '34'), ('999999', '22'), ('999999', '40'), ('999999', '37'), ('999999', '50'), ('ff0000', '47'), ('ff0000', '53 BFD'), ('ff0000', '28'), ('ff0000', '28'), ('0000ff', '29'), ('0000ff', '13'), ('999999', '24'), ('999999', '41'), ('999999', '47'), ('999999', '30'), ('999999', '40'), ('999999', '9'), ('ff0000', '47'), ('ff0000', '53 BFD'), ('ff0000', '28'), ('ff0000', '28'), ('0000ff', '29'), ('0000ff', '13'), ('999999', '24'), ('999999', '41'), ('999999', '47'), ('999999', '30'), ('999999', '40'), ('999999', '9'), ('bbbb00', '35'), ('bbbb00', '15'), ('0000ff', '20'), ('0000ff', '18'), ('0000ff', '36'), ('0000ff', '25'), ('999999', '53 RET'), ('999999', '45'), ('999999', '42'), ('999999', '3'), ('999999', '53 BFD'), ('999999', '41'), ('bbbb00', '35'), ('bbbb00', '15'), ('0000ff', '20'), ('0000ff', '18'), ('0000ff', '36'), ('0000ff', '25'), ('999999', '53 RET'), ('999999', '45'), ('999999', '42'), ('999999', '3'), ('999999', '53 BFD'), ('999999', '41'), ('ff0000', '51'), ('ff0000', '22'), ('0000ff', '25'), ('0000ff', '30'), ('ff0000', '16'), ('ff0000', '12'), ('999999', '43'), ('999999', '44'), ('999999', '32'), ('999999', '43'), ('999999', '36'), ('999999', '46'), ('ff0000', '51'), ('ff0000', '22'), ('0000ff', '25'), ('0000ff', '30'), ('ff0000', '16'), ('ff0000', '12'), ('999999', '43'), ('999999', '44'), ('999999', '32'), ('999999', '43'), ('999999', '36'), ('999999', '46'), ('bbbb00', '45'), ('bbbb00', '23'), ('ff0000', '24'), ('ff0000', '30'), ('ff0000', '30'), ('ff0000', '18'), ('999999', '41'), ('999999', '38'), ('999999', '39'), ('999999', '47'), ('999999', '53 BFD'), ('999999', '14'), ('bbbb00', '45'), ('bbbb00', '23'), ('ff0000', '24'), ('ff0000', '30'), ('ff0000', '30'), ('ff0000', '18'), ('999999', '41'), ('999999', '38'), ('999999', 
'39'), ('999999', '47'), ('999999', '53 BFD'), ('999999', '14'), ('0000ff', '15'), ('0000ff', '53 BFD'), ('0000ff', '35'), ('0000ff', '27'), ('0000ff', '34'), ('0000ff', '37'), ('999999', '23'), ('999999', '53 BFD'), ('999999', '27'), ('999999', '31'), ('999999', '53 BFD'), ('999999', '25'), ('0000ff', '15'), ('0000ff', '53 BFD'), ('0000ff', '35'), ('0000ff', '27'), ('0000ff', '34'), ('0000ff', '37'), ('999999', '23'), ('999999', '53 BFD'), ('999999', '27'), ('999999', '31'), ('999999', '53 BFD'), ('999999', '25'), ('0000ff', '21'), ('0000ff', '10'), ('0000ff', '26'), ('0000ff', '28'), ('0000ff', '38'), ('0000ff', '40'), ('999999', '34'), ('999999', '53 BFD'), ('999999', '49'), ('999999', '25'), ('999999', '35'), ('999999', '49'), ('0000ff', '21'), ('0000ff', '10'), ('0000ff', '26'), ('0000ff', '28'), ('0000ff', '38'), ('0000ff', '40'), ('999999', '34'), ('999999', '53 BFD'), ('999999', '49'), ('999999', '25'), ('999999', '35'), ('999999', '49'), ('0000ff', '23'), ('0000ff', '24'), ('bbbb00', '23'), ('bbbb00', '39'), ('ff0000', '37'), ('ff0000', '40'), ('999999', '40'), ('999999', '53 BFD'), ('999999', '36'), ('999999', '21'), ('999999', '24'), ('999999', '48'), ('0000ff', '23'), ('0000ff', '24'), ('bbbb00', '23'), ('bbbb00', '39'), ('ff0000', '37'), ('ff0000', '40'), ('999999', '40'), ('999999', '53 BFD'), ('999999', '36'), ('999999', '21'), ('999999', '24'), ('999999', '48'), ('bbbb00', '19'), ('bbbb00', '25'), ('bbbb00', '36'), ('bbbb00', '37'), ('0000ff', '30'), ('0000ff', '44'), ('999999', '30'), ('999999', '31'), ('999999', '40'), ('999999', '45'), ('999999', '31'), ('999999', '37'), ('bbbb00', '19'), ('bbbb00', '25'), ('bbbb00', '36'), ('bbbb00', '37'), ('0000ff', '30'), ('0000ff', '44'), ('999999', '30'), ('999999', '31'), ('999999', '40'), ('999999', '45'), ('999999', '31'), ('999999', '37'), ('bbbb00', '30'), ('bbbb00', '11'), ('0000ff', '53 BFD'), ('0000ff', '36'), ('bbbb00', '31'), ('bbbb00', '39'), ('999999', '53 BFD'), ('999999', '29'), ('999999', '53 
RET'), ('999999', '20'), ('999999', '27'), ('999999', '42'), ('bbbb00', '30'), ('bbbb00', '11'), ('0000ff', '53 BFD'), ('0000ff', '36'), ('bbbb00', '31'), ('bbbb00', '39'), ('999999', '53 BFD'), ('999999', '29'), ('999999', '53 RET'), ('999999', '20'), ('999999', '27'), ('999999', '42'), ('ff0000', '21'), ('ff0000', '53 BFD'), ('bbbb00', '35'), ('bbbb00', '23'), ('bbbb00', '42'), ('bbbb00', '27'), ('999999', '42'), ('999999', '36'), ('999999', '45'), ('999999', '10'), ('999999', '39'), ('999999', '47'), ('ff0000', '21'), ('ff0000', '53 BFD'), ('bbbb00', '35'), ('bbbb00', '23'), ('bbbb00', '42'), ('bbbb00', '27'), ('999999', '42'), ('999999', '36'), ('999999', '45'), ('999999', '10'), ('999999', '39'), ('999999', '47'), ('bbbb00', '18'), ('bbbb00', '19'), ('bbbb00', '34'), ('bbbb00', '33'), ('bbbb00', '29'), ('bbbb00', '37'), ('999999', '53 RET'), ('999999', '53 BFD'), ('999999', '20'), ('999999', '49'), ('999999', '38'), ('999999', '28'), ('bbbb00', '18'), ('bbbb00', '19'), ('bbbb00', '34'), ('bbbb00', '33'), ('bbbb00', '29'), ('bbbb00', '37'), ('999999', '53 RET'), ('999999', '53 BFD'), ('999999', '20'), ('999999', '49'), ('999999', '38'), ('999999', '28'), ('bbbb00', '48'), ('bbbb00', '37'), ('ff0000', '40'), ('ff0000', '12'), ('bbbb00', '20'), ('bbbb00', '35'), ('999999', '53 BFD'), ('999999', '23'), ('999999', '43'), ('999999', '48'), ('999999', '53 BFD'), ('999999', '23'), ('bbbb00', '48'), ('bbbb00', '37'), ('ff0000', '40'), ('ff0000', '12'), ('bbbb00', '20'), ('bbbb00', '35'), ('999999', '53 BFD'), ('999999', '23'), ('999999', '43'), ('999999', '48'), ('999999', '53 BFD'), ('999999', '23'), ('bbbb00', '26'), ('bbbb00', '27'), ('ff0000', '38'), ('ff0000', '42'), ('bbbb00', '27'), ('bbbb00', '28'), ('999999', '53 BFD'), ('999999', '9'), ('999999', '44'), ('999999', '51'), ('999999', '34'), ('999999', '51'), ('bbbb00', '26'), ('bbbb00', '27'), ('ff0000', '38'), ('ff0000', '42'), ('bbbb00', '27'), ('bbbb00', '28'), ('999999', '53 BFD'), ('999999', '9'), 
('999999', '44'), ('999999', '51'), ('999999', '34'), ('999999', '51'), ('0000ff', '17'), ('0000ff', '33'), ('bbbb00', '41'), ('bbbb00', '13'), ('0000ff', '10'), ('0000ff', '15'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53'), ('0000ff', '17'), ('0000ff', '33'), ('bbbb00', '41'), ('bbbb00', '13'), ('0000ff', '10'), ('0000ff', '15'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53 DNC'), ('999999', '53'), ('bbbb00', '31'), ('bbbb00', '35'), ('bbbb00', '33'), ('bbbb00', '26'), ('0000ff', '28'), ('0000ff', '23'), ('999999', '35'), ('999999', '42'), ('999999', '53 UFD'), ('999999', '38'), ('999999', '53 RET'), ('999999', '45'), ('bbbb00', '31'), ('bbbb00', '35'), ('bbbb00', '33'), ('bbbb00', '26'), ('0000ff', '28'), ('0000ff', '23'), ('999999', '35'), ('999999', '42'), ('999999', '53 UFD'), ('999999', '38'), ('999999', '53 RET'), ('999999', '45'), ('ff0000', '33'), ('ff0000', '53 BFD'), ('bbbb00', '22'), ('bbbb00', '31'), ('ff0000', '34'), ('ff0000', '29'), ('8b4513', '9'), ('8b4513', '21'), ('8b4513', '3'), ('8b4513', '9'), ('8b4513', '1'), ('8b4513', '2'), ('ff0000', '33'), ('ff0000', '53 BFD'), ('bbbb00', '22'), ('bbbb00', '31'), ('ff0000', '34'), ('ff0000', '29'), ('8b4513', '9'), ('8b4513', '21'), ('8b4513', '3'), ('8b4513', '9'), ('8b4513', '1'), ('8b4513', '2'), ('bbbb00', '38'), ('bbbb00', '9'), ('ff0000', '42'), ('ff0000', '37'), ('bbbb00', '38'), ('bbbb00', '41'), ('8b4513', '7'), ('8b4513', '28'), ('8b4513', '1'), ('8b4513', '3'), ('8b4513', '2'), ('8b4513', '8'), ('bbbb00', '38'), ('bbbb00', '9'), ('ff0000', '42'), ('ff0000', '37'), ('bbbb00', '38'), ('bbbb00', '41'), ('8b4513', '7'), ('8b4513', '28'), ('8b4513', '1'), ('8b4513', '3'), ('8b4513', '2'), ('8b4513', '8'), ('ff0000', '16'), ('ff0000', '53 BFD'), ('0000ff', '34'), ('0000ff', '31'), ('ff0000', '32'), ('ff0000', '42'), ('8b4513', '8'), ('8b4513', '6'), ('8b4513', 
'4'), ('8b4513', '25'), ('8b4513', '12'), ('8b4513', '16'), ('ff0000', '16'), ('ff0000', '53 BFD'), ('0000ff', '34'), ('0000ff', '31'), ('ff0000', '32'), ('ff0000', '42'), ('8b4513', '8'), ('8b4513', '6'), ('8b4513', '4'), ('8b4513', '25'), ('8b4513', '12'), ('8b4513', '16'), ('ff0000', '22'), ('ff0000', '53 BFD'), ('ff0000', '36'), ('ff0000', '31'), ('ff0000', '33'), ('ff0000', '32'), ('8b4513', '17'), ('8b4513', '4'), ('8b4513', '2'), ('8b4513', '26'), ('8b4513', '16'), ('8b4513', '7'), ('ff0000', '22'), ('ff0000', '53 BFD'), ('ff0000', '36'), ('ff0000', '31'), ('ff0000', '33'), ('ff0000', '32'), ('8b4513', '17'), ('8b4513', '4'), ('8b4513', '2'), ('8b4513', '26'), ('8b4513', '16'), ('8b4513', '7'), ('bbbb00', '36'), ('bbbb00', '53 BFD'), ('0000ff', '18'), ('0000ff', '32'), ('ff0000', '39'), ('ff0000', '34'), ('8b4513', '3'), ('8b4513', '49'), ('8b4513', '26'), ('8b4513', '6'), ('8b4513', '6'), ('8b4513', '1'), ('bbbb00', '36'), ('bbbb00', '53 BFD'), ('0000ff', '18'), ('0000ff', '32'), ('ff0000', '39'), ('ff0000', '34'), ('8b4513', '3'), ('8b4513', '49'), ('8b4513', '26'), ('8b4513', '6'), ('8b4513', '6'), ('8b4513', '1'), ('bbbb00', '25'), ('bbbb00', '34'), ('bbbb00', '31'), ('bbbb00', '27'), ('0000ff', '33'), ('0000ff', '33'), ('8b4513', '53 UFD'), ('8b4513', '3'), ('8b4513', '17'), ('8b4513', '4'), ('8b4513', '19'), ('8b4513', '18'), ('bbbb00', '25'), ('bbbb00', '34'), ('bbbb00', '31'), ('bbbb00', '27'), ('0000ff', '33'), ('0000ff', '33'), ('8b4513', '53 UFD'), ('8b4513', '3'), ('8b4513', '17'), ('8b4513', '4'), ('8b4513', '19'), ('8b4513', '18'), ('ff0000', '25'), ('ff0000', '53 DSQ'), ('0000ff', '29'), ('0000ff', '42'), ('0000ff', '32'), ('0000ff', '24'), ('8b4513', '10'), ('8b4513', '14'), ('8b4513', '11'), ('8b4513', '17'), ('8b4513', '9'), ('8b4513', '53'), ('ff0000', '25'), ('ff0000', '53 DSQ'), ('0000ff', '29'), ('0000ff', '42'), ('0000ff', '32'), ('0000ff', '24'), ('8b4513', '10'), ('8b4513', '14'), ('8b4513', '11'), ('8b4513', '17'), ('8b4513', '9'), 
('8b4513', '53'), ('0000ff', '13'), ('0000ff', '37'), ('ff0000', '29'), ('ff0000', '41'), ('bbbb00', '43'), ('bbbb00', '34'), ('8b4513', '1'), ('8b4513', '33'), ('8b4513', '7'), ('8b4513', '21'), ('8b4513', '28'), ('8b4513', '11'), ('0000ff', '13'), ('0000ff', '37'), ('ff0000', '29'), ('ff0000', '41'), ('bbbb00', '43'), ('bbbb00', '34'), ('8b4513', '1'), ('8b4513', '33'), ('8b4513', '7'), ('8b4513', '21'), ('8b4513', '28'), ('8b4513', '11'), ('bbbb00', '22'), ('bbbb00', '39'), ('ff0000', '41'), ('ff0000', '43'), ('0000ff', '23'), ('0000ff', '38'), ('8b4513', '11'), ('8b4513', '11'), ('8b4513', '21'), ('8b4513', '36'), ('8b4513', '3'), ('8b4513', '12'), ('bbbb00', '22'), ('bbbb00', '39'), ('ff0000', '41'), ('ff0000', '43'), ('0000ff', '23'), ('0000ff', '38'), ('8b4513', '11'), ('8b4513', '11'), ('8b4513', '21'), ('8b4513', '36'), ('8b4513', '3'), ('8b4513', '12'), ('ff0000', '38'), ('ff0000', '53 BFD'), ('ff0000', '37'), ('ff0000', '25'), ('bbbb00', '34'), ('bbbb00', '19'), ('8b4513', '24'), ('8b4513', '22'), ('8b4513', '14'), ('8b4513', '31'), ('8b4513', '14'), ('8b4513', '6'), ('ff0000', '38'), ('ff0000', '53 BFD'), ('ff0000', '37'), ('ff0000', '25'), ('bbbb00', '34'), ('bbbb00', '19'), ('8b4513', '24'), ('8b4513', '22'), ('8b4513', '14'), ('8b4513', '31'), ('8b4513', '14'), ('8b4513', '6'), ('bbbb00', '32'), ('bbbb00', '53 BFD'), ('ff0000', '39'), ('ff0000', '20'), ('bbbb00', '45'), ('bbbb00', '45'), ('8b4513', '15'), ('8b4513', '1'), ('8b4513', '10'), ('8b4513', '16'), ('8b4513', '35'), ('8b4513', '15'), ('bbbb00', '32'), ('bbbb00', '53 BFD'), ('ff0000', '39'), ('ff0000', '20'), ('bbbb00', '45'), ('bbbb00', '45'), ('8b4513', '15'), ('8b4513', '1'), ('8b4513', '10'), ('8b4513', '16'), ('8b4513', '35'), ('8b4513', '15'), ('bbbb00', '44'), ('bbbb00', '22'), ('0000ff', '37'), ('0000ff', '45'), ('ff0000', '41'), ('ff0000', '41'), ('8b4513', '6'), ('8b4513', '20'), ('8b4513', '53 RET'), ('8b4513', '2'), ('8b4513', '5'), ('8b4513', '14'), ('bbbb00', '44'), ('bbbb00', 
'22'), ('0000ff', '37'), ('0000ff', '45'), ('ff0000', '41'), ('ff0000', '41'), ('8b4513', '6'), ('8b4513', '20'), ('8b4513', '53 RET'), ('8b4513', '2'), ('8b4513', '5'), ('8b4513', '14'), ('0000ff', '45'), ('0000ff', '40'), ('0000ff', '30'), ('0000ff', '37'), ('0000ff', '16'), ('0000ff', '26'), ('8b4513', '28'), ('8b4513', '25'), ('8b4513', '13'), ('8b4513', '20'), ('8b4513', '22'), ('8b4513', '22'), ('0000ff', '45'), ('0000ff', '40'), ('0000ff', '30'), ('0000ff', '37'), ('0000ff', '16'), ('0000ff', '26'), ('8b4513', '28'), ('8b4513', '25'), ('8b4513', '13'), ('8b4513', '20'), ('8b4513', '22'), ('8b4513', '22'), ('0000ff', '50'), ('0000ff', '36'), ('bbbb00', '38'), ('bbbb00', '41'), ('bbbb00', '41'), ('bbbb00', '44'), ('8b4513', '27'), ('8b4513', '9'), ('8b4513', '28'), ('8b4513', '10'), ('8b4513', '7'), ('8b4513', '3'), ('0000ff', '50'), ('0000ff', '36'), ('bbbb00', '38'), ('bbbb00', '41'), ('bbbb00', '41'), ('bbbb00', '44'), ('8b4513', '27'), ('8b4513', '9'), ('8b4513', '28'), ('8b4513', '10'), ('8b4513', '7'), ('8b4513', '3'), ('ff0000', '42'), ('ff0000', '53 BFD'), ('0000ff', '39'), ('0000ff', '47'), ('0000ff', '39'), ('0000ff', '41'), ('8b4513', '5'), ('8b4513', '18'), ('8b4513', '8'), ('8b4513', '22'), ('8b4513', '13'), ('8b4513', '13'), ('ff0000', '42'), ('ff0000', '53 BFD'), ('0000ff', '39'), ('0000ff', '47'), ('0000ff', '39'), ('0000ff', '41'), ('8b4513', '5'), ('8b4513', '18'), ('8b4513', '8'), ('8b4513', '22'), ('8b4513', '13'), ('8b4513', '13'), ('0000ff', '31'), ('0000ff', '39'), ('0000ff', '28'), ('0000ff', '43'), ('0000ff', '37'), ('0000ff', '36'), ('8b4513', '32'), ('8b4513', '7'), ('8b4513', '23'), ('8b4513', '18'), ('8b4513', '24'), ('8b4513', '5'), ('0000ff', '31'), ('0000ff', '39'), ('0000ff', '28'), ('0000ff', '43'), ('0000ff', '37'), ('0000ff', '36'), ('8b4513', '32'), ('8b4513', '7'), ('8b4513', '23'), ('8b4513', '18'), ('8b4513', '24'), ('8b4513', '5'), ('bbbb00', '42'), ('bbbb00', '30'), ('ff0000', '34'), ('ff0000', '39'), ('bbbb00', '33'), 
('bbbb00', '43'), ('8b4513', '4'), ('8b4513', '24'), ('8b4513', '27'), ('8b4513', '34'), ('8b4513', '8'), ('8b4513', '9'), ('bbbb00', '42'), ('bbbb00', '30'), ('ff0000', '34'), ('ff0000', '39'), ('bbbb00', '33'), ('bbbb00', '43'), ('8b4513', '4'), ('8b4513', '24'), ('8b4513', '27'), ('8b4513', '34'), ('8b4513', '8'), ('8b4513', '9'), ('ff0000', '28'), ('ff0000', '53 BFD'), ('ff0000', '35'), ('ff0000', '44'), ('bbbb00', '35'), ('bbbb00', '38'), ('8b4513', '26'), ('8b4513', '38'), ('8b4513', '16'), ('8b4513', '19'), ('8b4513', '10'), ('8b4513', '4'), ('ff0000', '28'), ('ff0000', '53 BFD'), ('ff0000', '35'), ('ff0000', '44'), ('bbbb00', '35'), ('bbbb00', '38'), ('8b4513', '26'), ('8b4513', '38'), ('8b4513', '16'), ('8b4513', '19'), ('8b4513', '10'), ('8b4513', '4'), ('bbbb00', '46'), ('bbbb00', '17'), ('bbbb00', '37'), ('bbbb00', '35'), ('0000ff', '42'), ('0000ff', '34'), ('8b4513', '13'), ('8b4513', '23'), ('8b4513', '53 DSQ'), ('8b4513', '15'), ('8b4513', '26'), ('8b4513', '10'), ('bbbb00', '46'), ('bbbb00', '17'), ('bbbb00', '37'), ('bbbb00', '35'), ('0000ff', '42'), ('0000ff', '34'), ('8b4513', '13'), ('8b4513', '23'), ('8b4513', '53 DSQ'), ('8b4513', '15'), ('8b4513', '26'), ('8b4513', '10'), ('ff0000', '4'), ('ff0000', '30'), ('0000ff', '40'), ('0000ff', '39'), ('0000ff', '45'), ('0000ff', '46'), ('8b4513', '12'), ('8b4513', '43'), ('8b4513', '15'), ('8b4513', '12'), ('8b4513', '32'), ('8b4513', '26'), ('ff0000', '4'), ('ff0000', '30'), ('0000ff', '40'), ('0000ff', '39'), ('0000ff', '45'), ('0000ff', '46'), ('8b4513', '12'), ('8b4513', '43'), ('8b4513', '15'), ('8b4513', '12'), ('8b4513', '32'), ('8b4513', '26'), ('0000ff', '39'), ('0000ff', '42'), ('0000ff', '33'), ('0000ff', '38'), ('ff0000', '42'), ('ff0000', '37'), ('8b4513', '23'), ('8b4513', '37'), ('8b4513', '22'), ('8b4513', '1'), ('8b4513', '4'), ('8b4513', '23'), ('0000ff', '39'), ('0000ff', '42'), ('0000ff', '33'), ('0000ff', '38'), ('ff0000', '42'), ('ff0000', '37'), ('8b4513', '23'), ('8b4513', 
'37'), ('8b4513', '22'), ('8b4513', '1'), ('8b4513', '4'), ('8b4513', '23'), ('bbbb00', '33'), ('bbbb00', '33'), ('bbbb00', '39'), ('bbbb00', '36'), ('ff0000', '44'), ('ff0000', '33'), ('8b4513', '19'), ('8b4513', '8'), ('8b4513', '33'), ('8b4513', '27'), ('8b4513', '18'), ('8b4513', '17'), ('bbbb00', '33'), ('bbbb00', '33'), ('bbbb00', '39'), ('bbbb00', '36'), ('ff0000', '44'), ('ff0000', '33'), ('8b4513', '19'), ('8b4513', '8'), ('8b4513', '33'), ('8b4513', '27'), ('8b4513', '18'), ('8b4513', '17'), ('ff0000', '37'), ('ff0000', '53 BFD'), ('0000ff', '32'), ('0000ff', '46'), ('bbbb00', '40'), ('bbbb00', '42'), ('8b4513', '16'), ('8b4513', '19'), ('8b4513', '9'), ('8b4513', '7'), ('8b4513', '33'), ('8b4513', '25'), ('ff0000', '37'), ('ff0000', '53 BFD'), ('0000ff', '32'), ('0000ff', '46'), ('bbbb00', '40'), ('bbbb00', '42'), ('8b4513', '16'), ('8b4513', '19'), ('8b4513', '9'), ('8b4513', '7'), ('8b4513', '33'), ('8b4513', '25'), ('bbbb00', '10'), ('bbbb00', '53 BFD'), ('0000ff', '42'), ('0000ff', '51.6 DPI'), ('bbbb00', '48'), ('bbbb00', '46'), ('8b4513', '18'), ('8b4513', '30'), ('8b4513', '19'), ('8b4513', '5'), ('8b4513', '29'), ('8b4513', '19'), ('bbbb00', '10'), ('bbbb00', '53 BFD'), ('0000ff', '42'), ('0000ff', '51.6 DPI'), ('bbbb00', '48'), ('bbbb00', '46'), ('8b4513', '18'), ('8b4513', '30'), ('8b4513', '19'), ('8b4513', '5'), ('8b4513', '29'), ('8b4513', '19'), ('0000ff', '35'), ('0000ff', '53 BFD'), ('ff0000', '45'), ('ff0000', '38'), ('ff0000', '38'), ('ff0000', '39'), ('8b4513', '20'), ('8b4513', '5'), ('8b4513', '20'), ('8b4513', '11'), ('8b4513', '15'), ('8b4513', '53'), ('0000ff', '35'), ('0000ff', '53 BFD'), ('ff0000', '45'), ('ff0000', '38'), ('ff0000', '38'), ('ff0000', '39'), ('8b4513', '20'), ('8b4513', '5'), ('8b4513', '20'), ('8b4513', '11'), ('8b4513', '15'), ('8b4513', '53'), ('bbbb00', '43'), ('bbbb00', '36'), ('0000ff', '21'), ('0000ff', '40'), ('ff0000', '22'), ('ff0000', '36'), ('8b4513', '25'), ('8b4513', '10'), ('8b4513', '31'), 
('8b4513', '38'), ('8b4513', '27'), ('8b4513', '24'), ('bbbb00', '43'), ('bbbb00', '36'), ('0000ff', '21'), ('0000ff', '40'), ('ff0000', '22'), ('ff0000', '36'), ('8b4513', '25'), ('8b4513', '10'), ('8b4513', '31'), ('8b4513', '38'), ('8b4513', '27'), ('8b4513', '24'), ('ff0000', '44'), ('ff0000', '53 BFD'), ('bbbb00', '44'), ('bbbb00', '44'), ('bbbb00', '47'), ('bbbb00', '40'), ('8b4513', '14'), ('8b4513', '13'), ('8b4513', '6'), ('8b4513', '8'), ('8b4513', '30'), ('8b4513', '29'), ('ff0000', '44'), ('ff0000', '53 BFD'), ('bbbb00', '44'), ('bbbb00', '44'), ('bbbb00', '47'), ('bbbb00', '40'), ('8b4513', '14'), ('8b4513', '13'), ('8b4513', '6'), ('8b4513', '8'), ('8b4513', '30'), ('8b4513', '29'), ('ff0000', '43'), ('ff0000', '31'), ('bbbb00', '45'), ('bbbb00', '43'), ('0000ff', '48'), ('0000ff', '45'), ('8b4513', '2'), ('8b4513', '12'), ('8b4513', '34'), ('8b4513', '13'), ('8b4513', '20'), ('8b4513', '30'), ('ff0000', '43'), ('ff0000', '31'), ('bbbb00', '45'), ('bbbb00', '43'), ('0000ff', '48'), ('0000ff', '45'), ('8b4513', '2'), ('8b4513', '12'), ('8b4513', '34'), ('8b4513', '13'), ('8b4513', '20'), ('8b4513', '30'), ('bbbb00', '15'), ('bbbb00', '26'), ('ff0000', '53 DNF'), ('ff0000', '45'), ('0000ff', '43'), ('0000ff', '39'), ('8b4513', '22'), ('8b4513', '45'), ('8b4513', '32'), ('8b4513', '28'), ('8b4513', '11'), ('8b4513', '20'), ('bbbb00', '15'), ('bbbb00', '26'), ('ff0000', '53 DNF'), ('ff0000', '45'), ('0000ff', '43'), ('0000ff', '39'), ('8b4513', '22'), ('8b4513', '45'), ('8b4513', '32'), ('8b4513', '28'), ('8b4513', '11'), ('8b4513', '20'), ('0000ff', '10'), ('0000ff', '41'), ('0000ff', '38'), ('0000ff', '44'), ('bbbb00', '39'), ('bbbb00', '36'), ('8b4513', '53 RET'), ('8b4513', '32'), ('8b4513', '18'), ('8b4513', '24'), ('8b4513', '21'), ('8b4513', '28'), ('0000ff', '10'), ('0000ff', '41'), ('0000ff', '38'), ('0000ff', '44'), ('bbbb00', '39'), ('bbbb00', '36'), ('8b4513', '53 RET'), ('8b4513', '32'), ('8b4513', '18'), ('8b4513', '24'), ('8b4513', '21'), 
('8b4513', '28'), ('ff0000', '41'), ('ff0000', '37'), ('bbbb00', '42'), ('bbbb00', '40'), ('ff0000', '45'), ('ff0000', '43'), ('8b4513', '36'), ('8b4513', '31'), ('8b4513', '24'), ('8b4513', '23'), ('8b4513', '23'), ('8b4513', '27'), ('ff0000', '41'), ('ff0000', '37'), ('bbbb00', '42'), ('bbbb00', '40'), ('ff0000', '45'), ('ff0000', '43'), ('8b4513', '36'), ('8b4513', '31'), ('8b4513', '24'), ('8b4513', '23'), ('8b4513', '23'), ('8b4513', '27'), ('ff0000', '39'), ('ff0000', '53 BFD'), ('bbbb00', '40'), ('bbbb00', '45'), ('bbbb00', '46'), ('bbbb00', '47'), ('8b4513', '31'), ('8b4513', '2'), ('8b4513', '53 RET'), ('8b4513', '14'), ('8b4513', '31'), ('8b4513', '33'), ('ff0000', '39'), ('ff0000', '53 BFD'), ('bbbb00', '40'), ('bbbb00', '45'), ('bbbb00', '46'), ('bbbb00', '47'), ('8b4513', '31'), ('8b4513', '2'), ('8b4513', '53 RET'), ('8b4513', '14'), ('8b4513', '31'), ('8b4513', '33'), ('bbbb00', '39'), ('bbbb00', '38'), ('ff0000', '44'), ('ff0000', '46'), ('0000ff', '47'), ('0000ff', '49'), ('8b4513', '30'), ('8b4513', '15'), ('8b4513', '37'), ('8b4513', '32'), ('8b4513', '34'), ('8b4513', '21'), ('bbbb00', '39'), ('bbbb00', '38'), ('ff0000', '44'), ('ff0000', '46'), ('0000ff', '47'), ('0000ff', '49'), ('8b4513', '30'), ('8b4513', '15'), ('8b4513', '37'), ('8b4513', '32'), ('8b4513', '34'), ('8b4513', '21'), ('ff0000', '34'), ('ff0000', '35'), ('bbbb00', '47'), ('bbbb00', '42'), ('0000ff', '49'), ('0000ff', '47'), ('8b4513', '21'), ('8b4513', '27'), ('8b4513', '38'), ('8b4513', '40'), ('8b4513', '37'), ('8b4513', '34'), ('ff0000', '34'), ('ff0000', '35'), ('bbbb00', '47'), ('bbbb00', '42'), ('0000ff', '49'), ('0000ff', '47'), ('8b4513', '21'), ('8b4513', '27'), ('8b4513', '38'), ('8b4513', '40'), ('8b4513', '37'), ('8b4513', '34'), ('0000ff', '47'), ('0000ff', '44'), ('0000ff', '41'), ('0000ff', '48'), ('0000ff', '46'), ('0000ff', '50'), ('8b4513', '33'), ('8b4513', '17'), ('8b4513', '30'), ('8b4513', '42'), ('8b4513', '25'), ('8b4513', '32'), ('0000ff', '47'), 
('0000ff', '44'), ('0000ff', '41'), ('0000ff', '48'), ('0000ff', '46'), ('0000ff', '50'), ('8b4513', '33'), ('8b4513', '17'), ('8b4513', '30'), ('8b4513', '42'), ('8b4513', '25'), ('8b4513', '32'), ('0000ff', '48'), ('0000ff', '45'), ('ff0000', '47'), ('ff0000', '47'), ('0000ff', '41'), ('0000ff', '43'), ('8b4513', '41'), ('8b4513', '35'), ('8b4513', '29'), ('8b4513', '30'), ('8b4513', '17'), ('8b4513', '31'), ('0000ff', '48'), ('0000ff', '45'), ('ff0000', '47'), ('ff0000', '47'), ('0000ff', '41'), ('0000ff', '43'), ('8b4513', '41'), ('8b4513', '35'), ('8b4513', '29'), ('8b4513', '30'), ('8b4513', '17'), ('8b4513', '31'), ('ff0000', '49'), ('ff0000', '33'), ('bbbb00', '49'), ('bbbb00', '48'), ('ff0000', '46'), ('ff0000', '44'), ('8b4513', '40'), ('8b4513', '29'), ('8b4513', '5'), ('8b4513', '43'), ('8b4513', '36'), ('8b4513', '36'), ('ff0000', '49'), ('ff0000', '33'), ('bbbb00', '49'), ('bbbb00', '48'), ('ff0000', '46'), ('ff0000', '44'), ('8b4513', '40'), ('8b4513', '29'), ('8b4513', '5'), ('8b4513', '43'), ('8b4513', '36'), ('8b4513', '36'), ('0000ff', '42'), ('0000ff', '47'), ('bbbb00', '46'), ('bbbb00', '47'), ('0000ff', '50'), ('0000ff', '50.6 DPI'), ('8b4513', '34'), ('8b4513', '16'), ('8b4513', '35'), ('8b4513', '29'), ('8b4513', '38'), ('8b4513', '35'), ('0000ff', '42'), ('0000ff', '47'), ('bbbb00', '46'), ('bbbb00', '47'), ('0000ff', '50'), ('0000ff', '50.6 DPI'), ('8b4513', '34'), ('8b4513', '16'), ('8b4513', '35'), ('8b4513', '29'), ('8b4513', '38'), ('8b4513', '35'), ('bbbb00', '47'), ('bbbb00', '40'), ('0000ff', '36'), ('0000ff', '33'), ('ff0000', '43'), ('ff0000', '35'), ('8b4513', '37'), ('8b4513', '50'), ('8b4513', '36'), ('8b4513', '35'), ('8b4513', '39'), ('8b4513', '37'), ('bbbb00', '47'), ('bbbb00', '40'), ('0000ff', '36'), ('0000ff', '33'), ('ff0000', '43'), ('ff0000', '35'), ('8b4513', '37'), ('8b4513', '50'), ('8b4513', '36'), ('8b4513', '35'), ('8b4513', '39'), ('8b4513', '37'), ('bbbb00', '51'), ('bbbb00', '43'), ('ff0000', '48'), 
('ff0000', '48'), ('ff0000', '47'), ('ff0000', '45'), ('8b4513', '35'), ('8b4513', '36'), ('8b4513', '25'), ('8b4513', '33'), ('8b4513', '41'), ('8b4513', '39'), ('bbbb00', '51'), ('bbbb00', '43'), ('ff0000', '48'), ('ff0000', '48'), ('ff0000', '47'), ('ff0000', '45'), ('8b4513', '35'), ('8b4513', '36'), ('8b4513', '25'), ('8b4513', '33'), ('8b4513', '41'), ('8b4513', '39'), ('ff0000', '45'), ('ff0000', '34'), ('bbbb00', '43'), ('bbbb00', '48.6 DPI'), ('bbbb00', '44'), ('bbbb00', '48'), ('8b4513', '39'), ('8b4513', '39'), ('8b4513', '41'), ('8b4513', '37'), ('8b4513', '40'), ('8b4513', '38'), ('ff0000', '45'), ('ff0000', '34'), ('bbbb00', '43'), ('bbbb00', '48.6 DPI'), ('bbbb00', '44'), ('bbbb00', '48'), ('8b4513', '39'), ('8b4513', '39'), ('8b4513', '41'), ('8b4513', '37'), ('8b4513', '40'), ('8b4513', '38'), ('0000ff', '49'), ('0000ff', '48'), ('0000ff', '53 BFD'), ('0000ff', '50'), ('ff0000', '48'), ('ff0000', '46'), ('8b4513', '29'), ('8b4513', '41'), ('8b4513', '39'), ('8b4513', '39'), ('8b4513', '47'), ('8b4513', '41'), ('0000ff', '49'), ('0000ff', '48'), ('0000ff', '53 BFD'), ('0000ff', '50'), ('ff0000', '48'), ('ff0000', '46'), ('8b4513', '29'), ('8b4513', '41'), ('8b4513', '39'), ('8b4513', '39'), ('8b4513', '47'), ('8b4513', '41'), ('ff0000', '52'), ('ff0000', '36'), ('ff0000', '49'), ('ff0000', '49'), ('bbbb00', '50'), ('bbbb00', '51'), ('8b4513', '38'), ('8b4513', '26'), ('8b4513', '46'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '43'), ('ff0000', '52'), ('ff0000', '36'), ('ff0000', '49'), ('ff0000', '49'), ('bbbb00', '50'), ('bbbb00', '51'), ('8b4513', '38'), ('8b4513', '26'), ('8b4513', '46'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '43'), ('ff0000', '50'), ('ff0000', '32'), ('bbbb00', '50'), ('bbbb00', '51'), ('ff0000', '49'), ('ff0000', '47'), ('8b4513', '42'), ('8b4513', '34'), ('8b4513', '45'), ('8b4513', '44'), ('8b4513', '49'), ('8b4513', '42'), ('ff0000', '50'), ('ff0000', '32'), ('bbbb00', '50'), ('bbbb00', '51'), ('ff0000', '49'), 
('ff0000', '47'), ('8b4513', '42'), ('8b4513', '34'), ('8b4513', '45'), ('8b4513', '44'), ('8b4513', '49'), ('8b4513', '42'), ('bbbb00', '50'), ('bbbb00', '42'), ('bbbb00', '52'), ('bbbb00', '52'), ('ff0000', '50'), ('ff0000', '49'), ('8b4513', '43'), ('8b4513', '42'), ('8b4513', '42'), ('8b4513', '41'), ('8b4513', '45'), ('8b4513', '48'), ('bbbb00', '50'), ('bbbb00', '42'), ('bbbb00', '52'), ('bbbb00', '52'), ('ff0000', '50'), ('ff0000', '49'), ('8b4513', '43'), ('8b4513', '42'), ('8b4513', '42'), ('8b4513', '41'), ('8b4513', '45'), ('8b4513', '48'), ('0000ff', '46'), ('0000ff', '46'), ('0000ff', '44'), ('0000ff', '52'), ('ff0000', '51'), ('ff0000', '48'), ('8b4513', '45'), ('8b4513', '40'), ('8b4513', '40'), ('8b4513', '49'), ('8b4513', '51'), ('8b4513', '45'), ('0000ff', '46'), ('0000ff', '46'), ('0000ff', '44'), ('0000ff', '52'), ('ff0000', '51'), ('ff0000', '48'), ('8b4513', '45'), ('8b4513', '40'), ('8b4513', '40'), ('8b4513', '49'), ('8b4513', '51'), ('8b4513', '45'), ('bbbb00', '52'), ('bbbb00', '44'), ('bbbb00', '48'), ('bbbb00', '49'), ('bbbb00', '49'), ('bbbb00', '49'), ('8b4513', '49'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '45'), ('8b4513', '42'), ('8b4513', '40'), ('bbbb00', '52'), ('bbbb00', '44'), ('bbbb00', '48'), ('bbbb00', '49'), ('bbbb00', '49'), ('bbbb00', '49'), ('8b4513', '49'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '45'), ('8b4513', '42'), ('8b4513', '40'), ('bbbb00', '49'), ('bbbb00', '41'), ('ff0000', '50'), ('ff0000', '51'), ('bbbb00', '51'), ('bbbb00', '50'), ('8b4513', '47'), ('8b4513', '46'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '43'), ('8b4513', '44'), ('bbbb00', '49'), ('bbbb00', '41'), ('ff0000', '50'), ('ff0000', '51'), ('bbbb00', '51'), ('bbbb00', '50'), ('8b4513', '47'), ('8b4513', '46'), ('8b4513', '47'), ('8b4513', '48'), ('8b4513', '43'), ('8b4513', '44'), ('bbbb00', '40'), ('bbbb00', '16'), ('0000ff', '53 BFD'), ('0000ff', '41'), ('ff0000', '53 DNC'), ('ff0000', '53 DNC'), ('8b4513', '53 DNC'), 
('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53'), ('bbbb00', '40'), ('bbbb00', '16'), ('0000ff', '53 BFD'), ('0000ff', '41'), ('ff0000', '53 DNC'), ('ff0000', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53 DNC'), ('8b4513', '53'), ('0000ff', '51'), ('0000ff', '50'), ('bbbb00', '51'), ('bbbb00', '50'), ('bbbb00', '52'), ('bbbb00', '52'), ('8b4513', '46'), ('8b4513', '44'), ('8b4513', '43'), ('8b4513', '46'), ('8b4513', '44'), ('8b4513', '53'), ('0000ff', '51'), ('0000ff', '50'), ('bbbb00', '51'), ('bbbb00', '50'), ('bbbb00', '52'), ('bbbb00', '52'), ('8b4513', '46'), ('8b4513', '44'), ('8b4513', '43'), ('8b4513', '46'), ('8b4513', '44'), ('8b4513', '53'), ('ff0000', '48'), ('ff0000', '53 BFD'), ('ff0000', '51'), ('ff0000', '50'), ('0000ff', '51'), ('0000ff', '52'), ('8b4513', '48'), ('8b4513', '48'), ('8b4513', '44'), ('8b4513', '50'), ('8b4513', '46'), ('8b4513', '47'), ('ff0000', '48'), ('ff0000', '53 BFD'), ('ff0000', '51'), ('ff0000', '50'), ('0000ff', '51'), ('0000ff', '52'), ('8b4513', '48'), ('8b4513', '48'), ('8b4513', '44'), ('8b4513', '50'), ('8b4513', '46'), ('8b4513', '47'), ('0000ff', '52'), ('0000ff', '49'), ('0000ff', '43'), ('0000ff', '51'), ('0000ff', '52'), ('0000ff', '51'), ('8b4513', '44'), ('8b4513', '51'), ('8b4513', '49'), ('8b4513', '51'), ('8b4513', '50'), ('8b4513', '46'), ('0000ff', '52'), ('0000ff', '49'), ('0000ff', '43'), ('0000ff', '51'), ('0000ff', '52'), ('0000ff', '51'), ('8b4513', '44'), ('8b4513', '51'), ('8b4513', '49'), ('8b4513', '51'), ('8b4513', '50'), ('8b4513', '46')]
def blah():
    """Split worldChamps2019.csv race results into per-fleet columns.

    Uses the module-level ``colorlst`` of (hex-colour, result) pairs — 24
    entries per sailor row, in the same row order as the CSV (presumably
    scraped from the results page; see blah2).  Entry 0 of a sailor's 24
    gives the fleet colour for QR1/QR2, entry 2 for QR3/QR4, entry 4 for
    QR5/QR6, and entry 6 the finals fleet (gold/silver/bronze) for
    FR7-FR12.  The bucketed results are written back into new
    ``QR<n>_<colour>`` / ``FR<n>_<fleet>`` columns and saved as
    MODworldChamps2019.csv.
    """
    tstname='worldChamps2019.csv'
    df = pd.read_csv(tstname)
    # One accumulator list per (race, fleet) combination; each holds
    # [sailor name, result] pairs.
    QR1_blue = []
    QR1_red = []
    QR1_yellow = []
    QR2_blue = []
    QR2_red = []
    QR2_yellow = []
    QR3_blue = []
    QR3_red = []
    QR3_yellow = []
    QR4_blue = []
    QR4_red = []
    QR4_yellow = []
    QR5_blue = []
    QR5_red = []
    QR5_yellow = []
    QR6_blue = []
    QR6_red = []
    QR6_yellow = []
    FR7_gold = []
    FR7_silver = []
    FR7_bronze = []
    FR8_gold = []
    FR8_silver = []
    FR8_bronze = []
    FR9_gold = []
    FR9_silver = []
    FR9_bronze = []
    FR10_gold = []
    FR10_silver = []
    FR10_bronze = []
    FR11_gold = []
    FR11_silver = []
    FR11_bronze = []
    FR12_gold = []
    FR12_silver = []
    FR12_bronze = []
    # Ordered so index i maps to race int(i/3)+1 and colour i%3
    # (blue/red/yellow) in the write-back loop below.
    lstQRs=[ QR1_blue , \
    QR1_red , \
    QR1_yellow , \
    QR2_blue , \
    QR2_red , \
    QR2_yellow , \
    QR3_blue , \
    QR3_red , \
    QR3_yellow , \
    QR4_blue , \
    QR4_red , \
    QR4_yellow , \
    QR5_blue , \
    QR5_red , \
    QR5_yellow , \
    QR6_blue , \
    QR6_red , \
    QR6_yellow]
    lstFRs=[ FR7_gold , \
    FR7_silver , \
    FR7_bronze , \
    FR8_gold , \
    FR8_silver , \
    FR8_bronze , \
    FR9_gold , \
    FR9_silver , \
    FR9_bronze , \
    FR10_gold , \
    FR10_silver , \
    FR10_bronze , \
    FR11_gold , \
    FR11_silver , \
    FR11_bronze , \
    FR12_gold , \
    FR12_silver , \
    FR12_bronze ]
    # Hex colour codes used on the results page -> fleet names.
    colors = {'0000ff': 'blue', 'bbbb00': 'yellow', '999900': 'gold',
          'ff0000': 'red', '999999':'silver', '8b4513': 'bronze'}
    for index, row in df.iterrows():
        # 24 colorlst entries per sailor; only entries 0, 2, 4 and 6 are read.
        lstIndex = index*24
        colorKey = colorlst[lstIndex][0]
        if colors.get(colorKey) == 'blue':
            QR1_blue.append([row['Name'],row['QR1']])
            QR2_blue.append([row['Name'],row['QR2']])
        elif colors.get(colorKey) == 'red':
            QR1_red.append([row['Name'],row['QR1']])
            QR2_red.append([row['Name'],row['QR2']])
        elif colors.get(colorKey) == 'yellow':
            QR1_yellow.append([row['Name'],row['QR1']])
            QR2_yellow.append([row['Name'],row['QR2']])
        lstIndex += 2
        colorKey = colorlst[lstIndex][0]
        if colors.get(colorKey) == 'blue':
            QR3_blue.append([row['Name'],row['QR3']])
            QR4_blue.append([row['Name'],row['QR4']])
        elif colors.get(colorKey) == 'red':
            QR3_red.append([row['Name'],row['QR3']])
            QR4_red.append([row['Name'],row['QR4']])
        elif colors.get(colorKey) == 'yellow':
            QR3_yellow.append([row['Name'],row['QR3']])
            QR4_yellow.append([row['Name'],row['QR4']])
        lstIndex += 2
        colorKey = colorlst[lstIndex][0]
        if colors.get(colorKey) == 'blue':
            QR5_blue.append([row['Name'],row['QR5']])
            QR6_blue.append([row['Name'],row['QR6']])
        elif colors.get(colorKey) == 'red':
            QR5_red.append([row['Name'],row['QR5']])
            QR6_red.append([row['Name'],row['QR6']])
        elif colors.get(colorKey) == 'yellow':
            QR5_yellow.append([row['Name'],row['QR5']])
            QR6_yellow.append([row['Name'],row['QR6']])
        lstIndex += 2
        colorKey = colorlst[lstIndex][0]
        # The finals fleet colour covers all six final races at once.
        if colors.get(colorKey) == 'gold':
            FR7_gold.append([row['Name'],row['FR7']])
            FR8_gold.append([row['Name'],row['FR8']])
            FR9_gold.append([row['Name'],row['FR9']])
            FR10_gold.append([row['Name'],row['FR10']])
            FR11_gold.append([row['Name'],row['FR11']])
            FR12_gold.append([row['Name'],row['FR12']])
        elif colors.get(colorKey) == 'silver':
            FR7_silver.append([row['Name'],row['FR7']])
            FR8_silver.append([row['Name'],row['FR8']])
            FR9_silver.append([row['Name'],row['FR9']])
            FR10_silver.append([row['Name'],row['FR10']])
            FR11_silver.append([row['Name'],row['FR11']])
            FR12_silver.append([row['Name'],row['FR12']])
        elif colors.get(colorKey) == 'bronze':
            FR7_bronze.append([row['Name'],row['FR7']])
            FR8_bronze.append([row['Name'],row['FR8']])
            FR9_bronze.append([row['Name'],row['FR9']])
            FR10_bronze.append([row['Name'],row['FR10']])
            FR11_bronze.append([row['Name'],row['FR11']])
            FR12_bronze.append([row['Name'],row['FR12']])
    # Write the qualifying buckets back into new QR<n>_<colour> columns.
    indexColors = {0:'blue', 1:'red', 2:'yellow'}
    for i in range(18):
        qr = lstQRs[i]
        # NOTE(review): if the CSV stores results as strings ('53 DNC' etc.)
        # this sort is lexicographic, not numeric — confirm the dtype.
        qr.sort(key = lambda sailor:sailor[1])
        currentColor = indexColors.get(i % 3)
        QRnum = int(i / 3) + 1
        print(qr)
        for j in range(len(qr)):
            result = qr[j]
            sailor = result[0]
            rr = result[1]
            sailorIndex = df.loc[df['Name']==sailor].index
            colName = 'QR{}_{}'.format(str(QRnum),str(currentColor))
            print ("{:39} had result {:3} in race {}".format(\
                sailor,rr,colName))
            # First assignment to a missing column may raise; create it
            # filled with NaN and retry.
            try:
                df.at[sailorIndex,colName] = rr
            except Exception as e:
                df[colName] = np.nan
                df.at[sailorIndex,colName] = rr
    # Same write-back for the finals buckets (FR7-FR12).
    indexColors = {0:'gold', 1:'silver', 2:'bronze'}
    for i in range(18):
        fr = lstFRs[i]
        fr.sort(key = lambda sailor:sailor[1])
        currentColor = indexColors.get(i % 3)
        FRnum = int(i / 3) + 1
        print(fr)
        for j in range(len(fr)):
            result = fr[j]
            sailor = result[0]
            rr = result[1]
            sailorIndex = df.loc[df['Name']==sailor].index
            colName = 'FR{}_{}'.format(str(FRnum + 6),str(currentColor))
            print ("{:39} had result {:3} in race {}".format(\
                sailor,rr,colName))
            try:
                df.at[sailorIndex,colName] = rr
            except Exception as e:
                df[colName] = np.nan
                df.at[sailorIndex,colName] = rr
    df.to_csv('MOD' + tstname, index=False)
def blah2():
    """Extract (hex-colour, result) pairs from raw regatta HTML.

    Matches ``color=" #rrggbb">NN</font></td>`` cells, optionally wrapped in
    ``<s>`` strike-through tags, where the result is either a 1-2 digit
    number or a number followed by a 3-letter code (e.g. DNC, BFD).
    NOTE(review): relies on a module-level ``rawHTML`` defined elsewhere.
    """
    cell_pattern = re.compile(r'color=" #([0-9a-f]{6})">(?:<s>)?(\d{1,2}|\d{1,2}\.?\d?\s[A-Z]{3})\s?(?:<\/s>)?<\/font><\/td>')
    print(cell_pattern.findall(rawHTML))
def wc2020():
    """Round-trip WorldChamps2020.csv through pandas, rewriting it in place
    without adding an index column."""
    frame = pd.read_csv('WorldChamps2020.csv')
    frame.to_csv('WorldChamps2020.csv', index=False)
def hempelWCmiami2019():
    """Merge the per-race Hempel World Cup Miami 2019 result files into one CSV.

    Reads HempelWCMiami2019Overall.csv (one row per sailor, keyed by 'Name'),
    then for each race file HempelWCMiami2019<race>.csv copies every crew's
    'Race Points' into a same-named column of the overall frame.  Sailors in
    a race file but missing from the overall sheet are silently skipped.
    The merged table is written to HempelWCMiami2019.csv.
    """
    df = pd.read_csv('HempelWCMiami2019Overall.csv')
    races = ['QR1_yellow','QR2_yellow','QR3_blue','QR4_blue','FR5_gold','FR6_gold','FR7_gold','FR8_gold','FR9_gold','FR10_gold','FR11_gold','FR_medal','QR3_yellow','QR4_yellow','QR1_blue','QR2_blue','FR5_silver','FR6_silver','FR7_silver','FR8_silver','FR9_silver','FR10_silver']
    for race in races:
        inputFile = 'HempelWCMiami2019{}.csv'.format(race)
        dfTmp = pd.read_csv(inputFile)
        # Create the race column up front instead of relying on an exception
        # from the first assignment (the original's try/except did this as a
        # side effect and silently swallowed any other failure).
        if race not in df.columns:
            df[race] = np.nan
        for index, row in dfTmp.iterrows():
            sailor = row['Crew']
            rr = row['Race Points']
            # Index of the matching row(s); .loc assignment on an empty
            # index is a no-op, which gives the "skip unknown sailor" behavior.
            dfIndex = df.loc[df['Name'] == sailor].index
            df.loc[dfIndex, race] = rr
    # index=False for consistency with every other CSV writer in this file;
    # the original emitted a spurious unnamed index column here.
    df.to_csv('HempelWCMiami2019.csv', index=False)
| true | true |
1c2f0de6e086382425de3ac8164941b1a7edefa0 | 266 | py | Python | hackerrank/Algorithms/Tower Breakers, Revisited!/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerrank/Algorithms/Tower Breakers, Revisited!/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | hackerrank/Algorithms/Tower Breakers, Revisited!/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | import unittest
import solution
class TestQ(unittest.TestCase):
def test_case_0(self):
self.assertEqual(solution.towerBreakers([1, 2]), 1)
self.assertEqual(solution.towerBreakers([1, 2, 3]), 2)
if __name__ == '__main__':
unittest.main()
| 19 | 62 | 0.680451 | import unittest
import solution
class TestQ(unittest.TestCase):
def test_case_0(self):
self.assertEqual(solution.towerBreakers([1, 2]), 1)
self.assertEqual(solution.towerBreakers([1, 2, 3]), 2)
if __name__ == '__main__':
unittest.main()
| true | true |
1c2f0ff2032887fc139b15fb0d61839cb7f204ba | 13,023 | py | Python | game2.py | calthecoder/zealous-fibula | b1c3fb3426ec6711a474948f7d820289e1039fca | [
"MIT"
] | null | null | null | game2.py | calthecoder/zealous-fibula | b1c3fb3426ec6711a474948f7d820289e1039fca | [
"MIT"
] | null | null | null | game2.py | calthecoder/zealous-fibula | b1c3fb3426ec6711a474948f7d820289e1039fca | [
"MIT"
] | null | null | null | """
Beta 0.0.9 - attack() class function does not work; returns None
Beta 0.1.0 - Fixed Issue 0.0.9. Working but missing a few things
Beta 0.1.1 - Renamed self.sees() to self.act(); added a passive function self.act() to Item
Beta 0.1.2 - Renamed namelist to enemylist and added itemlist
Beta 0.2.1 - Added items to pick up
Beta 0.2.2 - Greatly shortened code
Beta 0.2.3 - Added 'dex' variable in enemies.py - read docstring where defined
Beta 0.2.4 - Programmed 'dex' to be multiplied by 'damage' to give a final damage. Needed to change Enemy.act() and Enemy.attack()
Beta 0.2.5 - Fixed an assignment error
Beta 0.3.1 - Nicely formatted output for inventory
Beta 0.3.2 - Moved the main loop into Adventure1(); allows expansion
Beta 0.3.3 - Added startScreen() function
Beta 0.3.4 - 'Quit' now works to exit
Beta 0.3.5 - Lowered difference of (item.damage multiplied by item.dex) and enemy.hp
Beta 0.3.6 - Lowered battle time to 5 seconds
Beta 0.3.7 - Moved main loop into keyHandle(grid); avoids repetition
Beta 0.3.8 - Made new maze option, grid2
Beta 0.3.9 - Added 'win spot' x and y vars
Beta 0.4.1 - Changed x,y order for classes in items.py and enemies.py to y,x (to fit with python list standards)
Beta 0.4.2 - Edited README.md to include a changelog
Beta 0.4.3 - Fixed changelog formatting
Beta 0.4.4 - Moved changelog to CHANGELOG
Beta 0.4.5 - Added the player editor
Beta 0.4.6 - Added switching the weapon out from the inventory
Beta 0.4.7 - Changed grid2; added 'xy' keystroke
Beta 0.4.8 - Added switch() function for moving monsters!
Beta 0.4.9 - Added store()
Beta 0.5.1 - Added a new starting dialogue
Beta 0.5.2 - Added a new visual aid: mapg
Beta 0.5.3 - Shortened store()
Beta 0.5.4 - Made mapg() more detailed
Beta 0.5.5 - Started before_grid2
Beta 0.5.6 - Started working on interactives.py - a new library for interaction!!
Beta 0.5.7 - Fixed up interactives.py and added some weapons for use in interactives
Beta 0.5.8 - Added a map key
Beta 0.5.9 - Removed HumanInt class
Beta 0.6.1 - Fixed mapg() error
Beta 0.6.2 - Fixed error that happened when you pressed something other than "m" or "q" in Enemy.act()
Beta 0.6.3 - Moved dialogue and maps to world.py
Beta 0.6.4 - Added accuracy variable to weapons
Beta 0.6.5 - Included music (2 soundtracks)
Beta 0.6.6 - Added Inverted control option
Beta 0.6.7 - Added more music and sound effects
Beta 0.6.8 - Fixed pyinstaller music problem
Beta 0.6.9 - Added Fletcher
Beta 0.7.1 - Village is new "store"; can be visited after every level
Beta 0.7.2 - Added level 3
Beta 0.7.3 - Fixed interactive problem
"""
import player, sys, random
from enemies import *
from world import *
from items import *
from interactives import *
######################################################
#when more levels are added, edit lines 292, 314, 319#
######################################################
# The player object, starting at grid position (0, 0).
me=player.Player(0,0)
# Text shown by the in-game 'h' (help) command in keyHandle().
helplist="""
Keylist:
Type `h:` followed by a specific keystroke for help on a certain function
w = forward
a = left
d = right
s = backward
q = attack
i = inventory
h = help
p = player editor (change weapon, name...)
xy = displays coordinates
wallet = display your wallet
map = display map
hp = health
quit = quit
"""
# Vertical step deltas for 'w'/'s'; flipped at startup if the player
# declines inverted controls (see the prompt at the bottom of the file).
yp, ym = 1, -1 #for inverted controls
# Respawn coordinates used by the Adventure* entry points.
oldx, oldy = 0,0
# Victory banner text (not referenced in the visible code).
win_statement = """
#*******************#
#******YOU WIN******#
#*******************#
"""
musc = False  # True only once pygame audio is fully initialised
try:
    print('Loading music...')
    from pygame import mixer  # Load the required library
    mixer.init()
    m_chan = mixer.Channel(0)  # background-music channel
    s_chan = mixer.Channel(1)  # sound-effect channel
    # The only difference between the frozen macOS build and every other
    # platform is the resource prefix, so resolve it once instead of
    # duplicating every Sound(...) call per platform (as the original did).
    if sys.platform.startswith('darwin'):
        _res_dir = '/Users/calvin/Documents/zealous-fibula-master/zealous-fibula/resources/'
    else:
        _res_dir = 'resources/'
    seffect1 = mixer.Sound(_res_dir + 'seffect1.ogg')
    seffect2 = mixer.Sound(_res_dir + 'seffect2.ogg')
    _track_files = [
        'A Glimmer in the North.ogg',
        'strack2.ogg',
        'Down Down to Goblin-town.ogg',
        'Far Ahead the Road Has Gone.ogg',
        'Hammerhand.ogg',
        'Lament for Oakenshield.ogg',
        'Oakenshield.ogg',
        'Shadows of Angmar.ogg',
        'The Creeping Gloom.ogg',
        'The Ice Bay.ogg',
        'The Road to War.ogg',
        'Where Will Wants Not.ogg',
    ]
    playlist = [mixer.Sound(_res_dir + name) for name in _track_files]
    # Keep the individual strackN module names for any code that refers to them.
    (strack1, strack2, strack3, strack4, strack5, strack6,
     strack7, strack8, strack9, strack10, strack11, strack12) = playlist
    musc = True
except Exception:
    # Any failure (pygame missing, mixer init error, missing .ogg) simply
    # disables audio; the game itself still runs.  Exception (not a bare
    # except) so Ctrl-C / SystemExit still propagate.
    print("Music not compatible")
    musc = False
def save():
    """Persist the player's state to resources/save1.txt.

    Writes, one value per line: x, y, name, hp, equipped-weapon name, each
    inventory item's name, wallet, and skill.
    """
    # 'w' (not 'r+') so stale bytes from a longer previous save are truncated
    # and the file is created on first save; the with-statement closes the
    # handle even on error (the original leaked the open file object).
    with open('resources/save1.txt', 'w') as f:
        f.write(str(me.x) + '\n')
        f.write(str(me.y) + '\n')
        f.write(me.name + '\n')
        f.write(str(me.hp) + '\n')
        f.write(me.weapon.name + '\n')
        for item in me.invent:
            # invent holds Item objects (keyHandle appends grid tiles), not
            # strings; the original `item + '\n'` raised TypeError, so save
            # the item's name instead.
            f.write(str(getattr(item, 'name', item)) + '\n')
        f.write(str(me.wallet) + '\n')
        f.write(str(me.skill))
def mapg(l):
    """Print an ASCII map of grid ``l`` with the player's position marked.

    Legend (also printed below the map): Y = player, + = live monster,
    x = dead monster, ! = item, digit = level gateway, # = blank, * = out
    of bounds (ragged row).

    NOTE(review): ``tmp`` aliases ``l`` (no copy) — the player object is
    temporarily written into the live grid and restored at the end.  Row
    counts are hard-coded per known grid; an unknown grid leaves ``yr``
    undefined and raises NameError.
    """
    tmp = l
    print('')
    # Temporarily drop the player object into their cell so the render
    # loop below can recognise it by name.
    old = tmp[me.y][me.x]
    tmp[me.y][me.x] = me
    if l == grid2:
        yr = 9
    elif l == grid1:
        yr = 12
    elif l == village:
        yr=6 # hard-coded row count - keep in sync with the village map
    elif l == grid3:
        yr=6 # hard-coded row count - keep in sync with grid3
    for y in range(0,yr):
        for x in range(0,len(tmp[y])):
            # Cells may lack the attributes read here; the except paints
            # them as unreachable.
            try:
                if tmp[y][x].name == me.name:
                    print(' Y', end='')
                elif tmp[y][x].name in enemylist and tmp[y][x].hp >= 1:
                    print(' +', end='')
                elif tmp[y][x].name == 'bspace' and tmp[y][x].hp == -1:
                    print(' x',end='')
                elif tmp[y][x].name in itemlist:
                    print(' !',end='')
                elif tmp[y][x].name == 'level':
                    print(' '+str(tmp[y][x].num),end='')
                else:
                    print(' #',end='')
            except:
                print(' *', end='')
        print('')
    print('\nY = You\n+ = Live Monster\nx = Dead Monster\n! = Item\n# = Blank Space\nAny number = Level Gateway\n* = You cannot go here')
    tmp[me.y][me.x] = old
def atthandle(l, x, y, playhp):
    """Trigger the act() of whatever occupies grid cell (y, x) and return
    its result (the player's remaining hp after the encounter)."""
    occupant = l[y][x]
    return occupant.act(playhp)
def switch(l, p1y, p1x, p2y, p2x):
    """Swap the occupants of two grid cells in place (used to move monsters)."""
    l[p1y][p1x], l[p2y][p2x] = l[p2y][p2x], l[p1y][p1x]
"""
def store(call):
global callfrom
dash = "-"*50
print(dash+'\nIn the marketplace')
me.x,me.y = 0,0
callfrom = call
keyHandle(village,-1,-1,-1,'store')
"""
def keyHandle(grid, pasy, pasx,next_lev,call): #pasy and pasx = spot to win
    """Main game loop: read one-key commands and drive the player through grid.

    grid      -- 2D map of tile objects (enemies, items, interactives, levels)
    pasy/pasx -- coordinates of this map's win spot (-1/-1 = no win spot)
    next_lev  -- level unlocked/continued after winning (-1 = none)
    call      -- label of the caller (e.g. 'adventure1', 'village')

    Loops until the player dies (hp <= 0) or recurses into another level or
    the village via the Adventure*/Village entry points.  Mutates the
    module-level player ``me`` and the grid in place.
    """
    while True:
        i = input('\nAction: ')
        # Movement: tentatively step, then undo on IndexError / negative
        # coordinates ("Bonk").  yp/ym depend on the inverted-controls choice.
        if i == 'w' or i == 'W':
            me.y+=yp
            try:
                print('You walk forward and see '+grid[me.y][me.x].pview, end='')
            except:
                me.y+=ym
                print("Bonk! You can't go that way.")
        elif i == 's' or i == 'S':
            me.y+=ym
            try:
                if me.y>=0:
                    print('You take a few steps backward and turn around. You see '+grid[me.y][me.x].pview, end='')
                else:
                    me.y+=yp
                    print("Bonk! You can't go that way!")
            except:
                me.y+=yp
                print("Bonk! You can't go that way.")
        elif i == 'd' or i == 'D':
            me.x+=1
            try:
                print('You walk to the rightmost room and see '+grid[me.y][me.x].pview, end='')
            except:
                me.x-=1
                print("Bonk! You can't go that way.")
        elif i == 'a' or i == 'A':
            me.x-=1
            try:
                if me.x>=0:
                    print('You turn around and walk to the left. In front of you, you see '+grid[me.y][me.x].pview, end='')
                else:
                    me.x+=1
                    print("Bonk! You can't go that way.")
            except:
                me.x+=1
                print("Bonk! You can't go that way.")
        ############
        # Non-movement commands (stats, help, inventory, editor, map, quit).
        elif i == 'hp' or i == 'HP':
            me.printHP()
        elif i == 'h' or i == 'H':
            print(helplist)
        elif i == 'i' or i == 'I':
            me.printInvent()
        elif i == 'p' or i == 'P':
            i = input('Welcome to the Player Editor!\nWhat would you like to change? (w = weapon, n = name) ')
            if i == 'w':
                # List only inventory entries that are weapons.
                ct = 0
                for tp in range(0, len(me.invent)):
                    if me.invent[tp].name in weaponlist:
                        print(str(tp)+') '+me.invent[tp].name)
                        ct += 1
                if ct == 0:
                    print('Sorry, you have no weapons in your inventory to choose from.')
                else:
                    # Swap the chosen weapon with the currently equipped one.
                    i = input('Type weapon number: ')
                    old_weap = me.weapon
                    me.weapon = me.invent[int(i)]
                    del me.invent[int(i)]
                    me.invent.append(old_weap)
                    print('Weapon Changed!')
            elif i == 'n':
                i = input('Type your name: ')
                me.name = i
                print('Name Changed!')
            print('You: \n\nName: '+me.name+'\nHP: '+str(me.hp)+'\nWeapon: '+me.weapon.name)
        elif i == 'xy':
            print('\nX: '+str(me.x)+'\nY: '+str(me.y))
        elif i == 'wallet':
            print(str(me.wallet)+' Gold')
        elif i == 'quit':
            sys.exit()
        elif i == 'map':
            mapg(grid)
        else:
            print('Huh?')
        ############
        if me.hp<=0:
            break
        # React to whatever tile the player now stands on.
        if grid[me.y][me.x].name in enemylist:#!= 'bspace':
            # Battle: pause music, play the battle sound effect while the
            # enemy's act() resolves, then resume.
            if musc == True:
                m_chan.pause()
                s_chan.play(seffect2)
                me.hp = atthandle(grid,me.x,me.y,me)
                s_chan.stop()
                m_chan.unpause()
            else:
                me.hp = atthandle(grid,me.x,me.y,me)
        elif grid[me.y][me.x].name in itemlist:
            # Item pickup: Gold goes to the wallet, everything else to the
            # inventory; the tile is replaced with a blank space.
            inp = input(' Pick up? (Y/n) ')
            if inp == 'Y' or inp == 'y':
                if grid[me.y][me.x].name != 'Gold':
                    me.invent.append(grid[me.y][me.x])
                else:
                    me.wallet += grid[me.y][me.x].amt
                grid[me.y][me.x] = bspace5(me.x,me.y)
                print('Item added to inventory')
        elif grid[me.y][me.x].name in interlist:
            # Interactive NPC/shop: may change the player's wallet.
            me.wallet = grid[me.y][me.x].act(me)
        elif grid[me.y][me.x].name == 'level':
            # Level gateway tile: enter the level if unlocked.
            print("")
            print("-"*80)
            if grid[me.y][me.x].num == 1 and grid[me.y][me.x].locked == False:
                Adventure1(0,0,True)
            elif grid[me.y][me.x].num == 2 and grid[me.y][me.x].locked == False:
                Adventure2(0,0,True)
            elif grid[me.y][me.x].num == 3 and grid[me.y][me.x].locked == False:
                Adventure3(0,0,True)
            else:
                print("That level is locked")
        #add more for more levels
        #music
        if m_chan.get_busy() == False and musc == True:
            randnum = random.randint(0,11)
            m_chan.play(playlist[randnum])
        # Win check: reaching the win spot heals, unlocks the next level,
        # and either continues the story or returns to the village.
        if me.x == pasx and me.y == pasy:
            print('')
            print("-"*80)
            me.hp = 100
            me.x = 0
            me.y = 0
            if next_lev == 2:
                village[5][1].locked = False
            elif next_lev == 3:
                village[5][2].locked = False#add more for more levels
            print("LEVEL BEAT! NEXT LEVEL UNLOCKED!")
            print("-"*80)
            i = input('Continue story? (Y/n) ')
            if i == 'Y' or i == 'y':
                if next_lev == 2:
                    Adventure2(0,0,True)
                elif next_lev == 3:
                    Adventure3(0,0,True)
                elif next_lev == 4:
                    Adventure2(0,0,True)
                elif next_lev == 5:
                    Adventure2(0,0,True)
                elif next_lev == 6:
                    Adventure2(0,0,True)
                elif next_lev == 7:
                    Adventure2(0,0,True)
            else:
                print('In the Village')
                Village()
def Adventure1(ox, oy, mess):
    """Enter level 1 ("In the Caverns"); optionally print its intro text.

    ox/oy are accepted for interface parity but unused — the respawn
    position comes from the module-level oldx/oldy.
    """
    me.x = oldx
    me.y = oldy
    if mess == True:
        print(before_grid1)
    # Win spot at (y=11, x=4); beating this level unlocks level 2.
    keyHandle(grid1, 11, 4, 2, 'adventure1')
def Adventure2(ox, oy, mess):
    """Enter level 2; optionally print its intro text.

    ox/oy are accepted for interface parity but unused — the respawn
    position comes from the module-level oldx/oldy.
    """
    me.x = oldx
    me.y = oldy
    if mess == True:
        print(before_grid2)
    # Win spot at (y=2, x=7); beating this level unlocks level 3.
    keyHandle(grid2, 2, 7, 3, 'adventure2')
def Adventure3(ox, oy, mess):
    """Enter level 3; optionally print its intro text.

    ox/oy are accepted for interface parity but unused — the respawn
    position comes from the module-level oldx/oldy.
    """
    me.x = oldx
    me.y = oldy
    if mess == True:
        print(before_grid3)
    # Win spot at (y=5, x=2); next_lev=4 is passed through to keyHandle.
    keyHandle(grid3, 5, 2, 4, 'adventure3')
def Village():
    """Drop the player into the village hub map at (x=1, y=0)."""
    me.x = 1
    me.y = 0
    # No win spot (-1, -1) and no next level: the village is left through
    # its level-gateway tiles instead.
    keyHandle(village, -1, -1, -1, 'village')
def startScreen():
    """Play a random soundtrack, print the welcome/credits text, and launch
    level 1."""
    track = random.randint(0, 11)
    m_chan.play(playlist[track])
    print('\nWelcome to Zealous Fibula.\n\nCredits:\n Program: Starfleet Software\n Music: Turbine, Inc\n\nPress "h" for help\n')
    Adventure1(0, 0, True)
#Village()
# Startup: let the player choose the vertical control scheme, then launch
# the game.  'w' adds yp to me.y and 's' adds ym (see keyHandle).
inp = input('Inverted controls? (Y,n) ')
if inp == 'Y' or inp == 'y':
    yp, ym = 1, -1
else:
    yp, ym = -1, 1
startScreen()
from enemies import *
from world import *
from items import *
from interactives import *
k8 = mixer.Sound('resources/Shadows of Angmar.ogg')
strack9 = mixer.Sound('resources/The Creeping Gloom.ogg')
strack10 = mixer.Sound('resources/The Ice Bay.ogg')
strack11 = mixer.Sound('resources/The Road to War.ogg')
strack12 = mixer.Sound('resources/Where Will Wants Not.ogg')
playlist = [strack1,strack2,strack3,strack4,strack5,strack6,strack7,strack8,strack9,strack10,strack11,strack12]
musc = True
except:
print("Music not compatible")
musc = False
def save():
f = open('resources/save1.txt','r+')
f.write(str(me.x)+'\n')
f.write(str(me.y)+'\n')
f.write(me.name+'\n')
f.write(str(me.hp)+'\n')
f.write(me.weapon.name+'\n')
for l in range(0,len(me.invent)):
f.write(me.invent[l]+'\n')
f.write(str(me.wallet)+'\n')
f.write(str(me.skill))
def mapg(l):
tmp = l
print('')
old = tmp[me.y][me.x]
tmp[me.y][me.x] = me
if l == grid2:
yr = 9
elif l == grid1:
yr = 12
elif l == village:
yr=6
elif l == grid3:
yr=6 #Don't Forget to change
for y in range(0,yr):
for x in range(0,len(tmp[y])):
try:
if tmp[y][x].name == me.name:
print(' Y', end='')
elif tmp[y][x].name in enemylist and tmp[y][x].hp >= 1:
print(' +', end='')
elif tmp[y][x].name == 'bspace' and tmp[y][x].hp == -1:
print(' x',end='')
elif tmp[y][x].name in itemlist:
print(' !',end='')
elif tmp[y][x].name == 'level':
print(' '+str(tmp[y][x].num),end='')
else:
print(' #',end='')
except:
print(' *', end='')
print('')
print('\nY = You\n+ = Live Monster\nx = Dead Monster\n! = Item\n# = Blank Space\nAny number = Level Gateway\n* = You cannot go here')
tmp[me.y][me.x] = old
def atthandle(l,x,y,playhp):
ret = l[y][x].act(playhp)
return ret
def switch(l,p1y,p1x,p2y,p2x):
old = l[p2y][p2x]
l[p2y][p2x] = l[p1y][p1x]
l[p1y][p1x] = old
def keyHandle(grid, pasy, pasx,next_lev,call):
while True:
i = input('\nAction: ')
if i == 'w' or i == 'W':
me.y+=yp
try:
print('You walk forward and see '+grid[me.y][me.x].pview, end='')
except:
me.y+=ym
print("Bonk! You can't go that way.")
elif i == 's' or i == 'S':
me.y+=ym
try:
if me.y>=0:
print('You take a few steps backward and turn around. You see '+grid[me.y][me.x].pview, end='')
else:
me.y+=yp
print("Bonk! You can't go that way!")
except:
me.y+=yp
print("Bonk! You can't go that way.")
elif i == 'd' or i == 'D':
me.x+=1
try:
print('You walk to the rightmost room and see '+grid[me.y][me.x].pview, end='')
except:
me.x-=1
print("Bonk! You can't go that way.")
elif i == 'a' or i == 'A':
me.x-=1
try:
if me.x>=0:
print('You turn around and walk to the left. In front of you, you see '+grid[me.y][me.x].pview, end='')
else:
me.x+=1
print("Bonk! You can't go that way.")
except:
me.x+=1
print("Bonk! You can't go that way.")
print(helplist)
elif i == 'i' or i == 'I':
me.printInvent()
elif i == 'p' or i == 'P':
i = input('Welcome to the Player Editor!\nWhat would you like to change? (w = weapon, n = name) ')
if i == 'w':
ct = 0
for tp in range(0, len(me.invent)):
if me.invent[tp].name in weaponlist:
print(str(tp)+') '+me.invent[tp].name)
ct += 1
if ct == 0:
print('Sorry, you have no weapons in your inventory to choose from.')
else:
i = input('Type weapon number: ')
old_weap = me.weapon
me.weapon = me.invent[int(i)]
del me.invent[int(i)]
me.invent.append(old_weap)
print('Weapon Changed!')
elif i == 'n':
i = input('Type your name: ')
me.name = i
print('Name Changed!')
print('You: \n\nName: '+me.name+'\nHP: '+str(me.hp)+'\nWeapon: '+me.weapon.name)
elif i == 'xy':
print('\nX: '+str(me.x)+'\nY: '+str(me.y))
elif i == 'wallet':
print(str(me.wallet)+' Gold')
elif i == 'quit':
sys.exit()
elif i == 'map':
mapg(grid)
else:
print('Huh?')
t:
if musc == True:
m_chan.pause()
s_chan.play(seffect2)
me.hp = atthandle(grid,me.x,me.y,me)
s_chan.stop()
m_chan.unpause()
else:
me.hp = atthandle(grid,me.x,me.y,me)
elif grid[me.y][me.x].name in itemlist:
inp = input(' Pick up? (Y/n) ')
if inp == 'Y' or inp == 'y':
if grid[me.y][me.x].name != 'Gold':
me.invent.append(grid[me.y][me.x])
else:
me.wallet += grid[me.y][me.x].amt
grid[me.y][me.x] = bspace5(me.x,me.y)
print('Item added to inventory')
elif grid[me.y][me.x].name in interlist:
me.wallet = grid[me.y][me.x].act(me)
elif grid[me.y][me.x].name == 'level':
print("")
print("-"*80)
if grid[me.y][me.x].num == 1 and grid[me.y][me.x].locked == False:
Adventure1(0,0,True)
elif grid[me.y][me.x].num == 2 and grid[me.y][me.x].locked == False:
Adventure2(0,0,True)
elif grid[me.y][me.x].num == 3 and grid[me.y][me.x].locked == False:
Adventure3(0,0,True)
else:
print("That level is locked")
if m_chan.get_busy() == False and musc == True:
randnum = random.randint(0,11)
m_chan.play(playlist[randnum])
if me.x == pasx and me.y == pasy:
print('')
print("-"*80)
me.hp = 100
me.x = 0
me.y = 0
if next_lev == 2:
village[5][1].locked = False
elif next_lev == 3:
village[5][2].locked = False
print("LEVEL BEAT! NEXT LEVEL UNLOCKED!")
print("-"*80)
i = input('Continue story? (Y/n) ')
if i == 'Y' or i == 'y':
if next_lev == 2:
Adventure2(0,0,True)
elif next_lev == 3:
Adventure3(0,0,True)
elif next_lev == 4:
Adventure2(0,0,True)
elif next_lev == 5:
Adventure2(0,0,True)
elif next_lev == 6:
Adventure2(0,0,True)
elif next_lev == 7:
Adventure2(0,0,True)
else:
print('In the Village')
Village()
def Adventure1(ox,oy,mess):
me.x, me.y = oldx, oldy
if mess == True:
print(before_grid1)
keyHandle(grid1,11,4,2,'adventure1')
def Adventure2(ox,oy,mess):
me.x, me.y = oldx, oldy
if mess == True:
print(before_grid2)
keyHandle(grid2,2,7,3,'adventure2')
def Adventure3(ox,oy,mess):
me.x, me.y = oldx, oldy
if mess == True:
print(before_grid3)
keyHandle(grid3,5,2,4,'adventure3')
def Village():
me.x, me.y = 1, 0
keyHandle(village,-1,-1,-1,'village')
def startScreen():
randnum = random.randint(0,11)
m_chan.play(playlist[randnum])
print('\nWelcome to Zealous Fibula.\n\nCredits:\n Program: Starfleet Software\n Music: Turbine, Inc\n\nPress "h" for help\n')
Adventure1(0,0,True)
inp = input('Inverted controls? (Y,n) ')
if inp == 'Y' or inp == 'y':
yp, ym = 1, -1
else:
yp, ym = -1, 1
startScreen() | true | true |
1c2f10bfd88983bb0563efeb649c2e699009e717 | 7,933 | py | Python | sagemaker-debugger/pytorch_iterative_model_pruning/model_resnet.py | jerrypeng7773/amazon-sagemaker-examples | c5ddecce1f739a345465b9a38b064983a129141d | [
"Apache-2.0"
] | 2,610 | 2020-10-01T14:14:53.000Z | 2022-03-31T18:02:31.000Z | sagemaker-debugger/pytorch_iterative_model_pruning/model_resnet.py | jerrypeng7773/amazon-sagemaker-examples | c5ddecce1f739a345465b9a38b064983a129141d | [
"Apache-2.0"
] | 1,959 | 2020-09-30T20:22:42.000Z | 2022-03-31T23:58:37.000Z | sagemaker-debugger/pytorch_iterative_model_pruning/model_resnet.py | jerrypeng7773/amazon-sagemaker-examples | c5ddecce1f739a345465b9a38b064983a129141d | [
"Apache-2.0"
] | 2,052 | 2020-09-30T22:11:46.000Z | 2022-03-31T23:02:51.000Z | import numpy as np
import smdebug
import torch
import torch.nn as nn
import torchvision
from smdebug import modes
from torchvision import models
# list of ordered tensor names
# ReLU activation outputs recorded by smdebug for each ResNet basic block,
# in network order (the stem ReLU entry is commented out / excluded).
activation_outputs = [
    #'relu_ReLU_output_0',
    "layer1.0.relu_0_output_0",
    "layer1.1.relu_0_output_0",
    "layer2.0.relu_0_output_0",
    "layer2.1.relu_0_output_0",
    "layer3.0.relu_0_output_0",
    "layer3.1.relu_0_output_0",
    "layer4.0.relu_0_output_0",
    "layer4.1.relu_0_output_0",
]
# Gradients of the same ReLU outputs, in matching order.
gradients = [
    #'gradient/relu_ReLU_output',
    "gradient/layer1.0.relu_ReLU_output",
    "gradient/layer1.1.relu_ReLU_output",
    "gradient/layer2.0.relu_ReLU_output",
    "gradient/layer2.1.relu_ReLU_output",
    "gradient/layer3.0.relu_ReLU_output",
    "gradient/layer3.1.relu_ReLU_output",
    "gradient/layer4.0.relu_ReLU_output",
    "gradient/layer4.1.relu_ReLU_output",
]
# function to prune layers
def prune(model, filters_list, trial, step):
    """Remove the listed convolution filters from ``model`` in place.

    filters_list -- iterable of (layer_name, channel_index, _) tuples naming
                    the conv filters to drop
    trial, step  -- smdebug trial and step from which the trained weights,
                    biases and batch-norm statistics are read (TRAIN mode)

    Shrinks the affected conv/bn layers (and the first fc layer's inputs),
    loads the surviving parameters from the recorded tensors, and returns
    the model.

    NOTE(review): ``counter`` and ``exclude`` are assigned but never used.
    """
    # dict that has a list of filters to be pruned per layer
    filters_dict = {}
    for layer_name, channel, _ in filters_list:
        if layer_name not in filters_dict:
            filters_dict[layer_name] = []
        filters_dict[layer_name].append(channel)
    counter = 0
    in_channels_dense = 0
    exclude_filters = None
    in_channels = 3
    exclude = False
    # iterate over layers in the ResNet model
    for named_module in model.named_modules():
        layer_name = named_module[0]
        layer = named_module[1]
        # check if current layer is a convolutional layer
        if isinstance(layer, torch.nn.modules.conv.Conv2d):
            # remember the output channels of non-pruned convolution (needed for pruning first fc layer)
            in_channels_dense = layer.out_channels
            # create key to find right weights/bias/filters for the corresponding layer
            weight_name = "ResNet_" + layer_name + ".weight"
            # get weight values from last available training step
            weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
            # we need to adjust the number of input channels,
            # if previous covolution has been pruned
            # print( "current:", layer.in_channels, "previous", in_channels, layer_name, weight_name)
            if "conv1" in layer_name or "conv2" in layer_name:
                if layer.in_channels != in_channels:
                    layer.in_channels = in_channels
                    weight = np.delete(weight, exclude_filters, axis=1)
                    exclude_filters = None
            # if current layer is in the list of filters to be pruned
            if "conv1" in layer_name:
                # NOTE(review): str.strip("conv1") removes any of the
                # characters c/o/n/v/1 from both ends, not the substring;
                # it works here only because of how the layer names are formed.
                layer_id = layer_name.strip("conv1")
                for key in filters_dict:
                    if len(layer_id) > 0 and layer_id in key:
                        print(
                            "Reduce output channels for conv layer",
                            layer_id,
                            "from",
                            layer.out_channels,
                            "to",
                            layer.out_channels - len(filters_dict[key]),
                        )
                        # set new output channels
                        layer.out_channels = layer.out_channels - len(filters_dict[key])
                        # remove corresponding filters from weights and bias
                        # convolution weights have dimension: filter x channel x kernel x kernel
                        exclude_filters = filters_dict[key]
                        weight = np.delete(weight, exclude_filters, axis=0)
                        break
            # remember new size of output channels, because we need to prune subsequent convolution
            in_channels = layer.out_channels
            # set pruned weight and bias
            layer.weight.data = torch.from_numpy(weight)
        if isinstance(layer, torch.nn.modules.batchnorm.BatchNorm2d):
            # get weight values from last available training step
            weight_name = "ResNet_" + layer_name + ".weight"
            weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
            # get bias values from last available training step
            bias_name = "ResNet_" + layer_name + ".bias"
            bias = trial.tensor(bias_name).value(step, mode=modes.TRAIN)
            # get running_mean values from last available training step
            mean_name = layer_name + ".running_mean_output_0"
            mean = trial.tensor(mean_name).value(step, mode=modes.TRAIN)
            # get running_var values from last available training step
            var_name = layer_name + ".running_var_output_0"
            var = trial.tensor(var_name).value(step, mode=modes.TRAIN)
            # if current layer is in the list of filters to be pruned
            if "bn1" in layer_name:
                layer_id = layer_name.strip("bn1")
                for key in filters_dict:
                    if len(layer_id) > 0 and layer_id in key:
                        print(
                            "Reduce bn layer",
                            layer_id,
                            "from",
                            weight.shape[0],
                            "to",
                            weight.shape[0] - len(filters_dict[key]),
                        )
                        # remove corresponding filters from weights and bias
                        # convolution weights have dimension: filter x channel x kernel x kernel
                        exclude_filters = filters_dict[key]
                        weight = np.delete(weight, exclude_filters, axis=0)
                        bias = np.delete(bias, exclude_filters, axis=0)
                        mean = np.delete(mean, exclude_filters, axis=0)
                        var = np.delete(var, exclude_filters, axis=0)
                        break
            # set pruned weight and bias (plus running statistics)
            layer.weight.data = torch.from_numpy(weight)
            layer.bias.data = torch.from_numpy(bias)
            layer.running_mean.data = torch.from_numpy(mean)
            layer.running_var.data = torch.from_numpy(var)
            layer.num_features = weight.shape[0]
            in_channels = weight.shape[0]
        if isinstance(layer, torch.nn.modules.linear.Linear):
            # get weight values from last available training step
            weight_name = "ResNet_" + layer_name + ".weight"
            weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
            # get bias values from last available training step
            bias_name = "ResNet_" + layer_name + ".bias"
            bias = trial.tensor(bias_name).value(step, mode=modes.TRAIN)
            # prune first fc layer
            if exclude_filters is not None:
                # in_channels_dense is the number of output channels of last non-pruned convolution layer
                params = int(layer.in_features / in_channels_dense)
                # prune weights of first fc layer: each pruned conv filter
                # accounts for a contiguous run of `params` input features
                indexes = []
                for i in exclude_filters:
                    indexes.extend(np.arange(i * params, (i + 1) * params))
                    if indexes[-1] > weight.shape[1]:
                        indexes.extend(np.arange(weight.shape[1] - params, weight.shape[1]))
                weight = np.delete(weight, indexes, axis=1)
                print(
                    "Reduce weights for first linear layer from",
                    layer.in_features,
                    "to",
                    weight.shape[1],
                )
                # set new in_features
                layer.in_features = weight.shape[1]
                exclude_filters = None
            # set weights
            layer.weight.data = torch.from_numpy(weight)
            # set bias
            layer.bias.data = torch.from_numpy(bias)
    return model
| 40.065657 | 105 | 0.571411 | import numpy as np
import smdebug
import torch
import torch.nn as nn
import torchvision
from smdebug import modes
from torchvision import models
# smdebug tensor names of the ReLU activation outputs, one per basic block
# of the four ResNet stages (layer1..layer4, blocks 0 and 1).
activation_outputs = [
    f"layer{stage}.{block}.relu_0_output_0"
    for stage in range(1, 5)
    for block in range(2)
]
# smdebug tensor names of the gradients flowing through the same ReLU
# outputs, one per basic block of the four ResNet stages.
gradients = [
    f"gradient/layer{stage}.{block}.relu_ReLU_output"
    for stage in range(1, 5)
    for block in range(2)
]
def _module_prefix(name, suffix):
    """Return the qualified-name prefix in front of *suffix*.

    E.g. ``_module_prefix("layer1.0.conv1", "conv1")`` -> ``"layer1.0."``.
    The original code used ``name.strip(suffix)``, which removes *characters*
    from both ends rather than the suffix substring; for torchvision ResNet
    module names both forms happen to produce the same result, so the strip
    call is kept as a fallback for names that do not end with the suffix.
    """
    return name[: -len(suffix)] if name.endswith(suffix) else name.strip(suffix)


def prune(model, filters_list, trial, step):
    """Remove the listed filters from a ResNet model, in place.

    Parameter values are reloaded from the smdebug *trial* at training step
    *step*, the channels named in *filters_list* are deleted from the
    convolution / batch-norm / first-linear layers, and the pruned arrays are
    written back into ``model``.

    Args:
        model: torch ResNet whose Conv2d, BatchNorm2d and Linear modules are
            resized in place.
        filters_list: iterable of ``(layer_name, channel, _)`` tuples naming
            the output channels to drop (the third element is ignored).
        trial: smdebug trial exposing tensors named ``"ResNet_<layer>.weight"``
            / ``".bias"`` and ``"<layer>.running_{mean,var}_output_0"`` for
            TRAIN mode.
        step: training step at which tensor values are read from *trial*.

    Returns:
        The same ``model`` object, mutated in place.
    """
    # Group the channel indices to drop by layer name.
    filters_dict = {}
    for layer_name, channel, _ in filters_list:
        filters_dict.setdefault(layer_name, []).append(channel)

    in_channels_dense = 0   # out_channels of the last conv seen (feeds the fc layer)
    exclude_filters = None  # channels dropped from the previous layer's outputs
    in_channels = 3         # expected input-channel count for the next conv

    for layer_name, layer in model.named_modules():
        if isinstance(layer, torch.nn.modules.conv.Conv2d):
            in_channels_dense = layer.out_channels
            # Reload this layer's weights from the last recorded training step.
            weight_name = "ResNet_" + layer_name + ".weight"
            weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
            if "conv1" in layer_name or "conv2" in layer_name:
                # The preceding layer lost output channels: shrink this
                # layer's input side to match. Convolution weights have
                # dimension filter x channel x kernel x kernel.
                if layer.in_channels != in_channels:
                    layer.in_channels = in_channels
                    weight = np.delete(weight, exclude_filters, axis=1)
                    exclude_filters = None
            if "conv1" in layer_name:
                layer_id = _module_prefix(layer_name, "conv1")
                for key in filters_dict:
                    # NOTE(review): the block prefix (e.g. "layer1.0.") is
                    # matched as a substring of the recorded name -- confirm
                    # filters_list uses the same name qualification.
                    if len(layer_id) > 0 and layer_id in key:
                        print(
                            "Reduce output channels for conv layer",
                            layer_id,
                            "from",
                            layer.out_channels,
                            "to",
                            layer.out_channels - len(filters_dict[key]),
                        )
                        layer.out_channels = layer.out_channels - len(filters_dict[key])
                        exclude_filters = filters_dict[key]
                        # Drop the pruned output filters (axis 0).
                        weight = np.delete(weight, exclude_filters, axis=0)
                        break
                in_channels = layer.out_channels
            layer.weight.data = torch.from_numpy(weight)
        elif isinstance(layer, torch.nn.modules.batchnorm.BatchNorm2d):
            # Reload the affine parameters and running statistics.
            weight_name = "ResNet_" + layer_name + ".weight"
            weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
            bias_name = "ResNet_" + layer_name + ".bias"
            bias = trial.tensor(bias_name).value(step, mode=modes.TRAIN)
            mean_name = layer_name + ".running_mean_output_0"
            mean = trial.tensor(mean_name).value(step, mode=modes.TRAIN)
            var_name = layer_name + ".running_var_output_0"
            var = trial.tensor(var_name).value(step, mode=modes.TRAIN)
            if "bn1" in layer_name:
                layer_id = _module_prefix(layer_name, "bn1")
                for key in filters_dict:
                    if len(layer_id) > 0 and layer_id in key:
                        print(
                            "Reduce bn layer",
                            layer_id,
                            "from",
                            weight.shape[0],
                            "to",
                            weight.shape[0] - len(filters_dict[key]),
                        )
                        # Remove the per-channel entries of the pruned filters.
                        exclude_filters = filters_dict[key]
                        weight = np.delete(weight, exclude_filters, axis=0)
                        bias = np.delete(bias, exclude_filters, axis=0)
                        mean = np.delete(mean, exclude_filters, axis=0)
                        var = np.delete(var, exclude_filters, axis=0)
                        break
            # Write the (possibly pruned) parameters and stats back.
            layer.weight.data = torch.from_numpy(weight)
            layer.bias.data = torch.from_numpy(bias)
            layer.running_mean.data = torch.from_numpy(mean)
            layer.running_var.data = torch.from_numpy(var)
            layer.num_features = weight.shape[0]
            in_channels = weight.shape[0]
        elif isinstance(layer, torch.nn.modules.linear.Linear):
            weight_name = "ResNet_" + layer_name + ".weight"
            weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
            bias_name = "ResNet_" + layer_name + ".bias"
            bias = trial.tensor(bias_name).value(step, mode=modes.TRAIN)
            # Prune the first fc layer if a preceding conv was pruned.
            if exclude_filters is not None:
                # Each pruned conv filter removes `params` consecutive fc
                # inputs; in_channels_dense is the output-channel count of
                # the last (non-pruned) convolution layer.
                params = int(layer.in_features / in_channels_dense)
                indexes = []
                for i in exclude_filters:
                    indexes.extend(np.arange(i * params, (i + 1) * params))
                # NOTE(review): when the computed indexes overrun the weight
                # matrix, this *appends* the last `params` column indices
                # rather than clamping; an out-of-range index handed to
                # np.delete raises, and the boundary test arguably should be
                # ``>=`` -- confirm the intended behavior before changing it.
                if indexes[-1] > weight.shape[1]:
                    indexes.extend(np.arange(weight.shape[1] - params, weight.shape[1]))
                weight = np.delete(weight, indexes, axis=1)
                print(
                    "Reduce weights for first linear layer from",
                    layer.in_features,
                    "to",
                    weight.shape[1],
                )
                layer.in_features = weight.shape[1]
                exclude_filters = None
            layer.weight.data = torch.from_numpy(weight)
            layer.bias.data = torch.from_numpy(bias)
    return model
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.