| Column | Type |
|---|---|
| hexsha | string (length 40) |
| size | int64 (4–996k) |
| ext | string (8 classes) |
| lang | string (1 class) |
| max_stars_repo_path | string (length 4–245) |
| max_stars_repo_name | string (length 6–130) |
| max_stars_repo_head_hexsha | string (length 40) |
| max_stars_repo_licenses | list (length 1–10) |
| max_stars_count | int64 (1–191k, nullable) |
| max_stars_repo_stars_event_min_datetime | string (length 24, nullable) |
| max_stars_repo_stars_event_max_datetime | string (length 24, nullable) |
| max_issues_repo_path | string (length 4–245) |
| max_issues_repo_name | string (length 6–130) |
| max_issues_repo_head_hexsha | string (length 40) |
| max_issues_repo_licenses | list (length 1–10) |
| max_issues_count | int64 (1–67k, nullable) |
| max_issues_repo_issues_event_min_datetime | string (length 24, nullable) |
| max_issues_repo_issues_event_max_datetime | string (length 24, nullable) |
| max_forks_repo_path | string (length 4–245) |
| max_forks_repo_name | string (length 6–130) |
| max_forks_repo_head_hexsha | string (length 40) |
| max_forks_repo_licenses | list (length 1–10) |
| max_forks_count | int64 (1–105k, nullable) |
| max_forks_repo_forks_event_min_datetime | string (length 24, nullable) |
| max_forks_repo_forks_event_max_datetime | string (length 24, nullable) |
| content | string (length 4–996k) |
| avg_line_length | float64 (1.33–58.2k) |
| max_line_length | int64 (2–323k) |
| alphanum_fraction | float64 (0–0.97) |
| content_no_comment | string (length 0–946k) |
| is_comment_constant_removed | bool (2 classes) |
| is_sharp_comment_removed | bool (1 class) |
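Each row in the table above is one source file plus its repository metadata. A minimal sketch of iterating such a dataset with the Hugging Face `datasets` library; the dataset id below is a placeholder, not something given in this dump:

```python
from datasets import load_dataset  # assumes the rows come from an HF dataset

# Placeholder dataset id -- substitute the one this dump was taken from.
rows = load_dataset("org/python-files", split="train", streaming=True)
for row in rows:
    # Field names match the schema above.
    print(row["hexsha"], row["max_stars_repo_path"], row["size"])
    break
```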
hexsha: 7905dea82ab2b8864ab7722f158b90a224ebe103 | size: 18,434 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: apps/amo/log.py @ muffinresearch/zamboni (head 045a6f07c775b99672af6d9857d295ed02fe5dd9), licenses ["BSD-3-Clause"]; counts and event datetimes: null
content:
from inspect import isclass
from django.conf import settings
from django.core.files.storage import get_storage_class
from celery.datastructures import AttributeDict
from tower import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
action_class = None
class CREATE_ADDON(_LOG):
id = 1
action_class = 'add'
format = _(u'{addon} was created.')
keep = True
class EDIT_PROPERTIES(_LOG):
""" Expects: addon """
id = 2
action_class = 'edit'
format = _(u'{addon} properties edited.')
class EDIT_DESCRIPTIONS(_LOG):
id = 3
action_class = 'edit'
format = _(u'{addon} description edited.')
class EDIT_CATEGORIES(_LOG):
id = 4
action_class = 'edit'
format = _(u'Categories edited for {addon}.')
class ADD_USER_WITH_ROLE(_LOG):
id = 5
action_class = 'add'
format = _(u'{0.name} ({1}) added to {addon}.')
keep = True
class REMOVE_USER_WITH_ROLE(_LOG):
id = 6
action_class = 'delete'
# L10n: {0} is the user being removed, {1} is their role.
format = _(u'{0.name} ({1}) removed from {addon}.')
keep = True
class EDIT_CONTRIBUTIONS(_LOG):
id = 7
action_class = 'edit'
format = _(u'Contributions for {addon}.')
class USER_DISABLE(_LOG):
id = 8
format = _(u'{addon} disabled.')
keep = True
class USER_ENABLE(_LOG):
id = 9
format = _(u'{addon} enabled.')
keep = True
# TODO(davedash): Log these types when pages are present
class SET_PUBLIC_STATS(_LOG):
id = 10
format = _(u'Stats set public for {addon}.')
keep = True
# TODO(davedash): Log these types when pages are present
class UNSET_PUBLIC_STATS(_LOG):
id = 11
format = _(u'{addon} stats set to private.')
keep = True
class CHANGE_STATUS(_LOG):
id = 12
# L10n: {0} is the status
format = _(u'{addon} status changed to {0}.')
keep = True
class ADD_PREVIEW(_LOG):
id = 13
action_class = 'add'
format = _(u'Preview added to {addon}.')
class EDIT_PREVIEW(_LOG):
id = 14
action_class = 'edit'
format = _(u'Preview edited for {addon}.')
class DELETE_PREVIEW(_LOG):
id = 15
action_class = 'delete'
format = _(u'Preview deleted from {addon}.')
class ADD_VERSION(_LOG):
id = 16
action_class = 'add'
format = _(u'{version} added to {addon}.')
keep = True
class EDIT_VERSION(_LOG):
id = 17
action_class = 'edit'
format = _(u'{version} edited for {addon}.')
class DELETE_VERSION(_LOG):
id = 18
action_class = 'delete'
# Note, {0} is a string not a version since the version is deleted.
# L10n: {0} is the version number
format = _(u'Version {0} deleted from {addon}.')
keep = True
class ADD_FILE_TO_VERSION(_LOG):
id = 19
action_class = 'add'
format = _(u'File {0.name} added to {version} of {addon}.')
class DELETE_FILE_FROM_VERSION(_LOG):
"""
Expecting: addon, filename, version
Because the file is being deleted, filename and version
should be strings and not the object.
"""
id = 20
action_class = 'delete'
format = _(u'File {0} deleted from {version} of {addon}.')
class APPROVE_VERSION(_LOG):
id = 21
action_class = 'approve'
format = _(u'{addon} {version} approved.')
short = _(u'Approved')
keep = True
review_email_user = True
review_queue = True
class PRELIMINARY_VERSION(_LOG):
id = 42
action_class = 'approve'
format = _(u'{addon} {version} given preliminary review.')
short = _(u'Preliminarily approved')
keep = True
review_email_user = True
review_queue = True
class REJECT_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 43
action_class = 'reject'
format = _(u'{addon} {version} rejected.')
short = _(u'Rejected')
keep = True
review_email_user = True
review_queue = True
class RETAIN_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 22
format = _(u'{addon} {version} retained.')
short = _(u'Retained')
keep = True
review_email_user = True
review_queue = True
class ESCALATE_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 23
format = _(u'{addon} {version} escalated.')
short = _(u'Escalated')
keep = True
review_email_user = True
review_queue = True
class REQUEST_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 24
format = _(u'{addon} {version} review requested.')
short = _(u'Review requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_INFORMATION(_LOG):
id = 44
format = _(u'{addon} {version} more information requested.')
short = _(u'More information requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_SUPER_REVIEW(_LOG):
id = 45
format = _(u'{addon} {version} super review requested.')
short = _(u'Super review requested')
keep = True
review_queue = True
class COMMENT_VERSION(_LOG):
id = 49
format = _(u'Comment on {addon} {version}.')
short = _(u'Comment')
keep = True
review_queue = True
hide_developer = True
class ADD_TAG(_LOG):
id = 25
action_class = 'tag'
format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
id = 26
action_class = 'tag'
format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
id = 27
action_class = 'collection'
format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
id = 28
action_class = 'collection'
format = _(u'{addon} removed from {collection}.')
class ADD_REVIEW(_LOG):
id = 29
action_class = 'review'
format = _(u'{review} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
id = 31
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
id = 32
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
id = 33
format = _(u'{addon} is now featured.')
keep = True
class REMOVE_RECOMMENDED(_LOG):
id = 34
format = _(u'{addon} is no longer featured.')
keep = True
class ADD_APPVERSION(_LOG):
id = 35
action_class = 'add'
# L10n: {0} is the application, {1} is the version of the app
format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
""" Expects: author.user, role, addon """
id = 36
# L10n: {0} is a user, {1} is their role
format = _(u'{0.name} role changed to {1} for {addon}.')
keep = True
class CHANGE_LICENSE(_LOG):
""" Expects: license, addon """
id = 37
action_class = 'edit'
format = _(u'{addon} is now licensed under {0.name}.')
class CHANGE_POLICY(_LOG):
id = 38
action_class = 'edit'
format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
id = 39
action_class = 'edit'
format = _(u'{addon} icon changed.')
class APPROVE_REVIEW(_LOG):
id = 40
action_class = 'approve'
format = _(u'{review} for {addon} approved.')
editor_format = _(u'{user} approved {review} for {addon}.')
keep = True
editor_event = True
class DELETE_REVIEW(_LOG):
"""Requires review.id and add-on objects."""
id = 41
action_class = 'review'
format = _(u'Review {0} for {addon} deleted.')
editor_format = _(u'{user} deleted {0} for {addon}.')
keep = True
editor_event = True
class MAX_APPVERSION_UPDATED(_LOG):
id = 46
format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
id = 47
format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
id = 130
format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
id = 48
format = _(u'Password changed.')
class MAKE_PREMIUM(_LOG):
id = 50
format = _(u'{addon} changed to premium.')
class MANIFEST_UPDATED(_LOG):
id = 52
format = _(u'{addon} manifest updated.')
class APPROVE_VERSION_WAITING(_LOG):
id = 53
action_class = 'approve'
format = _(u'{addon} {version} approved but waiting to be made public.')
short = _(u'Approved but waiting')
keep = True
review_email_user = True
review_queue = True
class PURCHASE_ADDON(_LOG):
id = 54
format = _(u'{addon} purchased.')
class INSTALL_ADDON(_LOG):
id = 55
format = _(u'{addon} installed.')
class REFUND_REQUESTED(_LOG):
id = 56
    format = _(u'Refund requested for {addon}.')
class REFUND_DECLINED(_LOG):
id = 57
format = _(u'Refund declined for {addon} for {0}.')
class REFUND_GRANTED(_LOG):
id = 58
format = _(u'Refund granted for {addon} for {0}.')
class REFUND_INSTANT(_LOG):
id = 59
format = _(u'Instant refund granted for {addon}.')
class USER_EDITED(_LOG):
id = 60
format = _(u'Account updated.')
class RECEIPT_CHECKED(_LOG):
id = 65
format = _(u'Valid receipt was checked for {addon}.')
class ESCALATION_CLEARED(_LOG):
id = 66
format = _(u'Escalation cleared for {addon}.')
short = _(u'Escalation cleared')
keep = True
review_queue = True
class APP_DISABLED(_LOG):
id = 67
format = _(u'{addon} disabled.')
short = _(u'App disabled')
keep = True
review_queue = True
class ESCALATED_HIGH_ABUSE(_LOG):
id = 68
format = _(u'{addon} escalated because of high number of abuse reports.')
short = _(u'High Abuse Reports')
keep = True
review_queue = True
class ESCALATED_HIGH_REFUNDS(_LOG):
id = 69
format = _(u'{addon} escalated because of high number of refund requests.')
short = _(u'High Refund Requests')
keep = True
review_queue = True
class REREVIEW_MANIFEST_CHANGE(_LOG):
id = 70
format = _(u'{addon} re-reviewed because of manifest change.')
short = _(u'Manifest Change')
keep = True
review_queue = True
class REREVIEW_PREMIUM_TYPE_UPGRADE(_LOG):
id = 71
format = _(u'{addon} re-reviewed because app upgraded premium type.')
short = _(u'Premium Type Upgrade')
keep = True
review_queue = True
class REREVIEW_CLEARED(_LOG):
id = 72
format = _(u'Re-review cleared for {addon}.')
short = _(u'Re-review cleared')
keep = True
review_queue = True
class ESCALATE_MANUAL(_LOG):
id = 73
format = _(u'{addon} escalated by reviewer.')
short = _(u'Reviewer escalation')
keep = True
review_queue = True
# TODO(robhudson): Escalation log for editor escalation.
class VIDEO_ERROR(_LOG):
id = 74
    format = _(u'Video removed from {addon} because of a problem with '
               'the video.')
short = _(u'Video removed')
class REREVIEW_DEVICES_ADDED(_LOG):
id = 75
    format = _(u'{addon} re-reviewed because of new device(s) added.')
short = _(u'Device(s) Added')
keep = True
review_queue = True
class REVIEW_DEVICE_OVERRIDE(_LOG):
id = 76
format = _(u'{addon} device support manually changed by reviewer.')
short = _(u'Device(s) Changed by Reviewer')
keep = True
review_queue = True
class WEBAPP_RESUBMIT(_LOG):
id = 77
format = _(u'{addon} resubmitted for review.')
short = _(u'App Resubmission')
keep = True
review_queue = True
class ESCALATION_VIP_APP(_LOG):
id = 78
    format = _(u"{addon} auto-escalated because it's a VIP app.")
short = _(u'VIP auto-escalation')
keep = True
review_queue = True
class REREVIEW_MANIFEST_URL_CHANGE(_LOG):
id = 79
format = _(u'{addon} re-reviewed because of manifest URL change.')
short = _(u'Manifest URL Change')
keep = True
review_queue = True
class ESCALATION_PRERELEASE_APP(_LOG):
id = 80
    format = _(u"{addon} auto-escalated because it's a prerelease app.")
short = _(u'Prerelease auto-escalation')
keep = True
review_queue = True
class CUSTOM_TEXT(_LOG):
id = 98
format = '{0}'
class CUSTOM_HTML(_LOG):
id = 99
format = '{0}'
class OBJECT_ADDED(_LOG):
id = 100
format = _(u'Created: {0}.')
admin_event = True
class OBJECT_EDITED(_LOG):
id = 101
format = _(u'Edited field: {2} set to: {0}.')
admin_event = True
class OBJECT_DELETED(_LOG):
id = 102
format = _(u'Deleted: {1}.')
admin_event = True
class ADMIN_USER_EDITED(_LOG):
id = 103
format = _(u'User {user} edited, reason: {1}')
admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
id = 104
format = _(u'User {user} anonymized.')
admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
id = 105
format = _(u'User {user} restricted.')
admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
id = 106
format = _(u'Admin {0} viewed activity log for {user}.')
admin_event = True
class EDIT_REVIEW(_LOG):
id = 107
action_class = 'review'
format = _(u'{review} for {addon} updated.')
class THEME_REVIEW(_LOG):
id = 108
action_class = 'review'
format = _(u'{addon} reviewed.')
class GROUP_USER_ADDED(_LOG):
id = 120
action_class = 'access'
format = _(u'User {0.name} added to {group}.')
keep = True
admin_event = True
class GROUP_USER_REMOVED(_LOG):
id = 121
action_class = 'access'
format = _(u'User {0.name} removed from {group}.')
keep = True
admin_event = True
class REVIEW_FEATURES_OVERRIDE(_LOG):
id = 122
format = _(u'{addon} minimum requirements manually changed by reviewer.')
short = _(u'Requirements Changed by Reviewer')
keep = True
review_queue = True
class REREVIEW_FEATURES_CHANGED(_LOG):
id = 123
format = _(u'{addon} minimum requirements manually changed.')
short = _(u'Requirements Changed')
keep = True
review_queue = True
class CHANGE_VERSION_STATUS(_LOG):
id = 124
# L10n: {0} is the status
format = _(u'{version} status changed to {0}.')
keep = True
class DELETE_USER_LOOKUP(_LOG):
id = 125
# L10n: {0} is the status
format = _(u'User {0.name} {0.id} deleted via lookup tool.')
keep = True
class CONTENT_RATING_TO_ADULT(_LOG):
id = 126
format = _('{addon} content rating changed to Adult.')
review_queue = True
class CONTENT_RATING_CHANGED(_LOG):
id = 127
format = _('{addon} content rating changed.')
class PRIORITY_REVIEW_REQUESTED(_LOG):
id = 128
format = _(u'Priority review requested for {addon}.')
short = _(u'Priority Review')
keep = True
review_queue = True
LOGS = [x for x in vars().values()
if isclass(x) and issubclass(x, _LOG) and x != _LOG]
LOG_BY_ID = dict((l.id, l) for l in LOGS)
LOG = AttributeDict((l.__name__, l) for l in LOGS)
LOG_ADMINS = [l.id for l in LOGS if hasattr(l, 'admin_event')]
LOG_KEEP = [l.id for l in LOGS if hasattr(l, 'keep')]
LOG_EDITORS = [l.id for l in LOGS if hasattr(l, 'editor_event')]
LOG_REVIEW_QUEUE = [l.id for l in LOGS if hasattr(l, 'review_queue')]
# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [l.id for l in LOGS if hasattr(l, 'review_email_user')]
# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [l.id for l in LOGS
if (getattr(l, 'hide_developer', False)
or l.id in LOG_ADMINS)]
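# Illustrative only (not part of the original module): these derived lists
# hold plain integer action ids, so consumers filter with membership tests,
# e.g. `entry.action in LOG_HIDE_DEVELOPER` ('entry' being a hypothetical
# ActivityLog row) to decide whether a row may be shown to the developer.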
def log(action, *args, **kw):
"""
e.g. amo.log(amo.LOG.CREATE_ADDON, []),
amo.log(amo.LOG.ADD_FILE_TO_VERSION, file, version)
"""
from amo import get_user, logger_log
from mkt.developers.models import (ActivityLog, ActivityLogAttachment,
AppLog, CommentLog, GroupLog, UserLog,
VersionLog)
from mkt.access.models import Group
from mkt.webapps.models import Webapp
from mkt.users.models import UserProfile
from mkt.versions.models import Version
user = kw.get('user', get_user())
if not user:
logger_log.warning('Activity log called with no user: %s' % action.id)
return
al = ActivityLog(user=user, action=action.id)
al.arguments = args
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog(comments=al.details['comments'], activity_log=al).save()
# TODO(davedash): post-remora this may not be necessary.
if 'created' in kw:
al.created = kw['created']
# Double save necessary since django resets the created date on save.
al.save()
if 'attachments' in kw:
formset = kw['attachments']
storage = get_storage_class()()
for form in formset:
data = form.cleaned_data
if 'attachment' in data:
attachment = data['attachment']
storage.save('%s/%s' % (settings.REVIEWER_ATTACHMENTS_PATH,
attachment.name), attachment)
ActivityLogAttachment(activity_log=al,
description=data['description'],
mimetype=attachment.content_type,
filepath=attachment.name).save()
for arg in args:
if isinstance(arg, tuple):
if arg[0] == Webapp:
AppLog(addon_id=arg[1], activity_log=al).save()
elif arg[0] == Version:
VersionLog(version_id=arg[1], activity_log=al).save()
elif arg[0] == UserProfile:
UserLog(user_id=arg[1], activity_log=al).save()
elif arg[0] == Group:
GroupLog(group_id=arg[1], activity_log=al).save()
# Webapp first since Webapp subclasses Addon.
if isinstance(arg, Webapp):
AppLog(addon=arg, activity_log=al).save()
elif isinstance(arg, Version):
VersionLog(version=arg, activity_log=al).save()
elif isinstance(arg, UserProfile):
# Index by any user who is mentioned as an argument.
UserLog(activity_log=al, user=arg).save()
elif isinstance(arg, Group):
GroupLog(group=arg, activity_log=al).save()
# Index by every user
UserLog(activity_log=al, user=user).save()
return al
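# A minimal usage sketch based on the docstring examples ('app', 'version' and
# 'file_obj' are hypothetical model instances, not defined in this module):
#
#     log(LOG.CREATE_ADDON, app)
#     log(LOG.ADD_FILE_TO_VERSION, file_obj, version)
#     log(LOG.CHANGE_STATUS, app, 'public')        # '{0}' is filled with 'public'
#     log(LOG.ADD_VERSION, (Webapp, app.id), version)  # (class, id) tuple form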
avg_line_length: 23.909209 | max_line_length: 79 | alphanum_fraction: 0.628241
content_no_comment: (the content above with comments and docstrings removed)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7905dfc7c425e90394fdec286d7de03051c3f68f | size: 543 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: yatube/manage.py @ igredk/hw05_final (head 7232cd789886bf21a85d2a9ea3c5f0ad7e4a676f), licenses ["MIT"]; counts and event datetimes: null
content:
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yatube.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
avg_line_length: 25.857143 | max_line_length: 73 | alphanum_fraction: 0.672192
content_no_comment: (identical to the content above; the file contains no comments)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7905e0444ba3fed64a961b6e513973ffcc55751e | size: 5,249 | ext: py | lang: Python
max_stars repo: autotest/ogr/ogr_edigeo.py @ drons/gdal (head 333b9071b98c2651bded9a4087511031499a8232), licenses ["MIT"]; stars count: 1; star events: 2019-12-20T09:17:19.000Z to 2019-12-20T09:17:19.000Z
max_issues / max_forks repo: autotest/ogr/ogr_edigeo.py @ GISerliang/gdal (head 63bf84c3477f09d30037e7c8d70d4c20c1475e6d), licenses ["MIT"]; counts and event datetimes: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for OGR EDIGEO driver.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2011, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from osgeo import ogr
sys.path.append('../pymod')
import gdaltest
import ogrtest
###############################################################################
def ogr_edigeo_1():
filelist = ['E000AB01.THF',
'EDAB01S1.VEC',
'EDAB01SE.DIC',
'EDAB01SE.GEN',
'EDAB01SE.GEO',
'EDAB01SE.QAL',
'EDAB01SE.SCD',
'EDAB01T1.VEC',
'EDAB01T2.VEC',
'EDAB01T3.VEC']
# base_url = 'http://svn.geotools.org/trunk/modules/unsupported/edigeo/src/test/resources/org/geotools/data/edigeo/test-data/'
base_url = 'https://raw.githubusercontent.com/geotools/geotools/master/modules/unsupported/edigeo/src/test/resources/org/geotools/data/edigeo/test-data/'
for filename in filelist:
if not gdaltest.download_file(base_url + filename, filename):
return 'skip'
try:
for filename in filelist:
os.stat('tmp/cache/' + filename)
except OSError:
return 'skip'
ds = ogr.Open('tmp/cache/E000AB01.THF')
if ds.GetLayerCount() != 24:
print(ds.GetLayerCount())
return 'fail'
layers = [('BATIMENT_id', ogr.wkbPolygon, 107),
('BORNE_id', ogr.wkbPoint, 5),
('COMMUNE_id', ogr.wkbPolygon, 1),
('LIEUDIT_id', ogr.wkbPolygon, 3),
('NUMVOIE_id', ogr.wkbPoint, 43),
('PARCELLE_id', ogr.wkbPolygon, 155),
('SECTION_id', ogr.wkbPolygon, 1),
('SUBDFISC_id', ogr.wkbPolygon, 1),
('SUBDSECT_id', ogr.wkbPolygon, 1),
('SYMBLIM_id', ogr.wkbPoint, 29),
('TLINE_id', ogr.wkbLineString, 134),
('TPOINT_id', ogr.wkbPoint, 1),
('TRONFLUV_id', ogr.wkbPolygon, 3),
('TRONROUTE_id', ogr.wkbPolygon, 1),
('TSURF_id', ogr.wkbPolygon, 3),
('ZONCOMMUNI_id', ogr.wkbLineString, 15),
('ID_S_OBJ_Z_1_2_2', ogr.wkbPoint, 248),
]
for l in layers:
lyr = ds.GetLayerByName(l[0])
if lyr.GetLayerDefn().GetGeomType() != l[1]:
return 'fail'
if lyr.GetFeatureCount() != l[2]:
print(lyr.GetFeatureCount())
return 'fail'
if l[1] != ogr.wkbNone:
if lyr.GetSpatialRef().ExportToWkt().find('Lambert_Conformal_Conic_1SP') == -1:
print(lyr.GetSpatialRef().ExportToWkt())
return 'fail'
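    # Note: ogrtest.check_feature_geometry() returns 0 when the geometry
    # matches within tolerance, so a truthy return value in the checks below
    # means the comparison failed.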
lyr = ds.GetLayerByName('BORNE_id')
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat, 'POINT (877171.28 72489.22)'):
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('BATIMENT_id')
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat, 'POLYGON ((877206.16 71888.82,877193.14 71865.51,877202.95 71860.07,877215.83 71883.5,877206.16 71888.82))'):
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('ZONCOMMUNI_id')
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat, 'LINESTRING (877929.8 71656.39,877922.38 71663.72,877911.48 71669.51,877884.23 71675.64,877783.07 71694.04,877716.31 71706.98,877707.45 71709.71,877702.0 71713.79,877696.89 71719.58,877671.69 71761.82,877607.99 71865.03,877545.32 71959.04,877499.22 72026.82)'):
feat.DumpReadable()
return 'fail'
ds.Destroy()
return 'success'
gdaltest_list = [
ogr_edigeo_1]
if __name__ == '__main__':
gdaltest.setup_run('ogr_edigeo')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize()
avg_line_length: 38.036232 | max_line_length: 305 | alphanum_fraction: 0.600495
content_no_comment: (empty)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7905e13da1b41684a3b14e4463dadd2f3d84b3d5 | size: 11,140 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: formulas/legendre_polynomial.py @ pascalmolin/fungrim (head f498ad76a385fe7a3b932a314747b7aa2ff475da), licenses ["MIT"]; counts and event datetimes: null
content:
# -*- coding: utf-8 -*-
from .expr import *
def_Topic(
Title("Legendre polynomials"),
Section("Particular values"),
Entries(
"9bdf22",
"217521",
"d77f0a",
"9b7f05",
"a17386",
"13f971",
"a7ac51",
"3df748",
"674afa",
"85eebc",
),
Section("Recurrence and functional equations"),
Entries(
"0010f3",
"367ac2",
"27688e",
"925fdf",
),
Section("Generating functions"),
Entries(
"d84519",
),
Section("Rodrigues' formula"),
Entries(
"4cfeac",
),
Section("Integrals"),
Entries(
"e36542",
),
Section("Sum representations"),
Entries(
"c5dd9b",
"f0569a",
"7a85b7",
),
Section("Hypergeometric representations"),
Entries(
"9395fc",
"f55f0a",
"3c87b9",
"6cd4a1",
"859445",
),
Section("Bounds and inequalities"),
Entries(
"1ba9a5",
"155343",
"ef4b53",
"b786ad",
"60ac50",
"59e5df",
"3b175b",
"6476bd",
),
Section("Analytic properties"),
Entries(
"40fa59",
"d36fd7",
"99e62f",
"7680d3",
"22a42f",
"415911",
"df439e",
"0745ee",
"b2d723",
),
Section("Gauss-Legendre quadrature"),
SeeTopics("Gaussian quadrature"),
Entries(
"ea4754",
"47b181",
),
Section("Bounds and inequalities"),
Subsection("Turán's inequalities"),
Entries(
"c8d10e",
"227d60",
),
)
make_entry(ID("0010f3"),
Formula(Equal(LegendrePolynomial(n,-z), (-1)**n * LegendrePolynomial(n,z))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("367ac2"),
Formula(Equal((n+1)*LegendrePolynomial(n+1,z) - (2*n+1)*z*LegendrePolynomial(n,z) + n*LegendrePolynomial(n-1,z), 0)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(z, CC))))
make_entry(ID("27688e"),
Formula(Equal((1-z**2)*Derivative(LegendrePolynomial(n,z), Tuple(z,z,2)) - 2*z*Derivative(LegendrePolynomial(n,z), Tuple(z,z,1)) + n*(n+1)*LegendrePolynomial(n,z), 0)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("925fdf"),
Formula(Equal((1-z**2)*Derivative(LegendrePolynomial(n,z), Tuple(z,z,1)) + n*z*LegendrePolynomial(n,z) - n*LegendrePolynomial(n-1,z), 0)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(z, CC))))
make_entry(ID("9bdf22"),
Formula(Equal(LegendrePolynomial(0,z), 1)),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("217521"),
Formula(Equal(LegendrePolynomial(1,z), z)),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("d77f0a"),
Formula(Equal(LegendrePolynomial(2,z), Div(1,2)*(3*z**2 - 1))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("9b7f05"),
Formula(Equal(LegendrePolynomial(3,z), Div(1,2)*(5*z**3 - 3*z))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("a17386"),
Formula(Equal(LegendrePolynomial(4,z), Div(1,8)*(35*z**4 - 30*z**2 + 3))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("13f971"),
Formula(Equal(LegendrePolynomial(5,z), Div(1,8)*(63*z**5 - 70*z**3 + 15*z))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("a7ac51"),
Formula(Equal(LegendrePolynomial(n,1), 1)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("3df748"),
Formula(Equal(LegendrePolynomial(n,-1), (-1)**n)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("674afa"),
Formula(Equal(LegendrePolynomial(2*n,0), ((-1)**n / 4**n) * Binomial(2*n,n))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("85eebc"),
Formula(Equal(LegendrePolynomial(2*n+1,0), 0)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("d84519"),
Formula(Equal(Sum(LegendrePolynomial(n,x) * z**n, Tuple(n, 0, Infinity)),
1 / Sqrt(1 - 2*x*z + z**2))),
Variables(x, z),
Assumptions(And(Element(x, ClosedInterval(-1,1)), Element(z, CC), Less(Abs(z), 1))))
make_entry(ID("4cfeac"),
Formula(Equal(LegendrePolynomial(n,z),
Div(1,2**n * Factorial(n)) * Derivative((t**2-1)**n, Tuple(t, z, n)))),
Variables(n, z),
    Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("e36542"),
Formula(Equal(Integral(LegendrePolynomial(n, x) * LegendrePolynomial(m, x), Tuple(x, -1, 1)), Div(2,2*n+1) * KroneckerDelta(n, m))),
Variables(n, m),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(m, ZZGreaterEqual(0)))))
make_entry(ID("c5dd9b"),
Formula(Equal(LegendrePolynomial(n, z), Div(1,2**n) * Sum(Binomial(n,k)**2 * (z-1)**(n-k) * (z+1)**k, Tuple(k, 0, n)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("f0569a"),
Formula(Equal(LegendrePolynomial(n, z), Sum(Binomial(n,k) * Binomial(n+k,k) * Div(z-1,2)**k, Tuple(k, 0, n)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("7a85b7"),
Formula(Equal(LegendrePolynomial(n, z), Div(1,2**n) * Sum((-1)**k * Binomial(n,k) * Binomial(2*n-2*k,n) * z**(n-2*k), Tuple(k, 0, Floor(n/2))))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("9395fc"),
Formula(Equal(LegendrePolynomial(n, z), Hypergeometric2F1(-n, n+1, 1, (1-z)/2))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("f55f0a"),
Formula(Equal(LegendrePolynomial(n, z), Binomial(2*n,n) * (z/2)**n * Hypergeometric2F1(-(n/2), (1-n)/2, Div(1,2)-n, 1/z**2))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, SetMinus(CC, Set(0))))))
make_entry(ID("3c87b9"),
Formula(Equal(LegendrePolynomial(n, z), Div(z-1,2)**n * Hypergeometric2F1(-n, -n, 1, (z+1)/(z-1)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, SetMinus(CC, Set(1))))))
make_entry(ID("6cd4a1"),
Formula(Equal(LegendrePolynomial(2*n, z), Div((-1)**n, 4**n) * Binomial(2*n,n) * Hypergeometric2F1(-n, n+Div(1,2), Div(1,2), z**2))),
Variables(n, z),
    Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("859445"),
Formula(Equal(LegendrePolynomial(2*n+1, z), Div((-1)**n, 4**n) * (2*n+1) * Binomial(2*n,n) * z * Hypergeometric2F1(-n, n+Div(3,2), Div(3,2), z**2))),
Variables(n, z),
    Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("1ba9a5"),
Formula(LessEqual(Abs(LegendrePolynomial(n,x)), 1)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), LessEqual(-1, x, 1))))
# todo: also valid on CC?
make_entry(ID("155343"),
Formula(LessEqual(Abs(LegendrePolynomial(n,x)), 2*BesselI(0,2*n*Sqrt(Abs(x-1)/2)), 2*Exp(2*n*Sqrt(Abs(x-1)/2)))),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(x, RR))))
make_entry(ID("ef4b53"),
Formula(LessEqual(Abs(LegendrePolynomial(n,z)), Abs(LegendrePolynomial(n, Abs(z)*ConstI)), (Abs(z)+Sqrt(1+Abs(z)**2))**n)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("b786ad"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 1))), (n*(n+1))/2)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), LessEqual(-1, x, 1))))
make_entry(ID("60ac50"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 1))), (2**Div(3,2) / Sqrt(ConstPi)) * (n**Div(1,2) / (1 - x**2)**Div(3,4)))),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Less(-1, x, 1))))
make_entry(ID("59e5df"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 2))), ((n-1)*n*(n+1)*(n+2))/8)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), LessEqual(-1, x, 1))))
make_entry(ID("3b175b"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 2))), (2**Div(5,2) / Sqrt(ConstPi)) * (n**Div(3,2) / (1 - x**2)**Div(5,4)))),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Less(-1, x, 1))))
make_entry(ID("6476bd"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, r))), (2**(r+Div(1,2)) / Sqrt(ConstPi)) * (n**(r-Div(1,2)) / (1 - x**2)**((2*n+1)/4)))),
Variables(n, r, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(r, ZZGreaterEqual(0)), Less(-1, x, 1))))
make_entry(ID("40fa59"),
Formula(Equal(HolomorphicDomain(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), CC)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(1))))
make_entry(ID("d36fd7"),
Formula(Equal(Poles(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), Set(UnsignedInfinity))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(1))))
make_entry(ID("99e62f"),
Formula(Equal(EssentialSingularities(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), Set())),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("7680d3"),
Formula(Equal(BranchPoints(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), Set())),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("22a42f"),
Formula(Equal(BranchCuts(LegendrePolynomial(n,z), z, CC), Set())),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("415911"),
Formula(Equal(Cardinality(Zeros(LegendrePolynomial(n,z), z, Element(z, CC))), n)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("df439e"),
Formula(Subset(Zeros(LegendrePolynomial(n,z), z, Element(z, CC)), OpenInterval(-1,1))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("0745ee"),
Formula(Equal(Zeros(LegendrePolynomial(n,z), z, Element(z, CC)), SetBuilder(LegendrePolynomialZero(n,k), k, Element(k, ZZBetween(1, n))))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("b2d723"),
Formula(Equal(LegendrePolynomial(n, Conjugate(z)), Conjugate(LegendrePolynomial(n, z)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
# Bounds and inequalities
make_entry(ID("c8d10e"),
Formula(GreaterEqual(Parentheses(LegendrePolynomial(n, x))**2 - LegendrePolynomial(n-1, x) * LegendrePolynomial(n+1, x), 0)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(x, ClosedInterval(-1, 1)))))
make_entry(ID("227d60"),
Formula(Greater(Parentheses(LegendrePolynomial(n, x))**2 - LegendrePolynomial(n-1, x) * LegendrePolynomial(n+1, x), 0)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(x, OpenInterval(-1, 1)))))
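# Illustrative numerical check of the three-term recurrence (entry "367ac2")
# using numpy -- an assumption, since numpy is not a dependency of this module:
#
#     import numpy as np
#     from numpy.polynomial import legendre
#     P = lambda k, x: legendre.legval(x, [0] * k + [1])   # evaluates P_k(x)
#     n, z = 5, 0.3
#     lhs = (n + 1) * P(n + 1, z) - (2 * n + 1) * z * P(n, z) + n * P(n - 1, z)
#     assert np.isclose(lhs, 0.0)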
avg_line_length: 35.141956 | max_line_length: 172 | alphanum_fraction: 0.621095
from .expr import *
def_Topic(
Title("Legendre polynomials"),
Section("Particular values"),
Entries(
"9bdf22",
"217521",
"d77f0a",
"9b7f05",
"a17386",
"13f971",
"a7ac51",
"3df748",
"674afa",
"85eebc",
),
Section("Recurrence and functional equations"),
Entries(
"0010f3",
"367ac2",
"27688e",
"925fdf",
),
Section("Generating functions"),
Entries(
"d84519",
),
Section("Rodrigues' formula"),
Entries(
"4cfeac",
),
Section("Integrals"),
Entries(
"e36542",
),
Section("Sum representations"),
Entries(
"c5dd9b",
"f0569a",
"7a85b7",
),
Section("Hypergeometric representations"),
Entries(
"9395fc",
"f55f0a",
"3c87b9",
"6cd4a1",
"859445",
),
Section("Bounds and inequalities"),
Entries(
"1ba9a5",
"155343",
"ef4b53",
"b786ad",
"60ac50",
"59e5df",
"3b175b",
"6476bd",
),
Section("Analytic properties"),
Entries(
"40fa59",
"d36fd7",
"99e62f",
"7680d3",
"22a42f",
"415911",
"df439e",
"0745ee",
"b2d723",
),
Section("Gauss-Legendre quadrature"),
SeeTopics("Gaussian quadrature"),
Entries(
"ea4754",
"47b181",
),
Section("Bounds and inequalities"),
Subsection("Turán's inequalities"),
Entries(
"c8d10e",
"227d60",
),
)
make_entry(ID("0010f3"),
Formula(Equal(LegendrePolynomial(n,-z), (-1)**n * LegendrePolynomial(n,z))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("367ac2"),
Formula(Equal((n+1)*LegendrePolynomial(n+1,z) - (2*n+1)*z*LegendrePolynomial(n,z) + n*LegendrePolynomial(n-1,z), 0)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(z, CC))))
make_entry(ID("27688e"),
Formula(Equal((1-z**2)*Derivative(LegendrePolynomial(n,z), Tuple(z,z,2)) - 2*z*Derivative(LegendrePolynomial(n,z), Tuple(z,z,1)) + n*(n+1)*LegendrePolynomial(n,z), 0)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("925fdf"),
Formula(Equal((1-z**2)*Derivative(LegendrePolynomial(n,z), Tuple(z,z,1)) + n*z*LegendrePolynomial(n,z) - n*LegendrePolynomial(n-1,z), 0)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(z, CC))))
make_entry(ID("9bdf22"),
Formula(Equal(LegendrePolynomial(0,z), 1)),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("217521"),
Formula(Equal(LegendrePolynomial(1,z), z)),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("d77f0a"),
Formula(Equal(LegendrePolynomial(2,z), Div(1,2)*(3*z**2 - 1))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("9b7f05"),
Formula(Equal(LegendrePolynomial(3,z), Div(1,2)*(5*z**3 - 3*z))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("a17386"),
Formula(Equal(LegendrePolynomial(4,z), Div(1,8)*(35*z**4 - 30*z**2 + 3))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("13f971"),
Formula(Equal(LegendrePolynomial(5,z), Div(1,8)*(63*z**5 - 70*z**3 + 15*z))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("a7ac51"),
Formula(Equal(LegendrePolynomial(n,1), 1)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("3df748"),
Formula(Equal(LegendrePolynomial(n,-1), (-1)**n)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("674afa"),
Formula(Equal(LegendrePolynomial(2*n,0), ((-1)**n / 4**n) * Binomial(2*n,n))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("85eebc"),
Formula(Equal(LegendrePolynomial(2*n+1,0), 0)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("d84519"),
Formula(Equal(Sum(LegendrePolynomial(n,x) * z**n, Tuple(n, 0, Infinity)),
1 / Sqrt(1 - 2*x*z + z**2))),
Variables(x, z),
Assumptions(And(Element(x, ClosedInterval(-1,1)), Element(z, CC), Less(Abs(z), 1))))
make_entry(ID("4cfeac"),
Formula(Equal(LegendrePolynomial(n,z),
Div(1,2**n * Factorial(n)) * Derivative((t**2-1)**n, Tuple(t, z, n)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0))), Element(z, CC)))
make_entry(ID("e36542"),
Formula(Equal(Integral(LegendrePolynomial(n, x) * LegendrePolynomial(m, x), Tuple(x, -1, 1)), Div(2,2*n+1) * KroneckerDelta(n, m))),
Variables(n, m),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(m, ZZGreaterEqual(0)))))
make_entry(ID("c5dd9b"),
Formula(Equal(LegendrePolynomial(n, z), Div(1,2**n) * Sum(Binomial(n,k)**2 * (z-1)**(n-k) * (z+1)**k, Tuple(k, 0, n)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("f0569a"),
Formula(Equal(LegendrePolynomial(n, z), Sum(Binomial(n,k) * Binomial(n+k,k) * Div(z-1,2)**k, Tuple(k, 0, n)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("7a85b7"),
Formula(Equal(LegendrePolynomial(n, z), Div(1,2**n) * Sum((-1)**k * Binomial(n,k) * Binomial(2*n-2*k,n) * z**(n-2*k), Tuple(k, 0, Floor(n/2))))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("9395fc"),
Formula(Equal(LegendrePolynomial(n, z), Hypergeometric2F1(-n, n+1, 1, (1-z)/2))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("f55f0a"),
Formula(Equal(LegendrePolynomial(n, z), Binomial(2*n,n) * (z/2)**n * Hypergeometric2F1(-(n/2), (1-n)/2, Div(1,2)-n, 1/z**2))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, SetMinus(CC, Set(0))))))
make_entry(ID("3c87b9"),
Formula(Equal(LegendrePolynomial(n, z), Div(z-1,2)**n * Hypergeometric2F1(-n, -n, 1, (z+1)/(z-1)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, SetMinus(CC, Set(1))))))
make_entry(ID("6cd4a1"),
Formula(Equal(LegendrePolynomial(2*n, z), Div((-1)**n, 4**n) * Binomial(2*n,n) * Hypergeometric2F1(-n, n+Div(1,2), Div(1,2), z**2))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, SetMinus(CC)))))
make_entry(ID("859445"),
Formula(Equal(LegendrePolynomial(2*n+1, z), Div((-1)**n, 4**n) * (2*n+1) * Binomial(2*n,n) * z * Hypergeometric2F1(-n, n+Div(3,2), Div(3,2), z**2))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, SetMinus(CC)))))
make_entry(ID("1ba9a5"),
Formula(LessEqual(Abs(LegendrePolynomial(n,x)), 1)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), LessEqual(-1, x, 1))))
make_entry(ID("155343"),
Formula(LessEqual(Abs(LegendrePolynomial(n,x)), 2*BesselI(0,2*n*Sqrt(Abs(x-1)/2)), 2*Exp(2*n*Sqrt(Abs(x-1)/2)))),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(x, RR))))
make_entry(ID("ef4b53"),
Formula(LessEqual(Abs(LegendrePolynomial(n,z)), Abs(LegendrePolynomial(n, Abs(z)*ConstI)), (Abs(z)+Sqrt(1+Abs(z)**2))**n)),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("b786ad"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 1))), (n*(n+1))/2)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), LessEqual(-1, x, 1))))
make_entry(ID("60ac50"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 1))), (2**Div(3,2) / Sqrt(ConstPi)) * (n**Div(1,2) / (1 - x**2)**Div(3,4)))),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Less(-1, x, 1))))
make_entry(ID("59e5df"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 2))), ((n-1)*n*(n+1)*(n+2))/8)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), LessEqual(-1, x, 1))))
make_entry(ID("3b175b"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, 2))), (2**Div(5,2) / Sqrt(ConstPi)) * (n**Div(3,2) / (1 - x**2)**Div(5,4)))),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Less(-1, x, 1))))
make_entry(ID("6476bd"),
Formula(LessEqual(Abs(Derivative(LegendrePolynomial(n,x), Tuple(x, x, r))), (2**(r+Div(1,2)) / Sqrt(ConstPi)) * (n**(r-Div(1,2)) / (1 - x**2)**((2*n+1)/4)))),
Variables(n, r, x),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(r, ZZGreaterEqual(0)), Less(-1, x, 1))))
make_entry(ID("40fa59"),
Formula(Equal(HolomorphicDomain(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), CC)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(1))))
make_entry(ID("d36fd7"),
Formula(Equal(Poles(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), Set(UnsignedInfinity))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(1))))
make_entry(ID("99e62f"),
Formula(Equal(EssentialSingularities(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), Set())),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("7680d3"),
Formula(Equal(BranchPoints(LegendrePolynomial(n,z), z, Union(CC, Set(UnsignedInfinity))), Set())),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("22a42f"),
Formula(Equal(BranchCuts(LegendrePolynomial(n,z), z, CC), Set())),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("415911"),
Formula(Equal(Cardinality(Zeros(LegendrePolynomial(n,z), z, Element(z, CC))), n)),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("df439e"),
Formula(Subset(Zeros(LegendrePolynomial(n,z), z, Element(z, CC)), OpenInterval(-1,1))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
make_entry(ID("0745ee"),
Formula(Equal(Zeros(LegendrePolynomial(n,z), z, Element(z, CC)), SetBuilder(LegendrePolynomialZero(n,k), k, Element(k, ZZBetween(1, n))))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(0))))
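# Illustrative sketch, assuming numpy is available (not part of the entries):
# the Gauss-Legendre nodes returned by numpy are exactly the zeros of P_n,
# and they all lie in the open interval (-1, 1), as entry df439e states.
def _check_legendre_zeros(n=6):
    import numpy
    nodes, _ = numpy.polynomial.legendre.leggauss(n)
    return len(nodes) == n and all(-1 < x < 1 for x in nodes)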
make_entry(ID("b2d723"),
Formula(Equal(LegendrePolynomial(n, Conjugate(z)), Conjugate(LegendrePolynomial(n, z)))),
Variables(n, z),
Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(z, CC))))
make_entry(ID("c8d10e"),
Formula(GreaterEqual(Parentheses(LegendrePolynomial(n, x))**2 - LegendrePolynomial(n-1, x) * LegendrePolynomial(n+1, x), 0)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(x, ClosedInterval(-1, 1)))))
make_entry(ID("227d60"),
Formula(Greater(Parentheses(LegendrePolynomial(n, x))**2 - LegendrePolynomial(n-1, x) * LegendrePolynomial(n+1, x), 0)),
Variables(n, x),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(x, OpenInterval(-1, 1)))))
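# Illustrative numeric spot check of the Turan inequality entries above
# (c8d10e, 227d60), assuming scipy is available; not part of the entry data.
def _check_turan_inequality(n=7, points=201):
    import numpy
    from scipy.special import eval_legendre
    xs = numpy.linspace(-1.0, 1.0, points)
    vals = eval_legendre(n, xs) ** 2 - eval_legendre(n - 1, xs) * eval_legendre(n + 1, xs)
    # allow a tiny tolerance: equality holds at the endpoints x = -1 and x = 1
    return bool(numpy.all(vals >= -1e-12))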
| true
| true
|
7905e1c552bc7111741db6f3273e57b8ac22efa6
| 2,607
|
py
|
Python
|
dbestclient/ml/modeltrainer.py
|
horeapinca/DBEstClient
|
6ccbb24853c31f2a8cc567e03c09ca7aa31e2d26
|
[
"BSD-2-Clause"
] | null | null | null |
dbestclient/ml/modeltrainer.py
|
horeapinca/DBEstClient
|
6ccbb24853c31f2a8cc567e03c09ca7aa31e2d26
|
[
"BSD-2-Clause"
] | null | null | null |
dbestclient/ml/modeltrainer.py
|
horeapinca/DBEstClient
|
6ccbb24853c31f2a8cc567e03c09ca7aa31e2d26
|
[
"BSD-2-Clause"
] | 1
|
2020-09-28T14:22:54.000Z
|
2020-09-28T14:22:54.000Z
|
# Created by Qingzhi Ma at 2019-07-24
# All rights reserved
# Department of Computer Science
# the University of Warwick
# Q.Ma.2@warwick.ac.uk
from dbestclient.ml.density import DBEstDensity
from dbestclient.ml.modelwraper import SimpleModelWrapper, GroupByModelWrapper
from dbestclient.ml.regression import DBEstReg
from dbestclient.tools.dftools import convert_df_to_yx
import numpy as np
class SimpleModelTrainer:
    def __init__(self, mdl, tbl, xheader, yheader, n_total_point, n_sample_point, groupby_attribute=None, groupby_value=None):
        self.xheader = xheader
        self.yheader = yheader
        self.simple_model_wrapper = SimpleModelWrapper(mdl, tbl, xheader, y=yheader, n_total_point=n_total_point,
                                                       n_sample_point=n_sample_point, groupby_attribute=groupby_attribute, groupby_value=groupby_value)
    def fit(self, x, y):
        reg = DBEstReg().fit(x, y)
        density = DBEstDensity().fit(x)
        self.simple_model_wrapper.load_model(density, reg)
        return self.simple_model_wrapper
def fit_from_df(self, df):
y, x = convert_df_to_yx(df, self.xheader, self.yheader)
return self.fit(x, y)
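# Usage sketch (illustrative only: the model name, table name and column
# headers below are made up; the real call sites live elsewhere in DBEstClient):
#   import pandas as pd
#   df = pd.read_csv("sample.csv")
#   trainer = SimpleModelTrainer("mdl1", "tbl1", "x_col", "y_col",
#                                n_total_point=10**6, n_sample_point=10**4)
#   wrapper = trainer.fit_from_df(df)  # returns the fitted SimpleModelWrapper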
class GroupByModelTrainer:
def __init__(self, mdl, tbl, xheader, yheader, groupby_attribute, n_total_point, n_sample_point,
x_min_value=-np.inf, x_max_value=np.inf):
self.groupby_model_wrapper = GroupByModelWrapper(mdl, tbl, xheader, yheader, groupby_attribute,
x_min_value=x_min_value, x_max_value=x_max_value)
self.groupby_attribute = groupby_attribute
self.mdl = mdl
self.tbl = tbl
self.xheader = xheader
self.yheader = yheader
self.n_total_point = n_total_point
self.n_sample_point = n_sample_point
self.x_min_value = x_min_value
self.x_max_value = x_max_value
    def fit_from_df(self, df):
        sample_grouped = df.groupby(by=self.groupby_attribute)
        for name, group in sample_grouped:
            print("training " + name)
simple_model_wrapper = SimpleModelTrainer(self.mdl, self.tbl, self.xheader, self.yheader,
self.n_total_point[name], self.n_sample_point[name],
groupby_attribute=self.groupby_attribute, groupby_value=name).fit_from_df(group)
self.groupby_model_wrapper.add_simple_model(simple_model_wrapper)
# print(self.groupby_model_wrapper)
return self.groupby_model_wrapper
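# Usage sketch (illustrative only; the per-group point counts are assumed to
# be dicts keyed by group value, matching the n_total_point[name] indexing above):
#   trainer = GroupByModelTrainer("mdl1", "tbl1", "x_col", "y_col", "g_col",
#                                 n_total_point={"g1": 10**6, "g2": 2 * 10**6},
#                                 n_sample_point={"g1": 10**4, "g2": 2 * 10**4})
#   group_wrapper = trainer.fit_from_df(df)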
| 44.186441
| 150
| 0.67127
|
from dbestclient.ml.density import DBEstDensity
from dbestclient.ml.modelwraper import SimpleModelWrapper, GroupByModelWrapper
from dbestclient.ml.regression import DBEstReg
from dbestclient.tools.dftools import convert_df_to_yx
import numpy as np
class SimpleModelTrainer:
    def __init__(self, mdl, tbl, xheader, yheader, n_total_point, n_sample_point, groupby_attribute=None, groupby_value=None):
        self.xheader = xheader
        self.yheader = yheader
        self.simple_model_wrapper = SimpleModelWrapper(mdl, tbl, xheader, y=yheader, n_total_point=n_total_point,
                                                       n_sample_point=n_sample_point, groupby_attribute=groupby_attribute, groupby_value=groupby_value)
    def fit(self, x, y):
        reg = DBEstReg().fit(x, y)
        density = DBEstDensity().fit(x)
        self.simple_model_wrapper.load_model(density, reg)
        return self.simple_model_wrapper
def fit_from_df(self, df):
y, x = convert_df_to_yx(df, self.xheader, self.yheader)
return self.fit(x, y)
class GroupByModelTrainer:
def __init__(self, mdl, tbl, xheader, yheader, groupby_attribute, n_total_point, n_sample_point,
x_min_value=-np.inf, x_max_value=np.inf):
self.groupby_model_wrapper = GroupByModelWrapper(mdl, tbl, xheader, yheader, groupby_attribute,
x_min_value=x_min_value, x_max_value=x_max_value)
self.groupby_attribute = groupby_attribute
self.mdl = mdl
self.tbl = tbl
self.xheader = xheader
self.yheader = yheader
self.n_total_point = n_total_point
self.n_sample_point = n_sample_point
self.x_min_value = x_min_value
self.x_max_value = x_max_value
    def fit_from_df(self, df):
        sample_grouped = df.groupby(by=self.groupby_attribute)
        for name, group in sample_grouped:
            print("training " + name)
simple_model_wrapper = SimpleModelTrainer(self.mdl, self.tbl, self.xheader, self.yheader,
self.n_total_point[name], self.n_sample_point[name],
groupby_attribute=self.groupby_attribute, groupby_value=name).fit_from_df(group)
self.groupby_model_wrapper.add_simple_model(simple_model_wrapper)
return self.groupby_model_wrapper
| true
| true
|
7905e265e3a23f6356c81958a75c9da793f3554e
| 3,080
|
py
|
Python
|
src/posts/models.py
|
zulune/Just-Django-Blog
|
b0b63ab76702194489958f832e84a0b933fa3e37
|
[
"MIT"
] | null | null | null |
src/posts/models.py
|
zulune/Just-Django-Blog
|
b0b63ab76702194489958f832e84a0b933fa3e37
|
[
"MIT"
] | null | null | null |
src/posts/models.py
|
zulune/Just-Django-Blog
|
b0b63ab76702194489958f832e84a0b933fa3e37
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from tinymce import HTMLField
# Create your models here.
User = get_user_model()
class PostView(models.Model):
user = models.ForeignKey(User, verbose_name=_(
"User"), on_delete=models.CASCADE)
post = models.ForeignKey('Post', verbose_name=_(
"Post"), on_delete=models.CASCADE)
def __str__(self):
return self.user.username
class Author(models.Model):
user = models.OneToOneField(User, verbose_name=_(
"Author"), on_delete=models.CASCADE)
profile_picture = models.ImageField(_("Profile picture"))
def __str__(self):
return self.user.username
class Category(models.Model):
title = models.CharField(_("Title"), max_length=50)
def __str__(self):
return self.title
class Comment(models.Model):
user = models.ForeignKey(
User, verbose_name=_("User"), on_delete=models.CASCADE)
timestamp = models.DateTimeField(_("Timestamp"), auto_now_add=True)
content = models.TextField(_("Comment text"))
post = models.ForeignKey('Post', verbose_name=_(
"Post"), related_name='comments', on_delete=models.CASCADE)
def __str__(self):
return self.user.username
class Post(models.Model):
title = models.CharField(_("Title"), max_length=50)
overview = models.TextField(_("Overview"))
timestamp = models.DateTimeField(
_("Timestamp"), auto_now=False, auto_now_add=True)
content = HTMLField()
# comment_count = models.IntegerField(_("Comment count"), default=0)
# view_count = models.IntegerField(_("View count"), default=0)
author = models.ForeignKey(Author, verbose_name=_(
"Author"), on_delete=models.CASCADE)
thumbnail = models.ImageField(_("Thumbnail"))
categories = models.ManyToManyField(Category, verbose_name=_("Categories"))
featured = models.BooleanField(_("Featured"), default=False)
previous_post = models.ForeignKey("self", verbose_name=_(
"Previous post"), related_name='previous',
on_delete=models.SET_NULL, blank=True, null=True)
next_post = models.ForeignKey("self", verbose_name=_(
"Next post"), related_name='next',
on_delete=models.SET_NULL, blank=True, null=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("post-detail", kwargs={"pk": self.pk})
def get_update_url(self):
return reverse("post-update", kwargs={"pk": self.pk})
def get_delete_url(self):
return reverse("post-delete", kwargs={"pk": self.pk})
@property
def get_comments(self):
return self.comments.all().order_by('-timestamp')
@property
def comment_count(self):
return Comment.objects.filter(post=self).count()
@property
def view_count(self):
return PostView.objects.filter(post=self).count()
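# Usage sketch (hypothetical view code, not part of this module). The three
# properties above run a query per access, so annotate() is the bulk-friendly
# alternative on hot paths:
#   post = Post.objects.get(pk=1)
#   post.get_comments    # newest-first, via related_name='comments'
#   post.comment_count   # one COUNT query per access
#   Post.objects.annotate(n_comments=models.Count('comments'))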
| 33.11828
| 80
| 0.667208
|
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from tinymce import HTMLField
User = get_user_model()
class PostView(models.Model):
user = models.ForeignKey(User, verbose_name=_(
"User"), on_delete=models.CASCADE)
post = models.ForeignKey('Post', verbose_name=_(
"Post"), on_delete=models.CASCADE)
def __str__(self):
return self.user.username
class Author(models.Model):
user = models.OneToOneField(User, verbose_name=_(
"Author"), on_delete=models.CASCADE)
profile_picture = models.ImageField(_("Profile picture"))
def __str__(self):
return self.user.username
class Category(models.Model):
title = models.CharField(_("Title"), max_length=50)
def __str__(self):
return self.title
class Comment(models.Model):
user = models.ForeignKey(
User, verbose_name=_("User"), on_delete=models.CASCADE)
timestamp = models.DateTimeField(_("Timestamp"), auto_now_add=True)
content = models.TextField(_("Comment text"))
post = models.ForeignKey('Post', verbose_name=_(
"Post"), related_name='comments', on_delete=models.CASCADE)
def __str__(self):
return self.user.username
class Post(models.Model):
title = models.CharField(_("Title"), max_length=50)
overview = models.TextField(_("Overview"))
timestamp = models.DateTimeField(
_("Timestamp"), auto_now=False, auto_now_add=True)
content = HTMLField()
author = models.ForeignKey(Author, verbose_name=_(
"Author"), on_delete=models.CASCADE)
thumbnail = models.ImageField(_("Thumbnail"))
categories = models.ManyToManyField(Category, verbose_name=_("Categories"))
featured = models.BooleanField(_("Featured"), default=False)
previous_post = models.ForeignKey("self", verbose_name=_(
"Previous post"), related_name='previous',
on_delete=models.SET_NULL, blank=True, null=True)
next_post = models.ForeignKey("self", verbose_name=_(
"Next post"), related_name='next',
on_delete=models.SET_NULL, blank=True, null=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("post-detail", kwargs={"pk": self.pk})
def get_update_url(self):
return reverse("post-update", kwargs={"pk": self.pk})
def get_delete_url(self):
return reverse("post-delete", kwargs={"pk": self.pk})
@property
def get_comments(self):
return self.comments.all().order_by('-timestamp')
@property
def comment_count(self):
return Comment.objects.filter(post=self).count()
@property
def view_count(self):
return PostView.objects.filter(post=self).count()
| true
| true
|
7905e26922f7308b806416ab5d42cce6c45c8f84
| 304
|
py
|
Python
|
Python/PycharmProjects/aula 8/1.py
|
MarcelaSamili/Desafios-do-curso-de-Python
|
f331e91821c0c25b3e32d2075254ef650292f280
|
[
"MIT"
] | null | null | null |
Python/PycharmProjects/aula 8/1.py
|
MarcelaSamili/Desafios-do-curso-de-Python
|
f331e91821c0c25b3e32d2075254ef650292f280
|
[
"MIT"
] | null | null | null |
Python/PycharmProjects/aula 8/1.py
|
MarcelaSamili/Desafios-do-curso-de-Python
|
f331e91821c0c25b3e32d2075254ef650292f280
|
[
"MIT"
] | null | null | null |
# indirect way (importing the whole math module)
'''import math
num = int(input('Digite um número: '))
raiz = math.sqrt(num)
print('A raiz de {} é {}'.format(num, math.ceil(raiz)))'''
# direct way (importing only the needed names)
from math import sqrt, floor
num = int(input('Digite um número:'))
raiz = sqrt(num)
print('A raiz de {} é {:.2f}'.format(num, floor(raiz)))
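# Note: floor() returns an int, so the {:.2f} placeholder always shows two
# zero decimals, e.g. num = 10 prints 'A raiz de 10 é 3.00'.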
| 25.333333
| 58
| 0.651316
|
from math import sqrt, floor
num = int(input('Digite um número:'))
raiz = sqrt(num)
print('A raiz de {} é {:.2f}'.format(num, floor(raiz)))
| true
| true
|
7905e2765b295dcc4b11151752c0c203cbc906a2
| 43,555
|
py
|
Python
|
NabBot-master/utils/tibia.py
|
LadyKeladry/Guardian-Bot
|
c7cf061b8502aa7b91fa98396160861e0c0fb715
|
[
"Apache-2.0"
] | null | null | null |
NabBot-master/utils/tibia.py
|
LadyKeladry/Guardian-Bot
|
c7cf061b8502aa7b91fa98396160861e0c0fb715
|
[
"Apache-2.0"
] | null | null | null |
NabBot-master/utils/tibia.py
|
LadyKeladry/Guardian-Bot
|
c7cf061b8502aa7b91fa98396160861e0c0fb715
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import io
from PIL import Image
from PIL import ImageDraw
from discord import Colour
import datetime
import urllib
import urllib.request
import aiohttp
import re
from datetime import datetime, date, timedelta
from calendar import timegm
import time
from utils.database import userDatabase, tibiaDatabase
from config import highscores_categories, network_retry_delay
from utils.messages import EMOJI
from .general import log, global_online_list, get_local_timezone
# Constants
ERROR_NETWORK = 0
ERROR_DOESNTEXIST = 1
ERROR_NOTINDATABASE = 2
# Tibia.com URLs:
url_character = "https://secure.tibia.com/community/?subtopic=characters&name="
url_guild = "https://secure.tibia.com/community/?subtopic=guilds&page=view&GuildName="
url_guild_online = "https://secure.tibia.com/community/?subtopic=guilds&page=view&onlyshowonline=1&"
url_house = "https://secure.tibia.com/community/?subtopic=houses&page=view&houseid={id}&world={world}"
url_highscores = "https://secure.tibia.com/community/?subtopic=highscores&world={0}&list={1}&profession={2}&currentpage={3}"
KNIGHT = ["knight", "elite knight", "ek", "k", "kina", "eliteknight","elite"]
PALADIN = ["paladin", "royal paladin", "rp", "p", "pally", "royalpaladin", "royalpally"]
DRUID = ["druid", "elder druid", "ed", "d", "elderdruid", "elder"]
SORCERER = ["sorcerer", "master sorcerer", "ms", "s", "sorc", "mastersorcerer", "master"]
MAGE = DRUID + SORCERER + ["mage"]
NO_VOCATION = ["no vocation", "no voc", "novoc", "nv", "n v", "none", "no", "n", "noob", "noobie", "rook", "rookie"]
highscore_format = {"achievements": "{0} __achievement points__ are **{1}**, on rank **{2}**",
"axe": "{0} __axe fighting__ level is **{1}**, on rank **{2}**",
"club": "{0} __club fighting__ level is **{1}**, on rank **{2}**",
"distance": "{0} __distance fighting__ level is **{1}**, on rank **{2}**",
"fishing": "{0} __fishing__ level is **{1}**, on rank **{2}**",
"fist": "{0} __fist fighting__ level is **{1}**, on rank **{2}**",
"loyalty": "{0} __loyalty points__ are **{1}**, on rank **{2}**",
"magic": "{0} __magic level__ is **{1}**, on rank **{2}**",
"magic_ek": "{0} __magic level__ is **{1}**, on rank **{2}** (knights)",
"magic_rp": "{0} __magic level__ is **{1}**, on rank **{2}** (paladins)",
"shielding": "{0} __shielding__ level is **{1}**, on rank **{2}**",
"sword": "{0} __sword fighting__ level is **{1}**, on rank **{2}**"}
tibia_worlds = ["Amera", "Antica", "Astera", "Aurera", "Aurora", "Bellona", "Belobra", "Beneva", "Calmera", "Calva",
"Calvera", "Candia", "Celesta", "Chrona", "Danera", "Dolera", "Efidia", "Eldera", "Ferobra", "Fidera",
"Fortera", "Garnera", "Guardia", "Harmonia", "Honera", "Hydera", "Inferna", "Iona", "Irmada", "Julera",
"Justera", "Kenora", "Kronera", "Laudera", "Luminera", "Magera", "Menera", "Morta", "Mortera",
"Neptera", "Nerana", "Nika", "Olympa", "Osera", "Pacera", "Premia", "Pythera", "Guilia", "Refugia",
"Rowana", "Secura", "Serdebra", "Shivera", "Silvera", "Solera", "Tavara", "Thera", "Umera", "Unitera",
"Veludera", "Verlana", "Xantera", "Xylana", "Yanara", "Zanera", "Zeluna", "Honbra", "Noctera", "Vita",
"Duna", "Relembra", "Helera", "Tortura", "Macabra"]
def get_character_url(name):
"""Gets a character's tibia.com URL"""
return url_character + urllib.parse.quote(name.encode('iso-8859-1'))
@asyncio.coroutine
def get_highscores(server, category, pagenum, profession=0, tries=5):
"""Gets a specific page of the highscores
Each list element is a dictionary with the following keys: rank, name, value.
May return ERROR_NETWORK"""
url = url_highscores.format(server, category, profession, pagenum)
# Fetch website
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category,
pagenum))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_highscores(server, category, pagenum, profession, tries)
return ret
# Trimming content to reduce load
try:
start_index = content.index('<td style="width: 20%;" >Vocation</td>')
        end_index = content.index('<div style="float: left;"><b>» Pages:')
content = content[start_index:end_index]
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category,
pagenum))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_highscores(server, category, pagenum, profession, tries)
return ret
if category == "loyalty":
regex_deaths = r'<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>'
pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
matches = re.findall(pattern, content)
scoreList = []
for m in matches:
scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')})
else:
regex_deaths = r'<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>'
pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
matches = re.findall(pattern, content)
scoreList = []
for m in matches:
scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')})
return scoreList
@asyncio.coroutine
def get_server_online(server, tries=5):
"""Returns a list of all the online players in current server.
Each list element is a dictionary with the following keys: name, level"""
server = server.capitalize()
url = 'https://secure.tibia.com/community/?subtopic=worlds&world=' + server
onlineList = []
# Fetch website
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server))
# This should return ERROR_NETWORK, but requires error handling where this function is used
return onlineList
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_server_online(server, tries)
return ret
while not content and tries > 0:
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
tries -= 1
# Trimming content to reduce load
try:
start_index = content.index('<div class="BoxContent"')
end_index = content.index('<div id="ThemeboxesColumn" >')
content = content[start_index:end_index]
except ValueError:
# Website fetch was incomplete due to a network error
if tries == 0:
log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server))
# This should return ERROR_NETWORK, but requires error handling where this function is used
return onlineList
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_server_online(server, tries)
return ret
regex_members = r'<a href="https://secure.tibia.com/community/\?subtopic=characters&name=(.+?)" >.+?</a></td><td style="width:10%;" >(.+?)</td>'
pattern = re.compile(regex_members, re.MULTILINE + re.S)
m = re.findall(pattern, content)
# Check if list is empty
if m:
# Building dictionary list from online players
for (name, level) in m:
name = urllib.parse.unquote_plus(name)
onlineList.append({'name': name, 'level': int(level)})
return onlineList
@asyncio.coroutine
def get_guild_online(guildname, titlecase=True, tries=5):
"""Returns a guild's world and online member list in a dictionary.
The dictionary contains the following keys: name, logo_url, world and members.
The key members contains a list where each element is a dictionary with the following keys:
rank, name, title, vocation, level, joined.
Guilds are case sensitive on tibia.com so guildstats.eu is checked for correct case.
May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly."""
gstats_url = 'http://guildstats.eu/guild?guild=' + urllib.parse.quote(guildname)
guild = {}
# Fix casing using guildstats.eu if needed
# Sorry guildstats.eu :D
if not titlecase:
# Fetch website
try:
page = yield from aiohttp.get(gstats_url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
# Make sure we got a healthy fetch
try:
content.index('<div class="footer">')
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
# Check if the guild doesn't exist
if "<div>Sorry!" in content:
return ERROR_DOESNTEXIST
# Failsafe in case guildstats.eu changes their websites format
try:
content.index("General info")
content.index("Recruitment")
except Exception:
log.error("getGuildOnline: -IMPORTANT- guildstats.eu seems to have changed their websites format.")
return ERROR_NETWORK
startIndex = content.index("General info")
endIndex = content.index("Recruitment")
content = content[startIndex:endIndex]
m = re.search(r'<a href="set=(.+?)"', content)
if m:
guildname = urllib.parse.unquote_plus(m.group(1))
else:
guildname = guildname.title()
tibia_url = 'https://secure.tibia.com/community/?subtopic=guilds&page=view&GuildName=' + urllib.parse.quote(
guildname) + '&onlyshowonline=1'
# Fetch website
try:
page = yield from aiohttp.get(tibia_url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
# Trimming content to reduce load and making sure we got a healthy fetch
try:
startIndex = content.index('<div class="BoxContent"')
endIndex = content.index('<div id="ThemeboxesColumn" >')
content = content[startIndex:endIndex]
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
    # Check if the guild doesn't exist
    # Tibia.com has no search function, so there is no "guild doesn't exist" page; you are not supposed to reach a
    # guild that doesn't exist. The message displayed is "An internal error has occurred. Please try again later!".
if '<div class="Text" >Error</div>' in content:
if titlecase:
ret = yield from get_guild_online(guildname, False)
return ret
else:
return ERROR_DOESNTEXIST
# Regex pattern to fetch world, guildhall and founding date
m = re.search(r'founded on (\w+) on ([^.]+)', content)
if m:
guild['world'] = m.group(1)
m = re.search(r'Their home on \w+ is ([^\.]+)', content)
if m:
guild["guildhall"] = m.group(1)
# Logo URL
m = re.search(r'<IMG SRC=\"([^\"]+)\" W', content)
if m:
guild['logo_url'] = m.group(1)
# Regex pattern to fetch members
regex_members = r'<TR BGCOLOR=#[\dABCDEF]+><TD>(.+?)</TD>\s</td><TD><A HREF="https://secure.tibia.com/community/\?subtopic=characters&name=(.+?)">.+?</A> *\(*(.*?)\)*</TD>\s<TD>(.+?)</TD>\s<TD>(.+?)</TD>\s<TD>(.+?)</TD>'
pattern = re.compile(regex_members, re.MULTILINE + re.S)
m = re.findall(pattern, content)
guild['members'] = []
# Check if list is empty
if m:
# Building dictionary list from members
for (rank, name, title, vocation, level, joined) in m:
rank = '' if (rank == ' ') else rank
name = urllib.parse.unquote_plus(name)
joined = joined.replace(' ', '-')
guild['members'].append({'rank': rank, 'name': name, 'title': title,
'vocation': vocation, 'level': level, 'joined': joined})
guild['name'] = guildname
return guild
@asyncio.coroutine
def get_character(name, tries=5):
"""Returns a dictionary with a player's info
The dictionary contains the following keys: name, deleted, level, vocation, world, residence,
married, gender, guild, last,login, chars*.
*chars is list that contains other characters in the same account (if not hidden).
Each list element is dictionary with the keys: name, world.
May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly."""
try:
url = url_character + urllib.parse.quote(name.encode('iso-8859-1'))
except UnicodeEncodeError:
return ERROR_DOESNTEXIST
char = dict()
# Fetch website
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getPlayer: Couldn't fetch {0}, network error.".format(name))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_character(name, tries)
return ret
# Trimming content to reduce load
try:
startIndex = content.index('<div class="BoxContent"')
endIndex = content.index("<B>Search Character</B>")
content = content[startIndex:endIndex]
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("getPlayer: Couldn't fetch {0}, network error.".format(name))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_character(name, tries)
return ret
# Check if player exists
if "Name:</td><td>" not in content:
return ERROR_DOESNTEXIST
# TODO: Is there a way to reduce this part?
# Name
m = re.search(r'Name:</td><td>([^<,]+)', content)
if m:
char['name'] = m.group(1).strip()
# Deleted
m = re.search(r', will be deleted at ([^<]+)', content)
if m:
char['deleted'] = True
# Vocation
m = re.search(r'Vocation:</td><td>([^<]+)', content)
if m:
char['vocation'] = m.group(1)
# Level
m = re.search(r'Level:</td><td>(\d+)', content)
if m:
char['level'] = int(m.group(1))
# Use database levels for online characters
for onchar in global_online_list:
if onchar.split("_", 1)[1] == char['name']:
c = userDatabase.cursor()
c.execute("SELECT last_level FROM chars WHERE name LIKE ?", (char['name'],))
result = c.fetchone()
if result:
char['level'] = abs(result["last_level"])
c.close()
break
# World
m = re.search(r'World:</td><td>([^<]+)', content)
if m:
char['world'] = m.group(1)
# Residence (City)
m = re.search(r'Residence:</td><td>([^<]+)', content)
if m:
char['residence'] = m.group(1)
# Marriage
m = re.search(r'Married To:</td><td>?.+name=([^"]+)', content)
if m:
char['married'] = urllib.parse.unquote_plus(m.group(1), encoding='ISO-8859-1')
# Sex
m = re.search(r'Sex:</td><td>([^<]+)', content)
if m:
if m.group(1) == 'male':
char['gender'] = 'male'
else:
char['gender'] = 'female'
# Guild rank
m = re.search(r'Membership:</td><td>([^<]+)\sof the', content)
if m:
char['rank'] = m.group(1)
# Guild membership
m = re.search(r'GuildName=.*?([^&]+).+', content)
if m:
char['guild'] = urllib.parse.unquote_plus(m.group(1))
# House
m = re.search(r'House:</td><td> <a href=\"https://secure\.tibia\.com/community/\?subtopic=houses.+houseid=(\d+)'
r'&character=(?:[^&]+)&action=characters\" >([^<]+)</a> \(([^(]+)\) is paid until '
r'([A-z]+).*?;(\d+).*?;(\d+)', content)
if m:
char["house_id"] = m.group(1)
char["house"] = m.group(2)
char["house_town"] = m.group(3)
# Last login
m = re.search(r'Last Login:</td><td>([^<]+)', content)
if m:
lastLogin = m.group(1).replace(" ", " ").replace(",", "")
if "never" in lastLogin:
char['last_login'] = None
else:
char['last_login'] = lastLogin
# Discord owner
c = userDatabase.cursor()
c.execute("SELECT user_id FROM chars WHERE name LIKE ?", (char["name"],))
result = c.fetchone()
char["owner_id"] = None if result is None else result["user_id"]
# Update name, vocation and world for chars in database if necessary
c = userDatabase.cursor()
c.execute("SELECT vocation, name, id, world FROM chars WHERE name LIKE ?", (name,))
result = c.fetchone()
if result:
if result["vocation"] != char['vocation']:
c.execute("UPDATE chars SET vocation = ? WHERE id = ?", (char['vocation'], result["id"],))
log.info("{0}'s vocation was set to {1} from {2} during get_character()".format(char['name'],
char['vocation'],
result["vocation"]))
if result["name"] != char["name"]:
c.execute("UPDATE chars SET name = ? WHERE id = ?", (char['name'], result["id"],))
log.info("{0} was renamed to {1} during get_character()".format(result["name"], char['name']))
if result["world"] != char["world"]:
c.execute("UPDATE chars SET world = ? WHERE id = ?", (char['world'], result["id"],))
log.info("{0}'s world was set to {1} from {2} during get_character()".format(char['name'],
char['world'],
result["world"]))
#Skills from highscores
c = userDatabase.cursor()
for category in highscores_categories:
c.execute("SELECT "+category+","+category+"_rank FROM chars WHERE name LIKE ?", (name,))
result = c.fetchone()
if result:
if result[category] is not None and result[category+'_rank'] is not None:
char[category] = result[category]
char[category+'_rank'] = result[category+'_rank']
char["deaths"] = []
regex_deaths = r'valign="top" >([^<]+)</td><td>(.+?)</td></tr>'
pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
matches = re.findall(pattern, content)
for m in matches:
death_time = m[0].replace(' ', ' ').replace(",", "")
death_level = ""
death_killer = ""
death_by_player = False
if m[1].find("Died") != -1:
regex_deathinfo_monster = r'Level (\d+) by ([^.]+)'
pattern = re.compile(regex_deathinfo_monster, re.MULTILINE + re.S)
m_deathinfo_monster = re.search(pattern, m[1])
if m_deathinfo_monster:
death_level = m_deathinfo_monster.group(1)
death_killer = m_deathinfo_monster.group(2)
else:
regex_deathinfo_player = r'Level (\d+) by .+?name=([^"]+)'
pattern = re.compile(regex_deathinfo_player, re.MULTILINE + re.S)
m_deathinfo_player = re.search(pattern, m[1])
if m_deathinfo_player:
death_level = m_deathinfo_player.group(1)
death_killer = urllib.parse.unquote_plus(m_deathinfo_player.group(2))
death_by_player = True
try:
char["deaths"].append({'time': death_time, 'level': int(death_level), 'killer': death_killer,
'byPlayer': death_by_player})
except ValueError:
            # Some pvp deaths have no level, so they raise a ValueError; they are ignored for now.
continue
# Other chars
# note that an empty char list means the character is hidden
# otherwise you'd have at least the same char in the list
char['chars'] = []
try:
# See if there is a character list
startIndex = content.index("<B>Characters</B>")
content = content[startIndex:]
# Find characters
regex_chars = r'<TD WIDTH=10%><NOBR>([^<]+)[^?]+.+?VALUE=\"([^\"]+)'
pattern = re.compile(regex_chars, re.MULTILINE + re.S)
m = re.findall(pattern, content)
if m:
for (world, name) in m:
name = urllib.parse.unquote_plus(name)
char['chars'].append({'name': name, 'world': world})
except Exception:
pass
return char
def get_rashid_city() -> str:
"""Returns the city Rashid is currently in."""
offset = get_tibia_time_zone() - get_local_timezone()
# Server save is at 10am, so in tibia a new day starts at that hour
tibia_time = datetime.now() + timedelta(hours=offset - 10)
return ["Svargrond",
"Liberty Bay",
"Port Hope",
"Ankrahmun",
"Darashia",
"Edron",
"Carlin"][tibia_time.weekday()]
def get_monster(name):
"""Returns a dictionary with a monster's info, if no exact match was found, it returns a list of suggestions.
The dictionary has the following keys: name, id, hp, exp, maxdmg, elem_physical, elem_holy,
elem_death, elem_fire, elem_energy, elem_ice, elem_earth, elem_drown, elem_lifedrain, senseinvis,
arm, image."""
# Reading monster database
c = tibiaDatabase.cursor()
c.execute("SELECT * FROM Creatures WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%"+name+"%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower() or len(result) == 1:
monster = result[0]
else:
return [x['title'] for x in result]
try:
if monster['health'] is None or monster['health'] < 1:
monster['health'] = None
c.execute("SELECT Items.title as name, percentage, min, max "
"FROM CreatureDrops, Items "
"WHERE Items.id = CreatureDrops.itemid AND creatureid = ? "
"ORDER BY percentage DESC",
(monster["id"],))
monster["loot"] = c.fetchall()
return monster
finally:
c.close()
def get_item(name):
"""Returns a dictionary containing an item's info, if no exact match was found, it returns a list of suggestions.
The dictionary has the following keys: name, look_text, npcs_sold*, value_sell, npcs_bought*, value_buy.
*npcs_sold and npcs_bought are list, each element is a dictionary with the keys: name, city."""
# Reading item database
c = tibiaDatabase.cursor()
# Search query
c.execute("SELECT * FROM Items WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower() or len(result) == 1:
item = result[0]
else:
return [x['title'] for x in result]
try:
# Checking if item exists
if item is not None:
# Checking NPCs that buy the item
c.execute("SELECT NPCs.title, city, value "
"FROM Items, SellItems, NPCs "
"WHERE Items.name LIKE ? AND SellItems.itemid = Items.id AND NPCs.id = vendorid "
"ORDER BY value DESC", (name,))
npcs = []
value_sell = None
            # Use a distinct variable for the NPC so the item search term in
            # `name` is not clobbered (it is reused by the BuyItems query below)
            for npc in c:
                npc_name = npc["title"]
                city = npc["city"].title()
                if value_sell is None:
                    value_sell = npc["value"]
                elif npc["value"] != value_sell:
                    break
                # Replacing cities for special npcs and adding colors
                if npc_name == 'Alesar' or npc_name == 'Yaman':
                    city = 'Green Djinn\'s Fortress'
                    item["color"] = Colour.green()
                elif npc_name == 'Nah\'Bob' or npc_name == 'Haroun':
                    city = 'Blue Djinn\'s Fortress'
                    item["color"] = Colour.blue()
                elif npc_name == 'Rashid':
                    city = get_rashid_city()
                    item["color"] = Colour(0xF0E916)
                elif npc_name == 'Yasir':
                    city = 'his boat'
                elif npc_name == 'Briasol':
                    item["color"] = Colour(0xA958C4)
                npcs.append({"name": npc_name, "city": city})
item['npcs_sold'] = npcs
item['value_sell'] = value_sell
# Checking NPCs that sell the item
c.execute("SELECT NPCs.title, city, value "
"FROM Items, BuyItems, NPCs "
"WHERE Items.name LIKE ? AND BuyItems.itemid = Items.id AND NPCs.id = vendorid "
"ORDER BY value ASC", (name,))
npcs = []
value_buy = None
            for npc in c:
                npc_name = npc["title"]
                city = npc["city"].title()
                if value_buy is None:
                    value_buy = npc["value"]
                elif npc["value"] != value_buy:
                    break
                # Replacing cities for special npcs
                if npc_name == 'Alesar' or npc_name == 'Yaman':
                    city = 'Green Djinn\'s Fortress'
                elif npc_name == 'Nah\'Bob' or npc_name == 'Haroun':
                    city = 'Blue Djinn\'s Fortress'
                elif npc_name == 'Rashid':
                    # Same weekday rotation, reusing the helper defined above
                    city = get_rashid_city()
                elif npc_name == 'Yasir':
                    city = 'his boat'
                npcs.append({"name": npc_name, "city": city})
item['npcs_bought'] = npcs
item['value_buy'] = value_buy
# Get creatures that drop it
c.execute("SELECT Creatures.title as name, CreatureDrops.percentage "
"FROM CreatureDrops, Creatures "
"WHERE CreatureDrops.creatureid = Creatures.id AND CreatureDrops.itemid = ? "
"ORDER BY percentage DESC", (item["id"],))
item["dropped_by"] = c.fetchall()
# Checking quest rewards:
c.execute("SELECT Quests.title FROM Quests, QuestRewards "
"WHERE Quests.id = QuestRewards.questid and itemid = ?", (item["id"],))
quests = c.fetchall()
item["quests"] = list()
for quest in quests:
item["quests"].append(quest["title"])
return item
finally:
c.close()
return
def parse_tibia_time(tibia_time: str) -> datetime:
"""Gets a time object from a time string from tibia.com"""
tibia_time = tibia_time.replace(",","").replace(" ", " ")
# Getting local time and GMT
t = time.localtime()
u = time.gmtime(time.mktime(t))
# UTC Offset
local_utc_offset = ((timegm(t) - timegm(u)) / 60 / 60)
# Extracting timezone
tz = tibia_time[-4:].strip()
try:
# Convert time string to time object
# Removing timezone cause CEST and CET are not supported
t = datetime.strptime(tibia_time[:-4].strip(), "%b %d %Y %H:%M:%S")
except ValueError:
log.error("parse_tibia_time: couldn't parse '{0}'".format(tibia_time))
return None
# Getting the offset
if tz == "CET":
utc_offset = 1
elif tz == "CEST":
utc_offset = 2
else:
log.error("parse_tibia_time: unknown timezone for '{0}'".format(tibia_time))
return None
# Add/subtract hours to get the real time
return t + timedelta(hours=(local_utc_offset - utc_offset))
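# Example (illustrative timestamp, matching the "%b %d %Y %H:%M:%S" format with
# a trailing CET/CEST tag): parse_tibia_time("Jul 24 2019 18:30:00 CEST")
# returns the equivalent datetime shifted into the machine's local timezone.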
def get_stats(level: int, vocation: str):
"""Returns a dictionary with the stats for a character of a certain vocation and level.
The dictionary has the following keys: vocation, hp, mp, cap."""
try:
level = int(level)
except ValueError:
return "bad level"
if level <= 0:
return "low level"
elif level > 2000:
return "high level"
vocation = vocation.lower().strip()
if vocation in KNIGHT:
hp = (level - 8) * 15 + 185
mp = (level - 0) * 5 + 50
cap = (level - 8) * 25 + 470
vocation = "knight"
elif vocation in PALADIN:
hp = (level - 8) * 10 + 185
mp = (level - 8) * 15 + 90
cap = (level - 8) * 20 + 470
vocation = "paladin"
elif vocation in MAGE:
hp = (level - 0) * 5 + 145
mp = (level - 8) * 30 + 90
cap = (level - 0) * 10 + 390
vocation = "mage"
elif vocation in NO_VOCATION:
vocation = "no vocation"
else:
return "bad vocation"
if level < 8 or vocation == "no vocation":
hp = (level - 0) * 5 + 145
mp = (level - 0) * 5 + 50
cap = (level - 0) * 10 + 390
exp = (50*pow(level, 3)/3) - 100*pow(level, 2) + (850*level/3) - 200
exp_tnl = 50*level*level - 150 * level + 200
return {"vocation": vocation, "hp": hp, "mp": mp, "cap": cap, "exp": int(exp), "exp_tnl": exp_tnl}
def get_share_range(level: int):
"""Returns the share range for a specific level
The returned value is a list with the lower limit and the upper limit in that order."""
return int(round(level * 2 / 3, 0)), int(round(level * 3 / 2, 0))
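# Example: get_share_range(100) == (67, 150), i.e. a level 100 character can
# share experience with levels 67 through 150 inclusive.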
# TODO: Improve formatting to match /monster and /item
def get_spell(name):
"""Returns a dictionary containing a spell's info, a list of possible matches or None"""
c = tibiaDatabase.cursor()
try:
c.execute("""SELECT * FROM Spells WHERE words LIKE ? OR name LIKE ? ORDER BY LENGTH(name) LIMIT 15""",
("%" + name + "%", "%" + name + "%"))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or result[0]["words"].lower() == name.lower() or len(result) == 1:
spell = result[0]
else:
return ["{name} ({words})".format(**x) for x in result]
spell["npcs"] = []
c.execute("""SELECT NPCs.title as name, NPCs.city, SpellNPCs.knight, SpellNPCs.paladin,
SpellNPCs.sorcerer, SpellNPCs.druid FROM NPCs, SpellNPCs
WHERE SpellNPCs.spellid = ? AND SpellNPCs.npcid = NPCs.id""", (spell["id"],))
result = c.fetchall()
for npc in result:
npc["city"] = npc["city"].title()
spell["npcs"].append(npc)
return spell
finally:
c.close()
def get_npc(name):
"""Returns a dictionary containing a NPC's info, a list of possible matches or None"""
c = tibiaDatabase.cursor()
try:
# search query
c.execute("SELECT * FROM NPCs WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower or len(result) == 1:
npc = result[0]
else:
return [x["title"] for x in result]
npc["image"] = 0
c.execute("SELECT Items.name, Items.category, BuyItems.value FROM BuyItems, Items "
"WHERE Items.id = BuyItems.itemid AND BuyItems.vendorid = ?", (npc["id"],))
npc["sell_items"] = c.fetchall()
c.execute("SELECT Items.name, Items.category, SellItems.value FROM SellItems, Items "
"WHERE Items.id = SellItems.itemid AND SellItems.vendorid = ?", (npc["id"],))
npc["buy_items"] = c.fetchall()
return npc
finally:
c.close()
@asyncio.coroutine
def get_house(name, world=None):
"""Returns a dictionary containing a house's info, a list of possible matches or None.
If world is specified, it will also find the current status of the house in that world."""
c = tibiaDatabase.cursor()
try:
# Search query
c.execute("SELECT * FROM Houses WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or len(result) == 1:
house = result[0]
else:
return [x['name'] for x in result]
if world is None or world not in tibia_worlds:
house["fetch"] = False
return house
house["world"] = world
house["url"] = url_house.format(id=house["id"], world=world)
tries = 5
while True:
try:
page = yield from aiohttp.get(house["url"])
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house["name"],
house["id"],
world))
house["fetch"] = False
break
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
continue
# Trimming content to reduce load
try:
start_index = content.index("\"BoxContent\"")
end_index = content.index("</TD></TR></TABLE>")
content = content[start_index:end_index]
except ValueError:
if tries == 0:
log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house["name"],
house["id"],
world))
house["fetch"] = False
break
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
continue
house["fetch"] = True
m = re.search(r'monthly rent is <B>(\d+)', content)
if m:
house['rent'] = int(m.group(1))
if "rented" in content:
house["status"] = "rented"
m = re.search(r'rented by <A?.+name=([^\"]+).+e has paid the rent until <B>([^<]+)</B>', content)
if m:
house["owner"] = urllib.parse.unquote_plus(m.group(1))
house["until"] = m.group(2).replace(" ", " ")
if "move out" in content:
house["status"] = "transferred"
m = re.search(r'will move out on <B>([^<]+)</B> \(time of daily server save\) and will pass the '
r'house to <A.+name=([^\"]+).+ for <B>(\d+) gold', content)
if m:
house["transfer_date"] =house["until"] = m.group(1).replace(" ", " ")
house["transferee"] = urllib.parse.unquote_plus(m.group(2))
house["transfer_price"] = int(m.group(3))
elif "auctioned" in content:
house["status"] = "auctioned"
if ". No bid has" in content:
house["status"] = "empty"
break
m = re.search(r'The auction will end at <B>([^\<]+)</B>\. '
r'The highest bid so far is <B>(\d+).+ by .+name=([^\"]+)\"', content)
if m:
house["auction_end"] = m.group(1).replace(" ", " ")
house["top_bid"] = int(m.group(2))
house["top_bidder"] = urllib.parse.unquote_plus(m.group(3))
break
return house
finally:
c.close()
def get_achievement(name):
"""Returns an achievement (dictionary), a list of possible matches or none"""
c = tibiaDatabase.cursor()
try:
# Search query
c.execute("SELECT * FROM Achievements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or len(result) == 1:
return result[0]
else:
return [x['name'] for x in result]
finally:
c.close()
def get_tibia_time_zone() -> int:
"""Returns Germany's timezone, considering their daylight saving time dates"""
# Find date in Germany
gt = datetime.utcnow() + timedelta(hours=1)
germany_date = date(gt.year, gt.month, gt.day)
dst_start = date(gt.year, 3, (31 - (int(((5 * gt.year) / 4) + 4) % int(7))))
dst_end = date(gt.year, 10, (31 - (int(((5 * gt.year) / 4) + 1) % int(7))))
if dst_start < germany_date < dst_end:
return 2
return 1
def get_voc_abb(vocation: str) -> str:
"""Given a vocation name, it returns an abbreviated string"""
abbrev = {'none': 'N', 'druid': 'D', 'sorcerer': 'S', 'paladin': 'P', 'knight': 'K', 'elder druid': 'ED',
'master sorcerer': 'MS', 'royal paladin': 'RP', 'elite knight': 'EK'}
try:
return abbrev[vocation.lower()]
except KeyError:
return 'N'
def get_voc_emoji(vocation: str) -> str:
"""Given a vocation name, returns a emoji representing it"""
emoji = {'none': EMOJI[":hatching_chick:"], 'druid': EMOJI[":snowflake:"], 'sorcerer': EMOJI[":flame:"], 'paladin': EMOJI[":archery:"],
'knight': EMOJI[":shield:"], 'elder druid': EMOJI[":snowflake:"],
'master sorcerer': EMOJI[":flame:"], 'royal paladin': EMOJI[":archery:"],
'elite knight': EMOJI[":shield:"]}
try:
return emoji[vocation.lower()]
except KeyError:
return EMOJI[":question:"]
def get_pronouns(gender: str):
"""Gets a list of pronouns based on the gender given. Only binary genders supported, sorry."""
gender = gender.lower()
if gender == "female":
pronoun = ["she", "her", "her"]
elif gender == "male":
pronoun = ["he", "his", "him"]
else:
pronoun = ["it", "its", "it"]
return pronoun
def get_map_area(x, y, z, size=15, scale=8, crosshair=True):
"""Gets a minimap picture of a map area
size refers to the radius of the image in actual tibia sqm
    scale is how much the image will be stretched (1 = 1 sqm = 1 pixel)"""
c = tibiaDatabase.cursor()
c.execute("SELECT * FROM WorldMap WHERE z LIKE ?", (z,))
result = c.fetchone()
im = Image.open(io.BytesIO(bytearray(result['image'])))
im = im.crop((x-size, y-size, x+size, y+size))
im = im.resize((size*scale, size*scale))
if crosshair:
draw = ImageDraw.Draw(im)
width, height = im.size
draw.line((0, height/2, width, height/2), fill=128)
draw.line((width/2, 0, width/2, height), fill=128)
img_byte_arr = io.BytesIO()
im.save(img_byte_arr, format='png')
img_byte_arr = img_byte_arr.getvalue()
return img_byte_arr
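# Usage sketch (the coordinates below are placeholders, not a real landmark):
#   png_bytes = get_map_area(32000, 32000, 7)  # 30x30 sqm crop of floor 7, scaled 8x
#   with open("minimap.png", "wb") as f:
#       f.write(png_bytes)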
| 43.123762
| 225
| 0.536678
|
import asyncio
import io
from PIL import Image
from PIL import ImageDraw
from discord import Colour
import datetime
import urllib
import urllib.request
import aiohttp
import re
from datetime import datetime, date, timedelta
from calendar import timegm
import time
from utils.database import userDatabase, tibiaDatabase
from config import highscores_categories, network_retry_delay
from utils.messages import EMOJI
from .general import log, global_online_list, get_local_timezone
ERROR_NETWORK = 0
ERROR_DOESNTEXIST = 1
ERROR_NOTINDATABASE = 2
url_character = "https://secure.tibia.com/community/?subtopic=characters&name="
url_guild = "https://secure.tibia.com/community/?subtopic=guilds&page=view&GuildName="
url_guild_online = "https://secure.tibia.com/community/?subtopic=guilds&page=view&onlyshowonline=1&"
url_house = "https://secure.tibia.com/community/?subtopic=houses&page=view&houseid={id}&world={world}"
url_highscores = "https://secure.tibia.com/community/?subtopic=highscores&world={0}&list={1}&profession={2}&currentpage={3}"
KNIGHT = ["knight", "elite knight", "ek", "k", "kina", "eliteknight","elite"]
PALADIN = ["paladin", "royal paladin", "rp", "p", "pally", "royalpaladin", "royalpally"]
DRUID = ["druid", "elder druid", "ed", "d", "elderdruid", "elder"]
SORCERER = ["sorcerer", "master sorcerer", "ms", "s", "sorc", "mastersorcerer", "master"]
MAGE = DRUID + SORCERER + ["mage"]
NO_VOCATION = ["no vocation", "no voc", "novoc", "nv", "n v", "none", "no", "n", "noob", "noobie", "rook", "rookie"]
highscore_format = {"achievements": "{0} __achievement points__ are **{1}**, on rank **{2}**",
"axe": "{0} __axe fighting__ level is **{1}**, on rank **{2}**",
"club": "{0} __club fighting__ level is **{1}**, on rank **{2}**",
"distance": "{0} __distance fighting__ level is **{1}**, on rank **{2}**",
"fishing": "{0} __fishing__ level is **{1}**, on rank **{2}**",
"fist": "{0} __fist fighting__ level is **{1}**, on rank **{2}**",
"loyalty": "{0} __loyalty points__ are **{1}**, on rank **{2}**",
"magic": "{0} __magic level__ is **{1}**, on rank **{2}**",
"magic_ek": "{0} __magic level__ is **{1}**, on rank **{2}** (knights)",
"magic_rp": "{0} __magic level__ is **{1}**, on rank **{2}** (paladins)",
"shielding": "{0} __shielding__ level is **{1}**, on rank **{2}**",
"sword": "{0} __sword fighting__ level is **{1}**, on rank **{2}**"}
tibia_worlds = ["Amera", "Antica", "Astera", "Aurera", "Aurora", "Bellona", "Belobra", "Beneva", "Calmera", "Calva",
"Calvera", "Candia", "Celesta", "Chrona", "Danera", "Dolera", "Efidia", "Eldera", "Ferobra", "Fidera",
"Fortera", "Garnera", "Guardia", "Harmonia", "Honera", "Hydera", "Inferna", "Iona", "Irmada", "Julera",
"Justera", "Kenora", "Kronera", "Laudera", "Luminera", "Magera", "Menera", "Morta", "Mortera",
"Neptera", "Nerana", "Nika", "Olympa", "Osera", "Pacera", "Premia", "Pythera", "Guilia", "Refugia",
"Rowana", "Secura", "Serdebra", "Shivera", "Silvera", "Solera", "Tavara", "Thera", "Umera", "Unitera",
"Veludera", "Verlana", "Xantera", "Xylana", "Yanara", "Zanera", "Zeluna", "Honbra", "Noctera", "Vita",
"Duna", "Relembra", "Helera", "Tortura", "Macabra"]
def get_character_url(name):
return url_character + urllib.parse.quote(name.encode('iso-8859-1'))
@asyncio.coroutine
def get_highscores(server, category, pagenum, profession=0, tries=5):
url = url_highscores.format(server, category, profession, pagenum)
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category,
pagenum))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_highscores(server, category, pagenum, profession, tries)
return ret
# Trimming content to reduce load
try:
start_index = content.index('<td style="width: 20%;" >Vocation</td>')
        end_index = content.index('<div style="float: left;"><b>» Pages:')
content = content[start_index:end_index]
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category,
pagenum))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_highscores(server, category, pagenum, profession, tries)
return ret
if category == "loyalty":
regex_deaths = r'<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>'
pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
matches = re.findall(pattern, content)
scoreList = []
for m in matches:
scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')})
else:
regex_deaths = r'<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>'
pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
matches = re.findall(pattern, content)
scoreList = []
for m in matches:
scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')})
return scoreList
@asyncio.coroutine
def get_server_online(server, tries=5):
server = server.capitalize()
url = 'https://secure.tibia.com/community/?subtopic=worlds&world=' + server
onlineList = []
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server))
# This should return ERROR_NETWORK, but requires error handling where this function is used
return onlineList
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_server_online(server, tries)
return ret
while not content and tries > 0:
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
tries -= 1
# Trimming content to reduce load
try:
start_index = content.index('<div class="BoxContent"')
end_index = content.index('<div id="ThemeboxesColumn" >')
content = content[start_index:end_index]
except ValueError:
# Website fetch was incomplete due to a network error
if tries == 0:
log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server))
return onlineList
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_server_online(server, tries)
return ret
regex_members = r'<a href="https://secure.tibia.com/community/\?subtopic=characters&name=(.+?)" >.+?</a></td><td style="width:10%;" >(.+?)</td>'
pattern = re.compile(regex_members, re.MULTILINE + re.S)
m = re.findall(pattern, content)
if m:
for (name, level) in m:
name = urllib.parse.unquote_plus(name)
onlineList.append({'name': name, 'level': int(level)})
return onlineList
@asyncio.coroutine
def get_guild_online(guildname, titlecase=True, tries=5):
gstats_url = 'http://guildstats.eu/guild?guild=' + urllib.parse.quote(guildname)
guild = {}
if not titlecase:
try:
page = yield from aiohttp.get(gstats_url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
# Make sure we got a healthy fetch
try:
content.index('<div class="footer">')
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
if "<div>Sorry!" in content:
return ERROR_DOESNTEXIST
# Failsafe in case guildstats.eu changes their websites format
try:
content.index("General info")
content.index("Recruitment")
except Exception:
log.error("getGuildOnline: -IMPORTANT- guildstats.eu seems to have changed their websites format.")
return ERROR_NETWORK
startIndex = content.index("General info")
endIndex = content.index("Recruitment")
content = content[startIndex:endIndex]
m = re.search(r'<a href="set=(.+?)"', content)
if m:
guildname = urllib.parse.unquote_plus(m.group(1))
else:
guildname = guildname.title()
tibia_url = 'https://secure.tibia.com/community/?subtopic=guilds&page=view&GuildName=' + urllib.parse.quote(
guildname) + '&onlyshowonline=1'
# Fetch website
try:
page = yield from aiohttp.get(tibia_url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
try:
startIndex = content.index('<div class="BoxContent"')
endIndex = content.index('<div id="ThemeboxesColumn" >')
content = content[startIndex:endIndex]
except ValueError:
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
    # Check if the guild doesn't exist.
    # Tibia.com has no dedicated error page for a guild that doesn't exist,
    # so the message displayed is "An internal error has occurred. Please try again later!".
if '<div class="Text" >Error</div>' in content:
if titlecase:
ret = yield from get_guild_online(guildname, False)
return ret
else:
return ERROR_DOESNTEXIST
m = re.search(r'founded on (\w+) on ([^.]+)', content)
if m:
guild['world'] = m.group(1)
m = re.search(r'Their home on \w+ is ([^\.]+)', content)
if m:
guild["guildhall"] = m.group(1)
m = re.search(r'<IMG SRC=\"([^\"]+)\" W', content)
if m:
guild['logo_url'] = m.group(1)
# Regex pattern to fetch members
regex_members = r'<TR BGCOLOR=#[\dABCDEF]+><TD>(.+?)</TD>\s</td><TD><A HREF="https://secure.tibia.com/community/\?subtopic=characters&name=(.+?)">.+?</A> *\(*(.*?)\)*</TD>\s<TD>(.+?)</TD>\s<TD>(.+?)</TD>\s<TD>(.+?)</TD>'
pattern = re.compile(regex_members, re.MULTILINE + re.S)
m = re.findall(pattern, content)
guild['members'] = []
# Check if list is empty
if m:
# Building dictionary list from members
for (rank, name, title, vocation, level, joined) in m:
            rank = '' if (rank == '&#160;') else rank
name = urllib.parse.unquote_plus(name)
            joined = joined.replace('&#160;', '-')
guild['members'].append({'rank': rank, 'name': name, 'title': title,
'vocation': vocation, 'level': level, 'joined': joined})
guild['name'] = guildname
return guild
@asyncio.coroutine
def get_character(name, tries=5):
try:
url = url_character + urllib.parse.quote(name.encode('iso-8859-1'))
except UnicodeEncodeError:
return ERROR_DOESNTEXIST
char = dict()
# Fetch website
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getPlayer: Couldn't fetch {0}, network error.".format(name))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_character(name, tries)
return ret
# Trimming content to reduce load
try:
startIndex = content.index('<div class="BoxContent"')
endIndex = content.index("<B>Search Character</B>")
content = content[startIndex:endIndex]
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("getPlayer: Couldn't fetch {0}, network error.".format(name))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_character(name, tries)
return ret
# Check if player exists
if "Name:</td><td>" not in content:
return ERROR_DOESNTEXIST
# TODO: Is there a way to reduce this part?
# Name
m = re.search(r'Name:</td><td>([^<,]+)', content)
if m:
char['name'] = m.group(1).strip()
# Deleted
m = re.search(r', will be deleted at ([^<]+)', content)
if m:
char['deleted'] = True
# Vocation
m = re.search(r'Vocation:</td><td>([^<]+)', content)
if m:
char['vocation'] = m.group(1)
# Level
m = re.search(r'Level:</td><td>(\d+)', content)
if m:
char['level'] = int(m.group(1))
# Use database levels for online characters
for onchar in global_online_list:
if onchar.split("_", 1)[1] == char['name']:
c = userDatabase.cursor()
c.execute("SELECT last_level FROM chars WHERE name LIKE ?", (char['name'],))
result = c.fetchone()
if result:
char['level'] = abs(result["last_level"])
c.close()
break
# World
m = re.search(r'World:</td><td>([^<]+)', content)
if m:
char['world'] = m.group(1)
# Residence (City)
m = re.search(r'Residence:</td><td>([^<]+)', content)
if m:
char['residence'] = m.group(1)
# Marriage
m = re.search(r'Married To:</td><td>?.+name=([^"]+)', content)
if m:
char['married'] = urllib.parse.unquote_plus(m.group(1), encoding='ISO-8859-1')
m = re.search(r'Sex:</td><td>([^<]+)', content)
if m:
if m.group(1) == 'male':
char['gender'] = 'male'
else:
char['gender'] = 'female'
m = re.search(r'Membership:</td><td>([^<]+)\sof the', content)
if m:
char['rank'] = m.group(1)
m = re.search(r'GuildName=.*?([^&]+).+', content)
if m:
char['guild'] = urllib.parse.unquote_plus(m.group(1))
m = re.search(r'House:</td><td> <a href=\"https://secure\.tibia\.com/community/\?subtopic=houses.+houseid=(\d+)'
r'&character=(?:[^&]+)&action=characters\" >([^<]+)</a> \(([^(]+)\) is paid until '
r'([A-z]+).*?;(\d+).*?;(\d+)', content)
if m:
char["house_id"] = m.group(1)
char["house"] = m.group(2)
char["house_town"] = m.group(3)
m = re.search(r'Last Login:</td><td>([^<]+)', content)
if m:
        lastLogin = m.group(1).replace("&#160;", " ").replace(",", "")
if "never" in lastLogin:
char['last_login'] = None
else:
char['last_login'] = lastLogin
c = userDatabase.cursor()
c.execute("SELECT user_id FROM chars WHERE name LIKE ?", (char["name"],))
result = c.fetchone()
char["owner_id"] = None if result is None else result["user_id"]
c = userDatabase.cursor()
c.execute("SELECT vocation, name, id, world FROM chars WHERE name LIKE ?", (name,))
result = c.fetchone()
if result:
if result["vocation"] != char['vocation']:
c.execute("UPDATE chars SET vocation = ? WHERE id = ?", (char['vocation'], result["id"],))
log.info("{0}'s vocation was set to {1} from {2} during get_character()".format(char['name'],
char['vocation'],
result["vocation"]))
if result["name"] != char["name"]:
c.execute("UPDATE chars SET name = ? WHERE id = ?", (char['name'], result["id"],))
log.info("{0} was renamed to {1} during get_character()".format(result["name"], char['name']))
if result["world"] != char["world"]:
c.execute("UPDATE chars SET world = ? WHERE id = ?", (char['world'], result["id"],))
log.info("{0}'s world was set to {1} from {2} during get_character()".format(char['name'],
char['world'],
result["world"]))
c = userDatabase.cursor()
for category in highscores_categories:
c.execute("SELECT "+category+","+category+"_rank FROM chars WHERE name LIKE ?", (name,))
result = c.fetchone()
if result:
if result[category] is not None and result[category+'_rank'] is not None:
char[category] = result[category]
char[category+'_rank'] = result[category+'_rank']
char["deaths"] = []
regex_deaths = r'valign="top" >([^<]+)</td><td>(.+?)</td></tr>'
pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
matches = re.findall(pattern, content)
for m in matches:
        death_time = m[0].replace('&#160;', ' ').replace(",", "")
death_level = ""
death_killer = ""
death_by_player = False
if m[1].find("Died") != -1:
regex_deathinfo_monster = r'Level (\d+) by ([^.]+)'
pattern = re.compile(regex_deathinfo_monster, re.MULTILINE + re.S)
m_deathinfo_monster = re.search(pattern, m[1])
if m_deathinfo_monster:
death_level = m_deathinfo_monster.group(1)
death_killer = m_deathinfo_monster.group(2)
else:
regex_deathinfo_player = r'Level (\d+) by .+?name=([^"]+)'
pattern = re.compile(regex_deathinfo_player, re.MULTILINE + re.S)
m_deathinfo_player = re.search(pattern, m[1])
if m_deathinfo_player:
death_level = m_deathinfo_player.group(1)
death_killer = urllib.parse.unquote_plus(m_deathinfo_player.group(2))
death_by_player = True
try:
char["deaths"].append({'time': death_time, 'level': int(death_level), 'killer': death_killer,
'byPlayer': death_by_player})
except ValueError:
# Some pvp deaths have no level, so they are raising a ValueError, they will be ignored for now.
continue
# Other chars
# note that an empty char list means the character is hidden
# otherwise you'd have at least the same char in the list
char['chars'] = []
try:
# See if there is a character list
startIndex = content.index("<B>Characters</B>")
content = content[startIndex:]
# Find characters
regex_chars = r'<TD WIDTH=10%><NOBR>([^<]+)[^?]+.+?VALUE=\"([^\"]+)'
pattern = re.compile(regex_chars, re.MULTILINE + re.S)
m = re.findall(pattern, content)
if m:
for (world, name) in m:
name = urllib.parse.unquote_plus(name)
char['chars'].append({'name': name, 'world': world})
except Exception:
pass
return char
def get_rashid_city() -> str:
offset = get_tibia_time_zone() - get_local_timezone()
# Server save is at 10am, so in tibia a new day starts at that hour
tibia_time = datetime.now() + timedelta(hours=offset - 10)
return ["Svargrond",
"Liberty Bay",
"Port Hope",
"Ankrahmun",
"Darashia",
"Edron",
"Carlin"][tibia_time.weekday()]
def get_monster(name):
# Reading monster database
c = tibiaDatabase.cursor()
c.execute("SELECT * FROM Creatures WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%"+name+"%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower() or len(result) == 1:
monster = result[0]
else:
return [x['title'] for x in result]
try:
if monster['health'] is None or monster['health'] < 1:
monster['health'] = None
c.execute("SELECT Items.title as name, percentage, min, max "
"FROM CreatureDrops, Items "
"WHERE Items.id = CreatureDrops.itemid AND creatureid = ? "
"ORDER BY percentage DESC",
(monster["id"],))
monster["loot"] = c.fetchall()
return monster
finally:
c.close()
def get_item(name):
# Reading item database
c = tibiaDatabase.cursor()
# Search query
c.execute("SELECT * FROM Items WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower() or len(result) == 1:
item = result[0]
else:
return [x['title'] for x in result]
try:
# Checking if item exists
if item is not None:
# Checking NPCs that buy the item
c.execute("SELECT NPCs.title, city, value "
"FROM Items, SellItems, NPCs "
"WHERE Items.name LIKE ? AND SellItems.itemid = Items.id AND NPCs.id = vendorid "
"ORDER BY value DESC", (name,))
npcs = []
value_sell = None
for npc in c:
name = npc["title"]
city = npc["city"].title()
if value_sell is None:
value_sell = npc["value"]
elif npc["value"] != value_sell:
break
# Replacing cities for special npcs and adding colors
if name == 'Alesar' or name == 'Yaman':
city = 'Green Djinn\'s Fortress'
item["color"] = Colour.green()
elif name == 'Nah\'Bob' or name == 'Haroun':
city = 'Blue Djinn\'s Fortress'
item["color"] = Colour.blue()
elif name == 'Rashid':
city = get_rashid_city()
item["color"] = Colour(0xF0E916)
elif name == 'Yasir':
city = 'his boat'
elif name == 'Briasol':
item["color"] = Colour(0xA958C4)
npcs.append({"name": name, "city": city})
item['npcs_sold'] = npcs
item['value_sell'] = value_sell
# Checking NPCs that sell the item
c.execute("SELECT NPCs.title, city, value "
"FROM Items, BuyItems, NPCs "
"WHERE Items.name LIKE ? AND BuyItems.itemid = Items.id AND NPCs.id = vendorid "
"ORDER BY value ASC", (name,))
npcs = []
value_buy = None
for npc in c:
name = npc["title"]
city = npc["city"].title()
if value_buy is None:
value_buy = npc["value"]
elif npc["value"] != value_buy:
break
# Replacing cities for special npcs
if name == 'Alesar' or name == 'Yaman':
city = 'Green Djinn\'s Fortress'
elif name == 'Nah\'Bob' or name == 'Haroun':
city = 'Blue Djinn\'s Fortress'
                elif name == 'Rashid':
                    city = get_rashid_city()
elif name == 'Yasir':
city = 'his boat'
npcs.append({"name": name, "city": city})
item['npcs_bought'] = npcs
item['value_buy'] = value_buy
# Get creatures that drop it
c.execute("SELECT Creatures.title as name, CreatureDrops.percentage "
"FROM CreatureDrops, Creatures "
"WHERE CreatureDrops.creatureid = Creatures.id AND CreatureDrops.itemid = ? "
"ORDER BY percentage DESC", (item["id"],))
item["dropped_by"] = c.fetchall()
# Checking quest rewards:
c.execute("SELECT Quests.title FROM Quests, QuestRewards "
"WHERE Quests.id = QuestRewards.questid and itemid = ?", (item["id"],))
quests = c.fetchall()
item["quests"] = list()
for quest in quests:
item["quests"].append(quest["title"])
return item
finally:
c.close()
return
def parse_tibia_time(tibia_time: str) -> datetime:
    tibia_time = tibia_time.replace(",", "").replace("&#160;", " ")
# Getting local time and GMT
t = time.localtime()
u = time.gmtime(time.mktime(t))
# UTC Offset
local_utc_offset = ((timegm(t) - timegm(u)) / 60 / 60)
# Extracting timezone
tz = tibia_time[-4:].strip()
try:
# Convert time string to time object
# Removing timezone cause CEST and CET are not supported
t = datetime.strptime(tibia_time[:-4].strip(), "%b %d %Y %H:%M:%S")
except ValueError:
log.error("parse_tibia_time: couldn't parse '{0}'".format(tibia_time))
return None
# Getting the offset
if tz == "CET":
utc_offset = 1
elif tz == "CEST":
utc_offset = 2
else:
log.error("parse_tibia_time: unknown timezone for '{0}'".format(tibia_time))
return None
# Add/subtract hours to get the real time
return t + timedelta(hours=(local_utc_offset - utc_offset))
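# Worked example (illustrative input): on a machine whose local time is UTC,
# local_utc_offset is 0, so parse_tibia_time("Jul 10 2016, 10:13:53 CEST")
# strips the comma, parses the naive timestamp, and shifts it by
# (0 - 2) hours for CEST, returning datetime(2016, 7, 10, 8, 13, 53).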
def get_stats(level: int, vocation: str):
try:
level = int(level)
except ValueError:
return "bad level"
if level <= 0:
return "low level"
elif level > 2000:
return "high level"
vocation = vocation.lower().strip()
if vocation in KNIGHT:
hp = (level - 8) * 15 + 185
mp = (level - 0) * 5 + 50
cap = (level - 8) * 25 + 470
vocation = "knight"
elif vocation in PALADIN:
hp = (level - 8) * 10 + 185
mp = (level - 8) * 15 + 90
cap = (level - 8) * 20 + 470
vocation = "paladin"
elif vocation in MAGE:
hp = (level - 0) * 5 + 145
mp = (level - 8) * 30 + 90
cap = (level - 0) * 10 + 390
vocation = "mage"
elif vocation in NO_VOCATION:
vocation = "no vocation"
else:
return "bad vocation"
if level < 8 or vocation == "no vocation":
hp = (level - 0) * 5 + 145
mp = (level - 0) * 5 + 50
cap = (level - 0) * 10 + 390
exp = (50*pow(level, 3)/3) - 100*pow(level, 2) + (850*level/3) - 200
exp_tnl = 50*level*level - 150 * level + 200
return {"vocation": vocation, "hp": hp, "mp": mp, "cap": cap, "exp": int(exp), "exp_tnl": exp_tnl}
def get_share_range(level: int):
return int(round(level * 2 / 3, 0)), int(round(level * 3 / 2, 0))
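# Worked example: get_share_range(100) returns (67, 150), since
# round(100 * 2 / 3) == 67 and round(100 * 3 / 2) == 150, i.e. a level 100
# character can share party experience with characters from level 67 to 150.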
# TODO: Improve formatting to match /monster and /item
def get_spell(name):
c = tibiaDatabase.cursor()
try:
c.execute("""SELECT * FROM Spells WHERE words LIKE ? OR name LIKE ? ORDER BY LENGTH(name) LIMIT 15""",
("%" + name + "%", "%" + name + "%"))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or result[0]["words"].lower() == name.lower() or len(result) == 1:
spell = result[0]
else:
return ["{name} ({words})".format(**x) for x in result]
spell["npcs"] = []
c.execute("""SELECT NPCs.title as name, NPCs.city, SpellNPCs.knight, SpellNPCs.paladin,
SpellNPCs.sorcerer, SpellNPCs.druid FROM NPCs, SpellNPCs
WHERE SpellNPCs.spellid = ? AND SpellNPCs.npcid = NPCs.id""", (spell["id"],))
result = c.fetchall()
for npc in result:
npc["city"] = npc["city"].title()
spell["npcs"].append(npc)
return spell
finally:
c.close()
def get_npc(name):
c = tibiaDatabase.cursor()
try:
# search query
c.execute("SELECT * FROM NPCs WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower or len(result) == 1:
npc = result[0]
else:
return [x["title"] for x in result]
npc["image"] = 0
c.execute("SELECT Items.name, Items.category, BuyItems.value FROM BuyItems, Items "
"WHERE Items.id = BuyItems.itemid AND BuyItems.vendorid = ?", (npc["id"],))
npc["sell_items"] = c.fetchall()
c.execute("SELECT Items.name, Items.category, SellItems.value FROM SellItems, Items "
"WHERE Items.id = SellItems.itemid AND SellItems.vendorid = ?", (npc["id"],))
npc["buy_items"] = c.fetchall()
return npc
finally:
c.close()
@asyncio.coroutine
def get_house(name, world=None):
c = tibiaDatabase.cursor()
try:
# Search query
c.execute("SELECT * FROM Houses WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or len(result) == 1:
house = result[0]
else:
return [x['name'] for x in result]
if world is None or world not in tibia_worlds:
house["fetch"] = False
return house
house["world"] = world
house["url"] = url_house.format(id=house["id"], world=world)
tries = 5
while True:
try:
page = yield from aiohttp.get(house["url"])
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house["name"],
house["id"],
world))
house["fetch"] = False
break
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
continue
# Trimming content to reduce load
try:
start_index = content.index("\"BoxContent\"")
end_index = content.index("</TD></TR></TABLE>")
content = content[start_index:end_index]
except ValueError:
if tries == 0:
log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house["name"],
house["id"],
world))
house["fetch"] = False
break
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
continue
house["fetch"] = True
m = re.search(r'monthly rent is <B>(\d+)', content)
if m:
house['rent'] = int(m.group(1))
if "rented" in content:
house["status"] = "rented"
m = re.search(r'rented by <A?.+name=([^\"]+).+e has paid the rent until <B>([^<]+)</B>', content)
if m:
house["owner"] = urllib.parse.unquote_plus(m.group(1))
house["until"] = m.group(2).replace(" ", " ")
if "move out" in content:
house["status"] = "transferred"
m = re.search(r'will move out on <B>([^<]+)</B> \(time of daily server save\) and will pass the '
r'house to <A.+name=([^\"]+).+ for <B>(\d+) gold', content)
if m:
house["transfer_date"] =house["until"] = m.group(1).replace("&
house["transferee"] = urllib.parse.unquote_plus(m.group(2))
house["transfer_price"] = int(m.group(3))
elif "auctioned" in content:
house["status"] = "auctioned"
if ". No bid has" in content:
house["status"] = "empty"
break
m = re.search(r'The auction will end at <B>([^\<]+)</B>\. '
r'The highest bid so far is <B>(\d+).+ by .+name=([^\"]+)\"', content)
if m:
house["auction_end"] = m.group(1).replace("&
house["top_bid"] = int(m.group(2))
house["top_bidder"] = urllib.parse.unquote_plus(m.group(3))
break
return house
finally:
c.close()
def get_achievement(name):
c = tibiaDatabase.cursor()
try:
# Search query
c.execute("SELECT * FROM Achievements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or len(result) == 1:
return result[0]
else:
return [x['name'] for x in result]
finally:
c.close()
def get_tibia_time_zone() -> int:
# Find date in Germany
gt = datetime.utcnow() + timedelta(hours=1)
germany_date = date(gt.year, gt.month, gt.day)
dst_start = date(gt.year, 3, (31 - (int(((5 * gt.year) / 4) + 4) % int(7))))
dst_end = date(gt.year, 10, (31 - (int(((5 * gt.year) / 4) + 1) % int(7))))
if dst_start < germany_date < dst_end:
return 2
return 1
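# Sanity check of the DST formula above (European rules: DST runs from the
# last Sunday of March to the last Sunday of October): for gt.year == 2017 it
# gives dst_start == date(2017, 3, 26) and dst_end == date(2017, 10, 29),
# which are indeed the last Sundays of those two months.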
def get_voc_abb(vocation: str) -> str:
abbrev = {'none': 'N', 'druid': 'D', 'sorcerer': 'S', 'paladin': 'P', 'knight': 'K', 'elder druid': 'ED',
'master sorcerer': 'MS', 'royal paladin': 'RP', 'elite knight': 'EK'}
try:
return abbrev[vocation.lower()]
except KeyError:
return 'N'
def get_voc_emoji(vocation: str) -> str:
emoji = {'none': EMOJI[":hatching_chick:"], 'druid': EMOJI[":snowflake:"], 'sorcerer': EMOJI[":flame:"], 'paladin': EMOJI[":archery:"],
'knight': EMOJI[":shield:"], 'elder druid': EMOJI[":snowflake:"],
'master sorcerer': EMOJI[":flame:"], 'royal paladin': EMOJI[":archery:"],
'elite knight': EMOJI[":shield:"]}
try:
return emoji[vocation.lower()]
except KeyError:
return EMOJI[":question:"]
def get_pronouns(gender: str):
gender = gender.lower()
if gender == "female":
pronoun = ["she", "her", "her"]
elif gender == "male":
pronoun = ["he", "his", "him"]
else:
pronoun = ["it", "its", "it"]
return pronoun
def get_map_area(x, y, z, size=15, scale=8, crosshair=True):
c = tibiaDatabase.cursor()
c.execute("SELECT * FROM WorldMap WHERE z LIKE ?", (z,))
result = c.fetchone()
im = Image.open(io.BytesIO(bytearray(result['image'])))
im = im.crop((x-size, y-size, x+size, y+size))
im = im.resize((size*scale, size*scale))
if crosshair:
draw = ImageDraw.Draw(im)
width, height = im.size
draw.line((0, height/2, width, height/2), fill=128)
draw.line((width/2, 0, width/2, height), fill=128)
img_byte_arr = io.BytesIO()
im.save(img_byte_arr, format='png')
img_byte_arr = img_byte_arr.getvalue()
return img_byte_arr
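# Usage sketch for get_map_area (the coordinates are illustrative only); the
# function returns raw PNG bytes, ready to write to disk or send as a file:
#
#   png_bytes = get_map_area(32369, 32241, 7, size=20)
#   with open("map_crop.png", "wb") as f:
#       f.write(png_bytes)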
7905e36603609b025ea50a6cd7eb20e7b67226cd | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/taskapp/celery.py | ingenioustechie/cookiecutter-django-openshift | Apache-2.0
{% if cookiecutter.use_celery == 'y' %}
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('{{cookiecutter.project_slug}}')
class CeleryConfig(AppConfig):
name = '{{cookiecutter.project_slug}}.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
{% if cookiecutter.use_sentry_for_error_reporting == 'y' -%}
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
{%- endif %}
{% if cookiecutter.use_opbeat == 'y' -%}
if hasattr(settings, 'OPBEAT'):
from opbeat.contrib.django.models import client as opbeat_client
from opbeat.contrib.django.models import logger as opbeat_logger
from opbeat.contrib.django.models import register_handlers as opbeat_register_handlers
from opbeat.contrib.celery import register_signal as opbeat_register_signal
try:
opbeat_register_signal(opbeat_client)
except Exception as e:
opbeat_logger.exception('Failed installing celery hook: %s' % e)
if 'opbeat.contrib.django' in settings.INSTALLED_APPS:
opbeat_register_handlers()
{%- endif %}
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
{% else %}
# Use this as a starting point when adding Celery to your project.
# If you are not using Celery, you can remove this app.
{% endif -%}
7905e54108efe55b750029ad1ec3248f1f786037 | glue/viewers/matplotlib/state.py | nilswagner/glue | BSD-3-Clause
from echo import CallbackProperty, SelectionCallbackProperty, keep_in_sync, delay_callback
from matplotlib.colors import to_rgba
from glue.core.message import LayerArtistUpdatedMessage
from glue.core.state_objects import State
from glue.viewers.common.state import ViewerState, LayerState
from glue.utils import defer_draw, avoid_circular
__all__ = ['DeferredDrawSelectionCallbackProperty', 'DeferredDrawCallbackProperty',
'MatplotlibDataViewerState', 'MatplotlibLayerState']
class DeferredDrawCallbackProperty(CallbackProperty):
"""
A callback property where drawing is deferred until
after notify has called all callback functions.
"""
@defer_draw
def notify(self, *args, **kwargs):
super(DeferredDrawCallbackProperty, self).notify(*args, **kwargs)
class DeferredDrawSelectionCallbackProperty(SelectionCallbackProperty):
"""
A callback property where drawing is deferred until
after notify has called all callback functions.
"""
@defer_draw
def notify(self, *args, **kwargs):
super(DeferredDrawSelectionCallbackProperty, self).notify(*args, **kwargs)
VALID_WEIGHTS = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black']
VALID_LOCATIONS = ['draggable', 'best',
'upper right', 'upper left',
'lower left', 'lower right',
'center left', 'center right',
'lower center', 'upper center']
class MatplotlibLegendState(State):
"""The legend state"""
visible = DeferredDrawCallbackProperty(False, docstring="Whether to show the legend")
location = DeferredDrawSelectionCallbackProperty(0, docstring="The location of the legend in the axis")
title = DeferredDrawCallbackProperty("", docstring='The title of the legend')
fontsize = DeferredDrawCallbackProperty(10, docstring='The font size of the title')
alpha = DeferredDrawCallbackProperty(0.6, docstring='Transparency of the legend frame')
frame_color = DeferredDrawCallbackProperty("#ffffff", docstring='Frame color of the legend')
show_edge = DeferredDrawCallbackProperty(True, docstring="Whether to show the edge of the frame ")
text_color = DeferredDrawCallbackProperty("#000000", docstring='Text color of the legend')
def __init__(self, *args, **kwargs):
MatplotlibLegendState.location.set_choices(self, VALID_LOCATIONS)
super().__init__(*args, **kwargs)
self._set_color_choices()
def _set_color_choices(self):
from glue.config import settings
self.frame_color = settings.BACKGROUND_COLOR
self.text_color = settings.FOREGROUND_COLOR
@property
def edge_color(self):
if self.show_edge:
return to_rgba(self.text_color, self.alpha)
else:
return None
@property
def draggable(self):
return self.location == 'draggable'
@property
def mpl_location(self):
if self.location == 'draggable':
return 'best'
else:
return self.location
def update_axes_settings_from(self, state):
        self.visible = state.visible
        self.location = state.location
self.alpha = state.alpha
self.title = state.title
self.fontsize = state.fontsize
self.frame_color = state.frame_color
self.show_edge = state.show_edge
self.text_color = state.text_color
class MatplotlibDataViewerState(ViewerState):
"""
A base class that includes common attributes for viewers based on
Matplotlib.
"""
x_min = DeferredDrawCallbackProperty(docstring='Lower limit of the visible x range')
x_max = DeferredDrawCallbackProperty(docstring='Upper limit of the visible x range')
y_min = DeferredDrawCallbackProperty(docstring='Lower limit of the visible y range')
y_max = DeferredDrawCallbackProperty(docstring='Upper limit of the visible y range')
x_log = DeferredDrawCallbackProperty(False, docstring='Whether the x axis is logarithmic')
y_log = DeferredDrawCallbackProperty(False, docstring='Whether the y axis is logarithmic')
aspect = DeferredDrawCallbackProperty('auto', docstring='Aspect ratio for the axes')
show_axes = DeferredDrawCallbackProperty(True, docstring='Whether the axes are shown')
x_axislabel = DeferredDrawCallbackProperty('', docstring='Label for the x-axis')
y_axislabel = DeferredDrawCallbackProperty('', docstring='Label for the y-axis')
x_axislabel_size = DeferredDrawCallbackProperty(10, docstring='Size of the x-axis label')
y_axislabel_size = DeferredDrawCallbackProperty(10, docstring='Size of the y-axis label')
x_axislabel_weight = DeferredDrawSelectionCallbackProperty(1, docstring='Weight of the x-axis label')
y_axislabel_weight = DeferredDrawSelectionCallbackProperty(1, docstring='Weight of the y-axis label')
x_ticklabel_size = DeferredDrawCallbackProperty(8, docstring='Size of the x-axis tick labels')
y_ticklabel_size = DeferredDrawCallbackProperty(8, docstring='Size of the y-axis tick labels')
def __init__(self, *args, **kwargs):
self._axes_aspect_ratio = None
MatplotlibDataViewerState.x_axislabel_weight.set_choices(self, VALID_WEIGHTS)
MatplotlibDataViewerState.y_axislabel_weight.set_choices(self, VALID_WEIGHTS)
super(MatplotlibDataViewerState, self).__init__(*args, **kwargs)
self.legend = MatplotlibLegendState(*args, **kwargs)
self.add_callback('aspect', self._adjust_limits_aspect, priority=10000)
self.add_callback('x_min', self._adjust_limits_aspect_x, priority=10000)
self.add_callback('x_max', self._adjust_limits_aspect_x, priority=10000)
self.add_callback('y_min', self._adjust_limits_aspect_y, priority=10000)
self.add_callback('y_max', self._adjust_limits_aspect_y, priority=10000)
def _set_axes_aspect_ratio(self, value):
"""
Set the aspect ratio of the axes in which the visualization is shown.
This is a private method that is intended only for internal use, and it
allows this viewer state class to adjust the limits accordingly when
the aspect callback property is set to 'equal'
"""
self._axes_aspect_ratio = value
self._adjust_limits_aspect(aspect_adjustable='both')
def _adjust_limits_aspect_x(self, *args):
self._adjust_limits_aspect(aspect_adjustable='y')
def _adjust_limits_aspect_y(self, *args):
self._adjust_limits_aspect(aspect_adjustable='x')
@avoid_circular
def _adjust_limits_aspect(self, *args, **kwargs):
"""
Adjust the limits of the visualization to take into account the aspect
ratio. This only works if `_set_axes_aspect_ratio` has been called
previously.
"""
if self.aspect == 'auto' or self._axes_aspect_ratio is None:
return
if self.x_min is None or self.x_max is None or self.y_min is None or self.y_max is None:
return
aspect_adjustable = kwargs.pop('aspect_adjustable', 'auto')
changed = None
# Find axes aspect ratio
axes_ratio = self._axes_aspect_ratio
# Put the limits in temporary variables so that we only actually change
# them in one go at the end.
x_min, x_max = self.x_min, self.x_max
y_min, y_max = self.y_min, self.y_max
# Find current data ratio
data_ratio = abs(y_max - y_min) / abs(x_max - x_min)
# Only do something if the data ratio is sufficiently different
# from the axes ratio.
if abs(data_ratio - axes_ratio) / (0.5 * (data_ratio + axes_ratio)) > 0.01:
# We now adjust the limits - which ones we adjust depends on
# the adjust keyword. We also make sure we preserve the
# mid-point of the current coordinates.
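            # Worked example: with axes_ratio = 1.0 and data_ratio = 4.0, the
            # 'both' branch multiplies x_width by sqrt(4.0) = 2 and divides
            # y_width by 2, giving a new data ratio of (4 / 2) / (1 * 2) = 1.0,
            # which matches the axes ratio while keeping both mid-points fixed.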
if aspect_adjustable == 'both':
# We need to adjust both at the same time
x_mid = 0.5 * (x_min + x_max)
x_width = abs(x_max - x_min) * (data_ratio / axes_ratio) ** 0.5
y_mid = 0.5 * (y_min + y_max)
y_width = abs(y_max - y_min) / (data_ratio / axes_ratio) ** 0.5
x_min = x_mid - x_width / 2.
x_max = x_mid + x_width / 2.
y_min = y_mid - y_width / 2.
y_max = y_mid + y_width / 2.
elif (aspect_adjustable == 'auto' and data_ratio > axes_ratio) or aspect_adjustable == 'x':
x_mid = 0.5 * (x_min + x_max)
x_width = abs(y_max - y_min) / axes_ratio
x_min = x_mid - x_width / 2.
x_max = x_mid + x_width / 2.
else:
y_mid = 0.5 * (y_min + y_max)
y_width = abs(x_max - x_min) * axes_ratio
y_min = y_mid - y_width / 2.
y_max = y_mid + y_width / 2.
with delay_callback(self, 'x_min', 'x_max', 'y_min', 'y_max'):
self.x_min = x_min
self.x_max = x_max
self.y_min = y_min
self.y_max = y_max
def update_axes_settings_from(self, state):
# axis
self.x_axislabel_size = state.x_axislabel_size
self.y_axislabel_size = state.y_axislabel_size
self.x_axislabel_weight = state.x_axislabel_weight
self.y_axislabel_weight = state.y_axislabel_weight
self.x_ticklabel_size = state.x_ticklabel_size
self.y_ticklabel_size = state.y_ticklabel_size
# legend
self.legend.update_axes_settings_from(state.legend)
@defer_draw
def _notify_global(self, *args, **kwargs):
super(MatplotlibDataViewerState, self)._notify_global(*args, **kwargs)
def _update_priority(self, name):
if name == 'layers':
return 2
elif name.endswith('_log'):
return 0.5
elif name.endswith(('_min', '_max')):
return 0
else:
return 1
class MatplotlibLayerState(LayerState):
"""
A base class that includes common attributes for all layers in viewers based
on Matplotlib.
"""
color = DeferredDrawCallbackProperty(docstring='The color used to display '
'the data')
alpha = DeferredDrawCallbackProperty(docstring='The transparency used to '
'display the data')
def __init__(self, viewer_state=None, **kwargs):
super(MatplotlibLayerState, self).__init__(viewer_state=viewer_state, **kwargs)
self.color = self.layer.style.color
self.alpha = self.layer.style.alpha
self._sync_color = keep_in_sync(self, 'color', self.layer.style, 'color')
self._sync_alpha = keep_in_sync(self, 'alpha', self.layer.style, 'alpha')
self.add_global_callback(self._notify_layer_update)
def _notify_layer_update(self, **kwargs):
message = LayerArtistUpdatedMessage(self)
if self.layer is not None and self.layer.hub is not None:
self.layer.hub.broadcast(message)
@defer_draw
def _notify_global(self, *args, **kwargs):
super(MatplotlibLayerState, self)._notify_global(*args, **kwargs)
7905e573e10479646c055b28a8389b6a9e6ef922 | train_180131_2.py | OsciiArt/Cookpad | MIT
import numpy as np # linear algebra
np.random.seed(42)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
import time
import os, glob
import cv2
# parameters
format = "%H%M"
ts = time.strftime(format)
base_name = os.path.splitext(__file__)[0] + "_ts" + ts
input_size = 128
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, GaussianNoise
from keras.layers import GlobalMaxPooling2D, Reshape, UpSampling3D, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, CSVLogger, ReduceLROnPlateau
from keras import backend as K
def get_callbacks(save_path, lr=0.001, patience=64):
csv_logger = CSVLogger(save_path + '_log.csv', append=True)
# check_path = save_path + '_e{epoch:02d}_vl{val_loss:.5f}.hdf5'
check_path = save_path
save_checkpoint = ModelCheckpoint(filepath=check_path, monitor='val_loss', save_best_only=True)
    learning_rate_scheduler = ReduceLROnPlateau(patience=8, min_lr=lr * 0.00001)
early_stopping = EarlyStopping(monitor='val_loss',
patience=16,
verbose=1,
min_delta=1e-4,
mode='min')
Callbacks = [csv_logger,
save_checkpoint,
                 # learning_rate_scheduler,
early_stopping
]
return Callbacks
def swish(x):
return x * K.sigmoid(x)
from keras.applications.vgg16 import VGG16
from keras.optimizers import SGD
def get_model(num_class):
base_model = VGG16(weights='imagenet', include_top=False,
input_shape=[input_size,input_size,3], classes=1)
x = base_model.get_layer('block5_pool').output
x = GlobalMaxPooling2D()(x)
x = Dense(512, activation='relu', name='fc2')(x)
x = Dropout(0.3)(x)
x = Dense(512, activation='relu', name='fc3')(x)
x = Dropout(0.3)(x)
predictions = Dense(num_class, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
return model
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
sat_shift_limit=(-255, 255),
val_shift_limit=(-255, 255), u=0.5):
if np.random.random() < u:
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)  # hue, saturation, value
hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
h = cv2.add(h, hue_shift)
sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
s = cv2.add(s, sat_shift)
val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
v = cv2.add(v, val_shift)
image = cv2.merge((h, s, v))
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return image
def randomShiftScaleRotate(image,
shift_limit=(-0.0625, 0.0625),
scale_limit=(-0.1, 0.1),
rotate_limit=(-45, 45), aspect_limit=(0, 0),
borderMode=cv2.BORDER_CONSTANT, u=0.5):
if np.random.random() < u:
height, width, channel = image.shape
angle = np.random.uniform(rotate_limit[0], rotate_limit[1]) # degree
scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
sx = scale * aspect / (aspect ** 0.5)
sy = scale / (aspect ** 0.5)
dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)
cc = np.math.cos(angle / 180 * np.math.pi) * sx
ss = np.math.sin(angle / 180 * np.math.pi) * sy
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
box1 = box0 - np.array([width / 2, height / 2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
borderValue=(
0, 0,
0,))
return image
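# Note on the warp above: box0 holds the four image corners and box1 the same
# corners rotated/scaled about the image centre and shifted by (dx, dy), so
# fitting a perspective transform between the two corner sets applies the
# whole shift + scale + rotate as a single cv2.warpPerspective call.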
def randomHorizontalFlip(image, u=0.5):
if np.random.random() < u:
image = cv2.flip(image, 1)
return image
def randomVerticalFlip(image, u=0.5):
if np.random.random() < u:
image = cv2.flip(image, 0)
return image
def get_random_eraser(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, v_l=0, v_h=255, pixel_level=False):
def eraser(input_img):
img_h, img_w, img_c = input_img.shape
p_1 = np.random.rand()
if p_1 > p:
return input_img
while True:
s = np.random.uniform(s_l, s_h) * img_h * img_w
r = np.random.uniform(r_1, r_2)
w = int(np.sqrt(s / r))
h = int(np.sqrt(s * r))
left = np.random.randint(0, img_w)
top = np.random.randint(0, img_h)
if left + w <= img_w and top + h <= img_h:
break
if pixel_level:
c = np.random.uniform(v_l, v_h, (h, w, img_c))
else:
c = np.random.uniform(v_l, v_h)
input_img[top:top + h, left:left + w, :] = c
return input_img
return eraser
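# get_random_eraser implements random-erasing (cutout-style) augmentation.
# As called below in train_generator (v_h=0., v_l defaulting to 0), the erased
# rectangle is filled with black and covers 2%-40% of the image at an aspect
# ratio in [0.3, 1/0.3]. Illustrative call:
#
#   eraser = get_random_eraser(v_h=0.)
#   augmented = eraser(img.copy())  # img: HxWxC uint8/float array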
from multiprocessing import Pool
def load_img(args):
img_path = args
img = cv2.imread(img_path)
# print("img shape", img.shape)
img = cv2.resize(img, (input_size, input_size))
img = randomHueSaturationValue(img,
hue_shift_limit=(-5, 5),
sat_shift_limit=(-1, 1),
val_shift_limit=(-2, 2),
u=0.5)
img = randomShiftScaleRotate(img,
shift_limit=(-0.2, 0.2),
scale_limit=(-0.2, 0.5),
rotate_limit=(-30, 30),
aspect_limit=(-0.2, 0.2),
u=0.5)
img = randomHorizontalFlip(img)
img = randomVerticalFlip(img)
return img
def train_generator(x_train, y_train, img_dir, batch_size, shuffle=True):
# x_train = x_train.as_matrix()
# y_train = y_train.as_matrix()
y_train = np.eye(55)[y_train]
batch_index = 0
n = x_train.shape[0]
# print("n", n)
eraser = get_random_eraser(v_h=0.)
pool = Pool()
while 1:
if batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (batch_index * batch_size) % n
if n >= current_index + batch_size:
current_batch_size = batch_size
batch_index += 1
else:
current_batch_size = n - current_index
batch_index = 0
batch_id = index_array[current_index: current_index + current_batch_size]
batch_x = pool.map(load_img,
[img_dir + '/{}'.format(x_train[id])
for id in batch_id])
for id in range(len(batch_x)):
img = batch_x[id]
            img = eraser(img)
# img =eraser(img)
# img =eraser(img)
# img =eraser(img)
# img =eraser(img)
batch_x[id] = img
batch_x = np.array(batch_x, np.float32) / 255
batch_y = y_train[index_array[current_index: current_index + current_batch_size]]
# print("batch shape", batch_x.shape, batch_y.shape)
yield (batch_x, batch_y)
def get_mixer(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3):
def mixer(img1, img2, mask1, mask2):
img_h, img_w, img_c = img1.shape
p_1 = np.random.rand()
if p_1 > p:
return img1, mask1
while True:
s = np.random.uniform(s_l, s_h) * img_h * img_w
r = np.random.uniform(r_1, r_2)
w = int(np.sqrt(s / r))
h = int(np.sqrt(s * r))
left = np.random.randint(0, img_w)
top = np.random.randint(0, img_h)
if left + w <= img_w and top + h <= img_h:
break
img1[top:top + h, left:left + w, :] = img2[top:top + h, left:left + w, :]
mask1[top:top + h, left:left + w, :] = mask2[top:top + h, left:left + w, :]
return img1, mask1
return mixer
def mix_generator(X_train, Y_train, img_dir, batch_size, shuffle=True):
alpha = 0.2
gen1 = train_generator(X_train, Y_train, img_dir, batch_size, shuffle)
gen2 = train_generator(X_train, Y_train, img_dir, batch_size, shuffle)
while True:
batch1 = next(gen1)
batch2 = next(gen2)
current_batch_size = batch1[0].shape[0]
l = np.random.beta(alpha, alpha, current_batch_size)
X_l = l.reshape(current_batch_size, 1, 1, 1)
Y_l = l.reshape(current_batch_size, 1)
batch_x = batch1[0] * X_l + batch2[0] * (1 - X_l)
batch_y = batch1[1] * Y_l + batch2[1] * (1 - Y_l)
yield (batch_x, batch_y)
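# mix_generator implements mixup-style augmentation: for each sample a weight
# l is drawn from Beta(alpha, alpha) with alpha = 0.2 (a U-shaped density, so
# most draws land near 0 or 1), and the image batch and one-hot label batch
# are blended identically as l * batch1 + (1 - l) * batch2.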
def test_generator(x_train, img_dir, batch_size, shuffle=True):
# x_train = x_train.as_matrix()
# y_train = y_train.as_matrix()
batch_index = 0
n = x_train.shape[0]
# print("n", n)
eraser = get_random_eraser(v_h=0.)
while 1:
if batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (batch_index * batch_size) % n
if n >= current_index + batch_size:
current_batch_size = batch_size
batch_index += 1
else:
current_batch_size = n - current_index
batch_index = 0
batch_x = []
batch_id = index_array[current_index: current_index + current_batch_size]
# print(batch_x_base)
for id in batch_id:
# print(x_train[0])
# print(x_train[id])
# print(img_dir + '/{}'.format(x_train[id]))
img = cv2.imread(img_dir + '/{}'.format(x_train[id]))
# print("img shape", img.shape)
img = cv2.resize(img, (input_size, input_size))
img = randomHueSaturationValue(img,
hue_shift_limit=(-5, 5),
sat_shift_limit=(-1, 1),
val_shift_limit=(-2, 2),
u=0.5)
img = randomShiftScaleRotate(img,
shift_limit=(-0.2, 0.2),
scale_limit=(-0.2, 0.2),
rotate_limit=(-30, 30),
aspect_limit = (-0.2, 0.2),
u=0.5)
img = randomHorizontalFlip(img)
# img =eraser(img)
batch_x.append(img)
batch_x = np.array(batch_x, np.float32) / 255
# batch_y = y_train[index_array[current_index: current_index + current_batch_size]]
# print("batch shape", batch_x.shape, batch_y.shape)
yield batch_x
def load_data(train_path="input/train_master.tsv", test_path="input/sample_submit.tsv"):
train = pd.read_csv(train_path, delimiter="\t", index_col=False)
test = pd.read_csv(test_path, delimiter="\t", index_col=False, header=None)
print("train shape", train.shape)
print(train.head())
X_train = train['file_name'].as_matrix()
y_train = train['category_id'].as_matrix()
# y_train = np.eye(55)[y_train]
# print(y_train[:5])
# print(y_train.shape)
X_test = test.iloc[:,0]
return X_train, y_train, X_test
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics import log_loss
def train(epochs, seed):
# parameter
batch_size = 128
num_class = 55
save_path = base_name + "_seed" + str(seed)
model_path = "_"
# Load data
X_train, y_train, X_test = load_data()
# CV
ids_train_split, ids_valid_split = train_test_split(np.arange(X_train.shape[0]),
random_state=42, test_size=0.05,
stratify=y_train)
# data process
X_train_cv = X_train[ids_train_split]
y_train_cv = y_train[ids_train_split]
X_holdout = X_train[ids_valid_split]
Y_holdout = y_train[ids_valid_split]
# print(X_train_cv.head())
# define file path and get callbacks
weight_path = "model/" + save_path + '.hdf5'
callbacks = get_callbacks(weight_path, patience=16)
gen = mix_generator(X_train_cv, y_train_cv, "input/train", batch_size)
gen_val = train_generator(X_holdout, Y_holdout, "input/train", batch_size, shuffle=False)
gen_val_pred = test_generator(X_holdout, "input/train", batch_size, shuffle=False)
gen_tst_pred = test_generator(X_test, "input/test", batch_size, shuffle=False)
model = get_model(num_class)
model.fit_generator(generator=gen,
steps_per_epoch=np.ceil(X_train_cv.shape[0] / batch_size),
epochs=epochs,
verbose=1,
callbacks=callbacks,
validation_data=gen_val,
validation_steps=np.ceil(X_holdout.shape[0] / batch_size),
)
# Getting the Best Model
model.load_weights(filepath=weight_path)
# Getting Training Score
# score = model.evaluate_generator(generator=gen_trn_eval,
# steps=np.ceil(X_train.shape[0]/batch_size))
# print('Train loss:', score[0])
# print('Train accuracy:', score[1])
# Getting Valid Score
score = model.evaluate_generator(generator=gen_val,
steps=np.ceil(X_holdout.shape[0]/batch_size))
print('Valid loss:', score[0])
print('Valid accuracy:', score[1])
# Getting validation prediction
pred_valid = model.predict_generator(generator=gen_val_pred,
steps=np.ceil(X_holdout.shape[0]/batch_size))
# Getting Test prediction
pred_test = model.predict_generator(generator=gen_tst_pred,
steps=np.ceil(X_test.shape[0]/batch_size))
submission = pd.DataFrame({'id': X_test, 'predict': np.argmax(pred_test, axis=1)})
submit_path = "output/submission" + save_path + "_val_loss" + str(score[0]) + "_val_acc" + str(score[1]) + ".tsv"
submission.to_csv(submit_path, index=False, header=False, sep='\t')
np.save("input/" + base_name + "_valid.npy", pred_valid)
np.save("input/" + base_name + "_test.npy", pred_test)
def main():
train(epochs=250, seed=0)
if __name__ == "__main__": main()
| 35.669683
| 117
| 0.571356
|
import numpy as np
np.random.seed(42)
import pandas as pd
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
import time
import os, glob
import cv2
format = "%H%M"
ts = time.strftime(format)
base_name = os.path.splitext(__file__)[0] + "_ts" + ts
input_size = 128
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, GaussianNoise
from keras.layers import GlobalMaxPooling2D, Reshape, UpSampling3D, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, CSVLogger, ReduceLROnPlateau
from keras import backend as K
def get_callbacks(save_path, lr=0.001, patience=64):
csv_logger = CSVLogger(save_path + '_log.csv', append=True)
check_path = save_path
save_checkpoint = ModelCheckpoint(filepath=check_path, monitor='val_loss', save_best_only=True)
    learning_rate_scheduler = ReduceLROnPlateau(patience=8, min_lr=lr * 0.00001)
early_stopping = EarlyStopping(monitor='val_loss',
patience=16,
verbose=1,
min_delta=1e-4,
mode='min')
Callbacks = [csv_logger,
save_checkpoint,
early_stopping
]
return Callbacks
def swish(x):
return x * K.sigmoid(x)
from keras.applications.vgg16 import VGG16
from keras.optimizers import SGD
def get_model(num_class):
base_model = VGG16(weights='imagenet', include_top=False,
input_shape=[input_size,input_size,3], classes=1)
x = base_model.get_layer('block5_pool').output
x = GlobalMaxPooling2D()(x)
x = Dense(512, activation='relu', name='fc2')(x)
x = Dropout(0.3)(x)
x = Dense(512, activation='relu', name='fc3')(x)
x = Dropout(0.3)(x)
predictions = Dense(num_class, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
return model
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
sat_shift_limit=(-255, 255),
val_shift_limit=(-255, 255), u=0.5):
if np.random.random() < u:
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(image)
hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
h = cv2.add(h, hue_shift)
sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
s = cv2.add(s, sat_shift)
val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
v = cv2.add(v, val_shift)
image = cv2.merge((h, s, v))
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return image
def randomShiftScaleRotate(image,
shift_limit=(-0.0625, 0.0625),
scale_limit=(-0.1, 0.1),
rotate_limit=(-45, 45), aspect_limit=(0, 0),
borderMode=cv2.BORDER_CONSTANT, u=0.5):
if np.random.random() < u:
height, width, channel = image.shape
angle = np.random.uniform(rotate_limit[0], rotate_limit[1])
scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
sx = scale * aspect / (aspect ** 0.5)
sy = scale / (aspect ** 0.5)
dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)
cc = np.math.cos(angle / 180 * np.math.pi) * sx
ss = np.math.sin(angle / 180 * np.math.pi) * sy
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
box1 = box0 - np.array([width / 2, height / 2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
borderValue=(
0, 0,
0,))
return image
def randomHorizontalFlip(image, u=0.5):
if np.random.random() < u:
image = cv2.flip(image, 1)
return image
def randomVerticalFlip(image, u=0.5):
if np.random.random() < u:
image = cv2.flip(image, 0)
return image
def get_random_eraser(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, v_l=0, v_h=255, pixel_level=False):
def eraser(input_img):
img_h, img_w, img_c = input_img.shape
p_1 = np.random.rand()
if p_1 > p:
return input_img
while True:
s = np.random.uniform(s_l, s_h) * img_h * img_w
r = np.random.uniform(r_1, r_2)
w = int(np.sqrt(s / r))
h = int(np.sqrt(s * r))
left = np.random.randint(0, img_w)
top = np.random.randint(0, img_h)
if left + w <= img_w and top + h <= img_h:
break
if pixel_level:
c = np.random.uniform(v_l, v_h, (h, w, img_c))
else:
c = np.random.uniform(v_l, v_h)
input_img[top:top + h, left:left + w, :] = c
return input_img
return eraser
from multiprocessing import Pool
def load_img(args):
img_path = args
img = cv2.imread(img_path)
img = cv2.resize(img, (input_size, input_size))
img = randomHueSaturationValue(img,
hue_shift_limit=(-5, 5),
sat_shift_limit=(-1, 1),
val_shift_limit=(-2, 2),
u=0.5)
img = randomShiftScaleRotate(img,
shift_limit=(-0.2, 0.2),
scale_limit=(-0.2, 0.5),
rotate_limit=(-30, 30),
aspect_limit=(-0.2, 0.2),
u=0.5)
img = randomHorizontalFlip(img)
img = randomVerticalFlip(img)
return img
def train_generator(x_train, y_train, img_dir, batch_size, shuffle=True):
y_train = np.eye(55)[y_train]
batch_index = 0
n = x_train.shape[0]
eraser = get_random_eraser(v_h=0.)
pool = Pool()
while 1:
if batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (batch_index * batch_size) % n
if n >= current_index + batch_size:
current_batch_size = batch_size
batch_index += 1
else:
current_batch_size = n - current_index
batch_index = 0
batch_id = index_array[current_index: current_index + current_batch_size]
batch_x = pool.map(load_img,
[img_dir + '/{}'.format(x_train[id])
for id in batch_id])
for id in range(len(batch_x)):
img = batch_x[id]
            img = eraser(img)
batch_x[id] = img
batch_x = np.array(batch_x, np.float32) / 255
batch_y = y_train[index_array[current_index: current_index + current_batch_size]]
yield (batch_x, batch_y)
def get_mixer(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3):
def mixer(img1, img2, mask1, mask2):
img_h, img_w, img_c = img1.shape
p_1 = np.random.rand()
if p_1 > p:
return img1, mask1
while True:
s = np.random.uniform(s_l, s_h) * img_h * img_w
r = np.random.uniform(r_1, r_2)
w = int(np.sqrt(s / r))
h = int(np.sqrt(s * r))
left = np.random.randint(0, img_w)
top = np.random.randint(0, img_h)
if left + w <= img_w and top + h <= img_h:
break
img1[top:top + h, left:left + w, :] = img2[top:top + h, left:left + w, :]
mask1[top:top + h, left:left + w, :] = mask2[top:top + h, left:left + w, :]
return img1, mask1
return mixer
def mix_generator(X_train, Y_train, img_dir, batch_size, shuffle=True):
alpha = 0.2
gen1 = train_generator(X_train, Y_train, img_dir, batch_size, shuffle)
gen2 = train_generator(X_train, Y_train, img_dir, batch_size, shuffle)
while True:
batch1 = next(gen1)
batch2 = next(gen2)
current_batch_size = batch1[0].shape[0]
l = np.random.beta(alpha, alpha, current_batch_size)
X_l = l.reshape(current_batch_size, 1, 1, 1)
Y_l = l.reshape(current_batch_size, 1)
batch_x = batch1[0] * X_l + batch2[0] * (1 - X_l)
batch_y = batch1[1] * Y_l + batch2[1] * (1 - Y_l)
yield (batch_x, batch_y)
def test_generator(x_train, img_dir, batch_size, shuffle=True):
batch_index = 0
n = x_train.shape[0]
eraser = get_random_eraser(v_h=0.)
while 1:
if batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (batch_index * batch_size) % n
if n >= current_index + batch_size:
current_batch_size = batch_size
batch_index += 1
else:
current_batch_size = n - current_index
batch_index = 0
batch_x = []
batch_id = index_array[current_index: current_index + current_batch_size]
for id in batch_id:
img = cv2.imread(img_dir + '/{}'.format(x_train[id]))
img = cv2.resize(img, (input_size, input_size))
img = randomHueSaturationValue(img,
hue_shift_limit=(-5, 5),
sat_shift_limit=(-1, 1),
val_shift_limit=(-2, 2),
u=0.5)
            img = randomShiftScaleRotate(img,
                                         shift_limit=(-0.2, 0.2),
                                         scale_limit=(-0.2, 0.2),
                                         rotate_limit=(-30, 30),
                                         aspect_limit=(-0.2, 0.2),
                                         u=0.5)
img = randomHorizontalFlip(img)
batch_x.append(img)
batch_x = np.array(batch_x, np.float32) / 255
yield batch_x
def load_data(train_path="input/train_master.tsv", test_path="input/sample_submit.tsv"):
train = pd.read_csv(train_path, delimiter="\t", index_col=False)
test = pd.read_csv(test_path, delimiter="\t", index_col=False, header=None)
print("train shape", train.shape)
print(train.head())
X_train = train['file_name'].as_matrix()
y_train = train['category_id'].as_matrix()
X_test = test.iloc[:,0]
return X_train, y_train, X_test
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics import log_loss
def train(epochs, seed):
batch_size = 128
num_class = 55
save_path = base_name + "_seed" + str(seed)
model_path = "_"
X_train, y_train, X_test = load_data()
ids_train_split, ids_valid_split = train_test_split(np.arange(X_train.shape[0]),
random_state=42, test_size=0.05,
stratify=y_train)
X_train_cv = X_train[ids_train_split]
y_train_cv = y_train[ids_train_split]
X_holdout = X_train[ids_valid_split]
Y_holdout = y_train[ids_valid_split]
weight_path = "model/" + save_path + '.hdf5'
callbacks = get_callbacks(weight_path, patience=16)
gen = mix_generator(X_train_cv, y_train_cv, "input/train", batch_size)
gen_val = train_generator(X_holdout, Y_holdout, "input/train", batch_size, shuffle=False)
gen_val_pred = test_generator(X_holdout, "input/train", batch_size, shuffle=False)
gen_tst_pred = test_generator(X_test, "input/test", batch_size, shuffle=False)
model = get_model(num_class)
model.fit_generator(generator=gen,
steps_per_epoch=np.ceil(X_train_cv.shape[0] / batch_size),
epochs=epochs,
verbose=1,
callbacks=callbacks,
validation_data=gen_val,
validation_steps=np.ceil(X_holdout.shape[0] / batch_size),
)
model.load_weights(filepath=weight_path)
score = model.evaluate_generator(generator=gen_val,
steps=np.ceil(X_holdout.shape[0]/batch_size))
print('Valid loss:', score[0])
print('Valid accuracy:', score[1])
pred_valid = model.predict_generator(generator=gen_val_pred,
steps=np.ceil(X_holdout.shape[0]/batch_size))
pred_test = model.predict_generator(generator=gen_tst_pred,
steps=np.ceil(X_test.shape[0]/batch_size))
submission = pd.DataFrame({'id': X_test, 'predict': np.argmax(pred_test, axis=1)})
submit_path = "output/submission" + save_path + "_val_loss" + str(score[0]) + "_val_acc" + str(score[1]) + ".tsv"
submission.to_csv(submit_path, index=False, header=False, sep='\t')
np.save("input/" + base_name + "_valid.npy", pred_valid)
np.save("input/" + base_name + "_test.npy", pred_test)
def main():
train(epochs=250, seed=0)
if __name__ == "__main__": main()
| true
| true
|
7905e5c9e9b982c826e59096d4908cf7e176b040
| 1,208
|
py
|
Python
|
test/functional/rpc_named_arguments.py
|
orobio/gulden-official
|
a329faf163b15eabc7ff1d9f07ea87f66df8d27d
|
[
"MIT"
] | 158
|
2016-01-08T10:38:37.000Z
|
2022-02-01T06:28:05.000Z
|
test/functional/rpc_named_arguments.py
|
orobio/gulden-official
|
a329faf163b15eabc7ff1d9f07ea87f66df8d27d
|
[
"MIT"
] | 196
|
2015-11-19T10:59:24.000Z
|
2021-10-07T14:52:13.000Z
|
test/functional/rpc_named_arguments.py
|
orobio/gulden-official
|
a329faf163b15eabc7ff1d9f07ea87f66df8d27d
|
[
"MIT"
] | 71
|
2016-06-25T23:29:04.000Z
|
2022-03-14T10:57:19.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(GuldenTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
h = node.help(command='getblockchaininfo')
assert h.startswith('getblockchaininfo\n')
assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
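        # node.echo returns its arguments as a positional list; omitted named args come back as None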
assert_equal(node.echo(), [])
assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
assert_equal(node.echo(arg1=1), [None, 1])
assert_equal(node.echo(arg9=None), [None]*10)
assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
if __name__ == '__main__':
NamedArgumentTest().main()
| 34.514286
| 101
| 0.679636
|
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(GuldenTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
h = node.help(command='getblockchaininfo')
assert h.startswith('getblockchaininfo\n')
assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
assert_equal(node.echo(), [])
assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
assert_equal(node.echo(arg1=1), [None, 1])
assert_equal(node.echo(arg9=None), [None]*10)
assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
if __name__ == '__main__':
NamedArgumentTest().main()
| true
| true
|
7905e60159629ca5c376e64caf6d3c85fa260c4a
| 1,016
|
py
|
Python
|
src/schemathesis/cli/output/short.py
|
chr1st1ank/schemathesis
|
f2e160d56c1fdce9eac7fee5875b209c8944f54a
|
[
"MIT"
] | 1
|
2021-06-22T20:01:24.000Z
|
2021-06-22T20:01:24.000Z
|
src/schemathesis/cli/output/short.py
|
RonnyPfannschmidt/schemathesis
|
3542d91d2e7402235e7b2dc995ed7017a0265ff6
|
[
"MIT"
] | null | null | null |
src/schemathesis/cli/output/short.py
|
RonnyPfannschmidt/schemathesis
|
3542d91d2e7402235e7b2dc995ed7017a0265ff6
|
[
"MIT"
] | null | null | null |
import click
from ...runner import events
from . import default
def handle_after_execution(context: events.ExecutionContext, event: events.AfterExecution) -> None:
context.endpoints_processed += 1
default.display_execution_result(context, event)
if context.endpoints_processed == event.schema.endpoints_count:
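        # emit a newline to finish the single-line progress output once every endpoint has been processed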
click.echo()
def handle_event(context: events.ExecutionContext, event: events.ExecutionEvent) -> None:
"""Short output style shows single symbols in the progress bar.
Otherwise, identical to the default output style.
"""
if isinstance(event, events.Initialized):
default.handle_initialized(context, event)
if isinstance(event, events.AfterExecution):
context.hypothesis_output.extend(event.hypothesis_output)
handle_after_execution(context, event)
if isinstance(event, events.Finished):
default.handle_finished(context, event)
if isinstance(event, events.Interrupted):
default.handle_interrupted(context, event)
| 36.285714
| 99
| 0.748031
|
import click
from ...runner import events
from . import default
def handle_after_execution(context: events.ExecutionContext, event: events.AfterExecution) -> None:
context.endpoints_processed += 1
default.display_execution_result(context, event)
if context.endpoints_processed == event.schema.endpoints_count:
click.echo()
def handle_event(context: events.ExecutionContext, event: events.ExecutionEvent) -> None:
if isinstance(event, events.Initialized):
default.handle_initialized(context, event)
if isinstance(event, events.AfterExecution):
context.hypothesis_output.extend(event.hypothesis_output)
handle_after_execution(context, event)
if isinstance(event, events.Finished):
default.handle_finished(context, event)
if isinstance(event, events.Interrupted):
default.handle_interrupted(context, event)
| true
| true
|
7905e71fcd2fade30f66daa8e3f4a6a410a2ba76
| 33,141
|
py
|
Python
|
python/ccxt/async_support/livecoin.py
|
caoshitong369/ccxt
|
e0f183448bbf8f95e84c71e5f185404dabab3955
|
[
"MIT"
] | 3
|
2020-06-02T10:48:48.000Z
|
2022-03-12T20:46:01.000Z
|
python/ccxt/async_support/livecoin.py
|
caoshitong369/ccxt
|
e0f183448bbf8f95e84c71e5f185404dabab3955
|
[
"MIT"
] | 3
|
2020-09-08T00:13:39.000Z
|
2021-05-08T20:05:48.000Z
|
python/ccxt/async_support/livecoin.py
|
caoshitong369/ccxt
|
e0f183448bbf8f95e84c71e5f185404dabab3955
|
[
"MIT"
] | 1
|
2020-03-16T03:22:17.000Z
|
2020-03-16T03:22:17.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
class livecoin(Exchange):
def describe(self):
return self.deep_extend(super(livecoin, self).describe(), {
'id': 'livecoin',
'name': 'LiveCoin',
'countries': ['US', 'UK', 'RU'],
'rateLimit': 1000,
'userAgent': self.userAgents['chrome'],
'has': {
'fetchDepositAddress': True,
'fetchDeposits': True,
'CORS': False,
'fetchTickers': True,
'fetchCurrencies': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchOrders': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27980768-f22fc424-638a-11e7-89c9-6010a54ff9be.jpg',
'api': 'https://api.livecoin.net',
'www': 'https://www.livecoin.net',
'doc': 'https://www.livecoin.net/api?lang=en',
'referral': 'https://livecoin.net/?from=Livecoin-CQ1hfx44',
},
'api': {
'public': {
'get': [
'exchange/all/order_book',
'exchange/last_trades',
'exchange/maxbid_minask',
'exchange/order_book',
'exchange/restrictions',
'exchange/ticker', # omit params to get all tickers at once
'info/coinInfo',
],
},
'private': {
'get': [
'exchange/client_orders',
'exchange/order',
'exchange/trades',
'exchange/commission',
'exchange/commissionCommonInfo',
'payment/balances',
'payment/balance',
'payment/get/address',
'payment/history/size',
'payment/history/transactions',
],
'post': [
'exchange/buylimit',
'exchange/buymarket',
'exchange/cancellimit',
'exchange/selllimit',
'exchange/sellmarket',
'payment/out/capitalist',
'payment/out/card',
'payment/out/coin',
'payment/out/okpay',
'payment/out/payeer',
'payment/out/perfectmoney',
'payment/voucher/amount',
'payment/voucher/make',
'payment/voucher/redeem',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.18 / 100,
'taker': 0.18 / 100,
},
},
'commonCurrencies': {
'BTCH': 'Bithash',
'CPC': 'Capricoin',
'CPT': 'Cryptos', # conflict with CPT = Contents Protocol https://github.com/ccxt/ccxt/issues/4920 and https://github.com/ccxt/ccxt/issues/6081
'EDR': 'E-Dinar Coin', # conflicts with EDR for Endor Protocol and EDRCoin
'eETT': 'EETT',
'FirstBlood': '1ST',
'FORTYTWO': '42',
'LEO': 'LeoCoin',
'ORE': 'Orectic',
'PLN': 'Plutaneum', # conflict with Polish Zloty
'RUR': 'RUB',
'SCT': 'SpaceCoin',
'TPI': 'ThaneCoin',
'wETT': 'WETT',
'XBT': 'Bricktox',
},
'exceptions': {
'exact': {
'1': ExchangeError,
'10': AuthenticationError,
'100': ExchangeError, # invalid parameters
'101': AuthenticationError,
'102': AuthenticationError,
'103': InvalidOrder, # invalid currency
'104': InvalidOrder, # invalid amount
'105': InvalidOrder, # unable to block funds
'11': AuthenticationError,
'12': AuthenticationError,
'2': AuthenticationError, # "User not found"
'20': AuthenticationError,
'30': AuthenticationError,
'31': NotSupported,
'32': ExchangeError,
'429': DDoSProtection,
'503': ExchangeNotAvailable,
},
'broad': {
'insufficient funds': InsufficientFunds, # https://github.com/ccxt/ccxt/issues/5749
'NOT FOUND': OrderNotFound,
'Cannot find order': OrderNotFound,
'Minimal amount is': InvalidOrder,
},
},
})
async def fetch_markets(self, params={}):
response = await self.publicGetExchangeTicker(params)
restrictions = await self.publicGetExchangeRestrictions()
restrictionsById = self.index_by(restrictions['restrictions'], 'currencyPair')
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'symbol')
baseId, quoteId = id.split('/')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
coinRestrictions = self.safe_value(restrictionsById, symbol)
precision = {
'price': 5,
'amount': 8,
'cost': 8,
}
limits = {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
}
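            # per-pair restrictions, when provided, override the default price precision and minimum amount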
if coinRestrictions:
precision['price'] = self.safe_integer(coinRestrictions, 'priceScale', 5)
limits['amount']['min'] = self.safe_float(coinRestrictions, 'minLimitQuantity', limits['amount']['min'])
limits['price'] = {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': limits,
'info': market,
})
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetInfoCoinInfo(params)
currencies = self.safe_value(response, 'info')
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'symbol')
# todo: will need to rethink the fees
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
code = self.safe_currency_code(id)
precision = 8 # default precision, todo: fix "magic constants"
walletStatus = self.safe_string(currency, 'walletStatus')
active = (walletStatus == 'normal')
name = self.safe_string(currency, 'name')
fee = self.safe_float(currency, 'withdrawFee')
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(currency, 'minOrderAmount'),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': self.safe_float(currency, 'minOrderAmount'),
'max': None,
},
'withdraw': {
'min': self.safe_float(currency, 'minWithdrawAmount'),
'max': math.pow(10, precision),
},
'deposit': {
'min': self.safe_float(currency, 'minDepositAmount'),
'max': None,
},
},
}
result = self.append_fiat_currencies(result)
return result
def append_fiat_currencies(self, result):
precision = 8
defaults = {
'info': None,
'active': True,
'fee': None,
'precision': precision,
'limits': {
'withdraw': {'min': None, 'max': None},
'deposit': {'min': None, 'max': None},
'amount': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
},
}
currencies = [
{'id': 'USD', 'code': 'USD', 'name': 'US Dollar'},
{'id': 'EUR', 'code': 'EUR', 'name': 'Euro'},
# {'id': 'RUR', 'code': 'RUB', 'name': 'Russian ruble'},
]
currencies.append({
'id': 'RUR',
'code': self.safe_currency_code('RUR'),
'name': 'Russian ruble',
})
for i in range(0, len(currencies)):
currency = currencies[i]
code = currency['code']
result[code] = self.extend(defaults, currency)
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetPaymentBalances(params)
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = None
if code in result:
account = result[code]
else:
account = self.account()
if balance['type'] == 'total':
account['total'] = self.safe_float(balance, 'value')
if balance['type'] == 'available':
account['free'] = self.safe_float(balance, 'value')
if balance['type'] == 'trade':
account['used'] = self.safe_float(balance, 'value')
result[code] = account
return self.parse_balance(result)
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privateGetExchangeCommissionCommonInfo(params)
commission = self.safe_float(response, 'commission')
return {
'info': response,
'maker': commission,
'taker': commission,
}
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'currencyPair': self.market_id(symbol),
'groupByPrice': 'false',
}
if limit is not None:
request['depth'] = limit # 100
response = await self.publicGetExchangeOrderBook(self.extend(request, params))
timestamp = self.safe_integer(response, 'timestamp')
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
vwap = self.safe_float(ticker, 'vwap')
baseVolume = self.safe_float(ticker, 'volume')
quoteVolume = None
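        # derive quote volume from base volume and VWAP when both are present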
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'best_bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'best_ask'),
'askVolume': None,
'vwap': self.safe_float(ticker, 'vwap'),
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetExchangeTicker(params)
tickers = self.index_by(response, 'symbol')
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
}
ticker = await self.publicGetExchangeTicker(self.extend(request, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "time": 1409935047,
# "id": 99451,
# "price": 350,
# "quantity": 2.85714285,
# "type": "BUY"
# }
#
# fetchMyTrades(private)
#
# {
# "datetime": 1435844369,
# "id": 30651619,
# "type": "sell",
# "symbol": "BTC/EUR",
# "price": 230,
# "quantity": 0.1,
# "commission": 0,
# "clientorderid": 1472837650
# }
timestamp = self.safe_timestamp_2(trade, 'time', 'datetime')
fee = None
feeCost = self.safe_float(trade, 'commission')
if feeCost is not None:
feeCurrency = market['quote'] if market else None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'clientorderid')
id = self.safe_string(trade, 'id')
side = self.safe_string_lower(trade, 'type')
amount = self.safe_float(trade, 'quantity')
price = self.safe_float(trade, 'price')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
symbol = None
if market is not None:
symbol = market['symbol']
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
            # 'orderDesc': 'true',  # or 'false'; if True then new orders will be first, otherwise old orders will be first
# 'offset': 0, # page offset, position of the first item on the page
}
if limit is not None:
request['limit'] = limit
response = await self.privateGetExchangeTrades(self.extend(request, params))
#
# [
# {
# "datetime": 1435844369,
# "id": 30651619,
# "type": "sell",
# "symbol": "BTC/EUR",
# "price": 230,
# "quantity": 0.1,
# "commission": 0,
# "clientorderid": 1472837650
# },
# {
# "datetime": 1435844356,
# "id": 30651618,
# "type": "sell",
# "symbol": "BTC/EUR",
# "price": 230,
# "quantity": 0.2,
# "commission": 0.092,
# "clientorderid": 1472837651
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
}
response = await self.publicGetExchangeLastTrades(self.extend(request, params))
#
# [
# {
# "time": 1409935047,
# "id": 99451,
# "price": 350,
# "quantity": 2.85714285,
# "type": "BUY"
# },
# {
# "time": 1409934792,
# "id": 99450,
# "price": 350,
# "quantity": 0.57142857,
# "type": "SELL"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'orderId': id,
}
response = await self.privateGetExchangeOrder(self.extend(request, params))
return self.parse_order(response)
def parse_order_status(self, status):
statuses = {
'OPEN': 'open',
'PARTIALLY_FILLED': 'open',
'EXECUTED': 'closed',
'CANCELLED': 'canceled',
'PARTIALLY_FILLED_AND_CANCELLED': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
timestamp = None
if 'lastModificationTime' in order:
timestamp = self.safe_string(order, 'lastModificationTime')
if timestamp is not None:
if timestamp.find('T') >= 0:
timestamp = self.parse8601(timestamp)
else:
timestamp = self.safe_integer(order, 'lastModificationTime')
# TODO currently not supported by livecoin
# trades = self.parse_trades(order['trades'], market, since, limit)
trades = None
status = self.parse_order_status(self.safe_string_2(order, 'status', 'orderStatus'))
symbol = None
if market is None:
marketId = self.safe_string(order, 'currencyPair')
marketId = self.safe_string(order, 'symbol', marketId)
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
type = self.safe_string_lower(order, 'type')
side = None
if type is not None:
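            # e.g. "limit_sell" -> type "limit", side "sell"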
orderType = type.split('_')
type = orderType[0]
side = orderType[1]
price = self.safe_float(order, 'price')
# of the next two lines the latter overrides the former, if present in the order structure
remaining = self.safe_float(order, 'remainingQuantity')
remaining = self.safe_float(order, 'remaining_quantity', remaining)
amount = self.safe_float(order, 'quantity', remaining)
filled = None
if remaining is not None:
filled = amount - remaining
cost = None
if filled is not None and price is not None:
cost = filled * price
feeRate = self.safe_float(order, 'commission_rate')
feeCost = None
if cost is not None and feeRate is not None:
feeCost = cost * feeRate
feeCurrency = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
return {
'info': order,
'id': order['id'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'filled': filled,
'remaining': remaining,
'trades': trades,
'fee': {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
},
}
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['currencyPair'] = market['id']
if since is not None:
request['issuedFrom'] = int(since)
if limit is not None:
request['endRow'] = limit - 1
response = await self.privateGetExchangeClientOrders(self.extend(request, params))
result = []
rawOrders = []
if response['data']:
rawOrders = response['data']
for i in range(0, len(rawOrders)):
order = rawOrders[i]
result.append(self.parse_order(order, market))
return self.sort_by(result, 'timestamp')
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'openClosed': 'OPEN',
}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'openClosed': 'CLOSED',
}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
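        # e.g. side "buy" + type "limit" -> privatePostExchangeBuylimit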
method = 'privatePostExchange' + self.capitalize(side) + type
market = self.market(symbol)
request = {
'quantity': self.amount_to_precision(symbol, amount),
'currencyPair': market['id'],
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
response = await getattr(self, method)(self.extend(request, params))
result = {
'info': response,
'id': str(response['orderId']),
}
success = self.safe_value(response, 'success')
if success:
result['status'] = 'open'
return result
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'currencyPair': market['id'],
}
response = await self.privatePostExchangeCancellimit(self.extend(request, params))
message = self.safe_string(response, 'message', self.json(response))
if 'success' in response:
if not response['success']:
raise InvalidOrder(message)
elif 'cancelled' in response:
if response['cancelled']:
return {
'status': 'canceled',
'info': response,
}
else:
raise OrderNotFound(message)
raise ExchangeError(self.id + ' cancelOrder() failed: ' + self.json(response))
async def withdraw(self, code, amount, address, tag=None, params={}):
        # Sometimes the response will be {key: null} for all keys.
# An example is if you attempt to withdraw more than is allowed when withdrawal fees are considered.
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
wallet = address
if tag is not None:
wallet += '::' + tag
request = {
'amount': self.decimal_to_precision(amount, TRUNCATE, currency['precision'], DECIMAL_PLACES),
'currency': currency['id'],
'wallet': wallet,
}
response = await self.privatePostPaymentOutCoin(self.extend(request, params))
id = self.safe_integer(response, 'id')
if id is None:
raise InsufficientFunds(self.id + ' insufficient funds to cover requested withdrawal amount post fees ' + self.json(response))
return {
'info': response,
'id': id,
}
def parse_transaction(self, transaction, currency=None):
# {
# "id": "c853093d5aa06df1c92d79c2...",(tx on deposits, address on withdrawals)
# "type": "DEPOSIT",
# "date": 1553186482676,
# "amount": 712.61266,
# "fee": 0,
# "fixedCurrency": "XVG",
# "taxCurrency": "XVG",
# "variableAmount": null,
# "variableCurrency": null,
# "external": "Coin",
# "login": "USERNAME",
# "externalKey": "....87diPBy......3hTtuwUT78Yi",(address on deposits, tx on withdrawals)
# "documentId": 1110662453
# },
txid = None
address = None
id = self.safe_string(transaction, 'documentId')
amount = self.safe_float(transaction, 'amount')
timestamp = self.safe_integer(transaction, 'date')
type = self.safe_string_lower(transaction, 'type')
currencyId = self.safe_string(transaction, 'fixedCurrency')
feeCost = self.safe_float(transaction, 'fee')
code = self.safe_currency_code(currencyId, currency)
if type == 'withdrawal':
txid = self.safe_string(transaction, 'externalKey')
address = self.safe_string(transaction, 'id')
elif type == 'deposit':
address = self.safe_string(transaction, 'externalKey')
txid = self.safe_string(transaction, 'id')
status = None
if type == 'deposit':
            status = 'ok'  # Deposits are not registered until they arrive in the account; withdrawals are left as None since their status is unclear.
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
endtime = 2505600000 # 29 days - exchange has maximum 30 days.
now = self.milliseconds()
request = {
'types': 'DEPOSIT',
'end': now,
'start': int(since) if (since is not None) else now - endtime,
}
currency = None
if code is not None:
currency = self.currency(code)
if limit is not None:
request['limit'] = limit # default is 100
response = await self.privateGetPaymentHistoryTransactions(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
endtime = 2505600000 # 29 days - exchange has maximum 30 days.
now = self.milliseconds()
request = {
'types': 'WITHDRAWAL',
'end': now,
'start': int(since) if (since is not None) else now - endtime,
}
currency = None
if code is not None:
currency = self.currency(code)
if limit is not None:
request['limit'] = limit # default is 100
if since is not None:
request['start'] = since
response = await self.privateGetPaymentHistoryTransactions(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
async def fetch_deposit_address(self, currency, params={}):
request = {
'currency': currency,
}
response = await self.privateGetPaymentGetAddress(self.extend(request, params))
address = self.safe_string(response, 'wallet')
tag = None
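        # wallets with a tag use the "address::tag" form, so split(':') yields [address, '', tag]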
if address.find(':') >= 0:
parts = address.split(':')
address = parts[0]
tag = parts[2]
self.check_address(address)
return {
'currency': currency,
'address': address,
'tag': tag,
'info': response,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + path
query = self.urlencode(self.keysort(params))
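        # parameters are sorted and form-encoded before being signed with HMAC-SHA256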
if method == 'GET':
if params:
url += '?' + query
if api == 'private':
self.check_required_credentials()
if method == 'POST':
body = query
signature = self.hmac(self.encode(query), self.encode(self.secret), hashlib.sha256)
headers = {
'Api-Key': self.apiKey,
'Sign': signature.upper(),
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if code >= 300:
feedback = self.id + ' ' + body
exact = self.exceptions['exact']
errorCode = self.safe_string(response, 'errorCode')
if errorCode in exact:
raise exact[errorCode](feedback)
else:
raise ExchangeError(feedback)
# returns status code 200 even if success == False
success = self.safe_value(response, 'success', True)
if not success:
feedback = self.id + ' ' + body
broad = self.exceptions['broad']
message = self.safe_string_2(response, 'message', 'exception')
if message is not None:
broadKey = self.findBroadlyMatchedKey(broad, message)
if broadKey is not None:
raise broad[broadKey](feedback)
raise ExchangeError(feedback)
| 39.406659
| 160
| 0.50264
|
from ccxt.async_support.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
class livecoin(Exchange):
def describe(self):
return self.deep_extend(super(livecoin, self).describe(), {
'id': 'livecoin',
'name': 'LiveCoin',
'countries': ['US', 'UK', 'RU'],
'rateLimit': 1000,
'userAgent': self.userAgents['chrome'],
'has': {
'fetchDepositAddress': True,
'fetchDeposits': True,
'CORS': False,
'fetchTickers': True,
'fetchCurrencies': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchOrders': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27980768-f22fc424-638a-11e7-89c9-6010a54ff9be.jpg',
'api': 'https://api.livecoin.net',
'www': 'https://www.livecoin.net',
'doc': 'https://www.livecoin.net/api?lang=en',
'referral': 'https://livecoin.net/?from=Livecoin-CQ1hfx44',
},
'api': {
'public': {
'get': [
'exchange/all/order_book',
'exchange/last_trades',
'exchange/maxbid_minask',
'exchange/order_book',
'exchange/restrictions',
'exchange/ticker',
'info/coinInfo',
],
},
'private': {
'get': [
'exchange/client_orders',
'exchange/order',
'exchange/trades',
'exchange/commission',
'exchange/commissionCommonInfo',
'payment/balances',
'payment/balance',
'payment/get/address',
'payment/history/size',
'payment/history/transactions',
],
'post': [
'exchange/buylimit',
'exchange/buymarket',
'exchange/cancellimit',
'exchange/selllimit',
'exchange/sellmarket',
'payment/out/capitalist',
'payment/out/card',
'payment/out/coin',
'payment/out/okpay',
'payment/out/payeer',
'payment/out/perfectmoney',
'payment/voucher/amount',
'payment/voucher/make',
'payment/voucher/redeem',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.18 / 100,
'taker': 0.18 / 100,
},
},
'commonCurrencies': {
'BTCH': 'Bithash',
'CPC': 'Capricoin',
'CPT': 'Cryptos',
'EDR': 'E-Dinar Coin',
'eETT': 'EETT',
'FirstBlood': '1ST',
'FORTYTWO': '42',
'LEO': 'LeoCoin',
'ORE': 'Orectic',
'PLN': 'Plutaneum',
'RUR': 'RUB',
'SCT': 'SpaceCoin',
'TPI': 'ThaneCoin',
'wETT': 'WETT',
'XBT': 'Bricktox',
},
'exceptions': {
'exact': {
'1': ExchangeError,
'10': AuthenticationError,
'100': ExchangeError,
'101': AuthenticationError,
'102': AuthenticationError,
'103': InvalidOrder,
'104': InvalidOrder,
'105': InvalidOrder,
'11': AuthenticationError,
'12': AuthenticationError,
'2': AuthenticationError,
'20': AuthenticationError,
'30': AuthenticationError,
'31': NotSupported,
'32': ExchangeError,
'429': DDoSProtection,
'503': ExchangeNotAvailable,
},
'broad': {
'insufficient funds': InsufficientFunds,
'NOT FOUND': OrderNotFound,
'Cannot find order': OrderNotFound,
'Minimal amount is': InvalidOrder,
},
},
})
async def fetch_markets(self, params={}):
response = await self.publicGetExchangeTicker(params)
restrictions = await self.publicGetExchangeRestrictions()
restrictionsById = self.index_by(restrictions['restrictions'], 'currencyPair')
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'symbol')
baseId, quoteId = id.split('/')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
coinRestrictions = self.safe_value(restrictionsById, symbol)
precision = {
'price': 5,
'amount': 8,
'cost': 8,
}
limits = {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
}
if coinRestrictions:
precision['price'] = self.safe_integer(coinRestrictions, 'priceScale', 5)
limits['amount']['min'] = self.safe_float(coinRestrictions, 'minLimitQuantity', limits['amount']['min'])
limits['price'] = {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': limits,
'info': market,
})
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetInfoCoinInfo(params)
currencies = self.safe_value(response, 'info')
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
precision = 8
walletStatus = self.safe_string(currency, 'walletStatus')
active = (walletStatus == 'normal')
name = self.safe_string(currency, 'name')
fee = self.safe_float(currency, 'withdrawFee')
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(currency, 'minOrderAmount'),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': self.safe_float(currency, 'minOrderAmount'),
'max': None,
},
'withdraw': {
'min': self.safe_float(currency, 'minWithdrawAmount'),
'max': math.pow(10, precision),
},
'deposit': {
'min': self.safe_float(currency, 'minDepositAmount'),
'max': None,
},
},
}
result = self.append_fiat_currencies(result)
return result
def append_fiat_currencies(self, result):
precision = 8
defaults = {
'info': None,
'active': True,
'fee': None,
'precision': precision,
'limits': {
'withdraw': {'min': None, 'max': None},
'deposit': {'min': None, 'max': None},
'amount': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
},
}
currencies = [
{'id': 'USD', 'code': 'USD', 'name': 'US Dollar'},
{'id': 'EUR', 'code': 'EUR', 'name': 'Euro'},
]
currencies.append({
'id': 'RUR',
'code': self.safe_currency_code('RUR'),
'name': 'Russian ruble',
})
for i in range(0, len(currencies)):
currency = currencies[i]
code = currency['code']
result[code] = self.extend(defaults, currency)
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetPaymentBalances(params)
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = None
if code in result:
account = result[code]
else:
account = self.account()
if balance['type'] == 'total':
account['total'] = self.safe_float(balance, 'value')
if balance['type'] == 'available':
account['free'] = self.safe_float(balance, 'value')
if balance['type'] == 'trade':
account['used'] = self.safe_float(balance, 'value')
result[code] = account
return self.parse_balance(result)
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privateGetExchangeCommissionCommonInfo(params)
commission = self.safe_float(response, 'commission')
return {
'info': response,
'maker': commission,
'taker': commission,
}
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'currencyPair': self.market_id(symbol),
'groupByPrice': 'false',
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetExchangeOrderBook(self.extend(request, params))
timestamp = self.safe_integer(response, 'timestamp')
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
vwap = self.safe_float(ticker, 'vwap')
baseVolume = self.safe_float(ticker, 'volume')
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'best_bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'best_ask'),
'askVolume': None,
'vwap': self.safe_float(ticker, 'vwap'),
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetExchangeTicker(params)
tickers = self.index_by(response, 'symbol')
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
}
ticker = await self.publicGetExchangeTicker(self.extend(request, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
timestamp = self.safe_timestamp_2(trade, 'time', 'datetime')
fee = None
feeCost = self.safe_float(trade, 'commission')
if feeCost is not None:
feeCurrency = market['quote'] if market else None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'clientorderid')
id = self.safe_string(trade, 'id')
side = self.safe_string_lower(trade, 'type')
amount = self.safe_float(trade, 'quantity')
price = self.safe_float(trade, 'price')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
symbol = None
if market is not None:
symbol = market['symbol']
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
# 'offset': 0, # page offset, position of the first item on the page
}
if limit is not None:
request['limit'] = limit
response = await self.privateGetExchangeTrades(self.extend(request, params))
#
# [
# {
# "datetime": 1435844369,
# "id": 30651619,
# "type": "sell",
# "symbol": "BTC/EUR",
# "price": 230,
# "quantity": 0.1,
# "commission": 0,
# "clientorderid": 1472837650
# },
# {
# "datetime": 1435844356,
# "id": 30651618,
# "type": "sell",
# "symbol": "BTC/EUR",
# "price": 230,
# "quantity": 0.2,
# "commission": 0.092,
# "clientorderid": 1472837651
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
}
response = await self.publicGetExchangeLastTrades(self.extend(request, params))
#
# [
# {
# "time": 1409935047,
# "id": 99451,
# "price": 350,
# "quantity": 2.85714285,
# "type": "BUY"
# },
# {
# "time": 1409934792,
# "id": 99450,
# "price": 350,
# "quantity": 0.57142857,
# "type": "SELL"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'orderId': id,
}
response = await self.privateGetExchangeOrder(self.extend(request, params))
return self.parse_order(response)
def parse_order_status(self, status):
statuses = {
'OPEN': 'open',
'PARTIALLY_FILLED': 'open',
'EXECUTED': 'closed',
'CANCELLED': 'canceled',
'PARTIALLY_FILLED_AND_CANCELLED': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
timestamp = None
if 'lastModificationTime' in order:
timestamp = self.safe_string(order, 'lastModificationTime')
if timestamp is not None:
if timestamp.find('T') >= 0:
timestamp = self.parse8601(timestamp)
else:
timestamp = self.safe_integer(order, 'lastModificationTime')
# TODO currently not supported by livecoin
# trades = self.parse_trades(order['trades'], market, since, limit)
trades = None
status = self.parse_order_status(self.safe_string_2(order, 'status', 'orderStatus'))
symbol = None
if market is None:
marketId = self.safe_string(order, 'currencyPair')
marketId = self.safe_string(order, 'symbol', marketId)
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
type = self.safe_string_lower(order, 'type')
side = None
if type is not None:
orderType = type.split('_')
type = orderType[0]
side = orderType[1]
price = self.safe_float(order, 'price')
# of the next two lines the latter overrides the former, if present in the order structure
remaining = self.safe_float(order, 'remainingQuantity')
remaining = self.safe_float(order, 'remaining_quantity', remaining)
amount = self.safe_float(order, 'quantity', remaining)
filled = None
if remaining is not None:
filled = amount - remaining
cost = None
if filled is not None and price is not None:
cost = filled * price
feeRate = self.safe_float(order, 'commission_rate')
feeCost = None
if cost is not None and feeRate is not None:
feeCost = cost * feeRate
feeCurrency = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
return {
'info': order,
'id': order['id'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'filled': filled,
'remaining': remaining,
'trades': trades,
'fee': {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
},
}
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['currencyPair'] = market['id']
if since is not None:
request['issuedFrom'] = int(since)
if limit is not None:
request['endRow'] = limit - 1
response = await self.privateGetExchangeClientOrders(self.extend(request, params))
result = []
rawOrders = []
if response['data']:
rawOrders = response['data']
for i in range(0, len(rawOrders)):
order = rawOrders[i]
result.append(self.parse_order(order, market))
return self.sort_by(result, 'timestamp')
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'openClosed': 'OPEN',
}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'openClosed': 'CLOSED',
}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
method = 'privatePostExchange' + self.capitalize(side) + type
market = self.market(symbol)
request = {
'quantity': self.amount_to_precision(symbol, amount),
'currencyPair': market['id'],
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
response = await getattr(self, method)(self.extend(request, params))
result = {
'info': response,
'id': str(response['orderId']),
}
success = self.safe_value(response, 'success')
if success:
result['status'] = 'open'
return result
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'currencyPair': market['id'],
}
response = await self.privatePostExchangeCancellimit(self.extend(request, params))
message = self.safe_string(response, 'message', self.json(response))
if 'success' in response:
if not response['success']:
raise InvalidOrder(message)
elif 'cancelled' in response:
if response['cancelled']:
return {
'status': 'canceled',
'info': response,
}
else:
raise OrderNotFound(message)
raise ExchangeError(self.id + ' cancelOrder() failed: ' + self.json(response))
async def withdraw(self, code, amount, address, tag=None, params={}):
        # Sometimes the response will be {key: null} for all keys.
# An example is if you attempt to withdraw more than is allowed when withdrawal fees are considered.
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
wallet = address
if tag is not None:
wallet += '::' + tag
request = {
'amount': self.decimal_to_precision(amount, TRUNCATE, currency['precision'], DECIMAL_PLACES),
'currency': currency['id'],
'wallet': wallet,
}
response = await self.privatePostPaymentOutCoin(self.extend(request, params))
id = self.safe_integer(response, 'id')
if id is None:
raise InsufficientFunds(self.id + ' insufficient funds to cover requested withdrawal amount post fees ' + self.json(response))
return {
'info': response,
'id': id,
}
def parse_transaction(self, transaction, currency=None):
# {
# "id": "c853093d5aa06df1c92d79c2...",(tx on deposits, address on withdrawals)
# "type": "DEPOSIT",
# "date": 1553186482676,
# "amount": 712.61266,
# "fee": 0,
# "fixedCurrency": "XVG",
# "taxCurrency": "XVG",
# "variableAmount": null,
# "variableCurrency": null,
# "external": "Coin",
# "login": "USERNAME",
# "externalKey": "....87diPBy......3hTtuwUT78Yi",(address on deposits, tx on withdrawals)
# "documentId": 1110662453
# },
txid = None
address = None
id = self.safe_string(transaction, 'documentId')
amount = self.safe_float(transaction, 'amount')
timestamp = self.safe_integer(transaction, 'date')
type = self.safe_string_lower(transaction, 'type')
currencyId = self.safe_string(transaction, 'fixedCurrency')
feeCost = self.safe_float(transaction, 'fee')
code = self.safe_currency_code(currencyId, currency)
if type == 'withdrawal':
txid = self.safe_string(transaction, 'externalKey')
address = self.safe_string(transaction, 'id')
elif type == 'deposit':
address = self.safe_string(transaction, 'externalKey')
txid = self.safe_string(transaction, 'id')
status = None
if type == 'deposit':
            status = 'ok'  # Deposits are not registered until they arrive in the account; withdrawals are left as None since their status is unclear.
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
        endtime = 2505600000  # 29 days in milliseconds - the exchange allows a window of at most 30 days.
now = self.milliseconds()
request = {
'types': 'DEPOSIT',
'end': now,
'start': int(since) if (since is not None) else now - endtime,
}
currency = None
if code is not None:
currency = self.currency(code)
if limit is not None:
request['limit'] = limit # default is 100
response = await self.privateGetPaymentHistoryTransactions(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
        endtime = 2505600000  # 29 days in milliseconds - the exchange allows a window of at most 30 days.
now = self.milliseconds()
request = {
'types': 'WITHDRAWAL',
'end': now,
'start': int(since) if (since is not None) else now - endtime,
}
currency = None
if code is not None:
currency = self.currency(code)
if limit is not None:
request['limit'] = limit # default is 100
response = await self.privateGetPaymentHistoryTransactions(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
async def fetch_deposit_address(self, currency, params={}):
request = {
'currency': currency,
}
response = await self.privateGetPaymentGetAddress(self.extend(request, params))
address = self.safe_string(response, 'wallet')
tag = None
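        # withdraw() above encodes tagged wallets as 'address::tag', so a plain
        # split(':') yields ['address', '', 'tag']; hence parts[0]/parts[2] below.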
if address.find(':') >= 0:
parts = address.split(':')
address = parts[0]
tag = parts[2]
self.check_address(address)
return {
'currency': currency,
'address': address,
'tag': tag,
'info': response,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + path
query = self.urlencode(self.keysort(params))
if method == 'GET':
if params:
url += '?' + query
if api == 'private':
self.check_required_credentials()
if method == 'POST':
body = query
signature = self.hmac(self.encode(query), self.encode(self.secret), hashlib.sha256)
headers = {
'Api-Key': self.apiKey,
'Sign': signature.upper(),
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
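    # Signing recap (standalone sketch, not part of this class; the key and
    # query values are made up): the private API signs the key-sorted,
    # url-encoded query with HMAC-SHA256 and sends the upper-cased hex digest:
    #
    #     import hashlib, hmac
    #     query = 'currencyPair=BTC%2FUSD&orderId=100'  # keysort + urlencode
    #     sig = hmac.new(b'SECRET', query.encode(), hashlib.sha256).hexdigest().upper()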
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if code >= 300:
feedback = self.id + ' ' + body
exact = self.exceptions['exact']
errorCode = self.safe_string(response, 'errorCode')
if errorCode in exact:
raise exact[errorCode](feedback)
else:
raise ExchangeError(feedback)
# returns status code 200 even if success == False
success = self.safe_value(response, 'success', True)
if not success:
feedback = self.id + ' ' + body
broad = self.exceptions['broad']
message = self.safe_string_2(response, 'message', 'exception')
if message is not None:
broadKey = self.findBroadlyMatchedKey(broad, message)
if broadKey is not None:
raise broad[broadKey](feedback)
raise ExchangeError(feedback)
| true
| true
|
7905e75924613ecbc9cd377ce8bfe419febd280c
| 8,372
|
py
|
Python
|
container_sdk/model/next_builder/storyboard_node_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
container_sdk/model/next_builder/storyboard_node_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
container_sdk/model/next_builder/storyboard_node_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: storyboard_node.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from container_sdk.model.next_builder import storyboard_brick_pb2 as container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2
from container_sdk.model.next_builder import storyboard_route_pb2 as container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2
from container_sdk.model.next_builder import micro_app_project_pb2 as container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='storyboard_node.proto',
package='next_builder',
syntax='proto3',
serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/next_builder'),
serialized_pb=_b('\n\x15storyboard_node.proto\x12\x0cnext_builder\x1a\x37\x63ontainer_sdk/model/next_builder/storyboard_brick.proto\x1a\x37\x63ontainer_sdk/model/next_builder/storyboard_route.proto\x1a\x38\x63ontainer_sdk/model/next_builder/micro_app_project.proto\"\xe8\x02\n\x0eStoryboardNode\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\x12\r\n\x05\x61ppId\x18\x03 \x01(\t\x12\n\n\x02id\x18\x04 \x01(\t\x12\x12\n\nmountPoint\x18\x05 \x01(\t\x12\x0c\n\x04sort\x18\x06 \x01(\x05\x12\x0c\n\x04type\x18\x07 \x01(\t\x12,\n\x05\x62rick\x18\x08 \x01(\x0b\x32\x1d.next_builder.StoryboardBrick\x12,\n\x05route\x18\t \x01(\x0b\x32\x1d.next_builder.StoryboardRoute\x12.\n\x07project\x18\n \x01(\x0b\x32\x1d.next_builder.MicroAppProject\x12,\n\x06parent\x18\x0b \x01(\x0b\x32\x1c.next_builder.StoryboardNode\x12.\n\x08\x63hildren\x18\x0c \x03(\x0b\x32\x1c.next_builder.StoryboardNodeBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/next_builderb\x06proto3')
,
dependencies=[container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2.DESCRIPTOR,container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2.DESCRIPTOR,container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2.DESCRIPTOR,])
_STORYBOARDNODE = _descriptor.Descriptor(
name='StoryboardNode',
full_name='next_builder.StoryboardNode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='next_builder.StoryboardNode.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alias', full_name='next_builder.StoryboardNode.alias', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appId', full_name='next_builder.StoryboardNode.appId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='next_builder.StoryboardNode.id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mountPoint', full_name='next_builder.StoryboardNode.mountPoint', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort', full_name='next_builder.StoryboardNode.sort', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='next_builder.StoryboardNode.type', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='brick', full_name='next_builder.StoryboardNode.brick', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='route', full_name='next_builder.StoryboardNode.route', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='project', full_name='next_builder.StoryboardNode.project', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent', full_name='next_builder.StoryboardNode.parent', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='children', full_name='next_builder.StoryboardNode.children', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=212,
serialized_end=572,
)
_STORYBOARDNODE.fields_by_name['brick'].message_type = container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2._STORYBOARDBRICK
_STORYBOARDNODE.fields_by_name['route'].message_type = container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2._STORYBOARDROUTE
_STORYBOARDNODE.fields_by_name['project'].message_type = container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2._MICROAPPPROJECT
_STORYBOARDNODE.fields_by_name['parent'].message_type = _STORYBOARDNODE
_STORYBOARDNODE.fields_by_name['children'].message_type = _STORYBOARDNODE
DESCRIPTOR.message_types_by_name['StoryboardNode'] = _STORYBOARDNODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StoryboardNode = _reflection.GeneratedProtocolMessageType('StoryboardNode', (_message.Message,), {
'DESCRIPTOR' : _STORYBOARDNODE,
'__module__' : 'storyboard_node_pb2'
# @@protoc_insertion_point(class_scope:next_builder.StoryboardNode)
})
_sym_db.RegisterMessage(StoryboardNode)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 52.987342
| 992
| 0.774845
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from container_sdk.model.next_builder import storyboard_brick_pb2 as container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2
from container_sdk.model.next_builder import storyboard_route_pb2 as container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2
from container_sdk.model.next_builder import micro_app_project_pb2 as container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='storyboard_node.proto',
package='next_builder',
syntax='proto3',
serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/next_builder'),
serialized_pb=_b('\n\x15storyboard_node.proto\x12\x0cnext_builder\x1a\x37\x63ontainer_sdk/model/next_builder/storyboard_brick.proto\x1a\x37\x63ontainer_sdk/model/next_builder/storyboard_route.proto\x1a\x38\x63ontainer_sdk/model/next_builder/micro_app_project.proto\"\xe8\x02\n\x0eStoryboardNode\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\x12\r\n\x05\x61ppId\x18\x03 \x01(\t\x12\n\n\x02id\x18\x04 \x01(\t\x12\x12\n\nmountPoint\x18\x05 \x01(\t\x12\x0c\n\x04sort\x18\x06 \x01(\x05\x12\x0c\n\x04type\x18\x07 \x01(\t\x12,\n\x05\x62rick\x18\x08 \x01(\x0b\x32\x1d.next_builder.StoryboardBrick\x12,\n\x05route\x18\t \x01(\x0b\x32\x1d.next_builder.StoryboardRoute\x12.\n\x07project\x18\n \x01(\x0b\x32\x1d.next_builder.MicroAppProject\x12,\n\x06parent\x18\x0b \x01(\x0b\x32\x1c.next_builder.StoryboardNode\x12.\n\x08\x63hildren\x18\x0c \x03(\x0b\x32\x1c.next_builder.StoryboardNodeBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/next_builderb\x06proto3')
,
dependencies=[container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2.DESCRIPTOR,container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2.DESCRIPTOR,container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2.DESCRIPTOR,])
_STORYBOARDNODE = _descriptor.Descriptor(
name='StoryboardNode',
full_name='next_builder.StoryboardNode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='next_builder.StoryboardNode.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alias', full_name='next_builder.StoryboardNode.alias', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appId', full_name='next_builder.StoryboardNode.appId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='next_builder.StoryboardNode.id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mountPoint', full_name='next_builder.StoryboardNode.mountPoint', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort', full_name='next_builder.StoryboardNode.sort', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='next_builder.StoryboardNode.type', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='brick', full_name='next_builder.StoryboardNode.brick', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='route', full_name='next_builder.StoryboardNode.route', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='project', full_name='next_builder.StoryboardNode.project', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent', full_name='next_builder.StoryboardNode.parent', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='children', full_name='next_builder.StoryboardNode.children', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=212,
serialized_end=572,
)
_STORYBOARDNODE.fields_by_name['brick'].message_type = container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2._STORYBOARDBRICK
_STORYBOARDNODE.fields_by_name['route'].message_type = container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2._STORYBOARDROUTE
_STORYBOARDNODE.fields_by_name['project'].message_type = container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2._MICROAPPPROJECT
_STORYBOARDNODE.fields_by_name['parent'].message_type = _STORYBOARDNODE
_STORYBOARDNODE.fields_by_name['children'].message_type = _STORYBOARDNODE
DESCRIPTOR.message_types_by_name['StoryboardNode'] = _STORYBOARDNODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StoryboardNode = _reflection.GeneratedProtocolMessageType('StoryboardNode', (_message.Message,), {
'DESCRIPTOR' : _STORYBOARDNODE,
'__module__' : 'storyboard_node_pb2'
# @@protoc_insertion_point(class_scope:next_builder.StoryboardNode)
})
_sym_db.RegisterMessage(StoryboardNode)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true
| true
|
7905e8c4689bed6057d5fcb5d2b0b1513da8aebc
| 32
|
py
|
Python
|
mica/archive/cda/__init__.py
|
sot/mica
|
136a9b0d9521efda5208067b51cf0c8700b4def3
|
[
"BSD-3-Clause"
] | null | null | null |
mica/archive/cda/__init__.py
|
sot/mica
|
136a9b0d9521efda5208067b51cf0c8700b4def3
|
[
"BSD-3-Clause"
] | 150
|
2015-01-23T17:09:53.000Z
|
2022-01-10T00:50:54.000Z
|
mica/archive/cda/__init__.py
|
sot/mica
|
136a9b0d9521efda5208067b51cf0c8700b4def3
|
[
"BSD-3-Clause"
] | null | null | null |
from .services import * # noqa
| 16
| 31
| 0.6875
|
from .services import *
| true
| true
|
7905e91cb65508a065d22c4e68d6f5a7bd8ca20b
| 21,845
|
py
|
Python
|
saleor/graphql/core/mutations.py
|
quito418/saleor
|
83b831b80472d87e154b2b5bd19390c674350bfb
|
[
"CC-BY-4.0"
] | 1
|
2020-04-08T14:24:43.000Z
|
2020-04-08T14:24:43.000Z
|
saleor/graphql/core/mutations.py
|
weeraravee/saleor
|
83b831b80472d87e154b2b5bd19390c674350bfb
|
[
"CC-BY-4.0"
] | 2
|
2020-06-07T08:48:01.000Z
|
2020-06-07T08:48:02.000Z
|
saleor/graphql/core/mutations.py
|
loftwah/saleor
|
afcdfca0f125147b7f0d4c07993e99608a5ba875
|
[
"CC-BY-4.0"
] | 1
|
2021-03-02T01:50:41.000Z
|
2021-03-02T01:50:41.000Z
|
from itertools import chain
from typing import Tuple, Union
import graphene
from django.contrib.auth import get_user_model
from django.core.exceptions import (
NON_FIELD_ERRORS,
ImproperlyConfigured,
ValidationError,
)
from django.db.models.fields.files import FileField
from graphene import ObjectType
from graphene.types.mutation import MutationOptions
from graphene_django.registry import get_global_registry
from graphql.error import GraphQLError
from graphql_jwt import ObtainJSONWebToken, Verify
from graphql_jwt.exceptions import JSONWebTokenError, PermissionDenied
from ...account import models
from ..account.types import User
from ..utils import get_nodes
from .types import Error, Upload
from .types.common import AccountError
from .utils import from_global_id_strict_type, snake_to_camel_case
from .utils.error_codes import get_error_code_from_error
registry = get_global_registry()
def get_model_name(model):
"""Return name of the model with first letter lowercase."""
model_name = model.__name__
return model_name[:1].lower() + model_name[1:]
def get_output_fields(model, return_field_name):
"""Return mutation output field for model instance."""
model_type = registry.get_type_for_model(model)
if not model_type:
raise ImproperlyConfigured(
"Unable to find type for model %s in graphene registry" % model.__name__
)
fields = {return_field_name: graphene.Field(model_type)}
return fields
def get_error_fields(error_type_class, error_type_field):
return {
error_type_field: graphene.Field(
graphene.List(
graphene.NonNull(error_type_class),
description="List of errors that occurred executing the mutation.",
),
default_value=[],
required=True,
)
}
def validation_error_to_error_type(validation_error: ValidationError) -> list:
"""Convert a ValidationError into a list of Error types."""
err_list = []
if hasattr(validation_error, "error_dict"):
# convert field errors
for field, field_errors in validation_error.error_dict.items():
field = None if field == NON_FIELD_ERRORS else snake_to_camel_case(field)
for err in field_errors:
err_list.append(
(
Error(field=field, message=err.messages[0]),
get_error_code_from_error(err),
err.params,
)
)
else:
# convert non-field errors
for err in validation_error.error_list:
err_list.append(
(
Error(message=err.messages[0]),
get_error_code_from_error(err),
err.params,
)
)
return err_list
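# Added note (illustrative, not part of the original module): an error built as
# ValidationError({"name": ValidationError("Required.", code="required")}) is
# flattened by the function above to
# [(Error(field="name", message="Required."), "required", None)].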
class ModelMutationOptions(MutationOptions):
exclude = None
model = None
return_field_name = None
class BaseMutation(graphene.Mutation):
errors = graphene.List(
graphene.NonNull(Error),
description="List of errors that occurred executing the mutation.",
required=True,
)
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
description=None,
permissions: Tuple = None,
_meta=None,
error_type_class=None,
error_type_field=None,
**options,
):
if not _meta:
_meta = MutationOptions(cls)
if not description:
raise ImproperlyConfigured("No description provided in Meta")
if isinstance(permissions, str):
permissions = (permissions,)
if permissions and not isinstance(permissions, tuple):
raise ImproperlyConfigured(
"Permissions should be a tuple or a string in Meta"
)
_meta.permissions = permissions
_meta.error_type_class = error_type_class
_meta.error_type_field = error_type_field
super().__init_subclass_with_meta__(
description=description, _meta=_meta, **options
)
if error_type_class and error_type_field:
cls._meta.fields.update(
get_error_fields(error_type_class, error_type_field)
)
@classmethod
def _update_mutation_arguments_and_fields(cls, arguments, fields):
cls._meta.arguments.update(arguments)
cls._meta.fields.update(fields)
@classmethod
def get_node_by_pk(
cls, info, graphene_type: ObjectType, pk: Union[int, str], qs=None
):
"""Attempt to resolve a node from the given internal ID.
        either by using the provided queryset object or by calling the type's get_node().
"""
if qs is not None:
return qs.filter(pk=pk).first()
get_node = getattr(graphene_type, "get_node", None)
if get_node:
return get_node(info, pk)
return None
@classmethod
def get_node_or_error(cls, info, node_id, field="id", only_type=None, qs=None):
if not node_id:
return None
try:
if only_type is not None:
pk = from_global_id_strict_type(node_id, only_type, field=field)
else:
# FIXME: warn when supplied only_type is None?
only_type, pk = graphene.Node.from_global_id(node_id)
if isinstance(only_type, str):
only_type = info.schema.get_type(only_type).graphene_type
node = cls.get_node_by_pk(info, graphene_type=only_type, pk=pk, qs=qs)
except (AssertionError, GraphQLError) as e:
raise ValidationError(
{field: ValidationError(str(e), code="graphql_error")}
)
else:
if node is None:
raise ValidationError(
{
field: ValidationError(
"Couldn't resolve to a node: %s" % node_id, code="not_found"
)
}
)
return node
@classmethod
def get_nodes_or_error(cls, ids, field, only_type=None, qs=None):
try:
instances = get_nodes(ids, only_type, qs=qs)
except GraphQLError as e:
raise ValidationError(
{field: ValidationError(str(e), code="graphql_error")}
)
return instances
@classmethod
def clean_instance(cls, info, instance):
"""Clean the instance that was created using the input data.
Once an instance is created, this method runs `full_clean()` to perform
model validation.
"""
try:
instance.full_clean()
except ValidationError as error:
if hasattr(cls._meta, "exclude"):
# Ignore validation errors for fields that are specified as
# excluded.
new_error_dict = {}
for field, errors in error.error_dict.items():
if field not in cls._meta.exclude:
new_error_dict[field] = errors
error.error_dict = new_error_dict
if error.error_dict:
raise error
@classmethod
def construct_instance(cls, instance, cleaned_data):
"""Fill instance fields with cleaned data.
        The `instance` argument is either an empty instance or an already
        existing one fetched from the database. `cleaned_data` is
data to be set in instance fields. Returns `instance` with filled
fields, but not saved to the database.
"""
from django.db import models
opts = instance._meta
for f in opts.fields:
if any(
[
not f.editable,
isinstance(f, models.AutoField),
f.name not in cleaned_data,
]
):
continue
data = cleaned_data[f.name]
if data is None:
# We want to reset the file field value when None was passed
# in the input, but `FileField.save_form_data` ignores None
# values. In that case we manually pass False which clears
# the file.
if isinstance(f, FileField):
data = False
if not f.null:
data = f._get_default()
f.save_form_data(instance, data)
return instance
@classmethod
def check_permissions(cls, context, permissions=None):
"""Determine whether user or service account has rights to perform this mutation.
Default implementation assumes that account is allowed to perform any
mutation. By overriding this method or defining required permissions
in the meta-class, you can restrict access to it.
The `context` parameter is the Context instance associated with the request.
"""
permissions = permissions or cls._meta.permissions
if not permissions:
return True
if context.user.has_perms(permissions):
return True
service_account = getattr(context, "service_account", None)
if service_account and service_account.has_perms(permissions):
return True
return False
@classmethod
def mutate(cls, root, info, **data):
if not cls.check_permissions(info.context):
raise PermissionDenied()
try:
response = cls.perform_mutation(root, info, **data)
if response.errors is None:
response.errors = []
return response
except ValidationError as e:
return cls.handle_errors(e)
@classmethod
def perform_mutation(cls, root, info, **data):
pass
@classmethod
def handle_errors(cls, error: ValidationError, **extra):
errors = validation_error_to_error_type(error)
return cls.handle_typed_errors(errors, **extra)
@classmethod
def handle_typed_errors(cls, errors: list, **extra):
"""Return class instance with errors."""
if (
cls._meta.error_type_class is not None
and cls._meta.error_type_field is not None
):
typed_errors = [
cls._meta.error_type_class(field=e.field, message=e.message, code=code)
for e, code, _params in errors
]
extra.update({cls._meta.error_type_field: typed_errors})
return cls(errors=[e[0] for e in errors], **extra)
class ModelMutation(BaseMutation):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
arguments=None,
model=None,
exclude=None,
return_field_name=None,
_meta=None,
**options,
):
if not model:
raise ImproperlyConfigured("model is required for ModelMutation")
if not _meta:
_meta = ModelMutationOptions(cls)
if exclude is None:
exclude = []
if not return_field_name:
return_field_name = get_model_name(model)
if arguments is None:
arguments = {}
fields = get_output_fields(model, return_field_name)
_meta.model = model
_meta.return_field_name = return_field_name
_meta.exclude = exclude
super().__init_subclass_with_meta__(_meta=_meta, **options)
cls._update_mutation_arguments_and_fields(arguments=arguments, fields=fields)
@classmethod
def clean_input(cls, info, instance, data, input_cls=None):
"""Clean input data received from mutation arguments.
Fields containing IDs or lists of IDs are automatically resolved into
model instances. `instance` argument is the model instance the mutation
is operating on (before setting the input data). `input` is raw input
data the mutation receives.
Override this method to provide custom transformations of incoming
data.
"""
def is_list_of_ids(field):
return (
isinstance(field.type, graphene.List)
and field.type.of_type == graphene.ID
)
def is_id_field(field):
return (
field.type == graphene.ID
or isinstance(field.type, graphene.NonNull)
and field.type.of_type == graphene.ID
)
def is_upload_field(field):
if hasattr(field.type, "of_type"):
return field.type.of_type == Upload
return field.type == Upload
if not input_cls:
input_cls = getattr(cls.Arguments, "input")
cleaned_input = {}
for field_name, field_item in input_cls._meta.fields.items():
if field_name in data:
value = data[field_name]
# handle list of IDs field
if value is not None and is_list_of_ids(field_item):
instances = (
cls.get_nodes_or_error(value, field_name) if value else []
)
cleaned_input[field_name] = instances
# handle ID field
elif value is not None and is_id_field(field_item):
instance = cls.get_node_or_error(info, value, field_name)
cleaned_input[field_name] = instance
# handle uploaded files
elif value is not None and is_upload_field(field_item):
value = info.context.FILES.get(value)
cleaned_input[field_name] = value
# handle other fields
else:
cleaned_input[field_name] = value
return cleaned_input
@classmethod
def _save_m2m(cls, info, instance, cleaned_data):
opts = instance._meta
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, "save_form_data"):
continue
if f.name in cleaned_data and cleaned_data[f.name] is not None:
f.save_form_data(instance, cleaned_data[f.name])
@classmethod
def success_response(cls, instance):
"""Return a success response."""
return cls(**{cls._meta.return_field_name: instance, "errors": []})
@classmethod
def save(cls, info, instance, cleaned_input):
instance.save()
@classmethod
def get_instance(cls, info, **data):
"""Retrieve an instance from the supplied global id.
The expected graphene type can be lazy (str).
"""
object_id = data.get("id")
if object_id:
model_type = registry.get_type_for_model(cls._meta.model)
instance = cls.get_node_or_error(info, object_id, only_type=model_type)
else:
instance = cls._meta.model()
return instance
@classmethod
def perform_mutation(cls, _root, info, **data):
"""Perform model mutation.
Depending on the input data, `mutate` either creates a new instance or
updates an existing one. If `id` argument is present, it is assumed
that this is an "update" mutation. Otherwise, a new instance is
created based on the model associated with this mutation.
"""
instance = cls.get_instance(info, **data)
data = data.get("input")
cleaned_input = cls.clean_input(info, instance, data)
instance = cls.construct_instance(instance, cleaned_input)
cls.clean_instance(info, instance)
cls.save(info, instance, cleaned_input)
cls._save_m2m(info, instance, cleaned_input)
return cls.success_response(instance)
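# Minimal subclass sketch (hypothetical names, added for illustration only):
#
#     class ProductCreate(ModelMutation):
#         class Arguments:
#             input = ProductInput(required=True)
#
#         class Meta:
#             description = "Creates a new product."
#             model = models.Product
#             permissions = ("product.manage_products",)
#
# ModelMutation then resolves input IDs, constructs and validates the instance,
# saves it, and returns it under the `product` field.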
class ModelDeleteMutation(ModelMutation):
class Meta:
abstract = True
@classmethod
def clean_instance(cls, info, instance):
"""Perform additional logic before deleting the model instance.
Override this method to raise custom validation error and abort
the deletion process.
"""
@classmethod
def perform_mutation(cls, _root, info, **data):
"""Perform a mutation that deletes a model instance."""
if not cls.check_permissions(info.context):
raise PermissionDenied()
node_id = data.get("id")
model_type = registry.get_type_for_model(cls._meta.model)
instance = cls.get_node_or_error(info, node_id, only_type=model_type)
if instance:
cls.clean_instance(info, instance)
db_id = instance.id
instance.delete()
# After the instance is deleted, set its ID to the original database's
# ID so that the success response contains ID of the deleted object.
instance.id = db_id
return cls.success_response(instance)
class BaseBulkMutation(BaseMutation):
count = graphene.Int(
required=True, description="Returns how many objects were affected."
)
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(cls, model=None, _meta=None, **kwargs):
if not model:
raise ImproperlyConfigured("model is required for bulk mutation")
if not _meta:
_meta = ModelMutationOptions(cls)
_meta.model = model
super().__init_subclass_with_meta__(_meta=_meta, **kwargs)
@classmethod
def clean_instance(cls, info, instance):
"""Perform additional logic.
Override this method to raise custom validation error and prevent
bulk action on the instance.
"""
@classmethod
def bulk_action(cls, queryset, **kwargs):
"""Implement action performed on queryset."""
raise NotImplementedError
@classmethod
def perform_mutation(cls, _root, info, ids, **data):
"""Perform a mutation that deletes a list of model instances."""
clean_instance_ids, errors = [], {}
        # Allow passing an empty list for a dummy mutation
if not ids:
return 0, errors
instance_model = cls._meta.model
model_type = registry.get_type_for_model(instance_model)
instances = cls.get_nodes_or_error(ids, "id", model_type)
for instance, node_id in zip(instances, ids):
instance_errors = []
# catch individual validation errors to raise them later as
# a single error
try:
cls.clean_instance(info, instance)
except ValidationError as e:
msg = ". ".join(e.messages)
instance_errors.append(msg)
if not instance_errors:
clean_instance_ids.append(instance.pk)
else:
instance_errors_msg = ". ".join(instance_errors)
ValidationError({node_id: instance_errors_msg}).update_error_dict(
errors
)
if errors:
errors = ValidationError(errors)
count = len(clean_instance_ids)
if count:
qs = instance_model.objects.filter(pk__in=clean_instance_ids)
cls.bulk_action(queryset=qs, **data)
return count, errors
@classmethod
def mutate(cls, root, info, **data):
if not cls.check_permissions(info.context):
raise PermissionDenied()
count, errors = cls.perform_mutation(root, info, **data)
if errors:
return cls.handle_errors(errors, count=count)
return cls(errors=errors, count=count)
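# Flow note (added commentary): perform_mutation() validates each instance
# individually, collecting per-node errors keyed by the global ID, and only
# then runs a single bulk_action() over the queryset of instances that passed.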
class ModelBulkDeleteMutation(BaseBulkMutation):
class Meta:
abstract = True
@classmethod
def bulk_action(cls, queryset):
queryset.delete()
class CreateToken(ObtainJSONWebToken):
"""Mutation that authenticates a user and returns token and user data.
It overrides the default graphql_jwt.ObtainJSONWebToken to wrap potential
    authentication errors in our Error type, which is consistent with how the
    rest of the mutations work.
"""
errors = graphene.List(graphene.NonNull(Error), required=True)
account_errors = graphene.List(
graphene.NonNull(AccountError),
description="List of errors that occurred executing the mutation.",
required=True,
)
user = graphene.Field(User, description="A user instance.")
@classmethod
def mutate(cls, root, info, **kwargs):
try:
result = super().mutate(root, info, **kwargs)
except JSONWebTokenError as e:
return CreateToken(errors=[Error(message=str(e))])
except ValidationError as e:
errors = validation_error_to_error_type(e)
return cls.handle_typed_errors(errors)
else:
return result
@classmethod
def handle_typed_errors(cls, errors: list):
account_errors = [
AccountError(field=e.field, message=e.message, code=code)
for e, code, _params in errors
]
return cls(errors=[e[0] for e in errors], account_errors=account_errors)
@classmethod
def resolve(cls, root, info, **kwargs):
return cls(user=info.context.user, errors=[], account_errors=[])
class VerifyToken(Verify):
"""Mutation that confirms if token is valid and also returns user data."""
user = graphene.Field(User)
def resolve_user(self, _info, **_kwargs):
username_field = get_user_model().USERNAME_FIELD
kwargs = {username_field: self.payload.get(username_field)}
return models.User.objects.get(**kwargs)
@classmethod
def mutate(cls, root, info, token, **kwargs):
try:
return super().mutate(root, info, token, **kwargs)
except JSONWebTokenError:
return None
| 34.132813
| 89
| 0.614603
|
from itertools import chain
from typing import Tuple, Union
import graphene
from django.contrib.auth import get_user_model
from django.core.exceptions import (
NON_FIELD_ERRORS,
ImproperlyConfigured,
ValidationError,
)
from django.db.models.fields.files import FileField
from graphene import ObjectType
from graphene.types.mutation import MutationOptions
from graphene_django.registry import get_global_registry
from graphql.error import GraphQLError
from graphql_jwt import ObtainJSONWebToken, Verify
from graphql_jwt.exceptions import JSONWebTokenError, PermissionDenied
from ...account import models
from ..account.types import User
from ..utils import get_nodes
from .types import Error, Upload
from .types.common import AccountError
from .utils import from_global_id_strict_type, snake_to_camel_case
from .utils.error_codes import get_error_code_from_error
registry = get_global_registry()
def get_model_name(model):
model_name = model.__name__
return model_name[:1].lower() + model_name[1:]
def get_output_fields(model, return_field_name):
model_type = registry.get_type_for_model(model)
if not model_type:
raise ImproperlyConfigured(
"Unable to find type for model %s in graphene registry" % model.__name__
)
fields = {return_field_name: graphene.Field(model_type)}
return fields
def get_error_fields(error_type_class, error_type_field):
return {
error_type_field: graphene.Field(
graphene.List(
graphene.NonNull(error_type_class),
description="List of errors that occurred executing the mutation.",
),
default_value=[],
required=True,
)
}
def validation_error_to_error_type(validation_error: ValidationError) -> list:
err_list = []
if hasattr(validation_error, "error_dict"):
for field, field_errors in validation_error.error_dict.items():
field = None if field == NON_FIELD_ERRORS else snake_to_camel_case(field)
for err in field_errors:
err_list.append(
(
Error(field=field, message=err.messages[0]),
get_error_code_from_error(err),
err.params,
)
)
else:
for err in validation_error.error_list:
err_list.append(
(
Error(message=err.messages[0]),
get_error_code_from_error(err),
err.params,
)
)
return err_list
class ModelMutationOptions(MutationOptions):
exclude = None
model = None
return_field_name = None
class BaseMutation(graphene.Mutation):
errors = graphene.List(
graphene.NonNull(Error),
description="List of errors that occurred executing the mutation.",
required=True,
)
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
description=None,
permissions: Tuple = None,
_meta=None,
error_type_class=None,
error_type_field=None,
**options,
):
if not _meta:
_meta = MutationOptions(cls)
if not description:
raise ImproperlyConfigured("No description provided in Meta")
if isinstance(permissions, str):
permissions = (permissions,)
if permissions and not isinstance(permissions, tuple):
raise ImproperlyConfigured(
"Permissions should be a tuple or a string in Meta"
)
_meta.permissions = permissions
_meta.error_type_class = error_type_class
_meta.error_type_field = error_type_field
super().__init_subclass_with_meta__(
description=description, _meta=_meta, **options
)
if error_type_class and error_type_field:
cls._meta.fields.update(
get_error_fields(error_type_class, error_type_field)
)
@classmethod
def _update_mutation_arguments_and_fields(cls, arguments, fields):
cls._meta.arguments.update(arguments)
cls._meta.fields.update(fields)
@classmethod
def get_node_by_pk(
cls, info, graphene_type: ObjectType, pk: Union[int, str], qs=None
):
if qs is not None:
return qs.filter(pk=pk).first()
get_node = getattr(graphene_type, "get_node", None)
if get_node:
return get_node(info, pk)
return None
@classmethod
def get_node_or_error(cls, info, node_id, field="id", only_type=None, qs=None):
if not node_id:
return None
try:
if only_type is not None:
pk = from_global_id_strict_type(node_id, only_type, field=field)
else:
only_type, pk = graphene.Node.from_global_id(node_id)
if isinstance(only_type, str):
only_type = info.schema.get_type(only_type).graphene_type
node = cls.get_node_by_pk(info, graphene_type=only_type, pk=pk, qs=qs)
except (AssertionError, GraphQLError) as e:
raise ValidationError(
{field: ValidationError(str(e), code="graphql_error")}
)
else:
if node is None:
raise ValidationError(
{
field: ValidationError(
"Couldn't resolve to a node: %s" % node_id, code="not_found"
)
}
)
return node
@classmethod
def get_nodes_or_error(cls, ids, field, only_type=None, qs=None):
try:
instances = get_nodes(ids, only_type, qs=qs)
except GraphQLError as e:
raise ValidationError(
{field: ValidationError(str(e), code="graphql_error")}
)
return instances
@classmethod
def clean_instance(cls, info, instance):
try:
instance.full_clean()
except ValidationError as error:
if hasattr(cls._meta, "exclude"):
# Ignore validation errors for fields that are specified as
# excluded.
new_error_dict = {}
for field, errors in error.error_dict.items():
if field not in cls._meta.exclude:
new_error_dict[field] = errors
error.error_dict = new_error_dict
if error.error_dict:
raise error
@classmethod
def construct_instance(cls, instance, cleaned_data):
from django.db import models
opts = instance._meta
for f in opts.fields:
if any(
[
not f.editable,
isinstance(f, models.AutoField),
f.name not in cleaned_data,
]
):
continue
data = cleaned_data[f.name]
if data is None:
# We want to reset the file field value when None was passed
# in the input, but `FileField.save_form_data` ignores None
# values. In that case we manually pass False which clears
# the file.
if isinstance(f, FileField):
data = False
if not f.null:
data = f._get_default()
f.save_form_data(instance, data)
return instance
@classmethod
def check_permissions(cls, context, permissions=None):
permissions = permissions or cls._meta.permissions
if not permissions:
return True
if context.user.has_perms(permissions):
return True
service_account = getattr(context, "service_account", None)
if service_account and service_account.has_perms(permissions):
return True
return False
@classmethod
def mutate(cls, root, info, **data):
if not cls.check_permissions(info.context):
raise PermissionDenied()
try:
response = cls.perform_mutation(root, info, **data)
if response.errors is None:
response.errors = []
return response
except ValidationError as e:
return cls.handle_errors(e)
@classmethod
def perform_mutation(cls, root, info, **data):
pass
@classmethod
def handle_errors(cls, error: ValidationError, **extra):
errors = validation_error_to_error_type(error)
return cls.handle_typed_errors(errors, **extra)
@classmethod
def handle_typed_errors(cls, errors: list, **extra):
if (
cls._meta.error_type_class is not None
and cls._meta.error_type_field is not None
):
typed_errors = [
cls._meta.error_type_class(field=e.field, message=e.message, code=code)
for e, code, _params in errors
]
extra.update({cls._meta.error_type_field: typed_errors})
return cls(errors=[e[0] for e in errors], **extra)
class ModelMutation(BaseMutation):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
arguments=None,
model=None,
exclude=None,
return_field_name=None,
_meta=None,
**options,
):
if not model:
raise ImproperlyConfigured("model is required for ModelMutation")
if not _meta:
_meta = ModelMutationOptions(cls)
if exclude is None:
exclude = []
if not return_field_name:
return_field_name = get_model_name(model)
if arguments is None:
arguments = {}
fields = get_output_fields(model, return_field_name)
_meta.model = model
_meta.return_field_name = return_field_name
_meta.exclude = exclude
super().__init_subclass_with_meta__(_meta=_meta, **options)
cls._update_mutation_arguments_and_fields(arguments=arguments, fields=fields)
@classmethod
def clean_input(cls, info, instance, data, input_cls=None):
def is_list_of_ids(field):
return (
isinstance(field.type, graphene.List)
and field.type.of_type == graphene.ID
)
def is_id_field(field):
return (
field.type == graphene.ID
or isinstance(field.type, graphene.NonNull)
and field.type.of_type == graphene.ID
)
def is_upload_field(field):
if hasattr(field.type, "of_type"):
return field.type.of_type == Upload
return field.type == Upload
if not input_cls:
input_cls = getattr(cls.Arguments, "input")
cleaned_input = {}
for field_name, field_item in input_cls._meta.fields.items():
if field_name in data:
value = data[field_name]
# handle list of IDs field
if value is not None and is_list_of_ids(field_item):
instances = (
cls.get_nodes_or_error(value, field_name) if value else []
)
cleaned_input[field_name] = instances
# handle ID field
elif value is not None and is_id_field(field_item):
instance = cls.get_node_or_error(info, value, field_name)
cleaned_input[field_name] = instance
# handle uploaded files
elif value is not None and is_upload_field(field_item):
value = info.context.FILES.get(value)
cleaned_input[field_name] = value
# handle other fields
else:
cleaned_input[field_name] = value
return cleaned_input
@classmethod
def _save_m2m(cls, info, instance, cleaned_data):
opts = instance._meta
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, "save_form_data"):
continue
if f.name in cleaned_data and cleaned_data[f.name] is not None:
f.save_form_data(instance, cleaned_data[f.name])
@classmethod
def success_response(cls, instance):
return cls(**{cls._meta.return_field_name: instance, "errors": []})
@classmethod
def save(cls, info, instance, cleaned_input):
instance.save()
@classmethod
def get_instance(cls, info, **data):
object_id = data.get("id")
if object_id:
model_type = registry.get_type_for_model(cls._meta.model)
instance = cls.get_node_or_error(info, object_id, only_type=model_type)
else:
instance = cls._meta.model()
return instance
@classmethod
def perform_mutation(cls, _root, info, **data):
instance = cls.get_instance(info, **data)
data = data.get("input")
cleaned_input = cls.clean_input(info, instance, data)
instance = cls.construct_instance(instance, cleaned_input)
cls.clean_instance(info, instance)
cls.save(info, instance, cleaned_input)
cls._save_m2m(info, instance, cleaned_input)
return cls.success_response(instance)
class ModelDeleteMutation(ModelMutation):
class Meta:
abstract = True
@classmethod
    def clean_instance(cls, info, instance):
        pass
    @classmethod
def perform_mutation(cls, _root, info, **data):
if not cls.check_permissions(info.context):
raise PermissionDenied()
node_id = data.get("id")
model_type = registry.get_type_for_model(cls._meta.model)
instance = cls.get_node_or_error(info, node_id, only_type=model_type)
if instance:
cls.clean_instance(info, instance)
db_id = instance.id
instance.delete()
# After the instance is deleted, set its ID to the original database's
instance.id = db_id
return cls.success_response(instance)
class BaseBulkMutation(BaseMutation):
count = graphene.Int(
required=True, description="Returns how many objects were affected."
)
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(cls, model=None, _meta=None, **kwargs):
if not model:
raise ImproperlyConfigured("model is required for bulk mutation")
if not _meta:
_meta = ModelMutationOptions(cls)
_meta.model = model
super().__init_subclass_with_meta__(_meta=_meta, **kwargs)
@classmethod
    def clean_instance(cls, info, instance):
        pass
    @classmethod
def bulk_action(cls, queryset, **kwargs):
raise NotImplementedError
@classmethod
def perform_mutation(cls, _root, info, ids, **data):
clean_instance_ids, errors = [], {}
if not ids:
return 0, errors
instance_model = cls._meta.model
model_type = registry.get_type_for_model(instance_model)
instances = cls.get_nodes_or_error(ids, "id", model_type)
for instance, node_id in zip(instances, ids):
instance_errors = []
try:
cls.clean_instance(info, instance)
except ValidationError as e:
msg = ". ".join(e.messages)
instance_errors.append(msg)
if not instance_errors:
clean_instance_ids.append(instance.pk)
else:
instance_errors_msg = ". ".join(instance_errors)
ValidationError({node_id: instance_errors_msg}).update_error_dict(
errors
)
if errors:
errors = ValidationError(errors)
count = len(clean_instance_ids)
if count:
qs = instance_model.objects.filter(pk__in=clean_instance_ids)
cls.bulk_action(queryset=qs, **data)
return count, errors
@classmethod
def mutate(cls, root, info, **data):
if not cls.check_permissions(info.context):
raise PermissionDenied()
count, errors = cls.perform_mutation(root, info, **data)
if errors:
return cls.handle_errors(errors, count=count)
return cls(errors=errors, count=count)
class ModelBulkDeleteMutation(BaseBulkMutation):
class Meta:
abstract = True
@classmethod
def bulk_action(cls, queryset):
queryset.delete()
class CreateToken(ObtainJSONWebToken):
errors = graphene.List(graphene.NonNull(Error), required=True)
account_errors = graphene.List(
graphene.NonNull(AccountError),
description="List of errors that occurred executing the mutation.",
required=True,
)
user = graphene.Field(User, description="A user instance.")
@classmethod
def mutate(cls, root, info, **kwargs):
try:
result = super().mutate(root, info, **kwargs)
except JSONWebTokenError as e:
return CreateToken(errors=[Error(message=str(e))])
except ValidationError as e:
errors = validation_error_to_error_type(e)
return cls.handle_typed_errors(errors)
else:
return result
@classmethod
def handle_typed_errors(cls, errors: list):
account_errors = [
AccountError(field=e.field, message=e.message, code=code)
for e, code, _params in errors
]
return cls(errors=[e[0] for e in errors], account_errors=account_errors)
@classmethod
def resolve(cls, root, info, **kwargs):
return cls(user=info.context.user, errors=[], account_errors=[])
class VerifyToken(Verify):
user = graphene.Field(User)
def resolve_user(self, _info, **_kwargs):
username_field = get_user_model().USERNAME_FIELD
kwargs = {username_field: self.payload.get(username_field)}
return models.User.objects.get(**kwargs)
@classmethod
def mutate(cls, root, info, token, **kwargs):
try:
return super().mutate(root, info, token, **kwargs)
except JSONWebTokenError:
return None
| true
| true
|
7905e9a632aa32579c15c353a9b819919c074e03
| 3,985
|
py
|
Python
|
courseraprogramming/commands/config.py
|
andres-zartab/courseraprogramming
|
e50dda898c879a3f45d44da3f8516cd660c74453
|
[
"Apache-2.0"
] | 40
|
2015-09-29T20:26:47.000Z
|
2021-07-13T07:53:23.000Z
|
courseraprogramming/commands/config.py
|
andres-zartab/courseraprogramming
|
e50dda898c879a3f45d44da3f8516cd660c74453
|
[
"Apache-2.0"
] | 59
|
2015-07-27T23:07:00.000Z
|
2020-12-11T06:32:32.000Z
|
courseraprogramming/commands/config.py
|
andres-zartab/courseraprogramming
|
e50dda898c879a3f45d44da3f8516cd660c74453
|
[
"Apache-2.0"
] | 24
|
2015-10-16T14:35:04.000Z
|
2020-10-14T08:40:38.000Z
|
#!/usr/bin/env python
# Copyright 2015 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Coursera's asynchronous grader command line SDK.
You may install it from source, or via pip.
"""
from courseraprogramming.commands import oauth2
import requests
import logging
import time
import sys
def check_auth(args):
"""
Checks courseraprogramming's connectivity to the coursera.org API servers
"""
oauth2_instance = oauth2.build_oauth2(args)
auth = oauth2_instance.build_authorizer()
my_profile_url = (
'https://api.coursera.org/api/externalBasicProfiles.v1?'
'q=me&fields=name'
)
r = requests.get(my_profile_url, auth=auth)
if r.status_code != 200:
logging.error('Received response code %s from the basic profile API.',
r.status_code)
logging.debug('Response body:\n%s', r.text)
sys.exit(1)
try:
external_id = r.json()['elements'][0]['id']
    except Exception:
logging.error(
'Could not parse the external id out of the response body %s',
r.text)
external_id = None
try:
name = r.json()['elements'][0]['name']
    except Exception:
logging.error(
'Could not parse the name out of the response body %s',
r.text)
name = None
if not args.quiet or args.quiet == 0:
print('Name: %s' % name)
print('External ID: %s' % external_id)
if name is None or external_id is None:
sys.exit(1)
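# Typical invocation sketch (assuming the installed console entry point is
# named `courseraprogramming`; see parser() below for the subcommand wiring):
#
#     courseraprogramming configure check-auth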
def display_auth_cache(args):
'''
Writes to the screen the state of the authentication cache. (For debugging
authentication issues.) BEWARE: DO NOT email the output of this command!!!
You must keep the tokens secure. Treat them as passwords.
'''
oauth2_instance = oauth2.build_oauth2(args)
if not args.quiet or args.quiet == 0:
token = oauth2_instance.token_cache['token']
if not args.no_truncate and token is not None:
token = token[:10] + '...'
print("Auth token: %s" % token)
expires_time = oauth2_instance.token_cache['expires']
expires_in = int((expires_time - time.time()) * 10) / 10.0
print("Auth token expires in: %s seconds." % expires_in)
if 'refresh' in oauth2_instance.token_cache:
refresh = oauth2_instance.token_cache['refresh']
if not args.no_truncate and refresh is not None:
refresh = refresh[:10] + '...'
print("Refresh token: %s" % refresh)
else:
print("No refresh token found.")
def parser(subparsers):
"Build an argparse argument parser to parse the command line."
# create the parser for the configure subcommand. (authentication / etc.)
parser_config = subparsers.add_parser(
'configure',
help='Configure %(prog)s for operation!')
config_subparsers = parser_config.add_subparsers()
    # check-auth subcommand of the configure command
parser_check_auth = config_subparsers.add_parser(
'check-auth',
help=check_auth.__doc__)
parser_check_auth.set_defaults(func=check_auth)
parser_local_cache = config_subparsers.add_parser(
'display-auth-cache',
help=display_auth_cache.__doc__)
parser_local_cache.set_defaults(func=display_auth_cache)
parser_local_cache.add_argument(
'--no-truncate',
action='store_true',
help='Do not truncate the keys [DANGER!!]')
return parser_config
| 33.208333
| 78
| 0.658971
|
from courseraprogramming.commands import oauth2
import requests
import logging
import time
import sys
def check_auth(args):
oauth2_instance = oauth2.build_oauth2(args)
auth = oauth2_instance.build_authorizer()
my_profile_url = (
'https://api.coursera.org/api/externalBasicProfiles.v1?'
'q=me&fields=name'
)
r = requests.get(my_profile_url, auth=auth)
if r.status_code != 200:
logging.error('Received response code %s from the basic profile API.',
r.status_code)
logging.debug('Response body:\n%s', r.text)
sys.exit(1)
try:
external_id = r.json()['elements'][0]['id']
    except Exception:
logging.error(
'Could not parse the external id out of the response body %s',
r.text)
external_id = None
try:
name = r.json()['elements'][0]['name']
    except Exception:
logging.error(
'Could not parse the name out of the response body %s',
r.text)
name = None
if not args.quiet or args.quiet == 0:
print('Name: %s' % name)
print('External ID: %s' % external_id)
if name is None or external_id is None:
sys.exit(1)
def display_auth_cache(args):
oauth2_instance = oauth2.build_oauth2(args)
if not args.quiet or args.quiet == 0:
token = oauth2_instance.token_cache['token']
if not args.no_truncate and token is not None:
token = token[:10] + '...'
print("Auth token: %s" % token)
expires_time = oauth2_instance.token_cache['expires']
expires_in = int((expires_time - time.time()) * 10) / 10.0
print("Auth token expires in: %s seconds." % expires_in)
if 'refresh' in oauth2_instance.token_cache:
refresh = oauth2_instance.token_cache['refresh']
if not args.no_truncate and refresh is not None:
refresh = refresh[:10] + '...'
print("Refresh token: %s" % refresh)
else:
print("No refresh token found.")
def parser(subparsers):
parser_config = subparsers.add_parser(
'configure',
help='Configure %(prog)s for operation!')
config_subparsers = parser_config.add_subparsers()
parser_check_auth = config_subparsers.add_parser(
'check-auth',
help=check_auth.__doc__)
parser_check_auth.set_defaults(func=check_auth)
parser_local_cache = config_subparsers.add_parser(
'display-auth-cache',
help=display_auth_cache.__doc__)
parser_local_cache.set_defaults(func=display_auth_cache)
parser_local_cache.add_argument(
'--no-truncate',
action='store_true',
help='Do not truncate the keys [DANGER!!]')
return parser_config
| true
| true
|
hexsha: 7905eb2099a719d5d5701255b92326e433ccbf4f | size: 41177 | ext: py | lang: Python
path: featuremapper/distribution.py | repo: fcr/featuremapper | head: b999110dce9bbbdf4b6dbd2d13bfca1596064c6a | licenses: ["BSD-3-Clause"]
stars: 2 (2018-03-29 to 2019-05-07) | issues: 7 (2016-11-15 to 2019-10-21) | forks: 5 (2015-09-06 to 2018-12-19)
"""
Distribution class
"""
# To do:
#
# - wrap bins for cyclic histograms
# - check use of float() in count_mag() etc
# - clarify comment about negative selectivity
#
# - function to return value in a range (like a real histogram)
# - cache values
# - assumes cyclic axes start at 0: include a shift based on range
#
# - is there a way to make this work for arrays without mentioning
# "array" anywhere in here?
# - should this be two classes: one for the core (which would be
# small though) and another for statistics?
import numpy as np
import param
import cmath
import math
unavailable_scipy_optimize = False
try:
from scipy import optimize
except ImportError:
param.Parameterized().debug("scipy.optimize not available, dummy von Mises fit")
unavailable_scipy_optimize = True
def wrap(lower, upper, x):
"""
Circularly alias the numeric value x into the range [lower,upper).
Valid for cyclic quantities like orientations or hues.
"""
    # I have no idea how I came up with this algorithm; it should be simplified.
#
# Note that Python's % operator works on floats and arrays;
# usually one can simply use that instead. E.g. to wrap array or
# scalar x into 0,2*pi, just use "x % (2*pi)".
axis_range = upper - lower
return lower + (x - lower + 2.0 * axis_range * (1.0 - math.floor(x / (2.0 * axis_range)))) % axis_range
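# Quick sketch of wrap() behaviour (values worked out by hand):
#   wrap(0.0, 360.0, 370.0)  ->  10.0
#   wrap(0.0, 360.0, -10.0)  -> 350.0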
def calc_theta(bins, axis_range):
"""
Convert a bin number to a direction in radians.
Works for NumPy arrays of bin numbers, returning
an array of directions.
"""
return np.exp( (2.0 * np.pi) * bins / axis_range * 1.0j )
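# Sketch: with axis_range=16, bin 0 maps to 1+0j and bin 4 maps a quarter of
# the way around the unit circle, approximately 0+1j:
#   calc_theta(np.array([0.0, 4.0]), 16)  ->  ~[1.+0.j, 0.+1.j]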
class Distribution(object):
"""
Holds a distribution of the values f(x) associated with a variable x.
A Distribution is a histogram-like object that is a dictionary of
samples. Each sample is an x:f(x) pair, where x is called the feature_bin
and f(x) is called the value(). Each feature_bin's value is typically
maintained as the sum of all the values that have been placed into
it.
The feature_bin axis is continuous, and can represent a continuous
quantity without discretization. Alternatively, this class can be
used as a traditional histogram by either discretizing the feature_bin
number before adding each sample, or by binning the values in the
final Distribution.
Distributions are bounded by the specified axis_bounds, and can
either be cyclic (like directions or hues) or non-cyclic. For
cyclic distributions, samples provided outside the axis_bounds
will be wrapped back into the bound range, as is appropriate for
quantities like directions. For non-cyclic distributions,
providing samples outside the axis_bounds will result in a
ValueError.
In addition to the values, can also return the counts, i.e., the
number of times that a sample has been added with the given feature_bin.
Not all instances of this class will be a true distribution in the
mathematical sense; e.g. the values will have to be normalized
before they can be considered a probability distribution.
If keep_peak=True, the value stored in each feature_bin will be the
maximum of all values ever added, instead of the sum. The
distribution will thus be a record of the maximum value
seen at each feature_bin, also known as an envelope.
"""
# Holds the number of times that undefined values have been
# returned from calculations for any instance of this class,
# e.g. calls to vector_direction() or vector_selectivity() when no
# value is non-zero. Useful for warning users when the values are
# not meaningful.
undefined_vals = 0
def __init__(self, axis_bounds, axis_range, cyclic, data, counts, total_count, total_value, theta):
self._data = data
self._counts = counts
# total_count and total_value hold the total number and sum
# (respectively) of values that have ever been provided for
# each feature_bin. For a simple distribution these will be the same as
# sum_counts() and sum_values().
self.total_count = total_count
self.total_value = total_value
self.axis_bounds = axis_bounds
self.axis_range = axis_range
self.cyclic = cyclic
self._pop_store = None
# Cache busy data
self._keys = list(data.keys())
self._values = list(data.values())
self._theta = theta
if self.cyclic:
# Cache the vector sum
self._vector_sum = self._fast_vector_sum(self._values, theta)
else:
self._vector_sum = None
def data(self):
"""
        Return a dictionary with bins as keys.
"""
return self._data
def pop(self, feature_bin):
"""
        Remove the entry for the given feature_bin from the distribution.
"""
if self._pop_store is not None:
raise Exception("Distribution: attempt to pop value before outstanding restore")
self._pop_store = self._data.pop(feature_bin)
self._keys = list(self._data.keys())
self._values = list(self._data.values())
def restore(self, feature_bin):
"""
        Restore the entry for the given feature_bin to the distribution.
Only valid if called after a pop.
"""
if self._pop_store is None:
raise Exception("Distribution: attempt to restore value before pop")
self._data[feature_bin] = self._pop_store
self._pop_store = None
self._keys = list(self._data.keys())
self._values = list(self._data.values())
def vector_sum(self):
"""
Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).
Each feature_bin contributes a vector of length equal to its value, at
a direction corresponding to the feature_bin number. Specifically,
the total feature_bin number range is mapped into a direction range
[0,2pi].
For a cyclic distribution, the avgbinnum will be a continuous
measure analogous to the max_value_bin() of the distribution.
But this quantity has more precision than max_value_bin()
because it is computed from the entire distribution instead of
just the peak feature_bin. However, it is likely to be useful only
for uniform or very dense sampling; with sparse, non-uniform
sampling the estimates will be biased significantly by the
particular samples chosen.
The avgbinnum is not meaningful when the magnitude is 0,
because a zero-length vector has no direction. To find out
whether such cases occurred, you can compare the value of
undefined_vals before and after a series of calls to this
function.
        This method uses cached values when they are available.
"""
if self._vector_sum is None:
            # There is a non-cyclic distribution that is using this.
# Calculate and then cache it
# First check if there is a cached theta. If not derive it.
if self._theta is None:
self._theta = calc_theta(np.array(self._keys), self.axis_range)
self._vector_sum = self._fast_vector_sum(self._values, self._theta)
return self._vector_sum
def _fast_vector_sum(self, values, theta):
"""
Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).
This implementation assumes that the values of the distribution needed for the
vector sum will not be changed and depends on cached values.
"""
# vectors are represented in polar form as complex numbers
v_sum = np.inner(values, theta)
magnitude = abs(v_sum)
direction = cmath.phase(v_sum)
if v_sum == 0:
self.undefined_vals += 1
direction_radians = self._radians_to_bins(direction)
# wrap the direction because arctan2 returns principal values
wrapped_direction = wrap(self.axis_bounds[0], self.axis_bounds[1], direction_radians)
return (magnitude, wrapped_direction)
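    # Worked sketch for _fast_vector_sum: two equal values at bins mapping to
    # opposite points of the circle cancel, giving magnitude 0; such calls
    # increment undefined_vals, because a zero-length vector has no direction.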
def get_value(self, feature_bin):
"""
Return the value of the specified feature_bin.
(Return None if there is no such feature_bin.)
"""
return self._data.get(feature_bin)
def get_count(self, feature_bin):
"""
Return the count from the specified feature_bin.
(Return None if there is no such feature_bin.)
"""
return self._counts.get(feature_bin)
def values(self):
"""
Return a list of values.
Various statistics can then be calculated if desired:
sum(vals) (total of all values)
max(vals) (highest value in any feature_bin)
Note that the feature_bin-order of values returned does not necessarily
match that returned by counts().
"""
return self._values
def counts(self):
"""
        Return a list of counts.
Various statistics can then be calculated if desired:
sum(counts) (total of all counts)
max(counts) (highest count in any feature_bin)
Note that the feature_bin-order of values returned does not necessarily
match that returned by values().
"""
return list(self._counts.values())
def bins(self):
"""
Return a list of bins that have been populated.
"""
return self._keys
    def sub_distr(self, distr):
"""
Subtract the given distribution from the current one.
Only existing bins are modified, new bins in the given
distribution are discarded without raising errors.
Note that total_value and total_count are not affected, and
keep_peak is ignored, therefore analysis relying on these
values should not call this method.
"""
for b in distr.bins():
if b in self.bins():
v = distr._data.get(b)
                if v is not None:
                    self._data[b] -= v
def max_value_bin(self):
"""
Return the feature_bin with the largest value.
        Note that this uses cached values, so pop() and restore() must be
        used first when querying an altered distribution.
"""
return self._keys[np.argmax(self._values)]
def weighted_sum(self):
"""Return the sum of each value times its feature_bin."""
return np.inner(self._keys, self._values)
def value_mag(self, feature_bin):
"""Return the value of a single feature_bin as a proportion of total_value."""
return self._safe_divide(self._data.get(feature_bin), self.total_value)
def count_mag(self, feature_bin):
"""Return the count of a single feature_bin as a proportion of total_count."""
return self._safe_divide(float(self._counts.get(feature_bin)), float(self.total_count))
    # TODO: check whether the float() conversions above are still needed
def _bins_to_radians(self, bin):
"""
Convert a bin number to a direction in radians.
Works for NumPy arrays of bin numbers, returning
an array of directions.
"""
return (2*np.pi)*bin/self.axis_range
def _radians_to_bins(self, direction):
"""
Convert a direction in radians into a feature_bin number.
Works for NumPy arrays of direction, returning
an array of feature_bin numbers.
"""
return direction * self.axis_range / (2 * np.pi)
def _safe_divide(self, numerator, denominator):
"""
Division routine that avoids division-by-zero errors
(returning zero in such cases) but keeps track of them
for undefined_values().
"""
if denominator == 0:
self.undefined_vals += 1
return 0
else:
return numerator/denominator
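# Minimal usage sketch (values illustrative; the constructor expects the
# caller to precompute counts and totals, as documented above):
#
#   data = {0.0: 1.0, 90.0: 3.0}
#   d = Distribution(axis_bounds=(0.0, 180.0), axis_range=180.0, cyclic=False,
#                    data=data, counts={0.0: 1, 90.0: 1}, total_count=2,
#                    total_value=4.0, theta=None)
#   d.max_value_bin()   ->  90.0
#   d.weighted_sum()    -> 270.0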
class Pref(dict):
"""
    This class simply collects named arguments into a dictionary. Its main
    purpose is to make the output of DistributionStatisticFn functions more
    readable. In addition, it traps missing keys, returning None instead of
    raising KeyError.
"""
def __init__(self, **args):
dict.__init__(self, **args)
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return None
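# Example: missing keys return None instead of raising KeyError.
#   p = Pref(preference=0.5)
#   p['preference'], p['selectivity']  ->  (0.5, None)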
class DistributionStatisticFn(param.Parameterized):
"""
Base class for various functions performing statistics on a distribution.
"""
value_scale = param.NumericTuple((0.0, 1.0), doc="""
Scaling of the resulting value of the distribution statistics,
typically the preference of a unit to feature values. The tuple
specifies (offset, multiplier) of the output scaling""")
# APNOTE: previously selectivity_scale[ 1 ] used to be 17, a value suitable
# for combining preference and selectivity in HSV plots. Users wishing to keep
# this value should now set it when creating SheetViews, in commands like that
# in command/analysis.py
selectivity_scale = param.NumericTuple((0.0, 1.0), doc="""
Scaling of the resulting measure of the distribution peakedness,
typically the selectivity of a unit to its preferred feature value.
The tuple specifies (offset, multiplier) of the output scaling""")
__abstract = True
def __call__(self, distribution):
"""
Apply the distribution statistic function; must be implemented by subclasses.
        Subclasses should be called with a Distribution as argument; the
        return value will be a dictionary with Pref objects as values.
"""
raise NotImplementedError
class DescriptiveStatisticFn(DistributionStatisticFn):
"""
Abstract class for basic descriptive statistics
"""
def vector_sum(self, d):
"""
Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).
Each bin contributes a vector of length equal to its value, at
a direction corresponding to the bin number. Specifically,
the total bin number range is mapped into a direction range
[0,2pi].
For a cyclic distribution, the avgbinnum will be a continuous
measure analogous to the max_value_bin() of the distribution.
But this quantity has more precision than max_value_bin()
because it is computed from the entire distribution instead of
just the peak bin. However, it is likely to be useful only
for uniform or very dense sampling; with sparse, non-uniform
sampling the estimates will be biased significantly by the
particular samples chosen.
The avgbinnum is not meaningful when the magnitude is 0,
because a zero-length vector has no direction. To find out
whether such cases occurred, you can compare the value of
undefined_vals before and after a series of calls to this
function.
This is a slow algorithm and should only be used if the
contents of the distribution have been changed by the statistical
function.
If not, then the cached value in the distribution should be used.
"""
# vectors are represented in polar form as complex numbers
h = d.data()
theta = calc_theta(np.array(list(h.keys())), d.axis_range)
return d._fast_vector_sum(list(h.values()), theta)
    def _weighted_average(self, d):
"""
Return the weighted_sum divided by the sum of the values
"""
return d._safe_divide(d.weighted_sum(), sum(d.values()))
def selectivity(self, d):
"""
Return a measure of the peakedness of the distribution. The
calculation differs depending on whether this is a cyclic
variable. For a cyclic variable, returns the magnitude of the
vector_sum() divided by the sum_value() (see
_vector_selectivity for more details). For a non-cyclic
variable, returns the max_value_bin()) as a proportion of the
sum_value() (see _relative_selectivity for more details).
"""
        if d.cyclic:
return self._vector_selectivity(d)
else:
return self._relative_selectivity(d)
# CEBHACKALERT: the definition of selectivity for non-cyclic
# quantities probably needs some more thought.
# Additionally, this fails the test in testfeaturemap
# (see the comment there).
def _relative_selectivity(self, d):
"""
Return max_value_bin()) as a proportion of the sum_value().
This quantity is a measure of how strongly the distribution is
biased towards the max_value_bin(). For a smooth,
single-lobed distribution with an inclusive, non-cyclic range,
this quantity is an analog to vector_selectivity. To be a
precise analog for arbitrary distributions, it would need to
compute some measure of the selectivity that works like the
weighted_average() instead of the max_value_bin(). The result
is scaled such that if all bins are identical, the selectivity
is 0.0, and if all bins but one are zero, the selectivity is
1.0.
"""
# A single feature_bin is considered fully selective (but could also
# arguably be considered fully unselective)
if len(d.data()) <= 1:
return 1.0
proportion = d._safe_divide(max(d.values()), sum(d.values()))
offset = 1.0/len(d.values())
scaled = (proportion-offset) / (1.0-offset)
# negative scaled is possible
# e.g. 2 bins, with values that sum to less than 0.5
# this probably isn't what should be done in those cases
if scaled >= 0.0:
return scaled
else:
return 0.0
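    # Worked sketch: for two equal bins {a: 1.0, b: 1.0} the proportion is 0.5
    # and the offset 1/2, so the scaled selectivity is 0.0; for {a: 1.0,
    # b: 0.0} the proportion is 1.0 and the selectivity is 1.0, as documented.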
def _vector_selectivity(self, d):
"""
Return the magnitude of the vector_sum() divided by the sum_value().
This quantity is a vector-based measure of the peakedness of
the distribution. If only a single feature_bin has a non-zero value(),
the selectivity will be 1.0, and if all bins have the same
value() then the selectivity will be 0.0. Other distributions
will result in intermediate values.
For a distribution with a sum_value() of zero (i.e. all bins
empty), the selectivity is undefined. Assuming that one will
usually be looking for high selectivity, we return zero in such
a case so that high selectivity will not mistakenly be claimed.
To find out whether such cases occurred, you can compare the
value of undefined_values() before and after a series of
calls to this function.
"""
return d._safe_divide(d.vector_sum()[0], sum(d.values()))
__abstract = True
class DescriptiveBimodalStatisticFn(DescriptiveStatisticFn):
"""
    Abstract class for descriptive statistics of two-mode distributions
"""
def second_max_value_bin(self, d):
"""
Return the feature_bin with the second largest value.
        If there is only one feature_bin, return it. This is not a correct
        result, but it is practical for plotting compatibility, and it will
        not mistakenly be claimed as a secondary maximum, since its
        selectivity is forced to 0.0.
"""
if len(d.bins()) <= 1:
return d.bins()[0]
k = d.max_value_bin()
d.pop(k)
m = d.max_value_bin()
d.restore(k)
return m
def second_selectivity(self, d):
"""
Return the selectivity of the second largest value in the distribution.
        If there is only one feature_bin, the selectivity is 0, since there is
        no second peak at all; this value is also used to establish the
        validity of second_max_value_bin().
        Selectivity is computed in two ways depending on whether the variable
        is cyclic, as in selectivity().
"""
        if len(d._data) <= 1:
return 0.0
        if d.cyclic:
return self._vector_second_selectivity(d)
else:
return self._relative_second_selectivity(d)
def _relative_second_selectivity(self, d):
"""
        Return the value of the second maximum as a proportion of the
        sum_value(); see _relative_selectivity() for further details.
"""
k = d.max_value_bin()
d.pop(k)
m = max(d.values())
d.restore(k)
proportion = d._safe_divide(m, sum(d.values()))
offset = 1.0 / len(d.data())
scaled = (proportion - offset) / (1.0 - offset)
return max(scaled, 0.0)
def _vector_second_selectivity(self, d):
"""
        Return the magnitude of the vector_sum() of all bins excluding the
        maximum one, divided by the sum_value(); see _vector_selectivity()
        for further details.
"""
k = d.max_value_bin()
d.pop(k)
s = self.vector_sum(d)[0]
d.restore(k)
        return d._safe_divide(s, sum(d.values()))
def second_peak_bin(self, d):
"""
Return the feature_bin with the second peak in the distribution.
        Unlike second_max_value_bin(), it does not return a feature_bin holding
        the second-largest value if that value lies on a wing of the first
        peak; the second peak is returned only if the distribution is truly
        multimodal. If it isn't, return the first peak (for compatibility with
        numpy array types and plotting), but the corresponding selectivity
        will be forced to 0.0.
"""
h = d.data()
l = len(h)
if l <= 1:
            return d.bins()[0]
ks = list(h.keys())
ks.sort()
        # index of the main peak within the sorted bin keys
        ik0 = ks.index(d.max_value_bin())
k0 = ks[ik0]
v0 = h[k0]
v = v0
k = k0
ik = ik0
        # walk clockwise from the main peak while values are non-increasing;
        # stop at the first rising bin (ik1), or conclude unimodality if we
        # come full circle
        while h[k] <= v:
ik += 1
if ik >= l:
ik = 0
if ik == ik0:
return k0
v = h[k]
k = ks[ik]
ik1 = ik
v = v0
k = k0
ik = ik0
        # walk counter-clockwise from the main peak in the same way (ik2)
        while h[k] <= v:
ik -= 1
if ik < 0:
ik = l - 1
if ik == ik0:
return k0
v = h[k]
k = ks[ik]
ik2 = ik
if ik1 == ik2:
return ks[ik1]
        # scan the region between the two valleys (opposite the main peak)
        # for its largest value: that bin is the second peak
        ik = ik1
        m = 0
        im = ik1    # fallback in case every value in the region is zero
while ik != ik2:
k = ks[ik]
if h[k] > m:
m = h[k]
im = ik
ik += 1
if ik >= l:
ik = 0
return ks[im]
def second_peak_selectivity(self, d):
"""
Return the selectivity of the second peak in the distribution.
        If the distribution has only one peak, return 0.0; this value is also
        useful for establishing the validity of second_peak_bin().
"""
        l = len(d.bins())
if l <= 1:
return 0.0
p1 = d.max_value_bin()
p2 = self.second_peak_bin(d)
if p1 == p2:
return 0.0
m = d.get_value(p2)
proportion = d._safe_divide(m, sum(d.values()))
offset = 1.0 / l
scaled = (proportion - offset) / (1.0 - offset)
return max(scaled, 0.0)
def second_peak(self, d):
"""
Return preference and selectivity of the second peak in the distribution.
        It is just the combination of second_peak_bin() and
        second_peak_selectivity(), with the advantage of avoiding a duplicate
        call of second_peak_bin() when the user is interested in both
        preference and selectivity, as is often the case.
"""
        l = len(d.bins())
if l <= 1:
            return (d.bins()[0], 0.0)
p1 = d.max_value_bin()
p2 = self.second_peak_bin(d)
if p1 == p2:
return (p1, 0.0)
m = d.get_value(p2)
proportion = d._safe_divide(m, sum(d.values()))
offset = 1.0 / l
scaled = (proportion - offset) / (1.0 - offset)
return (p2, max(scaled, 0.0))
__abstract = True
class DSF_MaxValue(DescriptiveStatisticFn):
"""
    Return the preference and selectivity corresponding to the peak value of
    the given distribution.
"""
def __call__(self, d):
p = self.value_scale[1] * (d.max_value_bin() + self.value_scale[0])
s = self.selectivity_scale[1] * (self.selectivity(d)+self.selectivity_scale[0])
return {"": Pref(preference=p, selectivity=s)}
class DSF_WeightedAverage(DescriptiveStatisticFn):
"""
    Return the main mode of the given distribution.
    The preference value is a continuous, interpolated equivalent of the
    max_value_bin().
For a cyclic distribution, this is the direction of the vector
sum (see vector_sum()).
For a non-cyclic distribution, this is the arithmetic average
of the data on the bin_axis, where each feature_bin is weighted by its
value.
Such a computation will generally produce much more precise maps using
fewer test stimuli than the discrete method. However, weighted_average
methods generally require uniform and full-range sampling, which is not
always feasible.
For measurements at evenly-spaced intervals over the full range of
possible parameter values, weighted_averages are a good measure of the
underlying continuous-valued parameter preference, assuming that neurons
are tuned broadly enough (and/or sampled finely enough) that they
respond to at least two of the tested parameter values. This method
will not usually give good results when those criteria are not met, i.e.
if the sampling is too sparse, not at evenly-spaced intervals, or does
not cover the full range of possible values. In such cases
max_value_bin should be used, and the number of test patterns will
usually need to be increased instead.
"""
def __call__(self, d):
p = d.vector_sum()[1] if d.cyclic else self._weighted_average(d)
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (self.selectivity(d) + self.selectivity_scale[0])
return {"": Pref(preference=p, selectivity=s)}
class DSF_TopTwoValues(DescriptiveBimodalStatisticFn):
"""
    Return the two max values of the given distribution
"""
def __call__(self, d):
r = {}
p = self.value_scale[1] * (d.max_value_bin() + self.value_scale[0])
s = self.selectivity_scale[1] * (self.selectivity(d) + self.selectivity_scale[0])
r[""] = Pref(preference=p, selectivity=s)
p = self.second_max_value_bin(d)
s = self.second_selectivity(d)
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (s + self.selectivity_scale[0])
r["Mode2"] = Pref(preference=p, selectivity=s)
return r
class DSF_BimodalPeaks(DescriptiveBimodalStatisticFn):
"""
    Return the two peak values of the given distribution
"""
def __call__(self, d):
r = {}
p = self.value_scale[1] * (d.max_value_bin() + self.value_scale[0])
s = self.selectivity_scale[1] * (self.selectivity(d) + self.selectivity_scale[0])
r[""] = Pref(preference=p, selectivity=s)
p, s = self.second_peak(d)
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (s + self.selectivity_scale[0])
r["Mode2"] = Pref(preference=p, selectivity=s)
return r
class VonMisesStatisticFn(DistributionStatisticFn):
"""
Base class for von Mises statistics
"""
# values to fit the maximum value of k parameter in von Mises distribution,
# as a function of the number of bins in the distribution. Useful for
# keeping selectivity in range 0..1. Values derived offline from distribution
# with a single active feature_bin, and total bins from 8 to 32
vm_kappa_fit = (0.206, 0.614)
# level of activity in units confoundable with noise. Used in von Mises fit,
# for two purposes: if the standard deviation of a distribution is below this
# value, the distribution is assumed to lack any mode; it is the maximum level
# of random noise added to a distribution before the fit optimization, for
# stability reasons
noise_level = 0.001
    # exit code of the distribution fit function. Codes are function-specific;
    # each fit function that provides exit codes should have a corresponding
    # string translation
fit_exit_code = 0
user_warned_if_unavailable = False
__abstract = True
def _orth(self, t):
"""
Return the orthogonal orientation
"""
if t < 0.5 * np.pi:
return t + 0.5 * np.pi
return t - 0.5 * np.pi
def _in_pi(self, t):
"""
Reduce orientation from -pi..2pi to 0..pi
"""
if t > np.pi:
return t - np.pi
if t < 0:
return t + np.pi
return t
def von_mises(self, pars, x):
"""
Compute a simplified von Mises function.
Original formulation in Richard von Mises, "Wahrscheinlichkeitsrechnung
und ihre Anwendungen in der Statistik und theoretischen Physik", 1931,
Deuticke, Leipzig; see also Mardia, K.V. and Jupp, P.E., " Directional
Statistics", 1999, J. Wiley, p.36;
http://en.wikipedia.org/wiki/Von_Mises_distribution
The two differences are that this function is a continuous probability
distribution on a semi-circle, while von Mises is on the full circle,
and that the normalization factor, which is the inverse of the modified
Bessel function of first kind and 0 degree in the original, is here a fit parameter.
"""
a, k, t = pars
return a * np.exp(k * (np.cos(2 * (x - t)) - 1))
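    # Sketch: at x == t the cosine term is 1, the exponent collapses to 0, and
    # the function returns the amplitude, e.g. von_mises((2.0, k, t), t) == 2.0
    # for any k and t.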
def von2_mises(self, pars, x):
"""
        Compute a simplified bimodal von Mises function:
        two superposed von Mises functions with different peak and bandwidth
        values.
"""
        p1 = pars[:3]
p2 = pars[3:]
return self.von_mises(p1, x) + self.von_mises(p2, x)
def von_mises_res(self, pars, x, y):
return y - self.von_mises(pars, x)
def von2_mises_res(self, pars, x, y):
return y - self.von2_mises(pars, x)
def norm_sel(self, k, n):
m = (self.vm_kappa_fit[0] + n * self.vm_kappa_fit[1])**2
return np.log(1 + k) / np.log(1 + m)
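    # Sketch: k equal to the fitted maximum m (vm_kappa_fit applied to n bins)
    # yields a selectivity of 1.0, while k == 0 yields 0.0.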
def fit_vm(self, distribution):
"""
        Compute the best fit of the monovariate von Mises function in the
        semi-circle.
        Return a tuple with the orientation preference, in the same range as
        axis_bounds, the orientation selectivity, and an estimate of the
        goodness-of-fit, as the variance of the predicted orientation
        preference. The selectivity is given by the bandwidth parameter of the
        von Mises function, modified for compatibility with other selectivity
        computations in this class. The bandwidth parameter is transformed to
        a logarithmic scale and normalized by the maximum value for the number
        of bins in the distribution, in order to give roughly 1.0 for a
        distribution with one feature_bin at 1.0 and all the others at 0.0,
        and 0.0 for uniform distributions. The normalizing factor of the
        selectivity is fit for the total number of bins, using fit parameters
        computed offline.
        There are conditions that prevent a priori any fit of the
        distribution:
            * not enough bins: at least 5 are necessary
            * the distribution is too flat, below the noise level
        and conditions for a posteriori failures:
            * "ier" flag returned by leastsq outside of (1, 2, 3, 4)
            * no estimated Jacobian around the solution
            * negative bandwidth (the peak of the distribution is convex)
        Note that these are only minimal conditions; their fulfillment does
        not guarantee unimodality, and it is up to the user to check the
        goodness-of-fit value before accepting the fit.
"""
if unavailable_scipy_optimize:
if not VonMisesStatisticFn.user_warned_if_unavailable:
param.Parameterized().warning("scipy.optimize not available, dummy von Mises fit")
                VonMisesStatisticFn.user_warned_if_unavailable = True
self.fit_exit_code = 3
return 0, 0, 0
to_pi = np.pi / distribution.axis_range
x = to_pi * np.array(distribution.bins())
n = len(x)
if n < 5:
param.Parameterized().warning("No von Mises fit possible with less than 4 bins")
self.fit_exit_code = -1
return 0, 0, 0
y = np.array(distribution.values())
if y.std() < self.noise_level:
self.fit_exit_code = 1
return 0, 0, 0
rn = self.noise_level * np.random.random_sample(y.shape)
p0 = (1.0, 1.0, distribution.max_value_bin())
r = optimize.leastsq(self.von_mises_res, p0, args=(x, y + rn),
full_output=True)
        if r[-1] not in (1, 2, 3, 4):
self.fit_exit_code = 100 + r[-1]
return 0, 0, 0
        residuals = r[2]['fvec']
        jacobian = r[1]
        bandwidth = r[0][1]
        tuning = r[0][2]
        if bandwidth < 0:
            self.fit_exit_code = 1
            return 0, 0, 0
        if jacobian is None:
            self.fit_exit_code = 2
            return 0, 0, 0
        error = (residuals**2).sum() / (n - len(p0))
        covariance = jacobian * error
        g = covariance[2, 2]
        p = self._in_pi(tuning) / to_pi
        s = self.norm_sel(bandwidth, n)
self.fit_exit_code = 0
return p, s, g
def vm_fit_exit_codes(self):
        if self.fit_exit_code == 0:
            return "successful exit"
        if self.fit_exit_code == -1:
            return "not enough bins for this fit"
        if self.fit_exit_code == 1:
            return "flat distribution"
        if self.fit_exit_code == 2:
            return "no estimated Jacobian around the solution"
        if self.fit_exit_code == 3:
            return "missing scipy.optimize import"
        if self.fit_exit_code > 110:
            return "unknown exit code"
        if self.fit_exit_code > 100:
            return "error " + str(self.fit_exit_code - 100) + " in scipy.optimize.leastsq"
        return "unknown exit code"
def fit_v2m(self, distribution):
"""
        Compute the best fit of the bivariate von Mises function in the
        semi-circle.
Return the tuple:
(
orientation1_preference, orientation1_selectivity, goodness_of_fit1,
orientation2_preference, orientation2_selectivity, goodness_of_fit2
)
        See fit_vm() for considerations about selectivity and goodness_of_fit.
"""
null = 0, 0, 0, 0, 0, 0
if unavailable_scipy_optimize:
if not VonMisesStatisticFn.user_warned_if_unavailable:
param.Parameterized().warning("scipy.optimize not available, dummy von Mises fit")
                VonMisesStatisticFn.user_warned_if_unavailable = True
self.fit_exit_code = 3
return null
to_pi = np.pi / distribution.axis_range
x = to_pi * np.array(distribution.bins())
n = len(x)
if n < 9:
param.Parameterized().warning( "no bimodal von Mises fit possible with less than 8 bins" )
self.fit_exit_code = -1
return null
y = np.array(distribution.values())
if y.std() < self.noise_level:
self.fit_exit_code = 1
return null
rn = self.noise_level * np.random.random_sample(y.shape)
t0 = distribution.max_value_bin()
p0 = (1.0, 1.0, t0, 1.0, 1.0, self._orth(t0))
r = optimize.leastsq(self.von2_mises_res, p0, args=(x, y + rn),
full_output=True)
        if r[-1] not in (1, 2, 3, 4):
self.fit_exit_code = 100 + r[-1]
return null
        residuals = r[2]['fvec']
        jacobian = r[1]
        bandwidth_1 = r[0][1]
        tuning_1 = r[0][2]
        bandwidth_2 = r[0][4]
        tuning_2 = r[0][5]
        if jacobian is None:
            self.fit_exit_code = 2
            return null
        if bandwidth_1 < 0:
            self.fit_exit_code = 1
            return null
        if bandwidth_2 < 0:
            self.fit_exit_code = 1
            return null
        error = (residuals ** 2).sum() / (n - len(p0))
        covariance = jacobian * error
        g1 = covariance[2, 2]
        g2 = covariance[5, 5]
        p1 = self._in_pi(tuning_1) / to_pi
        p2 = self._in_pi(tuning_2) / to_pi
        s1 = self.norm_sel(bandwidth_1, n)
        s2 = self.norm_sel(bandwidth_2, n)
self.fit_exit_code = 0
return p1, s1, g1, p2, s2, g2
def __call__(self, distribution):
"""
Apply the distribution statistic function; must be implemented by subclasses.
"""
raise NotImplementedError
class DSF_VonMisesFit(VonMisesStatisticFn):
"""
    Return the main mode of the given distribution, by fitting a von Mises function.
"""
worst_fit = param.Number(default=0.5, bounds=(0.0, None), softbounds=(0.0, 1.0), doc="""
        Worst goodness-of-fit value for accepting the distribution as monomodal""")
# default result in case of failure of the fit
null_result = {"": Pref(preference=0, selectivity=0, goodness_of_fit=0),
"Modes": Pref(number=0)}
def __call__(self, distribution):
f = self.fit_vm(distribution)
if self.fit_exit_code != 0 or f[-1] > self.worst_fit:
return self.null_result
results = {}
p, s, g = f
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (s + self.selectivity_scale[0])
results[""] = Pref(preference=p, selectivity=s, goodness_of_fit=g)
results["Modes"] = Pref(number=1)
return results
class DSF_BimodalVonMisesFit(VonMisesStatisticFn):
"""
    Return the two modes of the given distribution, by fitting a sum of two
    von Mises functions.
    The results for the main mode are returned under the "" key and those for
    the second mode under the "Mode2" key, each as a Pref with keys
    preference, selectivity, and goodness_of_fit.
"""
worst_fit = param.Number(default=0.5, bounds=(0.0, None), softbounds=(0.0, 1.0), doc="""
        Worst goodness-of-fit value for accepting the distribution as mono- or bi-modal""")
# default result in case of failure of the fit
null_result = {
"": Pref(preference=0, selectivity=0, goodness_of_fit=0),
"Mode2": Pref(preference=0, selectivity=0, goodness_of_fit=0),
"Modes": Pref(number=0)
}
def _analyze_distr(self, d):
"""
Analyze the given distribution with von Mises bimodal fit.
The distribution is analyzed with both unimodal and bimodal fits, and a
decision about the number of modes is made by comparing the goodness of
fit. It is a quick but inaccurate way of estimating the number of modes.
Return preference, selectivity, goodness of fit for both modes, and the
        estimated number of modes, or None if even the unimodal fit failed. If
        the distribution is unimodal, values of the second mode are set to 0.
        The main mode is always the one with the largest selectivity (von
        Mises bandwidth).
"""
no1 = False
f = self.fit_vm(d)
if self.fit_exit_code != 0:
no1 = True
p, s, g = f
f2 = self.fit_v2m(d)
if self.fit_exit_code != 0 or f2[2] > self.worst_fit:
if no1 or f[-1] > self.worst_fit:
return None
return p, s, g, 0, 0, 0, 1
p1, s1, g1, p2, s2, g2 = f2
if g1 > g:
return p, s, g, 0, 0, 0, 1
if s2 > s1:
return p2, s2, g2, p1, s1, g1, 2
return p1, s1, g1, p2, s2, g2, 2
def __call__(self, distribution):
f = self._analyze_distr(distribution)
if f is None:
return self.null_result
results = {}
p, s, g = f[: 3]
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (s + self.selectivity_scale[0])
results[""] = Pref(preference=p, selectivity=s, goodness_of_fit=g)
p, s, g, n = f[3:]
p = self.value_scale[1] * (p + self.value_scale[0])
s = self.selectivity_scale[1] * (s + self.selectivity_scale[0])
results["Mode2"] = Pref(preference=p, selectivity=s, goodness_of_fit=g)
results["Modes"] = Pref(number=n)
return results
hexsha: 7905edaab8ffaa2ad86e256c81f61f43552bccf1 | size: 1189 | ext: py | lang: Python
path: .circleci/scripts/pre_commit_readme_extra.py | repo: astronomer/astronomer-providers | head: e19c656daab19f3e881f140495e2184c16eaafe0 | licenses: ["Apache-2.0"]
stars: 27 (2022-03-02 to 2022-03-30) | issues: 92 (2022-03-02 to 2022-03-31) | forks: 2 (2022-03-07 to 2022-03-18)
#!/usr/bin/env python3
"""Pre-commit hook to verify that all extras are documented in README.rst"""
import configparser
import re
from pathlib import Path
repo_dir = Path(__file__).parent.parent.parent
config = configparser.ConfigParser(strict=False)
config.read(repo_dir / "setup.cfg")
extra_to_exclude = {"tests", "mypy", "docs"}
all_extras = set(config["options.extras_require"].keys()) - extra_to_exclude
readme_path = repo_dir / "README.rst"
extra_doc = """
.. list-table::
:header-rows: 1
* - Extra Name
- Installation Command
- Dependencies
"""
for extra in sorted(all_extras):
extra_doc += f"""
* - ``{extra}``
- ``pip install 'astronomer-providers[{extra}]'``
- {extra.replace(".", " ").title()}
"""
with open(readme_path, "r") as readme_file:
readme_contents = readme_file.read()
new_readme_text = re.sub(
r".. EXTRA_DOC_START([\s\S]*).. EXTRA_DOC_END",
f".. EXTRA_DOC_START{extra_doc}\n.. EXTRA_DOC_END",
readme_contents,
flags=re.MULTILINE,
)
if new_readme_text != readme_contents:
with open(readme_path, "w") as readme_file:
readme_file.write(new_readme_text)
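# Illustrative sketch of the markers this hook expects in README.rst (the
# content between them is regenerated on every run):
#
#   .. EXTRA_DOC_START
#      (generated list-table of extras)
#   .. EXTRA_DOC_END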
hexsha: 7905ee74f070737c45e294353b215f55a3fb7b86 | size: 20260 | ext: py | lang: Python
path: tensorflow/python/training/checkpoint_utils.py | repo: KodeWorker/tensorflow | head: a7f91fd5ce53253ab4bfd6448886028a085e0ddf | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
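# --- Hedged usage sketch (not part of the original module): inspecting a
# checkpoint with the helpers above. The checkpoint path is hypothetical.
def _example_inspect_checkpoint(ckpt_dir_or_file="/tmp/model.ckpt"):
  variables_in_ckpt = list_variables(ckpt_dir_or_file)
  for var_name, shape in variables_in_ckpt:
    print(var_name, shape)
  # Load a single tensor's value as a numpy ndarray.
  first_name = variables_in_ckpt[0][0]
  return load_variable(ckpt_dir_or_file, first_name)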
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
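# --- Hedged usage sketch (not part of the original module): a polling
# evaluation loop built on checkpoints_iterator. `training_finished` and
# `eval_fn` are hypothetical callables supplied by the caller.
def _example_eval_loop(checkpoint_dir, training_finished, eval_fn):
  for ckpt_path in checkpoints_iterator(checkpoint_dir,
                                        min_interval_secs=60,
                                        timeout=600,
                                        timeout_fn=training_finished):
    eval_fn(ckpt_path)  # the loop ends once timeout_fn reports training done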
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
  # --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: `SaverDef` write version selecting the restore op
      (`V1`/`V2` use `restore_v2`; `DIT` uses `restore_dit`).
  """
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    # +++ DIT: check for restore_dit
    # `_set_checkpoint_initializer` is a module-level function, so the format
    # check must use the `write_version` argument (there is no `self` here).
    if write_version in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
      restore_op = io_ops.restore_v2(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
    # --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
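# --- Hedged sketch (appended for illustration, not part of the original
# file): `_collect_partitioned_variable` only uses `in` and item access, so a
# plain dict can stand in for the variable store.
if __name__ == "__main__":
  _fake_store = {"w/part_0": "v0", "w/part_1": "v1", "b": "v"}
  assert _collect_partitioned_variable("w", _fake_store) == ["v0", "v1"]
  assert _collect_partitioned_variable("b", _fake_store) is None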
| 41.516393
| 104
| 0.699161
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
  # --- DIT: default write_version=saver_pb2.SaverDef.DIT
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
  # to be on CPU as well. It is okay to place the variable's initializer op on
  # CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
    if write_version in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
      restore_op = io_ops.restore_v2(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
if isinstance(variable_or_list, (list, tuple)):
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
| true
| true
|
7905eed07b7ffa5affb2cc2395fe6035e0323680
| 787
|
py
|
Python
|
Chatbot_investment/chatbot/investment_bot/migrations/0006_section_deduction.py
|
dreamvrutik/Investment-Chatbot
|
aae7f9a500a2ac1f7d9a310b2eb5334f18e547fc
|
[
"MIT"
] | 5
|
2019-07-12T10:48:28.000Z
|
2020-01-02T11:55:43.000Z
|
Chatbot_investment/chatbot/investment_bot/migrations/0006_section_deduction.py
|
dreamvrutik/Investment-Chatbot
|
aae7f9a500a2ac1f7d9a310b2eb5334f18e547fc
|
[
"MIT"
] | null | null | null |
Chatbot_investment/chatbot/investment_bot/migrations/0006_section_deduction.py
|
dreamvrutik/Investment-Chatbot
|
aae7f9a500a2ac1f7d9a310b2eb5334f18e547fc
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-07-10 04:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('investment_bot', '0005_amount_restrictions'),
]
operations = [
migrations.CreateModel(
name='Section_Deduction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.CharField(max_length=10)),
('employee_code', models.CharField(max_length=100)),
('section_id', models.CharField(max_length=100)),
('subsection_id', models.CharField(max_length=100)),
('amount', models.IntegerField()),
],
),
]
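# --- Hedged sketch (not part of the migration): the equivalent models.py
# declaration, read directly from the CreateModel operation above.
#
#   class Section_Deduction(models.Model):
#       year = models.CharField(max_length=10)
#       employee_code = models.CharField(max_length=100)
#       section_id = models.CharField(max_length=100)
#       subsection_id = models.CharField(max_length=100)
#       amount = models.IntegerField()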
| 31.48
| 114
| 0.584498
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('investment_bot', '0005_amount_restrictions'),
]
operations = [
migrations.CreateModel(
name='Section_Deduction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.CharField(max_length=10)),
('employee_code', models.CharField(max_length=100)),
('section_id', models.CharField(max_length=100)),
('subsection_id', models.CharField(max_length=100)),
('amount', models.IntegerField()),
],
),
]
| true
| true
|
7905eefd15e94ab3709bbdedb8181f2b1681d86c
| 686
|
py
|
Python
|
CSDP/teachers/forms.py
|
Ravyo/Department-Portal
|
509f1426c785653499f49e7afdc882fe1afbe9a1
|
[
"bzip2-1.0.6"
] | null | null | null |
CSDP/teachers/forms.py
|
Ravyo/Department-Portal
|
509f1426c785653499f49e7afdc882fe1afbe9a1
|
[
"bzip2-1.0.6"
] | null | null | null |
CSDP/teachers/forms.py
|
Ravyo/Department-Portal
|
509f1426c785653499f49e7afdc882fe1afbe9a1
|
[
"bzip2-1.0.6"
] | null | null | null |
from django import forms
class AddTeacherForm(forms.Form):
name = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'teacher name'
}))
teacher_pin = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'enter unique id'
}))
designation = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control'
}))
joined = forms.DateField(widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'yyyy-mm-dd'
}))
phone = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control'
}))
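# --- Hedged usage sketch (not part of the original module): validating the
# form in a view. The view name, template path, and persistence step are
# hypothetical.
from django.shortcuts import render

def add_teacher(request):
    form = AddTeacherForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        data = form.cleaned_data  # {'name': ..., 'teacher_pin': ..., ...}
        # saving `data` to a teacher model is omitted; no model is shown here
    return render(request, 'teachers/add_teacher.html', {'form': form})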
| 29.826087
| 64
| 0.620991
|
from django import forms
class AddTeacherForm(forms.Form):
name = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'teacher name'
}))
teacher_pin = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'enter unique id'
}))
designation = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control'
}))
joined = forms.DateField(widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'yyyy-mm-dd'
}))
phone = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control'
}))
| true
| true
|
7905ef1c2dea948435c93fdd7beb5937b06bf7ef
| 1,821
|
py
|
Python
|
facial_recog/tests/test_app.py
|
MePsyDuck/amfr
|
691d9270dece8846a5bbb64ba4f2bdea95ee05e5
|
[
"MIT"
] | null | null | null |
facial_recog/tests/test_app.py
|
MePsyDuck/amfr
|
691d9270dece8846a5bbb64ba4f2bdea95ee05e5
|
[
"MIT"
] | null | null | null |
facial_recog/tests/test_app.py
|
MePsyDuck/amfr
|
691d9270dece8846a5bbb64ba4f2bdea95ee05e5
|
[
"MIT"
] | null | null | null |
import unittest
from facial_recog.app import *
from .test_config import test_run_count, seed, success_perc
from .test_util import *
class TestFR(unittest.TestCase):
subject_names = dict()
subject_classes = dict()
def setUp(self):
random.seed(seed)
create_app_dirs()
setup_logger()
logging.debug('Seed is %s', seed)
# only for super strict testing
# clear_fdb()
prepare_fdb()
self.subject_names, self.subject_classes = create_sample()
logging.info('Subject names: %s', self.subject_names)
logging.info('Subject classes are: %s', self.subject_classes)
recreate_db()
populate_db(self.subject_classes)
logging.info('New db created')
clear_dataset()
copy_dataset(subject_names=self.subject_names)
logging.info('Training Dataset created')
clear_recognizers()
for class_id in get_all_classes():
train(class_id=class_id)
logging.info('Classifiers trained')
def test_fr(self):
success = 0
for _ in range(test_run_count):
random_class = random.choice(get_all_classes())
random_subject = random.choice(get_class_subjects(random_class))
random_image = random.choice(
get_images_for_subject(subject_name=self.subject_names[random_subject]))
logging.info('Testing subject %s in class %s with image %s', random_subject, random_class, random_image)
if predict(img=path_to_img(random_image), class_id=random_class) == random_subject:
success += 1
logging.info('Test success')
else:
logging.warning('Test failed')
self.assertGreaterEqual(success, int(success_perc * test_run_count))
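# --- Hedged note (not part of the original file): the suite is plain
# unittest, so it can be run directly; `test_run_count` and `success_perc`
# from test_config set how many random predictions run and the required pass
# fraction asserted above.
if __name__ == '__main__':
    unittest.main()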
| 30.864407
| 116
| 0.645799
|
import unittest
from facial_recog.app import *
from .test_config import test_run_count, seed, success_perc
from .test_util import *
class TestFR(unittest.TestCase):
subject_names = dict()
subject_classes = dict()
def setUp(self):
random.seed(seed)
create_app_dirs()
setup_logger()
logging.debug('Seed is %s', seed)
prepare_fdb()
self.subject_names, self.subject_classes = create_sample()
logging.info('Subject names: %s', self.subject_names)
logging.info('Subject classes are: %s', self.subject_classes)
recreate_db()
populate_db(self.subject_classes)
logging.info('New db created')
clear_dataset()
copy_dataset(subject_names=self.subject_names)
logging.info('Training Dataset created')
clear_recognizers()
for class_id in get_all_classes():
train(class_id=class_id)
logging.info('Classifiers trained')
def test_fr(self):
success = 0
for _ in range(test_run_count):
random_class = random.choice(get_all_classes())
random_subject = random.choice(get_class_subjects(random_class))
random_image = random.choice(
get_images_for_subject(subject_name=self.subject_names[random_subject]))
logging.info('Testing subject %s in class %s with image %s', random_subject, random_class, random_image)
if predict(img=path_to_img(random_image), class_id=random_class) == random_subject:
success += 1
logging.info('Test success')
else:
logging.warning('Test failed')
self.assertGreaterEqual(success, int(success_perc * test_run_count))
| true
| true
|
7905efdeb2cb65c63ae5dc436169a282e8b3aa04
| 8,743
|
py
|
Python
|
src/datasets.py
|
IssamLaradji/SSR
|
90623188abb4dd9f30566faa2f170a76db9e1846
|
[
"Apache-2.0"
] | 2
|
2021-08-24T14:56:49.000Z
|
2022-01-24T16:10:59.000Z
|
src/datasets.py
|
IssamLaradji/SSR
|
90623188abb4dd9f30566faa2f170a76db9e1846
|
[
"Apache-2.0"
] | 1
|
2022-02-20T12:47:54.000Z
|
2022-02-20T13:44:51.000Z
|
src/datasets.py
|
IssamLaradji/SSR
|
90623188abb4dd9f30566faa2f170a76db9e1846
|
[
"Apache-2.0"
] | null | null | null |
import os
import soft_renderer.functional as srf
import torch, random
import numpy as np
import tqdm
from haven import haven_utils as hu
from PIL import Image, ImageOps, ImageFilter
import torchvision.transforms as transforms
class_ids_map = {
'02691156': 'Airplane',
'02828884': 'Bench',
'02933112': 'Cabinet',
'02958343': 'Car',
'03001627': 'Chair',
'03211117': 'Display',
'03636649': 'Lamp',
'03691459': 'Loudspeaker',
'04090263': 'Rifle',
'04256520': 'Sofa',
'04379243': 'Table',
'04401088': 'Telephone',
'04530566': 'Watercraft',
}
CLASS_IDS = sorted(list(class_ids_map.keys()))
class ShapeNet(object):
def __init__(self, directory=None, split=None, exp_dict=None):
self.class_ids = CLASS_IDS
n_classes = exp_dict.get('n_classes')
if n_classes:
self.class_ids = CLASS_IDS[:n_classes]
classes = exp_dict.get('classes')
if classes:
classes_map = {key: value for (value, key) in class_ids_map.items()}
self.class_ids = sorted([classes_map[k] for k in classes])
self.split = split
self.elevation = 30.
self.distance = 2.732
self.exp_dict = exp_dict
self.class_ids_map = class_ids_map
self.images = []
self.voxels = []
self.labels = []
self.class_ids_pair = list(zip(self.class_ids, [self.class_ids_map[i] for i in self.class_ids]))
self.num_data = {}
self.pos = {}
count = 0
# ind2class = {key: value for (value, key) in enumerate(self.class_ids)}
loop = tqdm.tqdm(self.class_ids)
loop.set_description(f'Loading {split} Dataset')
n_train_objects = exp_dict.get('n_train_objects')
n_ratio_val = exp_dict.get('n_val_ratio')
# assert n_ratio_val is not None
if n_train_objects is None and split == 'unlabeled':
return
if split in ['train', 'unlabeled']:
set_name = 'train'
elif split in ['val', 'test']:
set_name = 'val'
if n_ratio_val is None:
set_name = split
for ci, class_id in enumerate(loop):
i = list(np.load(os.path.join(directory, '%s_%s_images.npz' % (class_id, set_name))).items())[0][1]
v = list(np.load(os.path.join(directory, '%s_%s_voxels.npz' % (class_id, set_name))).items())[0][1]
# train get only first n
if split == 'train' and n_train_objects is not None:
n = n_train_objects
i = i[:n]
v = v[:n]
# unlabeled get only first n
if split == 'unlabeled' and n_train_objects is not None:
n = n_train_objects
i = i[n:]
v = v[n:]
elif split == 'val' and n_ratio_val is not None:
n = int(i.shape[0]*n_ratio_val)
i = i[:n]
v = v[:n]
elif split == 'test' and n_ratio_val is not None:
n = int(i.shape[0]*n_ratio_val)
i = i[n:]
v = v[n:]
self.images += [i]
self.voxels += [v]
self.labels += [torch.ones(i.shape[0]) * ci]
self.images = np.concatenate(self.images, axis=0)
self.images = torch.from_numpy(self.images.astype('float32') / 255.)
self.voxels = np.concatenate(self.voxels, axis=0)
self.voxels = torch.from_numpy(self.voxels.astype('float32'))
self.labels = torch.cat(self.labels, dim=0)
        # possible viewpoints
distances = torch.ones(24).float() * self.distance
elevations = torch.ones(24).float() * self.elevation
self.possible_viewpoints = srf.get_points_from_angles(distances, elevations, -torch.arange(24) * 15)
print(f'{split} samples: {len(self)}')
def __len__(self):
if isinstance(self.images, list):
return len(self.images)
return self.images.shape[0]
def __getitem__(self, idx, vp_idx=None, vp_idx_b=None):
# image A
images_a, viewpoints_a, viewpoint_id_a = self.get_random_viewpoint(idx, vp_idx)
# image B
images_b, viewpoints_b, viewpoint_id_b = self.get_random_viewpoint(idx, vp_idx_b)
return {'images_a':images_a,
'viewpoints_a': viewpoints_a,
'object_id_a':idx,
'viewpoint_id_a':viewpoint_id_a,
'images_b':images_b,
'viewpoints_b': viewpoints_b,
'object_id_b':idx,
'viewpoint_id_b':viewpoint_id_b}
def insert_images(self, images):
self.images = torch.cat([self.images, images], dim=0)
def pop_indices(self, ind_list):
selected_images = self.images[ind_list]
keep_idx = np.delete(np.arange(self.images.shape[0]), ind_list)
self.images = self.images[keep_idx]
# return list(np.delete(arr, id_to_del))
return selected_images
def get_random_viewpoint(self, idx, vp_idx=None):
if vp_idx is None:
viewpoint_id = np.random.randint(0, 24)
else:
viewpoint_id = vp_idx
# get image and viewpoint
images = self.images[idx][viewpoint_id]
# get viewpoint
viewpoints = srf.get_points_from_angles(self.distance, self.elevation, -viewpoint_id * 15)
return images, torch.as_tensor(viewpoints), viewpoint_id
def get_all_batches_for_evaluation(self, batch_size, class_id):
assert self.images.shape[0] == self.voxels.shape[0]
ci = self.class_ids.index(class_id)
ind_ci = self.labels == ci
im_cls = self.images[ind_ci]
vx_cls = self.voxels[ind_ci]
data_ids = np.arange(im_cls.shape[0])
viewpoint_ids = np.tile(np.arange(24), data_ids.size)
data_ids = np.repeat(data_ids, 24) * 24 + viewpoint_ids
distances = torch.ones(data_ids.size).float() * self.distance
elevations = torch.ones(data_ids.size).float() * self.elevation
viewpoints_all = srf.get_points_from_angles(distances, elevations, -torch.from_numpy(viewpoint_ids).float() * 15)
shape = im_cls.shape[-3:]
images = im_cls.view(-1, *shape)
shape = vx_cls.shape[-3:]
voxels = vx_cls.view(-1, *shape)
for i in range((data_ids.size - 1) // batch_size + 1):
im = images[data_ids[i * batch_size:(i + 1) * batch_size]]
vx = voxels[data_ids[i * batch_size:(i + 1) * batch_size] // 24]
yield im, vx
class Transform:
def __init__(self):
self.transform = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)],
p=0.8
),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(p=1.0),
Solarization(p=0.0),
transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
])
self.transform_prime = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)],
p=0.8
),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(p=0.1),
Solarization(p=0.2),
transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
])
def __call__(self, x):
y1 = self.transform(x)
y2 = self.transform_prime(x)
return y1, y2
class GaussianBlur(object):
def __init__(self, p):
self.p = p
def __call__(self, img):
if random.random() < self.p:
sigma = random.random() * 1.9 + 0.1
return img.filter(ImageFilter.GaussianBlur(sigma))
else:
return img
class Solarization(object):
def __init__(self, p):
self.p = p
def __call__(self, img):
if random.random() < self.p:
return ImageOps.solarize(img)
else:
return img
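# --- Hedged usage sketch (not part of the original module): `Transform`
# yields two independently augmented views of one image (a Barlow-Twins-style
# asymmetric blur/solarize pair). The random input below is hypothetical.
if __name__ == '__main__':
    _t = Transform()
    _img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)
    _view_a, _view_b = _t(_img)
    print(_view_a.shape, _view_b.shape)  # each is (3, 224, 224) after the crop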
| 34.557312
| 121
| 0.566396
|
import os
import soft_renderer.functional as srf
import torch, random
import numpy as np
import tqdm
from haven import haven_utils as hu
from PIL import Image, ImageOps, ImageFilter
import torchvision.transforms as transforms
class_ids_map = {
'02691156': 'Airplane',
'02828884': 'Bench',
'02933112': 'Cabinet',
'02958343': 'Car',
'03001627': 'Chair',
'03211117': 'Display',
'03636649': 'Lamp',
'03691459': 'Loudspeaker',
'04090263': 'Rifle',
'04256520': 'Sofa',
'04379243': 'Table',
'04401088': 'Telephone',
'04530566': 'Watercraft',
}
CLASS_IDS = sorted(list(class_ids_map.keys()))
class ShapeNet(object):
def __init__(self, directory=None, split=None, exp_dict=None):
self.class_ids = CLASS_IDS
n_classes = exp_dict.get('n_classes')
if n_classes:
self.class_ids = CLASS_IDS[:n_classes]
classes = exp_dict.get('classes')
if classes:
classes_map = {key: value for (value, key) in class_ids_map.items()}
self.class_ids = sorted([classes_map[k] for k in classes])
self.split = split
self.elevation = 30.
self.distance = 2.732
self.exp_dict = exp_dict
self.class_ids_map = class_ids_map
self.images = []
self.voxels = []
self.labels = []
self.class_ids_pair = list(zip(self.class_ids, [self.class_ids_map[i] for i in self.class_ids]))
self.num_data = {}
self.pos = {}
count = 0
loop = tqdm.tqdm(self.class_ids)
loop.set_description(f'Loading {split} Dataset')
n_train_objects = exp_dict.get('n_train_objects')
n_ratio_val = exp_dict.get('n_val_ratio')
if n_train_objects is None and split == 'unlabeled':
return
if split in ['train', 'unlabeled']:
set_name = 'train'
elif split in ['val', 'test']:
set_name = 'val'
if n_ratio_val is None:
set_name = split
for ci, class_id in enumerate(loop):
i = list(np.load(os.path.join(directory, '%s_%s_images.npz' % (class_id, set_name))).items())[0][1]
v = list(np.load(os.path.join(directory, '%s_%s_voxels.npz' % (class_id, set_name))).items())[0][1]
if split == 'train' and n_train_objects is not None:
n = n_train_objects
i = i[:n]
v = v[:n]
if split == 'unlabeled' and n_train_objects is not None:
n = n_train_objects
i = i[n:]
v = v[n:]
elif split == 'val' and n_ratio_val is not None:
n = int(i.shape[0]*n_ratio_val)
i = i[:n]
v = v[:n]
elif split == 'test' and n_ratio_val is not None:
n = int(i.shape[0]*n_ratio_val)
i = i[n:]
v = v[n:]
self.images += [i]
self.voxels += [v]
self.labels += [torch.ones(i.shape[0]) * ci]
self.images = np.concatenate(self.images, axis=0)
self.images = torch.from_numpy(self.images.astype('float32') / 255.)
self.voxels = np.concatenate(self.voxels, axis=0)
self.voxels = torch.from_numpy(self.voxels.astype('float32'))
self.labels = torch.cat(self.labels, dim=0)
distances = torch.ones(24).float() * self.distance
elevations = torch.ones(24).float() * self.elevation
self.possible_viewpoints = srf.get_points_from_angles(distances, elevations, -torch.arange(24) * 15)
print(f'{split} samples: {len(self)}')
def __len__(self):
if isinstance(self.images, list):
return len(self.images)
return self.images.shape[0]
def __getitem__(self, idx, vp_idx=None, vp_idx_b=None):
images_a, viewpoints_a, viewpoint_id_a = self.get_random_viewpoint(idx, vp_idx)
images_b, viewpoints_b, viewpoint_id_b = self.get_random_viewpoint(idx, vp_idx_b)
return {'images_a':images_a,
'viewpoints_a': viewpoints_a,
'object_id_a':idx,
'viewpoint_id_a':viewpoint_id_a,
'images_b':images_b,
'viewpoints_b': viewpoints_b,
'object_id_b':idx,
'viewpoint_id_b':viewpoint_id_b}
def insert_images(self, images):
self.images = torch.cat([self.images, images], dim=0)
def pop_indices(self, ind_list):
selected_images = self.images[ind_list]
keep_idx = np.delete(np.arange(self.images.shape[0]), ind_list)
self.images = self.images[keep_idx]
return selected_images
def get_random_viewpoint(self, idx, vp_idx=None):
if vp_idx is None:
viewpoint_id = np.random.randint(0, 24)
else:
viewpoint_id = vp_idx
images = self.images[idx][viewpoint_id]
viewpoints = srf.get_points_from_angles(self.distance, self.elevation, -viewpoint_id * 15)
return images, torch.as_tensor(viewpoints), viewpoint_id
def get_all_batches_for_evaluation(self, batch_size, class_id):
assert self.images.shape[0] == self.voxels.shape[0]
ci = self.class_ids.index(class_id)
ind_ci = self.labels == ci
im_cls = self.images[ind_ci]
vx_cls = self.voxels[ind_ci]
data_ids = np.arange(im_cls.shape[0])
viewpoint_ids = np.tile(np.arange(24), data_ids.size)
data_ids = np.repeat(data_ids, 24) * 24 + viewpoint_ids
distances = torch.ones(data_ids.size).float() * self.distance
elevations = torch.ones(data_ids.size).float() * self.elevation
viewpoints_all = srf.get_points_from_angles(distances, elevations, -torch.from_numpy(viewpoint_ids).float() * 15)
shape = im_cls.shape[-3:]
images = im_cls.view(-1, *shape)
shape = vx_cls.shape[-3:]
voxels = vx_cls.view(-1, *shape)
for i in range((data_ids.size - 1) // batch_size + 1):
im = images[data_ids[i * batch_size:(i + 1) * batch_size]]
vx = voxels[data_ids[i * batch_size:(i + 1) * batch_size] // 24]
yield im, vx
class Transform:
def __init__(self):
self.transform = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)],
p=0.8
),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(p=1.0),
Solarization(p=0.0),
transforms.ToTensor(),
])
self.transform_prime = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)],
p=0.8
),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(p=0.1),
Solarization(p=0.2),
transforms.ToTensor(),
])
def __call__(self, x):
y1 = self.transform(x)
y2 = self.transform_prime(x)
return y1, y2
class GaussianBlur(object):
def __init__(self, p):
self.p = p
def __call__(self, img):
if random.random() < self.p:
sigma = random.random() * 1.9 + 0.1
return img.filter(ImageFilter.GaussianBlur(sigma))
else:
return img
class Solarization(object):
def __init__(self, p):
self.p = p
def __call__(self, img):
if random.random() < self.p:
return ImageOps.solarize(img)
else:
return img
| true
| true
|
7905f03a441eefda9fb5fa0e8571a2c5aec55ec0
| 1,740
|
py
|
Python
|
sia_load_tester/upload_queue.py
|
mtlynch/sia_load_tester
|
f4e2785e6dbceb1cf9c912ccb2fad49617102afb
|
[
"MIT"
] | 6
|
2018-03-01T04:06:50.000Z
|
2020-07-28T12:28:28.000Z
|
sia_load_tester/upload_queue.py
|
mtlynch/sia_load_tester
|
f4e2785e6dbceb1cf9c912ccb2fad49617102afb
|
[
"MIT"
] | 40
|
2018-02-09T00:41:41.000Z
|
2018-04-20T04:02:57.000Z
|
sia_load_tester/upload_queue.py
|
mtlynch/sia_load_tester
|
f4e2785e6dbceb1cf9c912ccb2fad49617102afb
|
[
"MIT"
] | null | null | null |
import logging
import Queue
import sia_client as sc
logger = logging.getLogger(__name__)
def from_upload_jobs(upload_jobs):
"""Creates a new upload queue from a list of upload jobs.
Creates a new queue of files to upload by starting with the full input
dataset and removing any files that are uploaded (partially or fully) to
Sia.
Args:
upload_jobs: The unfiltered set of upload jobs.
Returns:
A Queue of upload jobs, filtered to remove jobs that are already
complete (the paths already exist on Sia).
"""
return from_upload_jobs_and_sia_client(upload_jobs, sc.make_sia_client())
def from_upload_jobs_and_sia_client(upload_jobs, sia_client):
"""Creates a new upload queue from a dataset.
Creates a new queue of files to upload by starting with the full input
dataset and removing any files that are uploaded (partially or fully) to
Sia.
Args:
upload_jobs: The unfiltered set of upload jobs.
sia_client: An implementation of the Sia client interface.
Returns:
A Queue of upload jobs, filtered to remove jobs that are already
complete (the paths already exist on Sia).
"""
sia_paths = _get_sia_paths(sia_client)
# Filter jobs for files that have already been uploaded to Sia.
upload_jobs = [j for j in upload_jobs if j.sia_path not in sia_paths]
logger.info('%d files already uploaded to Sia, need to upload %d more',
len(sia_paths), len(upload_jobs))
upload_queue = Queue.Queue()
for upload_job in upload_jobs:
upload_queue.put(upload_job)
return upload_queue
def _get_sia_paths(sia_client):
return set([f[u'siapath'] for f in sia_client.renter_files()])
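# --- Hedged usage sketch (not part of the original module): building a queue
# against a stub client. `_StubSiaClient` and `_Job` are hypothetical; real
# callers use sia_client.make_sia_client() and jobs carrying a `sia_path`.
class _StubSiaClient(object):
    def renter_files(self):
        return [{u'siapath': u'already-uploaded.dat'}]


class _Job(object):
    def __init__(self, sia_path):
        self.sia_path = sia_path


def _example_build_queue():
    jobs = [_Job(u'already-uploaded.dat'), _Job(u'new-file.dat')]
    queue = from_upload_jobs_and_sia_client(jobs, _StubSiaClient())
    return queue.qsize()  # 1: the already-uploaded job was filtered out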
| 32.222222
| 77
| 0.711494
|
import logging
import Queue
import sia_client as sc
logger = logging.getLogger(__name__)
def from_upload_jobs(upload_jobs):
return from_upload_jobs_and_sia_client(upload_jobs, sc.make_sia_client())
def from_upload_jobs_and_sia_client(upload_jobs, sia_client):
sia_paths = _get_sia_paths(sia_client)
upload_jobs = [j for j in upload_jobs if j.sia_path not in sia_paths]
logger.info('%d files already uploaded to Sia, need to upload %d more',
len(sia_paths), len(upload_jobs))
upload_queue = Queue.Queue()
for upload_job in upload_jobs:
upload_queue.put(upload_job)
return upload_queue
def _get_sia_paths(sia_client):
return set([f[u'siapath'] for f in sia_client.renter_files()])
| true
| true
|
7905f1b56399335e5398c7ecbe2e6178f69ae07f
| 9,763
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/monitor/operations/metric_alert.py
|
akashsinghal/azure-cli
|
8ab2f7604a834de790bdea849b3e83f2466428b9
|
[
"MIT"
] | 2
|
2020-08-08T11:00:25.000Z
|
2020-08-08T11:00:30.000Z
|
src/azure-cli/azure/cli/command_modules/monitor/operations/metric_alert.py
|
cindywu/azure-cli
|
bd011cb91ac6e0ac89f53e1105d76ea30b6609a0
|
[
"MIT"
] | 1
|
2021-06-02T02:49:48.000Z
|
2021-06-02T02:49:48.000Z
|
src/azure-cli/azure/cli/command_modules/monitor/operations/metric_alert.py
|
cindywu/azure-cli
|
bd011cb91ac6e0ac89f53e1105d76ea30b6609a0
|
[
"MIT"
] | 1
|
2020-07-31T17:22:13.000Z
|
2020-07-31T17:22:13.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.command_modules.monitor.util import get_operator_map, get_aggregation_map
from knack.log import get_logger
logger = get_logger(__name__)
def create_metric_alert(client, resource_group_name, rule_name, scopes, condition, disabled=False, description=None,
tags=None, actions=None, severity=2, window_size='5m', evaluation_frequency='1m',
auto_mitigate=None):
from azure.mgmt.monitor.models import (MetricAlertResource,
MetricAlertSingleResourceMultipleMetricCriteria,
MetricAlertMultipleResourceMultipleMetricCriteria)
# generate names for the conditions
for i, cond in enumerate(condition):
cond.name = 'cond{}'.format(i)
criteria = None
target_resource_type = None
target_resource_region = None
if len(scopes) == 1:
criteria = MetricAlertSingleResourceMultipleMetricCriteria(all_of=condition)
else:
criteria = MetricAlertMultipleResourceMultipleMetricCriteria(all_of=condition)
target_resource_type = _parse_resource_type(scopes)
target_resource_region = 'global'
kwargs = {
'description': description,
'severity': severity,
'enabled': not disabled,
'scopes': scopes,
'evaluation_frequency': evaluation_frequency,
'window_size': window_size,
'criteria': criteria,
'target_resource_type': target_resource_type,
'target_resource_region': target_resource_region,
'actions': actions,
'tags': tags,
'location': 'global',
'auto_mitigate': auto_mitigate
}
return client.create_or_update(resource_group_name, rule_name, MetricAlertResource(**kwargs))
def update_metric_alert(instance, scopes=None, description=None, enabled=None, tags=None,
severity=None, window_size=None, evaluation_frequency=None, auto_mitigate=None,
add_actions=None, remove_actions=None, add_conditions=None, remove_conditions=None):
if scopes is not None:
instance.scopes = scopes
if description is not None:
instance.description = description
if enabled is not None:
instance.enabled = enabled
if tags is not None:
instance.tags = tags
if severity is not None:
instance.severity = severity
if window_size is not None:
instance.window_size = window_size
if evaluation_frequency is not None:
instance.evaluation_frequency = evaluation_frequency
if auto_mitigate is not None:
instance.auto_mitigate = auto_mitigate
# process action removals
if remove_actions is not None:
instance.actions = [x for x in instance.actions if x.action_group_id.lower() not in remove_actions]
# process action additions
if add_actions is not None:
for action in add_actions:
match = next(
(x for x in instance.actions if action.action_group_id.lower() == x.action_group_id.lower()), None
)
if match:
match.webhook_properties = action.webhook_properties
else:
instance.actions.append(action)
# process condition removals
if remove_conditions is not None:
instance.criteria.all_of = [x for x in instance.criteria.all_of if x.name not in remove_conditions]
def _get_next_name():
i = 0
while True:
possible_name = 'cond{}'.format(i)
match = next((x for x in instance.criteria.all_of if x.name == possible_name), None)
if match:
i = i + 1
continue
return possible_name
# process condition additions
if add_conditions is not None:
for condition in add_conditions:
condition.name = _get_next_name()
instance.criteria.all_of.append(condition)
return instance
def list_metric_alerts(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_metric_rule(client, resource_group_name, rule_name, target, condition, description=None, disabled=False,
location=None, tags=None, email_service_owners=False, actions=None):
from azure.mgmt.monitor.models import AlertRuleResource, RuleEmailAction
condition.data_source.resource_uri = target
custom_emails, webhooks, _ = _parse_actions(actions)
actions = [
RuleEmailAction(send_to_service_owners=email_service_owners, custom_emails=custom_emails)
] + (webhooks or [])
rule = AlertRuleResource(
location=location, alert_rule_resource_name=rule_name, is_enabled=not disabled,
condition=condition, tags=tags, description=description, actions=actions)
return client.create_or_update(resource_group_name, rule_name, rule)
def update_metric_rule(instance, target=None, condition=None, description=None, enabled=None, metric=None,
operator=None, threshold=None, aggregation=None, period=None, tags=None,
email_service_owners=None, add_actions=None, remove_actions=None):
# Update general properties
if description is not None:
instance.description = description
if enabled is not None:
instance.is_enabled = enabled
if tags is not None:
instance.tags = tags
# Update conditions
if condition is not None:
target = target or instance.condition.data_source.resource_uri
instance.condition = condition
if metric is not None:
instance.condition.data_source.metric_name = metric
if operator is not None:
instance.condition.operator = get_operator_map()[operator]
if threshold is not None:
instance.condition.threshold = threshold
if aggregation is not None:
instance.condition.time_aggregation = get_aggregation_map()[aggregation]
if period is not None:
instance.condition.window_size = period
if target is not None:
instance.condition.data_source.resource_uri = target
# Update actions
emails, webhooks, curr_email_service_owners = _parse_actions(instance.actions)
# process removals
if remove_actions is not None:
removed_emails, removed_webhooks = _parse_action_removals(remove_actions)
emails = [x for x in emails if x not in removed_emails]
webhooks = [x for x in webhooks if x.service_uri not in removed_webhooks]
# process additions
if add_actions is not None:
added_emails, added_webhooks, _ = _parse_actions(add_actions)
emails = list(set(emails) | set(added_emails))
webhooks = webhooks + added_webhooks
# Replace the existing actions array. This potentially restructures rules that were created
# via other methods (Portal, ARM template). However, the functionality of these rules should
# be the same.
from azure.mgmt.monitor.models import RuleEmailAction
if email_service_owners is None:
email_service_owners = curr_email_service_owners
actions = [RuleEmailAction(send_to_service_owners=email_service_owners, custom_emails=emails)] + webhooks
instance.actions = actions
return instance
def _parse_actions(actions):
""" Actions come in as a combined list. This method separates the webhook actions into a
separate collection and combines any number of email actions into a single email collection
and a single value for `email_service_owners`. If any email action contains a True value
for `send_to_service_owners` then it is assumed the entire value should be True. """
from azure.mgmt.monitor.models import RuleEmailAction, RuleWebhookAction
actions = actions or []
email_service_owners = None
webhooks = [x for x in actions if isinstance(x, RuleWebhookAction)]
custom_emails = set()
for action in actions:
if isinstance(action, RuleEmailAction):
if action.send_to_service_owners:
email_service_owners = True
custom_emails = custom_emails | set(action.custom_emails)
return list(custom_emails), webhooks, email_service_owners
def _parse_action_removals(actions):
""" Separates the combined list of keys to remove into webhooks and emails. """
flattened = list({x for sublist in actions for x in sublist})
emails = []
webhooks = []
for item in flattened:
if item.startswith('http://') or item.startswith('https://'):
webhooks.append(item)
else:
emails.append(item)
return emails, webhooks
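# Illustrative behaviour (a sketch): keys are split purely on the URL scheme, e.g.
#   _parse_action_removals([['ops@contoso.com', 'https://example.com/hook']])
# returns (['ops@contoso.com'], ['https://example.com/hook']).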
def _parse_resource_type(scopes):
from msrestazure.tools import parse_resource_id
from azure.cli.core import CLIError
namespace = None
resource_type = None
for item in scopes:
item_namespace = parse_resource_id(item)['namespace']
item_resource_type = parse_resource_id(item)['resource_type']
if namespace is None and resource_type is None:
namespace = item_namespace
resource_type = item_resource_type
else:
if namespace != item_namespace or resource_type != item_resource_type:
                raise CLIError('All scopes must be of the same resource type.')
return namespace + '/' + resource_type
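# Illustrative behaviour (a sketch with assumed resource IDs): two VM scopes such as
# '/subscriptions/<sub>/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1'
# and the same path ending in 'vm2' yield 'Microsoft.Compute/virtualMachines',
# while mixing a VM scope with a storage-account scope raises CLIError.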
| 42.447826
| 116
| 0.679402
|
from azure.cli.command_modules.monitor.util import get_operator_map, get_aggregation_map
from knack.log import get_logger
logger = get_logger(__name__)
def create_metric_alert(client, resource_group_name, rule_name, scopes, condition, disabled=False, description=None,
tags=None, actions=None, severity=2, window_size='5m', evaluation_frequency='1m',
auto_mitigate=None):
from azure.mgmt.monitor.models import (MetricAlertResource,
MetricAlertSingleResourceMultipleMetricCriteria,
MetricAlertMultipleResourceMultipleMetricCriteria)
for i, cond in enumerate(condition):
cond.name = 'cond{}'.format(i)
criteria = None
target_resource_type = None
target_resource_region = None
if len(scopes) == 1:
criteria = MetricAlertSingleResourceMultipleMetricCriteria(all_of=condition)
else:
criteria = MetricAlertMultipleResourceMultipleMetricCriteria(all_of=condition)
target_resource_type = _parse_resource_type(scopes)
target_resource_region = 'global'
kwargs = {
'description': description,
'severity': severity,
'enabled': not disabled,
'scopes': scopes,
'evaluation_frequency': evaluation_frequency,
'window_size': window_size,
'criteria': criteria,
'target_resource_type': target_resource_type,
'target_resource_region': target_resource_region,
'actions': actions,
'tags': tags,
'location': 'global',
'auto_mitigate': auto_mitigate
}
return client.create_or_update(resource_group_name, rule_name, MetricAlertResource(**kwargs))
def update_metric_alert(instance, scopes=None, description=None, enabled=None, tags=None,
severity=None, window_size=None, evaluation_frequency=None, auto_mitigate=None,
add_actions=None, remove_actions=None, add_conditions=None, remove_conditions=None):
if scopes is not None:
instance.scopes = scopes
if description is not None:
instance.description = description
if enabled is not None:
instance.enabled = enabled
if tags is not None:
instance.tags = tags
if severity is not None:
instance.severity = severity
if window_size is not None:
instance.window_size = window_size
if evaluation_frequency is not None:
instance.evaluation_frequency = evaluation_frequency
if auto_mitigate is not None:
instance.auto_mitigate = auto_mitigate
if remove_actions is not None:
instance.actions = [x for x in instance.actions if x.action_group_id.lower() not in remove_actions]
if add_actions is not None:
for action in add_actions:
match = next(
(x for x in instance.actions if action.action_group_id.lower() == x.action_group_id.lower()), None
)
if match:
match.webhook_properties = action.webhook_properties
else:
instance.actions.append(action)
if remove_conditions is not None:
instance.criteria.all_of = [x for x in instance.criteria.all_of if x.name not in remove_conditions]
def _get_next_name():
i = 0
while True:
possible_name = 'cond{}'.format(i)
match = next((x for x in instance.criteria.all_of if x.name == possible_name), None)
if match:
i = i + 1
continue
return possible_name
if add_conditions is not None:
for condition in add_conditions:
condition.name = _get_next_name()
instance.criteria.all_of.append(condition)
return instance
def list_metric_alerts(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_metric_rule(client, resource_group_name, rule_name, target, condition, description=None, disabled=False,
location=None, tags=None, email_service_owners=False, actions=None):
from azure.mgmt.monitor.models import AlertRuleResource, RuleEmailAction
condition.data_source.resource_uri = target
custom_emails, webhooks, _ = _parse_actions(actions)
actions = [
RuleEmailAction(send_to_service_owners=email_service_owners, custom_emails=custom_emails)
] + (webhooks or [])
rule = AlertRuleResource(
location=location, alert_rule_resource_name=rule_name, is_enabled=not disabled,
condition=condition, tags=tags, description=description, actions=actions)
return client.create_or_update(resource_group_name, rule_name, rule)
def update_metric_rule(instance, target=None, condition=None, description=None, enabled=None, metric=None,
operator=None, threshold=None, aggregation=None, period=None, tags=None,
email_service_owners=None, add_actions=None, remove_actions=None):
if description is not None:
instance.description = description
if enabled is not None:
instance.is_enabled = enabled
if tags is not None:
instance.tags = tags
if condition is not None:
target = target or instance.condition.data_source.resource_uri
instance.condition = condition
if metric is not None:
instance.condition.data_source.metric_name = metric
if operator is not None:
instance.condition.operator = get_operator_map()[operator]
if threshold is not None:
instance.condition.threshold = threshold
if aggregation is not None:
instance.condition.time_aggregation = get_aggregation_map()[aggregation]
if period is not None:
instance.condition.window_size = period
if target is not None:
instance.condition.data_source.resource_uri = target
emails, webhooks, curr_email_service_owners = _parse_actions(instance.actions)
if remove_actions is not None:
removed_emails, removed_webhooks = _parse_action_removals(remove_actions)
emails = [x for x in emails if x not in removed_emails]
webhooks = [x for x in webhooks if x.service_uri not in removed_webhooks]
if add_actions is not None:
added_emails, added_webhooks, _ = _parse_actions(add_actions)
emails = list(set(emails) | set(added_emails))
webhooks = webhooks + added_webhooks
from azure.mgmt.monitor.models import RuleEmailAction
if email_service_owners is None:
email_service_owners = curr_email_service_owners
actions = [RuleEmailAction(send_to_service_owners=email_service_owners, custom_emails=emails)] + webhooks
instance.actions = actions
return instance
def _parse_actions(actions):
from azure.mgmt.monitor.models import RuleEmailAction, RuleWebhookAction
actions = actions or []
email_service_owners = None
webhooks = [x for x in actions if isinstance(x, RuleWebhookAction)]
custom_emails = set()
for action in actions:
if isinstance(action, RuleEmailAction):
if action.send_to_service_owners:
email_service_owners = True
custom_emails = custom_emails | set(action.custom_emails)
return list(custom_emails), webhooks, email_service_owners
def _parse_action_removals(actions):
flattened = list({x for sublist in actions for x in sublist})
emails = []
webhooks = []
for item in flattened:
if item.startswith('http://') or item.startswith('https://'):
webhooks.append(item)
else:
emails.append(item)
return emails, webhooks
def _parse_resource_type(scopes):
from msrestazure.tools import parse_resource_id
from azure.cli.core import CLIError
namespace = None
resource_type = None
for item in scopes:
item_namespace = parse_resource_id(item)['namespace']
item_resource_type = parse_resource_id(item)['resource_type']
if namespace is None and resource_type is None:
namespace = item_namespace
resource_type = item_resource_type
else:
if namespace != item_namespace or resource_type != item_resource_type:
                raise CLIError('All scopes must be of the same resource type.')
return namespace + '/' + resource_type
| true
| true
|
7905f3416d52bfdfea15dde1aab7a233aaf5554b
| 274
|
py
|
Python
|
solutions/example2.py
|
ricleal/IPythonParallel
|
c0b9446553dc709e918c6e8fb437b0d55bfba38d
|
[
"BSD-3-Clause"
] | 23
|
2015-04-29T00:38:20.000Z
|
2021-11-28T13:38:20.000Z
|
solutions/example2.py
|
ricleal/IPythonParallel
|
c0b9446553dc709e918c6e8fb437b0d55bfba38d
|
[
"BSD-3-Clause"
] | null | null | null |
solutions/example2.py
|
ricleal/IPythonParallel
|
c0b9446553dc709e918c6e8fb437b0d55bfba38d
|
[
"BSD-3-Clause"
] | 20
|
2015-01-24T02:43:42.000Z
|
2021-08-29T05:52:06.000Z
|
def estimate_pi_parallel(N, lview, N_per_trial=1E6):
result = lview.map(estimate_pi, [N_per_trial for i in range(N)])
while not result.ready():
print(result.progress)
time.sleep(0.5)
return np.mean(list(result))
estimate_pi_parallel(100, lview)
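# Note: estimate_pi and the imports (time, numpy as np) are defined earlier in
# the notebook; a minimal sketch of what estimate_pi presumably looks like
# (assumed, not part of this file):
#   import time
#   import numpy as np
#   def estimate_pi(N):
#       xy = np.random.random((int(N), 2))       # points in the unit square
#       return 4.0 * np.mean(np.sum(xy ** 2, axis=1) < 1.0)  # quarter-circle hits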
| 30.444444
| 68
| 0.693431
|
def estimate_pi_parallel(N, lview, N_per_trial=1E6):
result = lview.map(estimate_pi, [N_per_trial for i in range(N)])
while not result.ready():
print(result.progress)
time.sleep(0.5)
return np.mean(list(result))
estimate_pi_parallel(100, lview)
| true
| true
|
7905f3c291e6e5a8390af86d158774626c12c33c
| 2,383
|
py
|
Python
|
code/extract_balanced.py
|
tedunderwood/biographies
|
aba7b7180aea944bdc4fa163b0008eca34fe73cc
|
[
"MIT"
] | 1
|
2019-04-22T16:41:52.000Z
|
2019-04-22T16:41:52.000Z
|
code/extract_balanced.py
|
afcarl/biographies
|
b79dbd054fca10860d2c5a89d9c5ab1df8a93642
|
[
"MIT"
] | null | null | null |
code/extract_balanced.py
|
afcarl/biographies
|
b79dbd054fca10860d2c5a89d9c5ab1df8a93642
|
[
"MIT"
] | 1
|
2019-11-07T00:50:52.000Z
|
2019-11-07T00:50:52.000Z
|
#!/usr/bin/python3
import sys
import os
import shutil
import csv
import zipfile
import pandas as pd
import glob
infile = sys.argv[1]
outfile = sys.argv[2]
# remove holding_folder if it exists, and create new folder
# use 'rm -r /holding_folder/*' in shell script instead?
holding_path = '/media/secure_volume/holding_folder'
if os.path.isdir(holding_path):
shutil.rmtree(holding_path)
os.mkdir(holding_path)
def extract(infile):
'''
Merges bioindex.tsv with the infile (balanced data),
finds the volsplit.zip location for each bio file and
extracts the files into secure_volume/holding_folder.
'''
bioindex = pd.read_csv('/media/secure_volume/index/bioindex.tsv', sep='\t')
balanced_bioindex = pd.read_table(infile)
for suffix in balanced_bioindex.filesuffix.unique():
volsplit_file = 'volsplit'+str(suffix)+'.zip'
volsplit_df = balanced_bioindex.loc[balanced_bioindex.filesuffix == suffix,:]
try:
with zipfile.ZipFile('/media/secure_volume/'+volsplit_file, 'r') as myzip:
for idx, row in volsplit_df.iterrows():
filename = row['mainid']+'.zip'
myzip.extract(filename, '/media/secure_volume/holding_folder')
except Exception as e:
print('ERROR:',filename,'not found in',volsplit_file,'!', e)
def slicer(outfile):
idx_file_path = '/media/secure_volume/index/bioindex.tsv'
holding_folder_path = '/media/secure_volume/holding_folder/'
bio_idx_df = pd.read_table(idx_file_path)
bio_idx_df.set_index('mainid', inplace = True)
mainid_list = [vol for vol in os.listdir(holding_folder_path) if vol.endswith('.zip')]
# remove '.zip' from file names
mainid_list_clean = [item[0:-4] for item in mainid_list]
#subset bioindex on holding_folder IDs
htid_series = bio_idx_df.htid[mainid_list_clean]
file_path_list = glob.glob(holding_folder_path+'*.zip')
# print('file path list has: ',len(file_path_list))
# print('htid_list has', len(htid_list))
slice_df = pd.DataFrame(htid_series)
slice_df['path'] = file_path_list
slice_df['c'] = 0
slice_df['d'] = 1001
    # to_csv opens the target path itself; no separate file handle is needed
    slice_df.to_csv(outfile, sep='\t', header=False, index=False)
print("Wrote", len(slice_df), "rows to", outfile)
extract(infile)
slicer(outfile)
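# Usage sketch (assumed invocation; infile/outfile come from sys.argv above):
#   python extract_balanced.py balanced_data.tsv slices.tsv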
| 34.042857
| 90
| 0.684012
|
import sys
import os
import shutil
import csv
import zipfile
import pandas as pd
import glob
infile = sys.argv[1]
outfile = sys.argv[2]
holding_path = '/media/secure_volume/holding_folder'
if os.path.isdir(holding_path):
shutil.rmtree(holding_path)
os.mkdir(holding_path)
def extract(infile):
bioindex = pd.read_csv('/media/secure_volume/index/bioindex.tsv', sep='\t')
balanced_bioindex = pd.read_table(infile)
for suffix in balanced_bioindex.filesuffix.unique():
volsplit_file = 'volsplit'+str(suffix)+'.zip'
volsplit_df = balanced_bioindex.loc[balanced_bioindex.filesuffix == suffix,:]
try:
with zipfile.ZipFile('/media/secure_volume/'+volsplit_file, 'r') as myzip:
for idx, row in volsplit_df.iterrows():
filename = row['mainid']+'.zip'
myzip.extract(filename, '/media/secure_volume/holding_folder')
except Exception as e:
print('ERROR:',filename,'not found in',volsplit_file,'!', e)
def slicer(outfile):
idx_file_path = '/media/secure_volume/index/bioindex.tsv'
holding_folder_path = '/media/secure_volume/holding_folder/'
bio_idx_df = pd.read_table(idx_file_path)
bio_idx_df.set_index('mainid', inplace = True)
mainid_list = [vol for vol in os.listdir(holding_folder_path) if vol.endswith('.zip')]
mainid_list_clean = [item[0:-4] for item in mainid_list]
htid_series = bio_idx_df.htid[mainid_list_clean]
file_path_list = glob.glob(holding_folder_path+'*.zip')
slice_df = pd.DataFrame(htid_series)
slice_df['path'] = file_path_list
slice_df['c'] = 0
slice_df['d'] = 1001
    slice_df.to_csv(outfile, sep='\t', header=False, index=False)
print("Wrote", len(slice_df), "rows to", outfile)
extract(infile)
slicer(outfile)
| true
| true
|
7905f3f184c24f1c21f5d4f050373b4a61b5cad9
| 2,051
|
py
|
Python
|
{{cookiecutter.project_name}}/server/settings/environments/production.py
|
alisher-matkurbanov/wemake-django-template
|
28d17211d5a9f466a7281006a09775b1e6ad1ef1
|
[
"MIT"
] | 3
|
2020-02-26T05:57:13.000Z
|
2020-03-09T17:07:18.000Z
|
{{cookiecutter.project_name}}/server/settings/environments/production.py
|
neilwithdata/wemake-django-template
|
fde107312b7649483c65d118e172045ff362482c
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/server/settings/environments/production.py
|
neilwithdata/wemake-django-template
|
fde107312b7649483c65d118e172045ff362482c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This file contains all the settings used in production.
This file is required and if development.py is present these
values are overridden.
"""
from server.settings.components import config
# Production flags:
# https://docs.djangoproject.com/en/2.2/howto/deployment/
DEBUG = False
ALLOWED_HOSTS = [
# TODO: check production hosts
config('DOMAIN_NAME'),
# We need this value for `healthcheck` to work:
'localhost',
]
# Staticfiles
# https://docs.djangoproject.com/en/2.2/ref/contrib/staticfiles/
# This is a hack to allow a special flag to be used with `--dry-run`
# to test things locally.
_COLLECTSTATIC_DRYRUN = config(
'DJANGO_COLLECTSTATIC_DRYRUN', cast=bool, default=False,
)
# Adding STATIC_ROOT to collect static files via 'collectstatic':
STATIC_ROOT = '.static' if _COLLECTSTATIC_DRYRUN else '/var/www/django/static'
STATICFILES_STORAGE = (
# This is a string, not a tuple,
# but it does not fit into 80 characters rule.
'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
)
# Media files
# https://docs.djangoproject.com/en/2.2/topics/files/
MEDIA_ROOT = '/var/www/django/media'
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
_PASS = 'django.contrib.auth.password_validation' # noqa: S105
AUTH_PASSWORD_VALIDATORS = [
{'NAME': '{0}.UserAttributeSimilarityValidator'.format(_PASS)},
{'NAME': '{0}.MinimumLengthValidator'.format(_PASS)},
{'NAME': '{0}.CommonPasswordValidator'.format(_PASS)},
{'NAME': '{0}.NumericPasswordValidator'.format(_PASS)},
]
# Security
# https://docs.djangoproject.com/en/2.2/topics/security/
SECURE_HSTS_SECONDS = 31536000 # the same as Caddy has
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
SECURE_REDIRECT_EXEMPT = [
# This is required for healthcheck to work:
'^health/',
]
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
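# Example (illustrative): with SECURE_SSL_REDIRECT = True, a request to
# http://<host>/health/ is served over plain HTTP because '^health/' matches
# SECURE_REDIRECT_EXEMPT, while any other http:// URL is redirected to https://.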
| 26.294872
| 78
| 0.736714
|
from server.settings.components import config
DEBUG = False
ALLOWED_HOSTS = [
config('DOMAIN_NAME'),
'localhost',
]
_COLLECTSTATIC_DRYRUN = config(
'DJANGO_COLLECTSTATIC_DRYRUN', cast=bool, default=False,
)
STATIC_ROOT = '.static' if _COLLECTSTATIC_DRYRUN else '/var/www/django/static'
STATICFILES_STORAGE = (
'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
)
MEDIA_ROOT = '/var/www/django/media'
_PASS = 'django.contrib.auth.password_validation'
AUTH_PASSWORD_VALIDATORS = [
{'NAME': '{0}.UserAttributeSimilarityValidator'.format(_PASS)},
{'NAME': '{0}.MinimumLengthValidator'.format(_PASS)},
{'NAME': '{0}.CommonPasswordValidator'.format(_PASS)},
{'NAME': '{0}.NumericPasswordValidator'.format(_PASS)},
]
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
SECURE_REDIRECT_EXEMPT = [
'^health/',
]
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
| true
| true
|
7905f4524960f4cccee78e6bb449aed957ac73a5
| 344
|
py
|
Python
|
simpleproject/simpleproject/urls.py
|
Shailendre/simpleproject
|
cd7319636d0569be06bb9dab4c5546c1e9542b07
|
[
"BSD-2-Clause"
] | null | null | null |
simpleproject/simpleproject/urls.py
|
Shailendre/simpleproject
|
cd7319636d0569be06bb9dab4c5546c1e9542b07
|
[
"BSD-2-Clause"
] | null | null | null |
simpleproject/simpleproject/urls.py
|
Shailendre/simpleproject
|
cd7319636d0569be06bb9dab4c5546c1e9542b07
|
[
"BSD-2-Clause"
] | null | null | null |
from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', 'simpleproject.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^simpleapp/', include('simpleapp.urls')),
]
| 26.461538
| 58
| 0.65407
|
from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^simpleapp/', include('simpleapp.urls')),
]
| true
| true
|
7905f488b9660c83bf66292c28660458e9c7c2d7
| 104
|
py
|
Python
|
schema/datasets/__init__.py
|
brianhie/schema
|
6ab3ed7a25c5ebff9974f2316cea0e120812d888
|
[
"MIT"
] | 14
|
2019-11-07T14:28:15.000Z
|
2022-01-03T09:30:40.000Z
|
schema/datasets/__init__.py
|
brianhie/schema
|
6ab3ed7a25c5ebff9974f2316cea0e120812d888
|
[
"MIT"
] | 1
|
2021-09-02T06:56:48.000Z
|
2021-09-02T17:46:07.000Z
|
schema/datasets/__init__.py
|
brianhie/schema
|
6ab3ed7a25c5ebff9974f2316cea0e120812d888
|
[
"MIT"
] | 2
|
2019-11-07T12:44:10.000Z
|
2021-12-27T03:14:40.000Z
|
from ._datasets import fly_brain, scicar_mouse_kidney
__all__ = ['fly_brain', 'scicar_mouse_kidney']
| 17.333333
| 53
| 0.788462
|
from ._datasets import fly_brain, scicar_mouse_kidney
__all__ = ['fly_brain', 'scicar_mouse_kidney']
| true
| true
|
7905f4dde9438dceb8325a94f48f3247be4eba95
| 5,975
|
py
|
Python
|
src/rl_coach_2020_v2/src/markov/s3_client.py
|
adam-aph/deepracer-local
|
fec4d55867245168ab76a2096e345ef27977b356
|
[
"MIT"
] | 1
|
2020-05-15T00:34:11.000Z
|
2020-05-15T00:34:11.000Z
|
src/rl_coach_2020_v2/src/markov/s3_client.py
|
adam-aph/deepracer-local
|
fec4d55867245168ab76a2096e345ef27977b356
|
[
"MIT"
] | null | null | null |
src/rl_coach_2020_v2/src/markov/s3_client.py
|
adam-aph/deepracer-local
|
fec4d55867245168ab76a2096e345ef27977b356
|
[
"MIT"
] | 1
|
2020-06-06T10:49:36.000Z
|
2020-06-06T10:49:36.000Z
|
import io
import logging
import os
import json
import time
import boto3
import botocore
from markov.utils import log_and_exit, Logger, get_boto_config, \
SIMAPP_EVENT_ERROR_CODE_500, SIMAPP_EVENT_ERROR_CODE_400, \
SIMAPP_S3_DATA_STORE_EXCEPTION
LOG = Logger(__name__, logging.INFO).get_logger()
# The amount of time for the sim app to wait for sagemaker to produce
# the ip
SAGEMAKER_WAIT_TIME = 1200 # 20 minutes
class SageS3Client():
def __init__(self, bucket=None, s3_prefix=None, aws_region=None, s3_endpoint_url=None):
self.aws_region = aws_region
self.bucket = bucket
self.s3_prefix = s3_prefix
self.s3_endpoint_url = s3_endpoint_url
self.config_key = os.path.normpath(s3_prefix + "/ip/ip.json")
self.hyperparameters_key = os.path.normpath(s3_prefix + "/ip/hyperparameters.json")
self.done_file_key = os.path.normpath(s3_prefix + "/ip/done")
self.model_checkpoints_prefix = os.path.normpath(s3_prefix + "/model/") + "/"
LOG.info("Initializing SageS3Client...")
def get_client(self):
session = boto3.session.Session()
return session.client('s3', region_name=self.aws_region, endpoint_url=self.s3_endpoint_url, config=get_boto_config())
def _get_s3_key(self, key):
return os.path.normpath(self.model_checkpoints_prefix + "/" + key)
def write_ip_config(self, ip_address):
try:
s3_client = self.get_client()
data = {"IP": ip_address}
json_blob = json.dumps(data)
file_handle = io.BytesIO(json_blob.encode())
file_handle_done = io.BytesIO(b'done')
s3_client.upload_fileobj(file_handle, self.bucket, self.config_key)
s3_client.upload_fileobj(file_handle_done, self.bucket, self.done_file_key)
except botocore.exceptions.ClientError:
log_and_exit("Write ip config failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Write ip config failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def upload_hyperparameters(self, hyperparams_json):
try:
s3_client = self.get_client()
file_handle = io.BytesIO(hyperparams_json.encode())
s3_client.upload_fileobj(file_handle, self.bucket, self.hyperparameters_key)
except botocore.exceptions.ClientError:
log_and_exit("Hyperparameters failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Hyperparameters failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def get_ip(self):
s3_client = self.get_client()
time_elapsed = 0
try:
# Wait for sagemaker to produce the redis ip
while time_elapsed < SAGEMAKER_WAIT_TIME:
response = s3_client.list_objects(Bucket=self.bucket, Prefix=self.done_file_key)
if "Contents" in response:
break
time.sleep(1)
time_elapsed += 1
if time_elapsed % 5 == 0:
LOG.info("Waiting for SageMaker Redis server IP: Time elapsed: %s seconds",
time_elapsed)
if time_elapsed >= SAGEMAKER_WAIT_TIME:
log_and_exit("Timed out while attempting to retrieve the Redis IP",
SIMAPP_S3_DATA_STORE_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)
# Download the ip file
s3_client.download_file(self.bucket, self.config_key, 'ip.json')
with open("ip.json") as file:
ip_file = json.load(file)["IP"]
return ip_file
except botocore.exceptions.ClientError:
log_and_exit("Unable to retrieve redis ip",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to retrieve redis ip",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def download_file(self, s3_key, local_path):
s3_client = self.get_client()
try:
s3_client.download_file(self.bucket, s3_key, local_path)
return True
except botocore.exceptions.ClientError as err:
            # It is possible that the file isn't there, in which case we should
            # return False and let the client decide the next action
if err.response['Error']['Code'] == "404":
return False
else:
log_and_exit("Unable to download file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to download file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def upload_file(self, s3_key, local_path):
s3_client = self.get_client()
try:
s3_client.upload_file(Filename=local_path,
Bucket=self.bucket,
Key=s3_key)
return True
except botocore.exceptions.ClientError:
log_and_exit("Unable to upload file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to upload file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
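# Usage sketch (assumed bucket/prefix values, not from the original module):
#   s3 = SageS3Client(bucket='my-bucket', s3_prefix='runs/run-1',
#                     aws_region='us-east-1')
#   s3.write_ip_config('10.0.0.5')   # training side publishes the Redis IP
#   redis_ip = s3.get_ip()           # sim side blocks until the IP appears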
| 44.589552
| 125
| 0.604017
|
import io
import logging
import os
import json
import time
import boto3
import botocore
from markov.utils import log_and_exit, Logger, get_boto_config, \
SIMAPP_EVENT_ERROR_CODE_500, SIMAPP_EVENT_ERROR_CODE_400, \
SIMAPP_S3_DATA_STORE_EXCEPTION
LOG = Logger(__name__, logging.INFO).get_logger()
SAGEMAKER_WAIT_TIME = 1200
class SageS3Client():
def __init__(self, bucket=None, s3_prefix=None, aws_region=None, s3_endpoint_url=None):
self.aws_region = aws_region
self.bucket = bucket
self.s3_prefix = s3_prefix
self.s3_endpoint_url = s3_endpoint_url
self.config_key = os.path.normpath(s3_prefix + "/ip/ip.json")
self.hyperparameters_key = os.path.normpath(s3_prefix + "/ip/hyperparameters.json")
self.done_file_key = os.path.normpath(s3_prefix + "/ip/done")
self.model_checkpoints_prefix = os.path.normpath(s3_prefix + "/model/") + "/"
LOG.info("Initializing SageS3Client...")
def get_client(self):
session = boto3.session.Session()
return session.client('s3', region_name=self.aws_region, endpoint_url=self.s3_endpoint_url, config=get_boto_config())
def _get_s3_key(self, key):
return os.path.normpath(self.model_checkpoints_prefix + "/" + key)
def write_ip_config(self, ip_address):
try:
s3_client = self.get_client()
data = {"IP": ip_address}
json_blob = json.dumps(data)
file_handle = io.BytesIO(json_blob.encode())
file_handle_done = io.BytesIO(b'done')
s3_client.upload_fileobj(file_handle, self.bucket, self.config_key)
s3_client.upload_fileobj(file_handle_done, self.bucket, self.done_file_key)
except botocore.exceptions.ClientError:
log_and_exit("Write ip config failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Write ip config failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def upload_hyperparameters(self, hyperparams_json):
try:
s3_client = self.get_client()
file_handle = io.BytesIO(hyperparams_json.encode())
s3_client.upload_fileobj(file_handle, self.bucket, self.hyperparameters_key)
except botocore.exceptions.ClientError:
log_and_exit("Hyperparameters failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Hyperparameters failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def get_ip(self):
s3_client = self.get_client()
time_elapsed = 0
try:
while time_elapsed < SAGEMAKER_WAIT_TIME:
response = s3_client.list_objects(Bucket=self.bucket, Prefix=self.done_file_key)
if "Contents" in response:
break
time.sleep(1)
time_elapsed += 1
if time_elapsed % 5 == 0:
LOG.info("Waiting for SageMaker Redis server IP: Time elapsed: %s seconds",
time_elapsed)
if time_elapsed >= SAGEMAKER_WAIT_TIME:
log_and_exit("Timed out while attempting to retrieve the Redis IP",
SIMAPP_S3_DATA_STORE_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)
s3_client.download_file(self.bucket, self.config_key, 'ip.json')
with open("ip.json") as file:
ip_file = json.load(file)["IP"]
return ip_file
except botocore.exceptions.ClientError:
log_and_exit("Unable to retrieve redis ip",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to retrieve redis ip",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def download_file(self, s3_key, local_path):
s3_client = self.get_client()
try:
s3_client.download_file(self.bucket, s3_key, local_path)
return True
except botocore.exceptions.ClientError as err:
            # return False and let the client decide the next action
if err.response['Error']['Code'] == "404":
return False
else:
log_and_exit("Unable to download file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to download file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def upload_file(self, s3_key, local_path):
s3_client = self.get_client()
try:
s3_client.upload_file(Filename=local_path,
Bucket=self.bucket,
Key=s3_key)
return True
except botocore.exceptions.ClientError:
log_and_exit("Unable to upload file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to upload file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
| true
| true
|
7905f564673f69a1aaca662f3425b71609c826b5
| 775
|
py
|
Python
|
setup.py
|
Kuzyashin/faust-pydantic-validate
|
727c01f976febcd24df8cb6a60110fbf22c23be2
|
[
"MIT"
] | null | null | null |
setup.py
|
Kuzyashin/faust-pydantic-validate
|
727c01f976febcd24df8cb6a60110fbf22c23be2
|
[
"MIT"
] | null | null | null |
setup.py
|
Kuzyashin/faust-pydantic-validate
|
727c01f976febcd24df8cb6a60110fbf22c23be2
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="faust_pydantic_validate",
version="0.0.1",
author="Alexey Kuzyashin",
author_email="alex@rocketcompute.com",
description="A small decorator for post data view validation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Kuzyashin/faust-pydantic-validate",
packages=['faust_pydantic_validate'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
"pydantic",
"faust",
],
)
| 28.703704
| 66
| 0.651613
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="faust_pydantic_validate",
version="0.0.1",
author="Alexey Kuzyashin",
author_email="alex@rocketcompute.com",
description="A small decorator for post data view validation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Kuzyashin/faust-pydantic-validate",
packages=['faust_pydantic_validate'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
"pydantic",
"faust",
],
)
| true
| true
|
7905f588d6333aa9b18a921376827b4f4116f3cb
| 1,695
|
py
|
Python
|
database/userDAO.py
|
saraivaufc/PySpy
|
711a14da559c0315335eaf843f6d2a68daf9acbf
|
[
"MIT"
] | null | null | null |
database/userDAO.py
|
saraivaufc/PySpy
|
711a14da559c0315335eaf843f6d2a68daf9acbf
|
[
"MIT"
] | null | null | null |
database/userDAO.py
|
saraivaufc/PySpy
|
711a14da559c0315335eaf843f6d2a68daf9acbf
|
[
"MIT"
] | null | null | null |
import os
from access import Access
from user import User
log = os.path.dirname(os.path.abspath(__file__)) + "/temp/access.log"
class UserDAO(object):
__database = None
__cursor = None
def __init__(self):
self.__database = Access()
self.__cursor = self.__database.getCursor()
self.initDatabase()
def initDatabase(self):
try:
self.__cursor.execute(""" create table user (name text, username text, password text) """)
self.__database.commit()
        except Exception:
            pass  # table already exists
def insert(self, user):
if len(self.getUser(user.getUsername())) == 0:
users = [(user.getName(), user.getUsername() , user.getPassword()), ]
self.__cursor.executemany("INSERT INTO user VALUES (?,?,?)", users)
self.__database.commit()
def update(self, user):
users = [(user.getName(),user.getPassword(), user.getUsername())]
self.__cursor.executemany("UPDATE user SET name = ?, password = ? where username = ? ", users)
self.__database.commit()
def delete(self, username):
self.__cursor.execute("DELETE FROM user WHERE username = " + username)
self.__database.commit()
def list(self):
self.__cursor.execute("SELECT * FROM user")
        print(self.__cursor.fetchall())
def getUser(self, username):
self.__cursor.execute("SELECT * FROM user WHERE username = ?",[(username)] )
return self.__cursor.fetchall()
def log(self, user, request):
flines = user.toString() + " >>> " + request + "\n"
f = open(log, 'a')
f.writelines([flines,])
f.close()
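# Usage sketch (assumed: User takes name, username and password, in that order):
#   dao = UserDAO()
#   dao.insert(User('Alice', 'alice', 'secret'))
#   dao.update(User('Alice A.', 'alice', 'newpass'))
#   print(dao.getUser('alice'))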
| 36.06383
| 102
| 0.59174
|
import os
from access import Access
from user import User
log = os.path.dirname(os.path.abspath(__file__)) + "/temp/access.log"
class UserDAO(object):
__database = None
__cursor = None
def __init__(self):
self.__database = Access()
self.__cursor = self.__database.getCursor()
self.initDatabase()
def initDatabase(self):
try:
self.__cursor.execute(""" create table user (name text, username text, password text) """)
self.__database.commit()
        except Exception:
            pass
def insert(self, user):
if len(self.getUser(user.getUsername())) == 0:
users = [(user.getName(), user.getUsername() , user.getPassword()), ]
self.__cursor.executemany("INSERT INTO user VALUES (?,?,?)", users)
self.__database.commit()
def update(self, user):
users = [(user.getName(),user.getPassword(), user.getUsername())]
self.__cursor.executemany("UPDATE user SET name = ?, password = ? where username = ? ", users)
self.__database.commit()
def delete(self, username):
self.__cursor.execute("DELETE FROM user WHERE username = " + username)
self.__database.commit()
def list(self):
self.__cursor.execute("SELECT * FROM user")
        print(self.__cursor.fetchall())
def getUser(self, username):
self.__cursor.execute("SELECT * FROM user WHERE username = ?",[(username)] )
return self.__cursor.fetchall()
def log(self, user, request):
flines = user.toString() + " >>> " + request + "\n"
f = open(log, 'a')
f.writelines([flines,])
f.close()
| false
| true
|
7905f70e31d46b8e770613d25cfb4b2b723ad93b
| 98
|
py
|
Python
|
runtime/runtime_main/apps.py
|
Bodya00/RunTime
|
c83ef316ae4be265dec77bdaeb154bf0f4659767
|
[
"Apache-2.0"
] | null | null | null |
runtime/runtime_main/apps.py
|
Bodya00/RunTime
|
c83ef316ae4be265dec77bdaeb154bf0f4659767
|
[
"Apache-2.0"
] | null | null | null |
runtime/runtime_main/apps.py
|
Bodya00/RunTime
|
c83ef316ae4be265dec77bdaeb154bf0f4659767
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class RuntimeMainConfig(AppConfig):
name = 'runtime_main'
| 16.333333
| 35
| 0.77551
|
from django.apps import AppConfig
class RuntimeMainConfig(AppConfig):
name = 'runtime_main'
| true
| true
|
7905f922a1507236dfbb3a862f4225599cdd5192
| 1,688
|
py
|
Python
|
corona.py
|
stephengarn/coronavirus
|
a6c488461b5c2f88b373074581eda2ecfb4d23ce
|
[
"MIT"
] | null | null | null |
corona.py
|
stephengarn/coronavirus
|
a6c488461b5c2f88b373074581eda2ecfb4d23ce
|
[
"MIT"
] | null | null | null |
corona.py
|
stephengarn/coronavirus
|
a6c488461b5c2f88b373074581eda2ecfb4d23ce
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
#import itertools
from openpyxl import load_workbook
import re
import datetime
driver = webdriver.Firefox()
driver.get("https://www.worldometers.info/coronavirus/")
countries = []
cases = []
newCases = []
data = []
casesInt = []
newCasesInt = []
cells = []
cellsB = []
datez = datetime.datetime.now()
nowDate = datez.strftime("%d%b%y")
for country in range(2,22):
countries.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(country) + "]/td[1]").text)
for case in range(2,22):
cases.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(case) + "]/td[2]").text)
for newCase in range(2,22):
newCases.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(newCase) + "]/td[3]").text)
data = dict(zip(countries, zip(cases, newCases)))
#print(data)
for case in cases:
case = re.sub(r'\D', '', case)
casesInt.append(int(case))
for newCase in newCases:
if newCase:
newCase = re.sub(r'\D', '', newCase)
newCasesInt.append(int(newCase))
else:
newCasesInt.append(1)
percentages = []
for caseInt,newCase in zip(casesInt, newCasesInt):
result = caseInt - newCase
percentage = round((newCase/result)*100, 2)
percentages.append(percentage)
#for country, percentage in zip(countries, percentages):
# print(country, ":", percentage)
wb = load_workbook(filename='corona.xlsx')  # load the existing workbook; the Workbook() call was dead code
ws = wb.active
#for countries column
for i in range(2,22):
i = str(i)
appendValue = 'A' + i
appendValueB = 'B' + i
cells.append(appendValue)
cellsB.append(appendValueB)
for i in range(20):
ws['A' + str(i+2)] = countries[i]
ws['B' + str(i+2)] = percentages[i]
wb.save(filename="corona" + nowDate + ".xlsx")
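# The percentage above is the day-over-day growth rate: new / (total - new) * 100.
# Worked example (illustrative numbers): total = 120 cases, 20 of them new
# today -> 20 / (120 - 20) * 100 = 20.0% growth over yesterday's total.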
| 27.672131
| 103
| 0.695498
|
from selenium import webdriver
from openpyxl import load_workbook
import re
import datetime
driver = webdriver.Firefox()
driver.get("https://www.worldometers.info/coronavirus/")
countries = []
cases = []
newCases = []
data = []
casesInt = []
newCasesInt = []
cells = []
cellsB = []
datez = datetime.datetime.now()
nowDate = datez.strftime("%d%b%y")
for country in range(2,22):
countries.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(country) + "]/td[1]").text)
for case in range(2,22):
cases.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(case) + "]/td[2]").text)
for newCase in range(2,22):
newCases.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(newCase) + "]/td[3]").text)
data = dict(zip(countries, zip(cases, newCases)))
for case in cases:
case = re.sub(r'\D', '', case)
casesInt.append(int(case))
for newCase in newCases:
if newCase:
newCase = re.sub(r'\D', '', newCase)
newCasesInt.append(int(newCase))
else:
newCasesInt.append(1)
percentages = []
for caseInt,newCase in zip(casesInt, newCasesInt):
result = caseInt - newCase
percentage = round((newCase/result)*100, 2)
percentages.append(percentage)
wb = load_workbook(filename='corona.xlsx')
ws = wb.active
for i in range(2,22):
i = str(i)
appendValue = 'A' + i
appendValueB = 'B' + i
cells.append(appendValue)
cellsB.append(appendValueB)
for i in range(20):
ws['A' + str(i+2)] = countries[i]
ws['B' + str(i+2)] = percentages[i]
wb.save(filename="corona" + nowDate + ".xlsx")
| true
| true
|
7905f9c32401bbad95248e00377652a41b22f33c
| 4,227
|
py
|
Python
|
src/python_minifier/ministring.py
|
donno2048/python-minifier
|
9a9ff4dd5d2bb8dc666cae5939c125d420c2ffd5
|
[
"MIT"
] | 301
|
2018-06-26T04:10:43.000Z
|
2022-03-30T16:30:15.000Z
|
src/python_minifier/ministring.py
|
donno2048/python-minifier
|
9a9ff4dd5d2bb8dc666cae5939c125d420c2ffd5
|
[
"MIT"
] | 34
|
2019-04-28T13:19:13.000Z
|
2022-03-27T21:10:33.000Z
|
src/python_minifier/ministring.py
|
donno2048/python-minifier
|
9a9ff4dd5d2bb8dc666cae5939c125d420c2ffd5
|
[
"MIT"
] | 20
|
2019-11-17T00:13:27.000Z
|
2022-01-21T15:35:07.000Z
|
BACKSLASH = '\\'
class MiniString(object):
"""
Create a representation of a string object
:param str string: The string to minify
"""
def __init__(self, string, quote="'"):
self._s = string
self.safe_mode = False
self.quote = quote
def __str__(self):
"""
The smallest python literal representation of a string
:rtype: str
"""
if self._s == '':
return ''
if len(self.quote) == 1:
s = self.to_short()
else:
s = self.to_long()
try:
eval(self.quote + s + self.quote)
except UnicodeDecodeError:
            if self.safe_mode:
                raise
            self.safe_mode = True
            return str(self)  # regenerate with \u escapes for non-ASCII chars
assert eval(self.quote + s + self.quote) == self._s
return s
def to_short(self):
s = ''
escaped = {
'\n': BACKSLASH + 'n',
'\\': BACKSLASH + BACKSLASH,
'\a': BACKSLASH + 'a',
'\b': BACKSLASH + 'b',
'\f': BACKSLASH + 'f',
'\r': BACKSLASH + 'r',
'\t': BACKSLASH + 't',
'\v': BACKSLASH + 'v',
'\0': BACKSLASH + 'x00',
self.quote: BACKSLASH + self.quote,
}
for c in self._s:
if c in escaped.keys():
s += escaped[c]
else:
if self.safe_mode:
unicode_value = ord(c)
if unicode_value <= 0x7F:
s += c
elif unicode_value <= 0xFFFF:
s += BACKSLASH + 'u' + format(unicode_value, '04x')
else:
s += BACKSLASH + 'U' + format(unicode_value, '08x')
else:
s += c
return s
def to_long(self):
s = ''
escaped = {
'\\': BACKSLASH + BACKSLASH,
'\a': BACKSLASH + 'a',
'\b': BACKSLASH + 'b',
'\f': BACKSLASH + 'f',
'\r': BACKSLASH + 'r',
'\t': BACKSLASH + 't',
'\v': BACKSLASH + 'v',
'\0': BACKSLASH + 'x00',
self.quote[0]: BACKSLASH + self.quote[0],
}
for c in self._s:
if c in escaped.keys():
s += escaped[c]
else:
if self.safe_mode:
unicode_value = ord(c)
if unicode_value <= 0x7F:
s += c
elif unicode_value <= 0xFFFF:
s += BACKSLASH + 'u' + format(unicode_value, '04x')
else:
s += BACKSLASH + 'U' + format(unicode_value, '08x')
else:
s += c
return s
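# Illustrative behaviour (a sketch): str(MiniString('a\nb')) yields the four
# characters a \ n b, so that eval("'" + str(MiniString('a\nb')) + "'")
# round-trips back to the original 'a\nb'.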
class MiniBytes(object):
"""
Create a representation of a bytes object
:param bytes string: The string to minify
"""
def __init__(self, string, quote="'"):
self._b = string
self.quote = quote
def __str__(self):
"""
The smallest python literal representation of a string
:rtype: str
"""
if self._b == b'':
return ''
if len(self.quote) == 1:
s = self.to_short()
else:
s = self.to_long()
assert eval('b' + self.quote + s + self.quote) == self._b
return s
def to_short(self):
b = ''
for c in self._b:
            # iterating over bytes yields ints in Python 3, so compare ord() values
            if c == ord('\\'):
                b += BACKSLASH + BACKSLASH
            elif c == ord('\n'):
                b += BACKSLASH + 'n'
            elif c == ord(self.quote):
                b += BACKSLASH + self.quote
            else:
                if c >= 128:
                    b += BACKSLASH + 'x' + format(c, '02x')
else:
b += chr(c)
return b
def to_long(self):
b = ''
for c in self._b:
            # iterating over bytes yields ints in Python 3, so compare ord() values
            if c == ord('\\'):
                b += BACKSLASH + BACKSLASH
            elif c == ord(self.quote[0]):
                b += BACKSLASH + self.quote[0]
            else:
                if c >= 128:
                    b += BACKSLASH + 'x' + format(c, '02x')
else:
b += chr(c)
return b
| 24.017045
| 75
| 0.399574
|
BACKSLASH = '\\'
class MiniString(object):
def __init__(self, string, quote="'"):
self._s = string
self.safe_mode = False
self.quote = quote
def __str__(self):
if self._s == '':
return ''
if len(self.quote) == 1:
s = self.to_short()
else:
s = self.to_long()
try:
eval(self.quote + s + self.quote)
except UnicodeDecodeError:
            if self.safe_mode:
                raise
            self.safe_mode = True
            return str(self)
assert eval(self.quote + s + self.quote) == self._s
return s
def to_short(self):
s = ''
escaped = {
'\n': BACKSLASH + 'n',
'\\': BACKSLASH + BACKSLASH,
'\a': BACKSLASH + 'a',
'\b': BACKSLASH + 'b',
'\f': BACKSLASH + 'f',
'\r': BACKSLASH + 'r',
'\t': BACKSLASH + 't',
'\v': BACKSLASH + 'v',
'\0': BACKSLASH + 'x00',
self.quote: BACKSLASH + self.quote,
}
for c in self._s:
if c in escaped.keys():
s += escaped[c]
else:
if self.safe_mode:
unicode_value = ord(c)
if unicode_value <= 0x7F:
s += c
elif unicode_value <= 0xFFFF:
s += BACKSLASH + 'u' + format(unicode_value, '04x')
else:
s += BACKSLASH + 'U' + format(unicode_value, '08x')
else:
s += c
return s
def to_long(self):
s = ''
escaped = {
'\\': BACKSLASH + BACKSLASH,
'\a': BACKSLASH + 'a',
'\b': BACKSLASH + 'b',
'\f': BACKSLASH + 'f',
'\r': BACKSLASH + 'r',
'\t': BACKSLASH + 't',
'\v': BACKSLASH + 'v',
'\0': BACKSLASH + 'x00',
self.quote[0]: BACKSLASH + self.quote[0],
}
for c in self._s:
if c in escaped.keys():
s += escaped[c]
else:
if self.safe_mode:
unicode_value = ord(c)
if unicode_value <= 0x7F:
s += c
elif unicode_value <= 0xFFFF:
s += BACKSLASH + 'u' + format(unicode_value, '04x')
else:
s += BACKSLASH + 'U' + format(unicode_value, '08x')
else:
s += c
return s
class MiniBytes(object):
def __init__(self, string, quote="'"):
self._b = string
self.quote = quote
def __str__(self):
if self._b == b'':
return ''
if len(self.quote) == 1:
s = self.to_short()
else:
s = self.to_long()
assert eval('b' + self.quote + s + self.quote) == self._b
return s
def to_short(self):
b = ''
for c in self._b:
            if c == ord('\\'):
                b += BACKSLASH + BACKSLASH
            elif c == ord('\n'):
                b += BACKSLASH + 'n'
            elif c == ord(self.quote):
                b += BACKSLASH + self.quote
            else:
                if c >= 128:
                    b += BACKSLASH + 'x' + format(c, '02x')
else:
b += chr(c)
return b
def to_long(self):
b = ''
for c in self._b:
            if c == ord('\\'):
                b += BACKSLASH + BACKSLASH
            elif c == ord(self.quote[0]):
                b += BACKSLASH + self.quote[0]
            else:
                if c >= 128:
                    b += BACKSLASH + 'x' + format(c, '02x')
else:
b += chr(c)
return b
| true
| true
|
7905fae9b47a8302f5e5a44520e1ed089432d08c
| 4,367
|
py
|
Python
|
pyFAI/test/test_pickle.py
|
yugangzhang/pyFAI
|
e0453b279dac1f165f637e2a2ed1d4ddf57d31ba
|
[
"MIT"
] | 1
|
2021-04-28T20:09:13.000Z
|
2021-04-28T20:09:13.000Z
|
pyFAI/test/test_pickle.py
|
yugangzhang/pyFAI
|
e0453b279dac1f165f637e2a2ed1d4ddf57d31ba
|
[
"MIT"
] | null | null | null |
pyFAI/test/test_pickle.py
|
yugangzhang/pyFAI
|
e0453b279dac1f165f637e2a2ed1d4ddf57d31ba
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2015-2018 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function
"""Test suite for pickled objects"""
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "18/10/2018"
import numpy
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
from pyFAI.detectors import detector_factory
from pickle import dumps, loads
import unittest
import logging
logger = logging.getLogger(__name__)
class TestPickle(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestPickle, cls).setUpClass()
cls.ai = AzimuthalIntegrator(1.0, detector="Pilatus100k")
cls.ai.wavelength = 1e-10
cls.npt = 100
cls.data = numpy.random.random(cls.ai.detector.shape)
@classmethod
def tearDownClass(cls):
super(TestPickle, cls).tearDownClass()
cls.data = cls.ai = cls.npt = None
def test_Detector_pickle(self):
det = self.ai.detector # type: Detector
dets = dumps(det)
        self.assertTrue(dets, "pickle works")
        rest = loads(dets)
        self.assertTrue(rest, "unpickle works")
self.assertEqual(rest.shape, self.ai.detector.MAX_SHAPE)
# test the binning
mar = detector_factory("RayonixMx225")
mar.guess_binning((2048, 2048))
self.assertEqual(mar.binning, (3, 3), "binning OK")
mars = dumps(mar)
marr = loads(mars)
self.assertEqual(mar.binning, marr.binning, "restored binning OK")
def test_AzimuthalIntegrator_pickle(self):
spectra = self.ai.integrate1d(self.data, self.npt) # force lut generation
ais = dumps(self.ai)
newai = loads(ais) # type: AzimuthalIntegrator
self.assertEqual(newai._cached_array.keys(), self.ai._cached_array.keys())
for key in self.ai._cached_array.keys():
if isinstance(self.ai._cached_array[key], numpy.ndarray):
self.assertEqual(abs(newai._cached_array[key] - self.ai._cached_array[key]).max(), 0,
"key %s is the same" % key)
else:
self.assertEqual(newai._cached_array[key], self.ai._cached_array[key],
"key %s is the same: %s %s" %
(key, newai._cached_array[key], self.ai._cached_array[key]))
for first, second in zip(newai.integrate1d(self.data, self.npt), spectra):
self.assertEqual(abs(first - second).max(), 0, "Spectra are the same")
def test_Calibrant(self):
from pyFAI import calibrant
calibrant = calibrant.CalibrantFactory()('AgBh')
assert dumps(calibrant)
assert loads(dumps(calibrant))
def suite():
loader = unittest.defaultTestLoader.loadTestsFromTestCase
testsuite = unittest.TestSuite()
testsuite.addTest(loader(TestPickle))
return testsuite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| 38.646018
| 101
| 0.686512
|
from __future__ import absolute_import, division, print_function
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "18/10/2018"
import numpy
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
from pyFAI.detectors import detector_factory
from pickle import dumps, loads
import unittest
import logging
logger = logging.getLogger(__name__)
class TestPickle(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestPickle, cls).setUpClass()
cls.ai = AzimuthalIntegrator(1.0, detector="Pilatus100k")
cls.ai.wavelength = 1e-10
cls.npt = 100
cls.data = numpy.random.random(cls.ai.detector.shape)
@classmethod
def tearDownClass(cls):
super(TestPickle, cls).tearDownClass()
cls.data = cls.ai = cls.npt = None
def test_Detector_pickle(self):
det = self.ai.detector
dets = dumps(det)
        self.assertTrue(dets, "pickle works")
        rest = loads(dets)
        self.assertTrue(rest, "unpickle works")
self.assertEqual(rest.shape, self.ai.detector.MAX_SHAPE)
mar = detector_factory("RayonixMx225")
mar.guess_binning((2048, 2048))
self.assertEqual(mar.binning, (3, 3), "binning OK")
mars = dumps(mar)
marr = loads(mars)
self.assertEqual(mar.binning, marr.binning, "restored binning OK")
def test_AzimuthalIntegrator_pickle(self):
spectra = self.ai.integrate1d(self.data, self.npt)
ais = dumps(self.ai)
newai = loads(ais)
self.assertEqual(newai._cached_array.keys(), self.ai._cached_array.keys())
for key in self.ai._cached_array.keys():
if isinstance(self.ai._cached_array[key], numpy.ndarray):
self.assertEqual(abs(newai._cached_array[key] - self.ai._cached_array[key]).max(), 0,
"key %s is the same" % key)
else:
self.assertEqual(newai._cached_array[key], self.ai._cached_array[key],
"key %s is the same: %s %s" %
(key, newai._cached_array[key], self.ai._cached_array[key]))
for first, second in zip(newai.integrate1d(self.data, self.npt), spectra):
self.assertEqual(abs(first - second).max(), 0, "Spectra are the same")
def test_Calibrant(self):
from pyFAI import calibrant
calibrant = calibrant.CalibrantFactory()('AgBh')
assert dumps(calibrant)
assert loads(dumps(calibrant))
def suite():
loader = unittest.defaultTestLoader.loadTestsFromTestCase
testsuite = unittest.TestSuite()
testsuite.addTest(loader(TestPickle))
return testsuite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| true
| true
|
7905fbc369678a0dbebbe2bc91795588c6386fa9
| 3,590
|
py
|
Python
|
sdk/python/pulumi_aws/rds/get_event_categories.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/rds/get_event_categories.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/rds/get_event_categories.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'GetEventCategoriesResult',
'AwaitableGetEventCategoriesResult',
'get_event_categories',
]
@pulumi.output_type
class GetEventCategoriesResult:
"""
A collection of values returned by getEventCategories.
"""
def __init__(__self__, event_categories=None, id=None, source_type=None):
if event_categories and not isinstance(event_categories, list):
raise TypeError("Expected argument 'event_categories' to be a list")
pulumi.set(__self__, "event_categories", event_categories)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if source_type and not isinstance(source_type, str):
raise TypeError("Expected argument 'source_type' to be a str")
pulumi.set(__self__, "source_type", source_type)
@property
@pulumi.getter(name="eventCategories")
def event_categories(self) -> List[str]:
"""
A list of the event categories.
"""
return pulumi.get(self, "event_categories")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="sourceType")
def source_type(self) -> Optional[str]:
return pulumi.get(self, "source_type")
class AwaitableGetEventCategoriesResult(GetEventCategoriesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEventCategoriesResult(
event_categories=self.event_categories,
id=self.id,
source_type=self.source_type)
def get_event_categories(source_type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventCategoriesResult:
"""
## Example Usage
List the event categories of all the RDS resources.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories()
pulumi.export("example", example_event_categories.event_categories)
```
List the event categories specific to the RDS resource `db-snapshot`.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories(source_type="db-snapshot")
pulumi.export("example", example_event_categories.event_categories)
```
:param str source_type: The type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot.
"""
__args__ = dict()
__args__['sourceType'] = source_type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:rds/getEventCategories:getEventCategories', __args__, opts=opts, typ=GetEventCategoriesResult).value
return AwaitableGetEventCategoriesResult(
event_categories=__ret__.event_categories,
id=__ret__.id,
source_type=__ret__.source_type)
| 33.867925
| 200
| 0.688858
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'GetEventCategoriesResult',
'AwaitableGetEventCategoriesResult',
'get_event_categories',
]
@pulumi.output_type
class GetEventCategoriesResult:
def __init__(__self__, event_categories=None, id=None, source_type=None):
if event_categories and not isinstance(event_categories, list):
raise TypeError("Expected argument 'event_categories' to be a list")
pulumi.set(__self__, "event_categories", event_categories)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if source_type and not isinstance(source_type, str):
raise TypeError("Expected argument 'source_type' to be a str")
pulumi.set(__self__, "source_type", source_type)
@property
@pulumi.getter(name="eventCategories")
def event_categories(self) -> List[str]:
return pulumi.get(self, "event_categories")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="sourceType")
def source_type(self) -> Optional[str]:
return pulumi.get(self, "source_type")
class AwaitableGetEventCategoriesResult(GetEventCategoriesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEventCategoriesResult(
event_categories=self.event_categories,
id=self.id,
source_type=self.source_type)
def get_event_categories(source_type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventCategoriesResult:
__args__ = dict()
__args__['sourceType'] = source_type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:rds/getEventCategories:getEventCategories', __args__, opts=opts, typ=GetEventCategoriesResult).value
return AwaitableGetEventCategoriesResult(
event_categories=__ret__.event_categories,
id=__ret__.id,
source_type=__ret__.source_type)
| true
| true
|
7905fbcba481ebd176c7c4f3c323fd982706e7c1
| 10,813
|
py
|
Python
|
detectron2/modeling/mmdet_wrapper.py
|
KnightOfTheMoonlight/visdom4detectron2
|
df2ce412d9eb9ff1bb67034261248199f6d6b696
|
[
"Apache-2.0"
] | 171
|
2021-05-04T02:44:01.000Z
|
2022-03-28T09:58:29.000Z
|
detectron2/modeling/mmdet_wrapper.py
|
ylf2002/detectron2
|
2455e4790f470bba54299c049410fc0713ae7529
|
[
"Apache-2.0"
] | 10
|
2021-05-09T16:04:43.000Z
|
2021-12-03T01:21:44.000Z
|
detectron2/modeling/mmdet_wrapper.py
|
ylf2002/detectron2
|
2455e4790f470bba54299c049410fc0713ae7529
|
[
"Apache-2.0"
] | 21
|
2021-05-04T02:47:57.000Z
|
2022-01-06T07:34:24.000Z
|
# -*- coding: utf-8 -*-
import itertools
import logging
import numpy as np
from collections import OrderedDict
from collections.abc import Mapping
from typing import Dict, List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor, nn
from detectron2.layers import ShapeSpec
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from detectron2.utils.events import get_event_storage
from .backbone import Backbone
logger = logging.getLogger(__name__)
def _to_container(cfg):
"""
mmdet will assert the type of dict/list.
So convert omegaconf objects to dict/list.
"""
if isinstance(cfg, DictConfig):
cfg = OmegaConf.to_container(cfg, resolve=True)
from mmcv.utils import ConfigDict
return ConfigDict(cfg)
class MMDetBackbone(Backbone):
"""
Wrapper of mmdetection backbones to use in detectron2.
mmdet backbones produce list/tuple of tensors, while detectron2 backbones
produce a dict of tensors. This class wraps the given backbone to produce
output in detectron2's convention, so it can be used in place of detectron2
backbones.
"""
def __init__(
self,
backbone: Union[nn.Module, Mapping],
neck: Union[nn.Module, Mapping, None] = None,
*,
pretrained_backbone: Optional[str] = None,
output_shapes: List[ShapeSpec],
output_names: Optional[List[str]] = None,
):
"""
Args:
backbone: either a backbone module or a mmdet config dict that defines a
backbone. The backbone takes a 4D image tensor and returns a
sequence of tensors.
neck: either a backbone module or a mmdet config dict that defines a
neck. The neck takes outputs of backbone and returns a
sequence of tensors. If None, no neck is used.
pretrained_backbone: defines the backbone weights that can be loaded by
mmdet, such as "torchvision://resnet50".
output_shapes: shape for every output of the backbone (or neck, if given).
stride and channels are often needed.
output_names: names for every output of the backbone (or neck, if given).
By default, will use "out0", "out1", ...
"""
super().__init__()
if isinstance(backbone, Mapping):
from mmdet.models import build_backbone
backbone = build_backbone(_to_container(backbone))
self.backbone = backbone
if isinstance(neck, Mapping):
from mmdet.models import build_neck
neck = build_neck(_to_container(neck))
self.neck = neck
# It's confusing that backbone weights are given as a separate argument,
# but "neck" weights, if any, are part of neck itself. This is the interface
# of mmdet so we follow it. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py
logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...")
self.backbone.init_weights(pretrained_backbone)
# train() in mmdet modules is non-trivial, and has to be explicitly
# called. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py
self.backbone.train()
if self.neck is not None:
logger.info("Initializing mmdet neck weights ...")
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.neck.train()
self._output_shapes = output_shapes
if not output_names:
output_names = [f"out{i}" for i in range(len(output_shapes))]
self._output_names = output_names
def forward(self, x) -> Dict[str, Tensor]:
outs = self.backbone(x)
if self.neck is not None:
outs = self.neck(outs)
assert isinstance(
outs, (list, tuple)
), "mmdet backbone should return a list/tuple of tensors!"
if len(outs) != len(self._output_shapes):
raise ValueError(
"Length of output_shapes does not match outputs from the mmdet backbone: "
f"{len(outs)} != {len(self._output_shapes)}"
)
return {k: v for k, v in zip(self._output_names, outs)}
def output_shape(self) -> Dict[str, ShapeSpec]:
return {k: v for k, v in zip(self._output_names, self._output_shapes)}
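# Hedged construction sketch: wiring MMDetBackbone from an mmdet-style config
# dict. The ResNet-50 settings, channel counts, and strides below are
# illustrative assumptions, not values prescribed by this file; building the
# backbone requires mmdet, since build_backbone is imported lazily above.
def _example_backbone() -> "MMDetBackbone":
    channels = (256, 512, 1024, 2048)
    strides = (4, 8, 16, 32)
    return MMDetBackbone(
        backbone=dict(type="ResNet", depth=50, num_stages=4, out_indices=(0, 1, 2, 3)),
        pretrained_backbone="torchvision://resnet50",
        output_shapes=[ShapeSpec(channels=c, stride=s) for c, s in zip(channels, strides)],
        output_names=["res2", "res3", "res4", "res5"],
    )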
class MMDetDetector(nn.Module):
"""
Wrapper of a mmdetection detector model, for detection and instance segmentation.
Input/output formats of this class follow detectron2's convention, so a
mmdetection model can be trained and evaluated in detectron2.
"""
def __init__(
self,
detector: Union[nn.Module, Mapping],
*,
# Default is 32 regardless of model:
# https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
size_divisibility=32,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
detector: a mmdet detector, or a mmdet config dict that defines a detector.
size_divisibility: pad input images to multiple of this number
pixel_mean: per-channel mean to normalize input image
pixel_std: per-channel stddev to normalize input image
"""
super().__init__()
if isinstance(detector, Mapping):
from mmdet.models import build_detector
detector = build_detector(_to_container(detector))
self.detector = detector
self.size_divisibility = size_divisibility
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
metas = []
rescale = {"height" in x for x in batched_inputs}
if len(rescale) != 1:
raise ValueError("Some inputs have original height/width, but some don't!")
rescale = list(rescale)[0]
output_shapes = []
for input in batched_inputs:
meta = {}
c, h, w = input["image"].shape
meta["img_shape"] = meta["ori_shape"] = (h, w, c)
if rescale:
scale_factor = np.sqrt(h * w / (input["height"] * input["width"]))
ori_shape = (input["height"], input["width"])
output_shapes.append(ori_shape)
meta["ori_shape"] = ori_shape + (c,)
else:
scale_factor = 1.0
output_shapes.append((h, w))
meta["scale_factor"] = scale_factor
meta["flip"] = False
padh, padw = images.shape[-2:]
meta["pad_shape"] = (padh, padw, c)
metas.append(meta)
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
if gt_instances[0].has("gt_masks"):
from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks
def convert_mask(m, shape):
# mmdet mask format
if isinstance(m, BitMasks):
return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
else:
return mm_PolygonMasks(m.polygons, shape[0], shape[1])
gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
else:
gt_masks = None
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
gt_masks=gt_masks,
)
return _parse_losses(losses_and_metrics)
else:
results = self.detector.simple_test(images, metas, rescale=rescale)
results = [
{"instances": _convert_mmdet_result(r, shape)}
for r, shape in zip(results, output_shapes)
]
return results
@property
def device(self):
return self.pixel_mean.device
# Reference: show_result() in
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0]
else:
bbox_result, segm_result = result, None
bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5
bboxes, scores = bboxes[:, :4], bboxes[:, -1]
labels = [
torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result)
]
labels = torch.cat(labels)
inst = Instances(shape)
inst.pred_boxes = Boxes(bboxes)
inst.scores = scores
inst.pred_classes = labels
if segm_result is not None and len(labels) > 0:
segm_result = list(itertools.chain(*segm_result))
segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result]
segm_result = torch.stack(segm_result, dim=0)
inst.pred_masks = segm_result
return inst
# reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]:
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(f"{loss_name} is not a tensor or list of tensors")
if "loss" not in loss_name:
# put metrics to storage; don't return them
storage = get_event_storage()
value = log_vars.pop(loss_name).cpu().item()
storage.put_scalar(loss_name, value)
return log_vars
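# Hedged illustration of _parse_losses' contract (tensor values are made up):
# keys containing "loss" are mean-reduced and returned, anything else is
# logged to the event storage as a scalar metric instead of being returned.
# Note: this must run inside a detectron2 EventStorage context (e.g. a
# training loop), since get_event_storage() requires one.
def _example_parse_losses() -> Dict[str, Tensor]:
    raw = {
        "loss_cls": torch.tensor([0.4, 0.6]),                 # -> tensor(0.5)
        "loss_bbox": [torch.tensor(0.1), torch.tensor(0.3)],  # -> tensor(0.4)
        "acc": torch.tensor(91.0),                            # logged, not returned
    }
    return _parse_losses(raw)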
| 39.900369
| 100
| 0.620827
|
import itertools
import logging
import numpy as np
from collections import OrderedDict
from collections.abc import Mapping
from typing import Dict, List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor, nn
from detectron2.layers import ShapeSpec
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from detectron2.utils.events import get_event_storage
from .backbone import Backbone
logger = logging.getLogger(__name__)
def _to_container(cfg):
if isinstance(cfg, DictConfig):
cfg = OmegaConf.to_container(cfg, resolve=True)
from mmcv.utils import ConfigDict
return ConfigDict(cfg)
class MMDetBackbone(Backbone):
def __init__(
self,
backbone: Union[nn.Module, Mapping],
neck: Union[nn.Module, Mapping, None] = None,
*,
pretrained_backbone: Optional[str] = None,
output_shapes: List[ShapeSpec],
output_names: Optional[List[str]] = None,
):
super().__init__()
if isinstance(backbone, Mapping):
from mmdet.models import build_backbone
backbone = build_backbone(_to_container(backbone))
self.backbone = backbone
if isinstance(neck, Mapping):
from mmdet.models import build_neck
neck = build_neck(_to_container(neck))
self.neck = neck
# but "neck" weights, if any, are part of neck itself. This is the interface
# of mmdet so we follow it. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py
logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...")
self.backbone.init_weights(pretrained_backbone)
# train() in mmdet modules is non-trivial, and has to be explicitly
# called. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py
self.backbone.train()
if self.neck is not None:
logger.info("Initializing mmdet neck weights ...")
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.neck.train()
self._output_shapes = output_shapes
if not output_names:
output_names = [f"out{i}" for i in range(len(output_shapes))]
self._output_names = output_names
def forward(self, x) -> Dict[str, Tensor]:
outs = self.backbone(x)
if self.neck is not None:
outs = self.neck(outs)
assert isinstance(
outs, (list, tuple)
), "mmdet backbone should return a list/tuple of tensors!"
if len(outs) != len(self._output_shapes):
raise ValueError(
"Length of output_shapes does not match outputs from the mmdet backbone: "
f"{len(outs)} != {len(self._output_shapes)}"
)
return {k: v for k, v in zip(self._output_names, outs)}
def output_shape(self) -> Dict[str, ShapeSpec]:
return {k: v for k, v in zip(self._output_names, self._output_shapes)}
class MMDetDetector(nn.Module):
def __init__(
self,
detector: Union[nn.Module, Mapping],
*,
# Default is 32 regardless of model:
# https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
size_divisibility=32,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
super().__init__()
if isinstance(detector, Mapping):
from mmdet.models import build_detector
detector = build_detector(_to_container(detector))
self.detector = detector
self.size_divisibility = size_divisibility
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
metas = []
rescale = {"height" in x for x in batched_inputs}
if len(rescale) != 1:
raise ValueError("Some inputs have original height/width, but some don't!")
rescale = list(rescale)[0]
output_shapes = []
for input in batched_inputs:
meta = {}
c, h, w = input["image"].shape
meta["img_shape"] = meta["ori_shape"] = (h, w, c)
if rescale:
scale_factor = np.sqrt(h * w / (input["height"] * input["width"]))
ori_shape = (input["height"], input["width"])
output_shapes.append(ori_shape)
meta["ori_shape"] = ori_shape + (c,)
else:
scale_factor = 1.0
output_shapes.append((h, w))
meta["scale_factor"] = scale_factor
meta["flip"] = False
padh, padw = images.shape[-2:]
meta["pad_shape"] = (padh, padw, c)
metas.append(meta)
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
if gt_instances[0].has("gt_masks"):
from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks
def convert_mask(m, shape):
if isinstance(m, BitMasks):
return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
else:
return mm_PolygonMasks(m.polygons, shape[0], shape[1])
gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
else:
gt_masks = None
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
gt_masks=gt_masks,
)
return _parse_losses(losses_and_metrics)
else:
results = self.detector.simple_test(images, metas, rescale=rescale)
results = [
{"instances": _convert_mmdet_result(r, shape)}
for r, shape in zip(results, output_shapes)
]
return results
@property
def device(self):
return self.pixel_mean.device
def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0]
else:
bbox_result, segm_result = result, None
bboxes = torch.from_numpy(np.vstack(bbox_result))
bboxes, scores = bboxes[:, :4], bboxes[:, -1]
labels = [
torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result)
]
labels = torch.cat(labels)
inst = Instances(shape)
inst.pred_boxes = Boxes(bboxes)
inst.scores = scores
inst.pred_classes = labels
if segm_result is not None and len(labels) > 0:
segm_result = list(itertools.chain(*segm_result))
segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result]
segm_result = torch.stack(segm_result, dim=0)
inst.pred_masks = segm_result
return inst
def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]:
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(f"{loss_name} is not a tensor or list of tensors")
if "loss" not in loss_name:
storage = get_event_storage()
value = log_vars.pop(loss_name).cpu().item()
storage.put_scalar(loss_name, value)
return log_vars
| true
| true
|
7905fc5a450ba81408132f62c176cc5ff1bc697b
| 6,591
|
py
|
Python
|
allennlp/data/token_indexers/token_indexer.py
|
loopylangur/allennlp
|
0fc695b08a0376317e45ae0a45584aa9eb14beb6
|
[
"Apache-2.0"
] | null | null | null |
allennlp/data/token_indexers/token_indexer.py
|
loopylangur/allennlp
|
0fc695b08a0376317e45ae0a45584aa9eb14beb6
|
[
"Apache-2.0"
] | null | null | null |
allennlp/data/token_indexers/token_indexer.py
|
loopylangur/allennlp
|
0fc695b08a0376317e45ae0a45584aa9eb14beb6
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, List, TypeVar, Generic
import warnings
import torch
import numpy
from allennlp.common import Registrable
from allennlp.data.tokenizers.token import Token
from allennlp.data.vocabulary import Vocabulary
TokenType = TypeVar("TokenType", int, List[int], numpy.ndarray)
class TokenIndexer(Generic[TokenType], Registrable):
"""
A ``TokenIndexer`` determines how string tokens get represented as arrays of indices in a model.
This class both converts strings into numerical values, with the help of a
:class:`~allennlp.data.vocabulary.Vocabulary`, and it produces actual arrays.
Tokens can be represented as single IDs (e.g., the word "cat" gets represented by the number
34), or as lists of character IDs (e.g., "cat" gets represented by the numbers [23, 10, 18]),
or in some other way that you can come up with (e.g., if you have some structured input you
want to represent in a special way in your data arrays, you can do that here).
# Parameters
token_min_padding_length : ``int``, optional (default=``0``)
The minimum padding length required for the :class:`TokenIndexer`. For example,
the minimum padding length of :class:`SingleIdTokenIndexer` is the largest size of
filter when using :class:`CnnEncoder`.
Note that if you set this for one TokenIndexer, you likely have to set it for all
:class:`TokenIndexer` for the same field, otherwise you'll get mismatched tensor sizes.
"""
default_implementation = "single_id"
has_warned_for_as_padded_tensor = False
def __init__(self, token_min_padding_length: int = 0) -> None:
self._token_min_padding_length: int = token_min_padding_length
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
"""
The :class:`Vocabulary` needs to assign indices to whatever strings we see in the training
data (possibly doing some frequency filtering and using an OOV, or out of vocabulary,
token). This method takes a token and a dictionary of counts and increments counts for
whatever vocabulary items are present in the token. If this is a single token ID
representation, the vocabulary item is likely the token itself. If this is a token
characters representation, the vocabulary items are all of the characters in the token.
"""
raise NotImplementedError
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary, index_name: str
) -> Dict[str, List[TokenType]]:
"""
Takes a list of tokens and converts them to one or more sets of indices.
This could be just an ID for each token from the vocabulary.
Or it could split each token into characters and return one ID per character.
Or (for instance, in the case of byte-pair encoding) there might not be a clean
mapping from individual tokens to indices.
"""
raise NotImplementedError
def get_padding_token(self) -> TokenType:
"""
Deprecated. Please just implement the padding token in `as_padded_tensor` instead.
TODO(Mark): remove in 1.0 release. This is only a concrete implementation to preserve
        backward compatibility, otherwise it would be abstract.
When we need to add padding tokens, what should they look like? This method returns a
"blank" token of whatever type is returned by :func:`tokens_to_indices`.
"""
warnings.warn(
"Using a Field with get_padding_token as an inherited method,"
" which will be depreciated in 1.0.0."
"Please implement as_padded_tensor instead.",
FutureWarning,
)
return 0 # type: ignore
def get_padding_lengths(self, token: TokenType) -> Dict[str, int]:
"""
This method returns a padding dictionary for the given token that specifies lengths for
all arrays that need padding. For example, for single ID tokens the returned dictionary
will be empty, but for a token characters representation, this will return the number
of characters in the token.
"""
raise NotImplementedError
def get_token_min_padding_length(self) -> int:
"""
This method returns the minimum padding length required for this TokenIndexer.
For example, the minimum padding length of `SingleIdTokenIndexer` is the largest
size of filter when using `CnnEncoder`.
"""
return self._token_min_padding_length
def as_padded_tensor(
self,
tokens: Dict[str, List[TokenType]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int],
) -> Dict[str, torch.Tensor]:
"""
This method pads a list of tokens to ``desired_num_tokens`` and returns that padded list
of input tokens as a torch Tensor. If the input token list is longer than ``desired_num_tokens``
then it will be truncated.
``padding_lengths`` is used to provide supplemental padding parameters which are needed
in some cases. For example, it contains the widths to pad characters to when doing
character-level padding.
        Note that this method should be abstract, but it is implemented to allow backward compatibility.
"""
if not self.has_warned_for_as_padded_tensor:
warnings.warn(
"Using a Field with pad_token_sequence, which will be depreciated in 1.0.0."
"Please implement as_padded_tensor instead.",
FutureWarning,
)
self.has_warned_for_as_padded_tensor = True
padded = self.pad_token_sequence(tokens, desired_num_tokens, padding_lengths)
return {key: torch.LongTensor(array) for key, array in padded.items()}
def pad_token_sequence(
self,
tokens: Dict[str, List[TokenType]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int],
) -> Dict[str, TokenType]:
"""
Deprecated. Please use `as_padded_tensor` instead.
TODO(Mark): remove in 1.0 release.
"""
raise NotImplementedError
def get_keys(self, index_name: str) -> List[str]:
"""
        Return a list of the keys this indexer returns from ``tokens_to_indices``.
"""
return [index_name]
def __eq__(self, other) -> bool:
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
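# Hedged sketch of a minimal concrete indexer (illustrative only, not part of
# this file): one vocabulary id per token, mirroring the single-id behaviour
# the docstrings above describe. Assumes the caller passes a defaultdict-backed
# counter, as the framework does.
class _ExampleSingleIdIndexer(TokenIndexer[int]):
    def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
        counter["tokens"][token.text] += 1
    def tokens_to_indices(
        self, tokens: List[Token], vocabulary: Vocabulary, index_name: str
    ) -> Dict[str, List[int]]:
        return {index_name: [vocabulary.get_token_index(t.text, "tokens") for t in tokens]}
    def get_padding_lengths(self, token: int) -> Dict[str, int]:
        return {}
    def pad_token_sequence(
        self,
        tokens: Dict[str, List[int]],
        desired_num_tokens: Dict[str, int],
        padding_lengths: Dict[str, int],
    ) -> Dict[str, List[int]]:
        # Pad with zeros, then truncate to the desired length
        return {
            key: (val + [0] * desired_num_tokens[key])[: desired_num_tokens[key]]
            for key, val in tokens.items()
        }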
| 44.234899
| 104
| 0.678046
|
from typing import Dict, List, TypeVar, Generic
import warnings
import torch
import numpy
from allennlp.common import Registrable
from allennlp.data.tokenizers.token import Token
from allennlp.data.vocabulary import Vocabulary
TokenType = TypeVar("TokenType", int, List[int], numpy.ndarray)
class TokenIndexer(Generic[TokenType], Registrable):
default_implementation = "single_id"
has_warned_for_as_padded_tensor = False
def __init__(self, token_min_padding_length: int = 0) -> None:
self._token_min_padding_length: int = token_min_padding_length
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
raise NotImplementedError
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary, index_name: str
) -> Dict[str, List[TokenType]]:
raise NotImplementedError
def get_padding_token(self) -> TokenType:
warnings.warn(
"Using a Field with get_padding_token as an inherited method,"
" which will be depreciated in 1.0.0."
"Please implement as_padded_tensor instead.",
FutureWarning,
)
return 0
def get_padding_lengths(self, token: TokenType) -> Dict[str, int]:
raise NotImplementedError
def get_token_min_padding_length(self) -> int:
return self._token_min_padding_length
def as_padded_tensor(
self,
tokens: Dict[str, List[TokenType]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int],
) -> Dict[str, torch.Tensor]:
if not self.has_warned_for_as_padded_tensor:
warnings.warn(
"Using a Field with pad_token_sequence, which will be depreciated in 1.0.0."
"Please implement as_padded_tensor instead.",
FutureWarning,
)
self.has_warned_for_as_padded_tensor = True
padded = self.pad_token_sequence(tokens, desired_num_tokens, padding_lengths)
return {key: torch.LongTensor(array) for key, array in padded.items()}
def pad_token_sequence(
self,
tokens: Dict[str, List[TokenType]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int],
) -> Dict[str, TokenType]:
raise NotImplementedError
def get_keys(self, index_name: str) -> List[str]:
return [index_name]
def __eq__(self, other) -> bool:
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
| true
| true
|
7905fccc0488ee79239e5677e9ef11f7ec51b6a0
| 403
|
py
|
Python
|
main.py
|
tomjur/TF2.0DQN
|
4813d40ffaa455e4b70459a6db0a996d73b760d9
|
[
"MIT"
] | 1
|
2020-07-28T10:09:14.000Z
|
2020-07-28T10:09:14.000Z
|
main.py
|
tomjur/TF2.0DQN
|
4813d40ffaa455e4b70459a6db0a996d73b760d9
|
[
"MIT"
] | null | null | null |
main.py
|
tomjur/TF2.0DQN
|
4813d40ffaa455e4b70459a6db0a996d73b760d9
|
[
"MIT"
] | null | null | null |
from config_utils import read_main_config
from deep_q_network import DeepQNetwork
from gym_wrapper import GymWrapper
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
config = read_main_config()
gym_wrapper = GymWrapper(config['general']['scenario'])
deep_q_network = DeepQNetwork(config, gym_wrapper)
deep_q_network.train()
deep_q_network.test(episodes=3)
| 31
| 67
| 0.848635
|
from config_utils import read_main_config
from deep_q_network import DeepQNetwork
from gym_wrapper import GymWrapper
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
config = read_main_config()
gym_wrapper = GymWrapper(config['general']['scenario'])
deep_q_network = DeepQNetwork(config, gym_wrapper)
deep_q_network.train()
deep_q_network.test(episodes=3)
| true
| true
|
7905ff6f813e2aeb7d61d558ba67ce986c986ce7
| 4,396
|
py
|
Python
|
src/braket/device_schema/dwave/dwave_device_capabilities_v1.py
|
shiyunon/amazon-braket-schemas-python
|
10e864e05a2a7fef27683f48e17eefe30753e7df
|
[
"Apache-2.0"
] | 1
|
2021-07-10T15:22:12.000Z
|
2021-07-10T15:22:12.000Z
|
src/braket/device_schema/dwave/dwave_device_capabilities_v1.py
|
shiyunon/amazon-braket-schemas-python
|
10e864e05a2a7fef27683f48e17eefe30753e7df
|
[
"Apache-2.0"
] | null | null | null |
src/braket/device_schema/dwave/dwave_device_capabilities_v1.py
|
shiyunon/amazon-braket-schemas-python
|
10e864e05a2a7fef27683f48e17eefe30753e7df
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from pydantic import Field
from braket.device_schema.device_capabilities import DeviceCapabilities
from braket.device_schema.dwave.dwave_provider_properties_v1 import DwaveProviderProperties
from braket.schema_common import BraketSchemaBase, BraketSchemaHeader
class DwaveDeviceCapabilities(DeviceCapabilities, BraketSchemaBase):
"""
    These are the capabilities specific to the D-Wave device
Attributes:
provider: Properties specific to D-Wave provider
Examples:
>>> import json
        >>> input_json = {
... "braketSchemaHeader": {
... "name": "braket.device_schema.dwave.dwave_device_capabilities",
... "version": "1",
... },
... "provider": {
... "braketSchemaHeader": {
... "name": "braket.device_schema.dwave.dwave_provider_properties",
... "version": "1",
... },
... "annealingOffsetStep": 1.45,
... "annealingOffsetStepPhi0": 1.45,
... "annealingOffsetRanges": [[1.45, 1.45], [1.45, 1.45]],
... "annealingDurationRange": [1, 2, 3],
... "couplers": [[1, 2, 3], [1, 2, 3]],
... "defaultAnnealingDuration": 1,
... "defaultProgrammingThermalizationDuration": 1,
... "defaultReadoutThermalizationDuration": 1,
... "extendedJRange": [1, 2, 3],
... "hGainScheduleRange": [1, 2, 3],
... "hRange": [1, 2, 3],
... "jRange": [1, 2, 3],
... "maximumAnnealingSchedulePoints": 1,
... "maximumHGainSchedulePoints": 1,
... "perQubitCouplingRange": [1, 2, 3],
... "programmingThermalizationDurationRange": [1, 2, 3],
... "qubits": [1, 2, 3],
... "qubitCount": 1,
... "quotaConversionRate": 1,
... "readoutThermalizationDurationRange": [1, 2, 3],
... "taskRunDurationRange": [1, 2, 3],
... "topology": {},
... },
... "service": {
... "braketSchemaHeader": {
... "name": "braket.device_schema.device_service_properties",
... "version": "1",
... },
... "executionWindows": [
... {
... "executionDay": "Everyday",
... "windowStartHour": "09:00",
... "windowEndHour": "19:00",
... }
... ],
... "shotsRange": [1, 10],
... "deviceCost": {
... "price": 0.25,
... "unit": "minute"
... },
... "deviceDocumentation": {
... "imageUrl": "image_url",
... "summary": "Summary on the device",
... "externalDocumentationUrl": "exter doc link",
... },
... "deviceLocation": "us-east-1",
... "updatedAt": "2020-06-16T19:28:02.869136"
... },
... "action": {
... "braket.ir.annealing.problem": {
... "actionType": "braket.ir.annealing.problem",
... "version": ["1"],
... }
... },
... "deviceParameters": {DwaveDeviceParameters.schema_json()},
... }
>>> DwaveDeviceCapabilities.parse_raw_schema(json.dumps(input_json))
"""
_PROGRAM_HEADER = BraketSchemaHeader(
name="braket.device_schema.dwave.dwave_device_capabilities", version="1"
)
braketSchemaHeader: BraketSchemaHeader = Field(default=_PROGRAM_HEADER, const=_PROGRAM_HEADER)
provider: DwaveProviderProperties
| 41.866667
| 98
| 0.512966
|
from pydantic import Field
from braket.device_schema.device_capabilities import DeviceCapabilities
from braket.device_schema.dwave.dwave_provider_properties_v1 import DwaveProviderProperties
from braket.schema_common import BraketSchemaBase, BraketSchemaHeader
class DwaveDeviceCapabilities(DeviceCapabilities, BraketSchemaBase):
_PROGRAM_HEADER = BraketSchemaHeader(
name="braket.device_schema.dwave.dwave_device_capabilities", version="1"
)
braketSchemaHeader: BraketSchemaHeader = Field(default=_PROGRAM_HEADER, const=_PROGRAM_HEADER)
provider: DwaveProviderProperties
| true
| true
|
7906000327b6e1cea1b1de29fb8c6cdf9c25fcbb
| 3,912
|
py
|
Python
|
harness/core/module.py
|
vysec/Harness
|
ed2a6aaa2c4350853f5bda2f6d514d7eb429f27e
|
[
"MIT"
] | 81
|
2015-08-07T23:25:41.000Z
|
2022-02-21T03:45:24.000Z
|
harness/core/module.py
|
samyoyo/Harness
|
ed2a6aaa2c4350853f5bda2f6d514d7eb429f27e
|
[
"MIT"
] | 6
|
2015-11-04T08:06:14.000Z
|
2018-05-21T23:46:40.000Z
|
harness/core/module.py
|
samyoyo/Harness
|
ed2a6aaa2c4350853f5bda2f6d514d7eb429f27e
|
[
"MIT"
] | 30
|
2015-08-09T01:15:31.000Z
|
2020-05-22T21:17:41.000Z
|
'''
Harness Toolset
Copyright (c) 2015 Rich Kelley
Contact:
@RGKelley5
RK5DEVMAIL[A T]gmail[D O T]com
www.frogstarworldc.com
License: MIT
'''
import threading
import builtins
import sys
from random import randint
from harness.core import framework
from harness.core import threads
from collections import namedtuple
from queue import Queue
class ModuleFrame(framework.Framework):
def __init__(self, about):
# -----------------------------------------------------
# Thread Events must be initialized before framework
# due to print function thread controls in ModuleFrame
# -----------------------------------------------------
self.stopper = threading.Event()
self.stopper.clear()
self.allow_print = threading.Event()
self.allow_print.isSet()
self.stdin_q = Queue()
self.FORCE_THREAD = False
# -----------------------------------------------------
framework.Framework.__init__(self)
self.prompt = "H_MOD(" + about["name"] + ") "
self.thread_to_return = None
self.module_id = randint(1, 100000)
# TODO: add exception handling for undeclared keys
self.name = about['name']
self.author = about['author']
self.info = about['info']
self.contact = about['contact']
self.version = about['version']
def isrunning(self):
if self.stopper.isSet():
return False
return True
def print(self, *objects, sep=' ', end='\n', file=sys.stdout, flush=False):
if self.allow_print.isSet():
return builtins.print(*objects, sep=sep, end=end, file=file, flush=flush)
def print_error(self, outstr):
if self.allow_print.isSet():
framework.Framework.print_error(self, outstr)
def print_output(self, outstr):
if self.allow_print.isSet():
framework.Framework.print_output(self, outstr)
def print_debug(self, outstr):
if self.allow_print.isSet():
framework.Framework.print_debug(self, outstr)
def add_session(self, remote_conn_info=None, local_conn_info=None, stype=None):
return framework.Framework.add_session(self, remote_conn_info=remote_conn_info, local_conn_info=local_conn_info, id=self.module_id, stype=stype)
def go(self, _globals):
self.framework_globals = _globals
self.cmdloop()
return self.thread_to_return, self.framework_globals # Return thread back to base for management
def do_back(self, args=None):
return True
def do_run(self, args=None):
if args:
_args = framework.parse_args(args)
else:
_args = (" ")
if not self.options.required_set():
self.allow_print.set()
self.print_error("Required options not set")
self.print_error("Check 'Required' column\n")
self.show_options()
self.allow_print.clear()
return
self.stopper.clear()
self.allow_print.set()
# Wrap the module in a Thread object and return to base
if self.FORCE_THREAD or _args[0].lower() in ('job', 'thread', 'j', 't'):
if self.FORCE_THREAD:
self.print_output("Module must be run in background!")
self.allow_print.clear()
t = threads.ModuleThread(target=self, args=[self.stopper, self.allow_print, self.module_id, self.stdin_q])
t.daemon = True
self.thread_to_return = t
return True
else:
# Normal run in foreground
try:
self.run_module()
# Exit the module cleanly without exiting framework
except KeyboardInterrupt:
pass
finally:
self.cleanup_exit()
def show_info(self, args=None):
print("\n\tModule Name: ", self.name)
print("\tAuthors: ", self.author)
print("\tContact: ", self.contact)
print("\tInfo: ", self.info)
print("\tVersion: ", self.version)
print()
def pre_run(self, args=None):
pass
def run_module(self, args=None):
pass
def post_run(self, args=None):
pass
def cleanup_exit(self):
self.print_debug("Cleaning up...")
self.stopper.clear()
self.post_run()
self.allow_print.clear()
self.print_output("Exiting module...")
return True
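# Hedged sketch of a concrete module built on ModuleFrame (illustrative, not
# part of Harness itself). The "about" keys mirror the ones consumed in
# __init__ above; run_module is the hook do_run() invokes in the foreground.
class ExampleModule(ModuleFrame):
    def __init__(self):
        about = {
            "name": "example",
            "author": "n/a",
            "info": "prints a greeting",
            "contact": "n/a",
            "version": "0.1",
        }
        ModuleFrame.__init__(self, about)
    def run_module(self, args=None):
        # print_output honours the allow_print event set in do_run()
        self.print_output("Hello from the example module")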
| 20.919786
| 146
| 0.674335
|
import threading
import builtins
import sys
from random import randint
from harness.core import framework
from harness.core import threads
from collections import namedtuple
from queue import Queue
class ModuleFrame(framework.Framework):
def __init__(self, about):
self.stopper = threading.Event()
self.stopper.clear()
self.allow_print = threading.Event()
self.allow_print.isSet()
self.stdin_q = Queue()
self.FORCE_THREAD = False
framework.Framework.__init__(self)
self.prompt = "H_MOD(" + about["name"] + ") "
self.thread_to_return = None
self.module_id = randint(1, 100000)
self.name = about['name']
self.author = about['author']
self.info = about['info']
self.contact = about['contact']
self.version = about['version']
def isrunning(self):
if self.stopper.isSet():
return False
return True
def print(self, *objects, sep=' ', end='\n', file=sys.stdout, flush=False):
if self.allow_print.isSet():
return builtins.print(*objects, sep=sep, end=end, file=file, flush=flush)
def print_error(self, outstr):
if self.allow_print.isSet():
framework.Framework.print_error(self, outstr)
def print_output(self, outstr):
if self.allow_print.isSet():
framework.Framework.print_output(self, outstr)
def print_debug(self, outstr):
if self.allow_print.isSet():
framework.Framework.print_debug(self, outstr)
def add_session(self, remote_conn_info=None, local_conn_info=None, stype=None):
return framework.Framework.add_session(self, remote_conn_info=remote_conn_info, local_conn_info=local_conn_info, id=self.module_id, stype=stype)
def go(self, _globals):
self.framework_globals = _globals
self.cmdloop()
return self.thread_to_return, self.framework_globals
def do_back(self, args=None):
return True
def do_run(self, args=None):
if args:
_args = framework.parse_args(args)
else:
_args = (" ")
if not self.options.required_set():
self.allow_print.set()
self.print_error("Required options not set")
self.print_error("Check 'Required' column\n")
self.show_options()
self.allow_print.clear()
return
self.stopper.clear()
self.allow_print.set()
if self.FORCE_THREAD or _args[0].lower() in ('job', 'thread', 'j', 't'):
if self.FORCE_THREAD:
self.print_output("Module must be run in background!")
self.allow_print.clear()
t = threads.ModuleThread(target=self, args=[self.stopper, self.allow_print, self.module_id, self.stdin_q])
t.daemon = True
self.thread_to_return = t
return True
else:
try:
self.run_module()
except KeyboardInterrupt:
pass
finally:
self.cleanup_exit()
def show_info(self, args=None):
print("\n\tModule Name: ", self.name)
print("\tAuthors: ", self.author)
print("\tContact: ", self.contact)
print("\tInfo: ", self.info)
print("\tVersion: ", self.version)
print()
def pre_run(self, args=None):
pass
def run_module(self, args=None):
pass
def post_run(self, args=None):
pass
def cleanup_exit(self):
self.print_debug("Cleaning up...")
self.stopper.clear()
self.post_run()
self.allow_print.clear()
self.print_output("Exiting module...")
return True
| true
| true
|
790600eda278d5c052a5c55dedcda50e613e5a22
| 1,137
|
py
|
Python
|
backend/src/settings/prod.py
|
JumboCode/YEF
|
433b9215e61794730362d9ad9749b88236875be5
|
[
"MIT"
] | 2
|
2018-12-10T03:14:31.000Z
|
2019-03-27T16:20:36.000Z
|
backend/src/settings/prod.py
|
JumboCode/YEF
|
433b9215e61794730362d9ad9749b88236875be5
|
[
"MIT"
] | 22
|
2018-12-06T23:54:20.000Z
|
2019-04-17T18:15:43.000Z
|
backend/src/settings/prod.py
|
JumboCode/YEF
|
433b9215e61794730362d9ad9749b88236875be5
|
[
"MIT"
] | 1
|
2020-11-03T05:27:10.000Z
|
2020-11-03T05:27:10.000Z
|
#
# These are settings for Heroku Production Environment
#
from .common import *
import dj_database_url
# We don't want any debug warnings giving
# away unnecessary information to attackers
DEBUG = False
# We grab the secret key from the environment because it is
# our production key and no one can know it
SECRET_KEY = os.environ.get('SECRET_KEY')
# We redirect any http requests to their https equivalents
SECURE_SSL_REDIRECT = True
ALLOWED_HOSTS = ["yefbackend.herokuapp.com", "localhost"]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# In a real production environment, we would likely want to
# handle static files on a different machine.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# We let the dj_database_url package pull the database info from heroku
# https://github.com/kennethreitz/dj-database-url
DATABASES = {
'default': dj_database_url.config(conn_max_age=600, ssl_require=True)
}
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': DEFAULT_RENDERER_CLASSES
}
CORS_ORIGIN_WHITELIST = (
'localhost:3000',
'yefclient.herokuapp.com'
)
| 23.204082
| 73
| 0.761653
|
from .common import *
import dj_database_url
# away unnecessary information to attackers
DEBUG = False
# We grab the secret key from the environment because it is
# our production key and no one can know it
SECRET_KEY = os.environ.get('SECRET_KEY')
# We redirect any http requests to their https equivalents
SECURE_SSL_REDIRECT = True
ALLOWED_HOSTS = ["yefbackend.herokuapp.com", "localhost"]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# In a real production environment, we would likely want to
# handle static files on a different machine.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# We let the dj_database_url package pull the database info from heroku
# https://github.com/kennethreitz/dj-database-url
DATABASES = {
'default': dj_database_url.config(conn_max_age=600, ssl_require=True)
}
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': DEFAULT_RENDERER_CLASSES
}
CORS_ORIGIN_WHITELIST = (
'localhost:3000',
'yefclient.herokuapp.com'
)
| true
| true
|
79060167d83c35a47ab9b2566f2706e578470c44
| 918
|
py
|
Python
|
external/unbound/libunbound/python/doc/examples/example8-1.py
|
simplixcurrency/simplix
|
dd313f6fe5a42cf508b19aea3f49cb8ba6b5dbf1
|
[
"BSD-3-Clause"
] | 1,751
|
2016-11-03T18:25:34.000Z
|
2022-03-30T17:43:26.000Z
|
external/unbound/libunbound/python/doc/examples/example8-1.py
|
simplixcurrency/simplix
|
dd313f6fe5a42cf508b19aea3f49cb8ba6b5dbf1
|
[
"BSD-3-Clause"
] | 603
|
2017-03-03T19:51:58.000Z
|
2022-03-31T12:56:58.000Z
|
external/unbound/libunbound/python/doc/examples/example8-1.py
|
simplixcurrency/simplix
|
dd313f6fe5a42cf508b19aea3f49cb8ba6b5dbf1
|
[
"BSD-3-Clause"
] | 296
|
2016-11-14T07:00:11.000Z
|
2022-03-29T00:56:58.000Z
|
#!/usr/bin/python
# vim:fileencoding=utf-8
#
# Lookup for MX and NS records
#
import unbound
ctx = unbound.ub_ctx()
ctx.resolvconf("/etc/resolv.conf")
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_MX, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.mx_list:
print " priority:%d address:%s" % k
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_A, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.address_list:
print " address:%s" % k
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_NS, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.domain_list:
print " host: %s" % k
| 28.6875
| 79
| 0.650327
|
import unbound
ctx = unbound.ub_ctx()
ctx.resolvconf("/etc/resolv.conf")
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_MX, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.mx_list:
print " priority:%d address:%s" % k
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_A, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.address_list:
print " address:%s" % k
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_NS, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.domain_list:
print " host: %s" % k
| false
| true
|
790601e77a63a916758ca33f28d24b47806a8269
| 2,354
|
py
|
Python
|
backend/mqtt_react/python_bugg/paho.mqtt.python/test/lib/03-publish-c2b-qos2-disconnect.py
|
Jegeva/BruCON_2021
|
81e0ccdeaad3c7518e34c2ac80b0221c95e04d97
|
[
"Unlicense"
] | null | null | null |
backend/mqtt_react/python_bugg/paho.mqtt.python/test/lib/03-publish-c2b-qos2-disconnect.py
|
Jegeva/BruCON_2021
|
81e0ccdeaad3c7518e34c2ac80b0221c95e04d97
|
[
"Unlicense"
] | null | null | null |
backend/mqtt_react/python_bugg/paho.mqtt.python/test/lib/03-publish-c2b-qos2-disconnect.py
|
Jegeva/BruCON_2021
|
81e0ccdeaad3c7518e34c2ac80b0221c95e04d97
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# Test whether a client sends a correct PUBLISH to a topic with QoS 2 and responds to a disconnect.
import context
import paho_test
rc = 1
keepalive = 60
connect_packet = paho_test.gen_connect(
"publish-qos2-test", keepalive=keepalive, clean_session=False,
)
connack_packet = paho_test.gen_connack(rc=0)
disconnect_packet = paho_test.gen_disconnect()
mid = 1
publish_packet = paho_test.gen_publish(
u"pub/qos2/test", qos=2, mid=mid, payload="message".encode('utf-8'))
publish_dup_packet = paho_test.gen_publish(
u"pub/qos2/test", qos=2, mid=mid, payload="message".encode('utf-8'), dup=True)
pubrec_packet = paho_test.gen_pubrec(mid)
pubrel_packet = paho_test.gen_pubrel(mid)
pubcomp_packet = paho_test.gen_pubcomp(mid)
sock = paho_test.create_server_socket()
client = context.start_client()
try:
(conn, address) = sock.accept()
conn.settimeout(5)
if paho_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if paho_test.expect_packet(conn, "publish", publish_packet):
# Disconnect client. It should reconnect.
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
if paho_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if paho_test.expect_packet(conn, "retried publish", publish_dup_packet):
conn.send(pubrec_packet)
if paho_test.expect_packet(conn, "pubrel", pubrel_packet):
# Disconnect client. It should reconnect.
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
# Complete connection and message flow.
if paho_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if paho_test.expect_packet(conn, "retried pubrel", pubrel_packet):
conn.send(pubcomp_packet)
if paho_test.expect_packet(conn, "disconnect", disconnect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| 31.810811
| 99
| 0.614274
|
import context
import paho_test
rc = 1
keepalive = 60
connect_packet = paho_test.gen_connect(
"publish-qos2-test", keepalive=keepalive, clean_session=False,
)
connack_packet = paho_test.gen_connack(rc=0)
disconnect_packet = paho_test.gen_disconnect()
mid = 1
publish_packet = paho_test.gen_publish(
u"pub/qos2/test", qos=2, mid=mid, payload="message".encode('utf-8'))
publish_dup_packet = paho_test.gen_publish(
u"pub/qos2/test", qos=2, mid=mid, payload="message".encode('utf-8'), dup=True)
pubrec_packet = paho_test.gen_pubrec(mid)
pubrel_packet = paho_test.gen_pubrel(mid)
pubcomp_packet = paho_test.gen_pubcomp(mid)
sock = paho_test.create_server_socket()
client = context.start_client()
try:
(conn, address) = sock.accept()
conn.settimeout(5)
if paho_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if paho_test.expect_packet(conn, "publish", publish_packet):
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
if paho_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if paho_test.expect_packet(conn, "retried publish", publish_dup_packet):
conn.send(pubrec_packet)
if paho_test.expect_packet(conn, "pubrel", pubrel_packet):
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
if paho_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if paho_test.expect_packet(conn, "retried pubrel", pubrel_packet):
conn.send(pubcomp_packet)
if paho_test.expect_packet(conn, "disconnect", disconnect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| true
| true
|
790602682ee66420580b116a8a3bfa441c16b5b6
| 873
|
py
|
Python
|
test/test_gzippy.py
|
seomoz/gzippy
|
a0b7469707005d878588c5a877943fee5b8a4d5e
|
[
"MIT"
] | 4
|
2017-09-09T17:28:11.000Z
|
2019-08-07T13:42:04.000Z
|
test/test_gzippy.py
|
seomoz/gzippy
|
a0b7469707005d878588c5a877943fee5b8a4d5e
|
[
"MIT"
] | null | null | null |
test/test_gzippy.py
|
seomoz/gzippy
|
a0b7469707005d878588c5a877943fee5b8a4d5e
|
[
"MIT"
] | 2
|
2017-05-27T08:16:42.000Z
|
2018-07-28T15:53:21.000Z
|
'''Tests about the gzippy top-level functions.'''
import unittest
from test import scratch_file
import gzippy
class GzippyTest(unittest.TestCase):
'''Tests about the gzippy top-level functions.'''
def test_open_with_plus(self):
'''Opening with r+ is not allowed.'''
with scratch_file('example.gz') as path:
with open(path, 'w+') as fout:
pass
with self.assertRaises(ValueError):
with gzippy.open(path, 'r+') as fin:
pass
def test_open_with_append(self):
'''Opening in append mode is not allowed.'''
with scratch_file('example.gz') as path:
with open(path, 'w+') as fout:
pass
with self.assertRaises(ValueError):
with gzippy.open(path, 'ab') as fout:
pass
| 26.454545
| 53
| 0.561283
|
import unittest
from test import scratch_file
import gzippy
class GzippyTest(unittest.TestCase):
def test_open_with_plus(self):
with scratch_file('example.gz') as path:
with open(path, 'w+') as fout:
pass
with self.assertRaises(ValueError):
with gzippy.open(path, 'r+') as fin:
pass
def test_open_with_append(self):
with scratch_file('example.gz') as path:
with open(path, 'w+') as fout:
pass
with self.assertRaises(ValueError):
with gzippy.open(path, 'ab') as fout:
pass
| true
| true
|
79060277fc71a87642cbaf0bb850f5303a501cf4
| 372
|
py
|
Python
|
piccolo/columns/indexes.py
|
smythp/piccolo
|
26d5742c5d56ef6308598eb264d53a247082bbc7
|
[
"MIT"
] | 6
|
2021-09-27T14:33:08.000Z
|
2021-11-18T13:52:34.000Z
|
piccolo/columns/indexes.py
|
smythp/piccolo
|
26d5742c5d56ef6308598eb264d53a247082bbc7
|
[
"MIT"
] | 5
|
2021-09-27T13:58:35.000Z
|
2022-03-08T01:11:51.000Z
|
piccolo/columns/indexes.py
|
smythp/piccolo
|
26d5742c5d56ef6308598eb264d53a247082bbc7
|
[
"MIT"
] | null | null | null |
from enum import Enum
class IndexMethod(str, Enum):
"""
Used to specify the index method for a
:class:`Column <piccolo.columns.base.Column>`.
"""
btree = "btree"
hash = "hash"
gist = "gist"
gin = "gin"
def __str__(self):
return f"{self.__class__.__name__}.{self.name}"
def __repr__(self):
return self.__str__()
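# Hedged usage sketch: the enum doubles as a human-readable label thanks to
# the __str__ override above.
if __name__ == "__main__":
    print(IndexMethod.gin)        # -> IndexMethod.gin
    print(IndexMethod.gin.value)  # -> "gin"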
| 18.6
| 55
| 0.594086
|
from enum import Enum
class IndexMethod(str, Enum):
btree = "btree"
hash = "hash"
gist = "gist"
gin = "gin"
def __str__(self):
return f"{self.__class__.__name__}.{self.name}"
def __repr__(self):
return self.__str__()
| true
| true
|
7906028a5fbe724469338caf018345a138d2fe4c
| 1,021
|
py
|
Python
|
test/test_entity_creation_dto.py
|
OpenSILEX/opensilexClientToolsPython
|
41b1e7e707670ecf1b2c06d79bdd9749945788cb
|
[
"RSA-MD"
] | null | null | null |
test/test_entity_creation_dto.py
|
OpenSILEX/opensilexClientToolsPython
|
41b1e7e707670ecf1b2c06d79bdd9749945788cb
|
[
"RSA-MD"
] | 7
|
2021-05-25T14:06:04.000Z
|
2021-11-05T15:42:14.000Z
|
test/test_entity_creation_dto.py
|
OpenSILEX/opensilexClientToolsPython
|
41b1e7e707670ecf1b2c06d79bdd9749945788cb
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
OpenSilex API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: INSTANCE-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import opensilexClientToolsPython
from opensilexClientToolsPython.models.entity_creation_dto import EntityCreationDTO # noqa: E501
from opensilexClientToolsPython.rest import ApiException
class TestEntityCreationDTO(unittest.TestCase):
"""EntityCreationDTO unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEntityCreationDTO(self):
"""Test EntityCreationDTO"""
# FIXME: construct object with mandatory attributes with example values
# model = opensilexClientToolsPython.models.entity_creation_dto.EntityCreationDTO() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.902439
| 119
| 0.734574
|
from __future__ import absolute_import
import unittest
import opensilexClientToolsPython
from opensilexClientToolsPython.models.entity_creation_dto import EntityCreationDTO
from opensilexClientToolsPython.rest import ApiException
class TestEntityCreationDTO(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testEntityCreationDTO(self):
        pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
79060354e4f3994a141daeb85f0af6464eb8d0aa
| 1,522
|
py
|
Python
|
scripts/india_rbi/below_poverty_line/preprocess_test.py
|
hanlu09205/data
|
76e82fd199bc99543e2d54ad5809343ccdf11b32
|
[
"Apache-2.0"
] | 1
|
2021-01-01T05:27:56.000Z
|
2021-01-01T05:27:56.000Z
|
scripts/india_rbi/below_poverty_line/preprocess_test.py
|
hanlu09205/data
|
76e82fd199bc99543e2d54ad5809343ccdf11b32
|
[
"Apache-2.0"
] | null | null | null |
scripts/india_rbi/below_poverty_line/preprocess_test.py
|
hanlu09205/data
|
76e82fd199bc99543e2d54ad5809343ccdf11b32
|
[
"Apache-2.0"
] | 1
|
2021-01-01T05:27:58.000Z
|
2021-01-01T05:27:58.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import filecmp
import os
import json
import tempfile
import unittest
from india_rbi.below_poverty_line.preprocess import BelowPovertyLineDataLoader
# module_dir_ is the path to where this test is running from.
module_dir_ = os.path.dirname(__file__)
class TestPreprocess(unittest.TestCase):
def test_create_csv(self):
with tempfile.TemporaryDirectory() as tmp_dir:
xlsx_file = os.path.join(module_dir_, 'test_data/test.XLSX')
expected_file = os.path.join(module_dir_, 'test_data/expected.csv')
            result_file = os.path.join(tmp_dir, 'test_cleaned.csv')
loader = BelowPovertyLineDataLoader(xlsx_file)
loader.download()
loader.process()
loader.save(csv_file_path=result_file)
same = filecmp.cmp(result_file, expected_file)
os.remove(result_file)
self.assertTrue(same)
if __name__ == '__main__':
unittest.main()
| 32.382979
| 79
| 0.720762
|
import filecmp
import os
import json
import tempfile
import unittest
from india_rbi.below_poverty_line.preprocess import BelowPovertyLineDataLoader
module_dir_ = os.path.dirname(__file__)
class TestPreprocess(unittest.TestCase):
def test_create_csv(self):
with tempfile.TemporaryDirectory() as tmp_dir:
xlsx_file = os.path.join(module_dir_, 'test_data/test.XLSX')
expected_file = os.path.join(module_dir_, 'test_data/expected.csv')
            result_file = os.path.join(tmp_dir, 'test_cleaned.csv')
loader = BelowPovertyLineDataLoader(xlsx_file)
loader.download()
loader.process()
loader.save(csv_file_path=result_file)
same = filecmp.cmp(result_file, expected_file)
os.remove(result_file)
self.assertTrue(same)
if __name__ == '__main__':
unittest.main()
| true
| true
|
7906038051dc108ff1e0e7a4f2b46fa5598ec9db
| 464
|
py
|
Python
|
source/python/airflow/runtime/plugins/helpers/sql_data_quality_queries.py
|
paulo3011/opendatafrombrasil
|
cc15ffadaaccb853e1d73a685de39c2bc5340c7c
|
[
"MIT"
] | null | null | null |
source/python/airflow/runtime/plugins/helpers/sql_data_quality_queries.py
|
paulo3011/opendatafrombrasil
|
cc15ffadaaccb853e1d73a685de39c2bc5340c7c
|
[
"MIT"
] | null | null | null |
source/python/airflow/runtime/plugins/helpers/sql_data_quality_queries.py
|
paulo3011/opendatafrombrasil
|
cc15ffadaaccb853e1d73a685de39c2bc5340c7c
|
[
"MIT"
] | null | null | null |
class SqlDataQualityQueries:
establisment_company_relation_check = ("""
        -- looks for establishment registrations without a relation to a company
        -- for a database with full information, the total returned must equal zero (establishment + company)
SELECT count(e.basiccnpj) as total_without_relation from open_data.fact_establishment e
LEFT JOIN open_data.dim_company c ON c.basiccnpj = e.basiccnpj
WHERE c.basiccnpj is null;
""", "== 1", "== 0")
| 58
| 109
| 0.732759
|
class SqlDataQualityQueries:
establisment_company_relation_check = ("""
        -- looks for establishment registrations without a relation to a company
        -- for a database with full information, the total returned must equal zero (establishment + company)
SELECT count(e.basiccnpj) as total_without_relation from open_data.fact_establishment e
LEFT JOIN open_data.dim_company c ON c.basiccnpj = e.basiccnpj
WHERE c.basiccnpj is null;
""", "== 1", "== 0")
| true
| true
|
7906061a9a46653f31e7943cf6210088547bf9e8
| 680
|
py
|
Python
|
build/navigation/amcl/catkin_generated/pkg.develspace.context.pc.py
|
lty1994/ros_project
|
d55ce07c592d545f9a43330fa6bf96af6651575f
|
[
"BSD-2-Clause"
] | null | null | null |
build/navigation/amcl/catkin_generated/pkg.develspace.context.pc.py
|
lty1994/ros_project
|
d55ce07c592d545f9a43330fa6bf96af6651575f
|
[
"BSD-2-Clause"
] | null | null | null |
build/navigation/amcl/catkin_generated/pkg.develspace.context.pc.py
|
lty1994/ros_project
|
d55ce07c592d545f9a43330fa6bf96af6651575f
|
[
"BSD-2-Clause"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/autolabor/catkin_ws/devel/include;/home/autolabor/catkin_ws/src/navigation/amcl/include".split(';') if "/home/autolabor/catkin_ws/devel/include;/home/autolabor/catkin_ws/src/navigation/amcl/include" != "" else []
PROJECT_CATKIN_DEPENDS = "rosbag;roscpp;dynamic_reconfigure;tf;nav_msgs;std_srvs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lamcl_sensors;-lamcl_map;-lamcl_pf".split(';') if "-lamcl_sensors;-lamcl_map;-lamcl_pf" != "" else []
PROJECT_NAME = "amcl"
PROJECT_SPACE_DIR = "/home/autolabor/catkin_ws/devel"
PROJECT_VERSION = "1.14.3"
| 75.555556
| 253
| 0.777941
|
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/autolabor/catkin_ws/devel/include;/home/autolabor/catkin_ws/src/navigation/amcl/include".split(';') if "/home/autolabor/catkin_ws/devel/include;/home/autolabor/catkin_ws/src/navigation/amcl/include" != "" else []
PROJECT_CATKIN_DEPENDS = "rosbag;roscpp;dynamic_reconfigure;tf;nav_msgs;std_srvs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lamcl_sensors;-lamcl_map;-lamcl_pf".split(';') if "-lamcl_sensors;-lamcl_map;-lamcl_pf" != "" else []
PROJECT_NAME = "amcl"
PROJECT_SPACE_DIR = "/home/autolabor/catkin_ws/devel"
PROJECT_VERSION = "1.14.3"
| true
| true
|
7906063fe8bfd262d70fe981b681e8b1d68b3e5c
| 4,294
|
py
|
Python
|
Trabalho 02/Resolucao/code/backtracking/Labirinto.py
|
RafaelAmauri/Projeto-e-Analise-de-Algoritmos
|
76a8d834ff03c752c09715c6ffe5f4a95a9fb1e5
|
[
"MIT"
] | null | null | null |
Trabalho 02/Resolucao/code/backtracking/Labirinto.py
|
RafaelAmauri/Projeto-e-Analise-de-Algoritmos
|
76a8d834ff03c752c09715c6ffe5f4a95a9fb1e5
|
[
"MIT"
] | null | null | null |
Trabalho 02/Resolucao/code/backtracking/Labirinto.py
|
RafaelAmauri/Projeto-e-Analise-de-Algoritmos
|
76a8d834ff03c752c09715c6ffe5f4a95a9fb1e5
|
[
"MIT"
] | null | null | null |
import Celula
class Labirinto:
def __init__(self, num_rows, num_columns, order_to_check):
        # Order in which the neighbours will be checked
self.order_to_check = order_to_check
        # Number of rows in the grid
self.num_rows = num_rows
        # Number of columns in the grid
self.num_columns = num_columns
self.grid = []
        # Fill the grid
tmp_cell = Celula.Celula(0)
for i in range(self.num_columns):
self.grid.append([tmp_cell for x in range(self.num_rows)])
    # Print the grid
def __str__(self):
grid_as_string = ""
for i in range(self.num_columns):
for j in range(self.num_rows):
grid_as_string += f"{self.grid[i][j].get_value()} "
grid_as_string += "\n"
return grid_as_string
    # Insert a cell with value cell_value at [pos_y][pos_x]
def insert(self, cell_value, pos_y, pos_x):
self.grid[pos_y][pos_x] = Celula.Celula(cell_value)
        # Quick way around IndexError since I don't want to spend much time on this code
try:
            # Check whether there is a cell above
if self.grid[pos_y-1][pos_x].get_value() != 0:
self.grid[pos_y][pos_x].set_up(self.grid[pos_y-1][pos_x])
self.grid[pos_y-1][pos_x].set_down(self.grid[pos_y][pos_x])
except IndexError:
pass
try:
            # Check whether there is a cell below
if self.grid[pos_y+1][pos_x].get_value() != 0:
self.grid[pos_y][pos_x].set_down(self.grid[pos_y+1][pos_x])
self.grid[pos_y+1][pos_x].set_up(self.grid[pos_y][pos_x])
except IndexError:
pass
try:
            # Check whether there is a cell to the left
            if self.grid[pos_y][pos_x-1].get_value() != 0:
                self.grid[pos_y][pos_x].set_left(self.grid[pos_y][pos_x-1])
                self.grid[pos_y][pos_x-1].set_right(self.grid[pos_y][pos_x])
except IndexError:
pass
try:
            # Check whether there is a cell to the right
            if self.grid[pos_y][pos_x+1].get_value() != 0:
                self.grid[pos_y][pos_x].set_right(self.grid[pos_y][pos_x+1])
                self.grid[pos_y][pos_x+1].set_left(self.grid[pos_y][pos_x])
except IndexError:
pass
def find_path(self, pos_x, pos_y):
self.grid[pos_y][pos_x].visited = True
        # If this is the exit, print its position!
        if self.grid[pos_y][pos_x].value == 2:
            print(f"Exit found at position [{pos_x}][{pos_y}]!")
        # Check neighbours in the order that was passed to the constructor
for i in self.order_to_check:
            # If the neighbour above exists and is neither None nor zero, open a recursion at that
            # position, since it is a path!
if i == "up" and self.grid[pos_y][pos_x].up != None and self.grid[pos_y][pos_x].up != 0:
if not self.grid[pos_y][pos_x].up.visited:
self.find_path(pos_x, pos_y-1)
            # If the neighbour to the left exists and is neither None nor zero, open a recursion at that
            # position, since it is a path!
if i == "left" and self.grid[pos_y][pos_x].left != None and self.grid[pos_y][pos_x].left != 0:
if not self.grid[pos_y][pos_x].left.visited:
self.find_path(pos_x-1, pos_y)
            # If the neighbour below exists and is neither None nor zero, open a recursion at that
            # position, since it is a path!
if i == "down" and self.grid[pos_y][pos_x].down != None and self.grid[pos_y][pos_x].down != 0:
if not self.grid[pos_y][pos_x].down.visited:
self.find_path(pos_x, pos_y+1)
            # If the neighbour to the right exists and is neither None nor zero, open a recursion at that
            # position, since it is a path!
if i == "right" and self.grid[pos_y][pos_x].right != None and self.grid[pos_y][pos_x].right != 0:
if not self.grid[pos_y][pos_x].right.visited:
self.find_path(pos_x+1, pos_y)
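# --- Hedged usage sketch (not part of the original file) ---
# Builds a small maze (1 = open cell, 2 = exit) and runs the backtracking
# search from the top-left corner. Assumes Celula exposes the
# get_value()/set_up()/set_down()/set_left()/set_right()/visited API used above.
if __name__ == "__main__":
    maze = Labirinto(3, 3, ["up", "left", "down", "right"])
    layout = [[1, 1, 0],
              [0, 1, 0],
              [0, 1, 2]]
    for y, row in enumerate(layout):
        for x, value in enumerate(row):
            if value != 0:
                maze.insert(value, y, x)
    maze.find_path(0, 0)  # prints the exit position when it is reached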
| 38.339286
| 126
| 0.581742
|
import Celula
class Labirinto:
def __init__(self, num_rows, num_columns, order_to_check):
self.order_to_check = order_to_check
self.num_rows = num_rows
self.num_columns = num_columns
self.grid = []
tmp_cell = Celula.Celula(0)
for i in range(self.num_columns):
self.grid.append([tmp_cell for x in range(self.num_rows)])
def __str__(self):
grid_as_string = ""
for i in range(self.num_columns):
for j in range(self.num_rows):
grid_as_string += f"{self.grid[i][j].get_value()} "
grid_as_string += "\n"
return grid_as_string
def insert(self, cell_value, pos_y, pos_x):
self.grid[pos_y][pos_x] = Celula.Celula(cell_value)
try:
if self.grid[pos_y-1][pos_x].get_value() != 0:
self.grid[pos_y][pos_x].set_up(self.grid[pos_y-1][pos_x])
self.grid[pos_y-1][pos_x].set_down(self.grid[pos_y][pos_x])
except IndexError:
pass
try:
if self.grid[pos_y+1][pos_x].get_value() != 0:
self.grid[pos_y][pos_x].set_down(self.grid[pos_y+1][pos_x])
self.grid[pos_y+1][pos_x].set_up(self.grid[pos_y][pos_x])
except IndexError:
pass
try:
if self.grid[pos_y][pos_x-1].get_value() != 0:
                self.grid[pos_y][pos_x].set_left(self.grid[pos_y][pos_x-1])
self.grid[pos_y][pos_x-1].set_right(self.grid[pos_y][pos_x])
except IndexError:
pass
try:
            if self.grid[pos_y][pos_x+1].get_value() != 0:
                self.grid[pos_y][pos_x].set_right(self.grid[pos_y][pos_x+1])
                self.grid[pos_y][pos_x+1].set_left(self.grid[pos_y][pos_x])
except IndexError:
pass
def find_path(self, pos_x, pos_y):
self.grid[pos_y][pos_x].visited = True
if self.grid[pos_y][pos_x].value == 2:
print(f"Saida encontrada na posicao [{pos_x}][{pos_y}]!")
for i in self.order_to_check:
if i == "up" and self.grid[pos_y][pos_x].up != None and self.grid[pos_y][pos_x].up != 0:
if not self.grid[pos_y][pos_x].up.visited:
self.find_path(pos_x, pos_y-1)
if i == "left" and self.grid[pos_y][pos_x].left != None and self.grid[pos_y][pos_x].left != 0:
if not self.grid[pos_y][pos_x].left.visited:
self.find_path(pos_x-1, pos_y)
if i == "down" and self.grid[pos_y][pos_x].down != None and self.grid[pos_y][pos_x].down != 0:
if not self.grid[pos_y][pos_x].down.visited:
self.find_path(pos_x, pos_y+1)
if i == "right" and self.grid[pos_y][pos_x].right != None and self.grid[pos_y][pos_x].right != 0:
if not self.grid[pos_y][pos_x].right.visited:
self.find_path(pos_x+1, pos_y)
| true
| true
|
79060674fbdf9209104d2086ac2b555a515cdb1e
| 2,474
|
py
|
Python
|
src/ploomber/cli/io.py
|
abhishak3/ploomber
|
6041bcd381b7fd9a7525f94edd0ae1b03b14bb8d
|
[
"Apache-2.0"
] | null | null | null |
src/ploomber/cli/io.py
|
abhishak3/ploomber
|
6041bcd381b7fd9a7525f94edd0ae1b03b14bb8d
|
[
"Apache-2.0"
] | 37
|
2021-10-02T06:12:57.000Z
|
2021-12-27T22:24:29.000Z
|
src/ploomber/cli/io.py
|
abhishak3/ploomber
|
6041bcd381b7fd9a7525f94edd0ae1b03b14bb8d
|
[
"Apache-2.0"
] | null | null | null |
from functools import wraps
import sys
import traceback
from ploomber.io import TerminalWriter
from ploomber.exceptions import DAGBuildError, DAGRenderError
# TODO: there are two types of cli commands: the ones that execute user's
# code (ploomber build/task) and the ones that parse a dag/task but do not
# execute it. For the former, we want to capture errors and display them with
# colors so it's easier for the user to understand what went wrong with their
# code. For the latter, the errors are raise by us, hence, we only need to
# print the message and exit. Currently, all CLI end points (except ploomber
# nb) are decorated with @cli_endpoint but we should change it to
# @command_endpoint
def cli_endpoint(fn):
"""
Decorator for command line endpoints that execute dags or tasks. It runs
    the decorated function, captures any exception, sends a colored
traceback to standard error and exits with code 1.
Notes
-----
    Functions decorated with this must be called with keyword arguments.
    Call some_endpoint(catch_exception=False) to disable this behavior (e.g.
    for testing).
"""
@wraps(fn)
def wrapper(catch_exception=True, **kwargs):
if catch_exception:
try:
fn(**kwargs)
# these already color output
except (DAGBuildError, DAGRenderError):
error = traceback.format_exc()
color = False
except Exception:
error = traceback.format_exc()
color = True
else:
error = None
if error:
if color:
tw = TerminalWriter(file=sys.stderr)
tw._write_source(error.splitlines())
else:
print(error, file=sys.stderr)
sys.exit(1)
else:
fn(**kwargs)
return wrapper
# FIXME: capture only certain types of exceptions. If it's something we didn't
# raise, we'd like to see the full traceback
def command_endpoint(fn):
"""
Decorator for command line endpoints that only parse dags or tasks but do
    not execute them. If it fails, it prints the error message to stderr, then
    exits with code 1.
"""
@wraps(fn)
def wrapper(**kwargs):
try:
fn(**kwargs)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
sys.exit(1)
return wrapper
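# --- Hedged usage sketch (not part of the original module) ---
# How an endpoint would typically be wired up; `build` here is a
# hypothetical command, not a real ploomber entry point.
@cli_endpoint
def build(env=None):
    print(f'building with env={env}')
# build(env='dev')                         # exceptions -> colored stderr + exit(1)
# build(catch_exception=False, env='dev')  # raw exceptions, handy in tests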
| 32.552632
| 78
| 0.630558
|
from functools import wraps
import sys
import traceback
from ploomber.io import TerminalWriter
from ploomber.exceptions import DAGBuildError, DAGRenderError
# code (ploomber build/task) and the ones that parse a dag/task but do not
# execute it. For the former, we want to capture errors and display them with
# colors so it's easier for the user to understand what went wrong with their
def cli_endpoint(fn):
@wraps(fn)
def wrapper(catch_exception=True, **kwargs):
if catch_exception:
try:
fn(**kwargs)
except (DAGBuildError, DAGRenderError):
error = traceback.format_exc()
color = False
except Exception:
error = traceback.format_exc()
color = True
else:
error = None
if error:
if color:
tw = TerminalWriter(file=sys.stderr)
tw._write_source(error.splitlines())
else:
print(error, file=sys.stderr)
sys.exit(1)
else:
fn(**kwargs)
return wrapper
def command_endpoint(fn):
@wraps(fn)
def wrapper(**kwargs):
try:
fn(**kwargs)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
sys.exit(1)
return wrapper
| true
| true
|
7906070b76e68e6410082fbc43347068a13fada4
| 9,012
|
py
|
Python
|
python/scfmm/__init__.py
|
serokell/segmented-cfmm
|
bca32e931d250c94acecc997cdf63a67c85cda4f
|
[
"MIT",
"Unlicense"
] | null | null | null |
python/scfmm/__init__.py
|
serokell/segmented-cfmm
|
bca32e931d250c94acecc997cdf63a67c85cda4f
|
[
"MIT",
"Unlicense"
] | 24
|
2021-07-20T16:13:14.000Z
|
2021-12-06T16:25:17.000Z
|
python/scfmm/__init__.py
|
serokell/segmented-cfmm
|
bca32e931d250c94acecc997cdf63a67c85cda4f
|
[
"MIT",
"Unlicense"
] | null | null | null |
# SPDX-FileCopyrightText: 2021 Arthur Breitman
# SPDX-License-Identifier: LicenseRef-MIT-Arthur-Breitman
import math
from collections import defaultdict
from pycfmm.data import AutoRepr
infinity = 10 ** 100
class Tick(AutoRepr):
"""
An initialized tick, marking the beginning or end of a position
"""
def __init__(self, i_prev, i_next, feeGrowthOutside):
"""
:type i_prev: int
:type i_next: int
"""
self.i_prev = i_prev
self.i_next = i_next
self.Delta_L = 0
self.feeGrowthOutside = feeGrowthOutside
self.n_positions = 0
class Position(AutoRepr):
"""
A LP's position
"""
def __init__(self, L=0):
self.L = L
self.feeGrowthInsideLast = XY()
class XY(AutoRepr):
"""
A pair of balances in asset X and Y
"""
def __init__(self, x=0, y=0):
self.x, self.y = x, y
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return XY(x, y)
def __sub__(self, other):
x = self.x - other.x
y = self.y - other.y
return XY(x, y)
def __neg__(self):
return XY(-self.x, -self.y)
def __mul__(self, other):
return XY(other * self.x, other * self.y)
def __eq__(self, other):
return isinstance(other, XY) and self.x == other.x and self.y == other.y
class Contract(AutoRepr):
"""
A contract in the fashion of Uniswap v3
"""
@staticmethod
def tick(srp):
"""
Computes the closest tick index below a certain price, given its square root
:param srp: square root of a price
:return: the closest tick below a certain price
"""
if srp == infinity:
return infinity
else:
return math.floor(math.log(srp) / math.log(math.sqrt(1.0001)))
@staticmethod
def srp(tick):
"""
Computes the square root of the price corresponding to a given tick
:param tick: the index of a tick
:return: the corresponding square root price
"""
if tick == infinity:
return infinity
return math.pow(math.sqrt(1.0001), tick)
def __init__(self, X, Y, fee=0.3 / 100):
self.balance = XY(X, Y)
self.srP = math.sqrt(Y / X)
self.i_a = self.tick(self.srP)
self.L = math.floor(math.sqrt(X * Y))
self.fee = fee
self.i_l = -infinity
self.ticks = {-infinity: Tick(-infinity, infinity, XY()), infinity: Tick(-infinity, infinity, XY())}
self.positions = defaultdict(Position)
self.feeGrowth = XY()
def initialize_tick(self, i, i_l):
"""
Initialize a new tick at index i, provide the index of an initialized tick lower
than i to find it easily in the linked list. Assumes that i is *not* already initialized.
:param i:
:param i_l:
"""
assert (i not in self.ticks)
assert (i_l < i)
i_next = self.ticks[i_l].i_next
if i_next > i:
self.ticks[i_l].i_next = i
# find an instance where i_a = i and we set XY(0, 0) and that's wrong
self.ticks[i] = Tick(i_l, i_next, self.feeGrowth if self.i_a >= i else XY())
self.ticks[i_next].i_prev = i
else:
self.initialize_tick(i, i_next)
def collect_fees(self, user, i_l, i_u):
key = (user, i_l, i_u)
position = self.positions[key]
f_a = self.feeGrowth - self.ticks[i_u].feeGrowthOutside if self.i_a >= i_u else self.ticks[i_u].feeGrowthOutside
f_b = self.ticks[i_l].feeGrowthOutside if self.i_a >= i_l else self.feeGrowth - self.ticks[i_l].feeGrowthOutside
feeGrowthInside = self.feeGrowth - f_a - f_b
fees = (feeGrowthInside - position.feeGrowthInsideLast) * position.L
position.feeGrowthInsideLast = feeGrowthInside
return fees
def set_position(self, user, i_l, i_l_l, i_u, i_u_l, Delta_L):
assert (i_l_l <= i_l)
if i_l not in self.ticks:
self.initialize_tick(i_l, i_l_l)
assert (i_u_l <= i_u)
if i_u not in self.ticks:
self.initialize_tick(i_u, i_u_l)
position_key = (user, i_l, i_u)
fees = self.collect_fees(user, i_l, i_u)
self.positions[position_key].L += Delta_L
assert (self.positions[position_key].L >= 0)
# todo, garbage collect if we are unwinding the position completely?
Delta = XY()
# Add or remove liquidity above the current tick
if self.i_a < i_l:
Delta.x = Delta_L * (1 / self.srp(i_l) - 1 / self.srp(i_u))
Delta.y = 0
# Add or remove liquidity around the current tick
elif i_l <= self.i_a < i_u:
# update interval we are in if need be
if i_l > self.i_l:
self.i_l = i_l
Delta.x = Delta_L * (1 / self.srP - 1 / self.srp(i_u))
Delta.y = Delta_L * (self.srP - self.srp(i_l))
self.L += Delta_L
else: # i_a >= i_u
Delta.x = 0
Delta.y = Delta_L * (self.srp(i_u) - self.srp(i_l))
Delta -= fees
# make a note of how much liquidity is gained or lost when
# entering this interval
self.ticks[i_l].Delta_L += Delta_L
self.ticks[i_u].Delta_L -= Delta_L
self.balance += Delta
return -Delta
def X_to_Y(self, dX, fee=None):
# dX must be positive
assert (dX >= 0)
if fee is None:
fee = self.fee
# If there is no liquidity, stop the trade at this point
if self.L == 0:
self.i_a = self.tick(
self.srP) # we may need to update i_a if we went through several ticks to reach this point
return XY()
# Assume the trade will fit in a tick, what would the fees be like?
fees = XY(dX * fee, 0)
srp_new = 1.0 / (1.0 / self.srP + (dX - fees.x) / self.L)
i_l = self.i_l
tick_new = self.tick(srp_new)
        if tick_new >= i_l: # we didn't push past the interval
dY = - (dX - fees.x) * self.srP * srp_new
self.srP = srp_new
self.i_a = tick_new
user = XY(-dX, -dY)
self.balance -= user
# Update fee growth with the fees we just collected
self.feeGrowth += fees * (1.0 / self.L)
return user
else:
            # compute what we got up to i_l and how much it cost
# well, what delta_X would have taken me there?
self.i_l = self.ticks[self.i_l].i_prev
srP_l = self.srp(i_l)
dY = self.L * (srP_l - self.srP)
dX_ = - dY / (self.srP * srP_l)
tmp = dX_ / (1.0 - fee)
dX_, fees = tmp, XY(tmp - dX_, 0)
# update fee growth
self.feeGrowth += fees * (1.0 / self.L)
# remove the liquidity we used to have
self.L -= self.ticks[i_l].Delta_L
# flip feeGrowth
self.ticks[i_l].feeGrowthOutside = self.feeGrowth - self.ticks[i_l].feeGrowthOutside
self.srP = self.srp(i_l) - 1e-16 # todo can we do better than this crutch?
user = XY(-dX_, -dY)
self.balance -= user
return user + self.X_to_Y(dX - dX_, fee)
def Y_to_X(self, dY, fee=None):
# dY must be positive
assert (dY >= 0)
if fee is None:
fee = self.fee
# If there is no liquidity, stop the trade at this point
if self.L == 0:
self.i_a = self.tick(
self.srP) # we may need to update i_a if we went through several ticks to reach this point
return XY()
# Assume the trade will fit in a tick, what would the fees be like?
fees = XY(0, dY * fee)
srp_new = self.srP + (dY - fees.y) / self.L
i_u = self.ticks[self.i_l].i_next
tick_new = self.tick(srp_new)
if tick_new < i_u: # we did not push past the interval
dX = - (dY - fees.y) / (self.srP * srp_new)
self.srP = srp_new
self.i_a = tick_new
user = XY(-dX, -dY)
self.balance -= user
# Update fee growth with the fees we just collected
self.feeGrowth += fees * (1.0 / self.L)
return user
else:
self.i_l = i_u
srP_u = self.srp(i_u)
dY_ = self.L * (srP_u - self.srP)
dX = - dY_ / (self.srP * srP_u)
tmp = dY_ / (1.0 - fee)
dY_, fees = tmp, XY(0, tmp - dY_)
# update fee growth
self.feeGrowth += fees * (1.0 / self.L)
self.L += self.ticks[i_u].Delta_L
self.ticks[i_u].feeGrowthOutside = self.feeGrowth - self.ticks[i_u].feeGrowthOutside
self.srP = srP_u
user = XY(-dX, -dY_)
self.balance -= user
return user + self.Y_to_X(dY - dY_, fee)
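# --- Hedged usage sketch (not part of the original module) ---
# Illustrative numbers only: a symmetric pool, then a small X-for-Y trade.
if __name__ == "__main__":
    pool = Contract(X=10 ** 6, Y=10 ** 6)  # srP = 1.0, L = 10**6
    print(Contract.tick(1.0))              # 0: price 1 sits at tick 0
    got = pool.X_to_Y(1000)                # sell 1000 X (0.3% fee)
    print(got)                             # user's balance change: XY(x=-1000, y=~996)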
| 34.007547
| 120
| 0.552818
|
import math
from collections import defaultdict
from pycfmm.data import AutoRepr
infinity = 10 ** 100
class Tick(AutoRepr):
def __init__(self, i_prev, i_next, feeGrowthOutside):
self.i_prev = i_prev
self.i_next = i_next
self.Delta_L = 0
self.feeGrowthOutside = feeGrowthOutside
self.n_positions = 0
class Position(AutoRepr):
def __init__(self, L=0):
self.L = L
self.feeGrowthInsideLast = XY()
class XY(AutoRepr):
def __init__(self, x=0, y=0):
self.x, self.y = x, y
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return XY(x, y)
def __sub__(self, other):
x = self.x - other.x
y = self.y - other.y
return XY(x, y)
def __neg__(self):
return XY(-self.x, -self.y)
def __mul__(self, other):
return XY(other * self.x, other * self.y)
def __eq__(self, other):
return isinstance(other, XY) and self.x == other.x and self.y == other.y
class Contract(AutoRepr):
@staticmethod
def tick(srp):
if srp == infinity:
return infinity
else:
return math.floor(math.log(srp) / math.log(math.sqrt(1.0001)))
@staticmethod
def srp(tick):
if tick == infinity:
return infinity
return math.pow(math.sqrt(1.0001), tick)
def __init__(self, X, Y, fee=0.3 / 100):
self.balance = XY(X, Y)
self.srP = math.sqrt(Y / X)
self.i_a = self.tick(self.srP)
self.L = math.floor(math.sqrt(X * Y))
self.fee = fee
self.i_l = -infinity
self.ticks = {-infinity: Tick(-infinity, infinity, XY()), infinity: Tick(-infinity, infinity, XY())}
self.positions = defaultdict(Position)
self.feeGrowth = XY()
def initialize_tick(self, i, i_l):
assert (i not in self.ticks)
assert (i_l < i)
i_next = self.ticks[i_l].i_next
if i_next > i:
self.ticks[i_l].i_next = i
self.ticks[i] = Tick(i_l, i_next, self.feeGrowth if self.i_a >= i else XY())
self.ticks[i_next].i_prev = i
else:
self.initialize_tick(i, i_next)
def collect_fees(self, user, i_l, i_u):
key = (user, i_l, i_u)
position = self.positions[key]
f_a = self.feeGrowth - self.ticks[i_u].feeGrowthOutside if self.i_a >= i_u else self.ticks[i_u].feeGrowthOutside
f_b = self.ticks[i_l].feeGrowthOutside if self.i_a >= i_l else self.feeGrowth - self.ticks[i_l].feeGrowthOutside
feeGrowthInside = self.feeGrowth - f_a - f_b
fees = (feeGrowthInside - position.feeGrowthInsideLast) * position.L
position.feeGrowthInsideLast = feeGrowthInside
return fees
def set_position(self, user, i_l, i_l_l, i_u, i_u_l, Delta_L):
assert (i_l_l <= i_l)
if i_l not in self.ticks:
self.initialize_tick(i_l, i_l_l)
assert (i_u_l <= i_u)
if i_u not in self.ticks:
self.initialize_tick(i_u, i_u_l)
position_key = (user, i_l, i_u)
fees = self.collect_fees(user, i_l, i_u)
self.positions[position_key].L += Delta_L
assert (self.positions[position_key].L >= 0)
# todo, garbage collect if we are unwinding the position completely?
Delta = XY()
# Add or remove liquidity above the current tick
if self.i_a < i_l:
Delta.x = Delta_L * (1 / self.srp(i_l) - 1 / self.srp(i_u))
Delta.y = 0
# Add or remove liquidity around the current tick
elif i_l <= self.i_a < i_u:
# update interval we are in if need be
if i_l > self.i_l:
self.i_l = i_l
Delta.x = Delta_L * (1 / self.srP - 1 / self.srp(i_u))
Delta.y = Delta_L * (self.srP - self.srp(i_l))
self.L += Delta_L
else: # i_a >= i_u
Delta.x = 0
Delta.y = Delta_L * (self.srp(i_u) - self.srp(i_l))
Delta -= fees
# make a note of how much liquidity is gained or lost when
# entering this interval
self.ticks[i_l].Delta_L += Delta_L
self.ticks[i_u].Delta_L -= Delta_L
self.balance += Delta
return -Delta
def X_to_Y(self, dX, fee=None):
# dX must be positive
assert (dX >= 0)
if fee is None:
fee = self.fee
# If there is no liquidity, stop the trade at this point
if self.L == 0:
self.i_a = self.tick(
self.srP) # we may need to update i_a if we went through several ticks to reach this point
return XY()
# Assume the trade will fit in a tick, what would the fees be like?
fees = XY(dX * fee, 0)
srp_new = 1.0 / (1.0 / self.srP + (dX - fees.x) / self.L)
i_l = self.i_l
tick_new = self.tick(srp_new)
        if tick_new >= i_l: # we didn't push past the interval
dY = - (dX - fees.x) * self.srP * srp_new
self.srP = srp_new
self.i_a = tick_new
user = XY(-dX, -dY)
self.balance -= user
self.feeGrowth += fees * (1.0 / self.L)
return user
else:
self.i_l = self.ticks[self.i_l].i_prev
srP_l = self.srp(i_l)
dY = self.L * (srP_l - self.srP)
dX_ = - dY / (self.srP * srP_l)
tmp = dX_ / (1.0 - fee)
dX_, fees = tmp, XY(tmp - dX_, 0)
self.feeGrowth += fees * (1.0 / self.L)
self.L -= self.ticks[i_l].Delta_L
self.ticks[i_l].feeGrowthOutside = self.feeGrowth - self.ticks[i_l].feeGrowthOutside
self.srP = self.srp(i_l) - 1e-16
user = XY(-dX_, -dY)
self.balance -= user
return user + self.X_to_Y(dX - dX_, fee)
def Y_to_X(self, dY, fee=None):
assert (dY >= 0)
if fee is None:
fee = self.fee
if self.L == 0:
self.i_a = self.tick(
self.srP)
return XY()
fees = XY(0, dY * fee)
srp_new = self.srP + (dY - fees.y) / self.L
i_u = self.ticks[self.i_l].i_next
tick_new = self.tick(srp_new)
if tick_new < i_u:
dX = - (dY - fees.y) / (self.srP * srp_new)
self.srP = srp_new
self.i_a = tick_new
user = XY(-dX, -dY)
self.balance -= user
self.feeGrowth += fees * (1.0 / self.L)
return user
else:
self.i_l = i_u
srP_u = self.srp(i_u)
dY_ = self.L * (srP_u - self.srP)
dX = - dY_ / (self.srP * srP_u)
tmp = dY_ / (1.0 - fee)
dY_, fees = tmp, XY(0, tmp - dY_)
self.feeGrowth += fees * (1.0 / self.L)
self.L += self.ticks[i_u].Delta_L
self.ticks[i_u].feeGrowthOutside = self.feeGrowth - self.ticks[i_u].feeGrowthOutside
self.srP = srP_u
user = XY(-dX, -dY_)
self.balance -= user
return user + self.Y_to_X(dY - dY_, fee)
| true
| true
|
790607b684cf5290dac94eb39bd4bd4620b6a450
| 13,432
|
py
|
Python
|
snorkel/candidates.py
|
silencehero/snorkel
|
afe2563a91e3d292d1a1d8a1ca6a2d39e8cd09c2
|
[
"Apache-2.0"
] | 2
|
2019-01-08T02:30:35.000Z
|
2019-03-13T07:00:34.000Z
|
snorkel/candidates.py
|
silencehero/snorkel
|
afe2563a91e3d292d1a1d8a1ca6a2d39e8cd09c2
|
[
"Apache-2.0"
] | null | null | null |
snorkel/candidates.py
|
silencehero/snorkel
|
afe2563a91e3d292d1a1d8a1ca6a2d39e8cd09c2
|
[
"Apache-2.0"
] | 2
|
2018-12-01T17:10:01.000Z
|
2018-12-28T09:16:41.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
from collections import defaultdict
from copy import deepcopy
from itertools import product
import re
from sqlalchemy.sql import select
from .models import Candidate, TemporarySpan, Sentence
from .udf import UDF, UDFRunner
QUEUE_COLLECT_TIMEOUT = 5
class CandidateExtractor(UDFRunner):
"""
An operator to extract Candidate objects from a Context.
:param candidate_class: The type of relation to extract, defined using
:func:`snorkel.models.candidate_subclass <snorkel.models.candidate.candidate_subclass>`
    :param cspaces: one or a list of :class:`CandidateSpace` objects, one for each relation argument. Defines the space of
        Contexts to consider
    :param matchers: one or a list of :class:`snorkel.matchers.Matcher` objects, one for each relation argument. Only tuples of
Contexts for which each element is accepted by the corresponding Matcher will be returned as Candidates
:param self_relations: Boolean indicating whether to extract Candidates that relate the same context.
Only applies to binary relations. Default is False.
:param nested_relations: Boolean indicating whether to extract Candidates that relate one Context with another
that contains it. Only applies to binary relations. Default is False.
:param symmetric_relations: Boolean indicating whether to extract symmetric Candidates, i.e., rel(A,B) and rel(B,A),
where A and B are Contexts. Only applies to binary relations. Default is False.
"""
def __init__(self, candidate_class, cspaces, matchers, self_relations=False, nested_relations=False, symmetric_relations=False):
super(CandidateExtractor, self).__init__(CandidateExtractorUDF,
candidate_class=candidate_class,
cspaces=cspaces,
matchers=matchers,
self_relations=self_relations,
nested_relations=nested_relations,
symmetric_relations=symmetric_relations)
def apply(self, xs, split=0, **kwargs):
super(CandidateExtractor, self).apply(xs, split=split, **kwargs)
def clear(self, session, split, **kwargs):
session.query(Candidate).filter(Candidate.split == split).delete()
class CandidateExtractorUDF(UDF):
def __init__(self, candidate_class, cspaces, matchers, self_relations, nested_relations, symmetric_relations, **kwargs):
self.candidate_class = candidate_class
# Note: isinstance is the way to check types -- not type(x) in [...]!
self.candidate_spaces = cspaces if isinstance(cspaces, (list, tuple)) else [cspaces]
self.matchers = matchers if isinstance(matchers, (list, tuple)) else [matchers]
self.nested_relations = nested_relations
self.self_relations = self_relations
self.symmetric_relations = symmetric_relations
# Check that arity is same
if len(self.candidate_spaces) != len(self.matchers):
raise ValueError("Mismatched arity of candidate space and matcher.")
else:
self.arity = len(self.candidate_spaces)
        # Make sure the candidate spaces are different so generators aren't exhausted!
self.candidate_spaces = list(map(deepcopy, self.candidate_spaces))
# Preallocates internal data structures
self.child_context_sets = [None] * self.arity
for i in range(self.arity):
self.child_context_sets[i] = set()
super(CandidateExtractorUDF, self).__init__(**kwargs)
def apply(self, context, clear, split, **kwargs):
# Generate TemporaryContexts that are children of the context using the candidate_space and filtered
# by the Matcher
for i in range(self.arity):
self.child_context_sets[i].clear()
for tc in self.matchers[i].apply(self.candidate_spaces[i].apply(context)):
tc.load_id_or_insert(self.session)
self.child_context_sets[i].add(tc)
# Generates and persists candidates
extracted = set()
candidate_args = {'split': split}
for args in product(*[enumerate(child_contexts) for child_contexts in self.child_context_sets]):
# TODO: Make this work for higher-order relations
if self.arity == 2:
ai, a = args[0]
bi, b = args[1]
# Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
# "symmetric" relations. For symmetric relations, if mentions are of the same type, maintain
# their order in the sentence.
if not self.self_relations and a == b:
continue
elif not self.nested_relations and (a in b or b in a):
continue
elif not self.symmetric_relations and ((b, a) in extracted or
(self.matchers[0] == self.matchers[1] and a.char_start > b.char_start)):
continue
# Keep track of extracted
extracted.add((a,b))
# Assemble candidate arguments
for i, arg_name in enumerate(self.candidate_class.__argnames__):
candidate_args[arg_name + '_id'] = args[i][1].id
# Checking for existence
if not clear:
q = select([self.candidate_class.id])
for key, value in iteritems(candidate_args):
q = q.where(getattr(self.candidate_class, key) == value)
candidate_id = self.session.execute(q).first()
if candidate_id is not None:
continue
# Add Candidate to session
yield self.candidate_class(**candidate_args)
class CandidateSpace(object):
"""
Defines the **space** of candidate objects
Calling _apply(x)_ given an object _x_ returns a generator over candidates in _x_.
"""
def __init__(self):
pass
def apply(self, x):
raise NotImplementedError()
class Ngrams(CandidateSpace):
"""
Defines the space of candidates as all n-grams (n <= n_max) in a Sentence _x_,
indexing by **character offset**.
"""
def __init__(self, n_max=5, split_tokens=('-', '/')):
CandidateSpace.__init__(self)
self.n_max = n_max
self.split_rgx = r'('+r'|'.join(split_tokens)+r')' if split_tokens and len(split_tokens) > 0 else None
def apply(self, context):
# These are the character offset--**relative to the sentence start**--for each _token_
offsets = context.char_offsets
# Loop over all n-grams in **reverse** order (to facilitate longest-match semantics)
L = len(offsets)
seen = set()
for l in range(1, self.n_max+1)[::-1]:
for i in range(L-l+1):
w = context.words[i+l-1]
start = offsets[i]
end = offsets[i+l-1] + len(w) - 1
ts = TemporarySpan(char_start=start, char_end=end, sentence=context)
if ts not in seen:
seen.add(ts)
yield ts
# Check for split
# NOTE: For simplicity, we only split single tokens right now!
if l == 1 and self.split_rgx is not None and end - start > 0:
m = re.search(self.split_rgx, context.text[start-offsets[0]:end-offsets[0]+1])
if m is not None and l < self.n_max + 1:
ts1 = TemporarySpan(char_start=start, char_end=start + m.start(1) - 1, sentence=context)
if ts1 not in seen:
seen.add(ts1)
                            yield ts1
ts2 = TemporarySpan(char_start=start + m.end(1), char_end=end, sentence=context)
if ts2 not in seen:
seen.add(ts2)
yield ts2
class PretaggedCandidateExtractor(UDFRunner):
"""UDFRunner for PretaggedCandidateExtractorUDF"""
def __init__(self, candidate_class, entity_types, self_relations=False,
nested_relations=False, symmetric_relations=True, entity_sep='~@~'):
super(PretaggedCandidateExtractor, self).__init__(
PretaggedCandidateExtractorUDF, candidate_class=candidate_class,
entity_types=entity_types, self_relations=self_relations,
nested_relations=nested_relations, entity_sep=entity_sep,
symmetric_relations=symmetric_relations,
)
def apply(self, xs, split=0, **kwargs):
super(PretaggedCandidateExtractor, self).apply(xs, split=split, **kwargs)
def clear(self, session, split, **kwargs):
session.query(Candidate).filter(Candidate.split == split).delete()
class PretaggedCandidateExtractorUDF(UDF):
"""
An extractor for Sentences with entities pre-tagged, and stored in the entity_types and entity_cids
fields.
"""
def __init__(self, candidate_class, entity_types, self_relations=False, nested_relations=False, symmetric_relations=False, entity_sep='~@~', **kwargs):
self.candidate_class = candidate_class
self.entity_types = entity_types
self.arity = len(entity_types)
self.self_relations = self_relations
self.nested_relations = nested_relations
self.symmetric_relations = symmetric_relations
self.entity_sep = entity_sep
super(PretaggedCandidateExtractorUDF, self).__init__(**kwargs)
def apply(self, context, clear, split, check_for_existing=True, **kwargs):
"""Extract Candidates from a Context"""
# For now, just handle Sentences
if not isinstance(context, Sentence):
            raise NotImplementedError("%s is currently only implemented for Sentence contexts." % type(self).__name__)
# Do a first pass to collect all mentions by entity type / cid
entity_idxs = dict((et, defaultdict(list)) for et in set(self.entity_types))
L = len(context.words)
for i in range(L):
if context.entity_types[i] is not None:
ets = context.entity_types[i].split(self.entity_sep)
cids = context.entity_cids[i].split(self.entity_sep)
for et, cid in zip(ets, cids):
if et in entity_idxs:
entity_idxs[et][cid].append(i)
# Form entity Spans
entity_spans = defaultdict(list)
entity_cids = {}
for et, cid_idxs in iteritems(entity_idxs):
for cid, idxs in iteritems(entity_idxs[et]):
while len(idxs) > 0:
i = idxs.pop(0)
char_start = context.char_offsets[i]
char_end = char_start + len(context.words[i]) - 1
while len(idxs) > 0 and idxs[0] == i + 1:
i = idxs.pop(0)
char_end = context.char_offsets[i] + len(context.words[i]) - 1
# Insert / load temporary span, also store map to entity CID
tc = TemporarySpan(char_start=char_start, char_end=char_end, sentence=context)
tc.load_id_or_insert(self.session)
entity_cids[tc.id] = cid
entity_spans[et].append(tc)
# Generates and persists candidates
candidate_args = {'split' : split}
for args in product(*[enumerate(entity_spans[et]) for et in self.entity_types]):
# TODO: Make this work for higher-order relations
if self.arity == 2:
ai, a = args[0]
bi, b = args[1]
# Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
# "symmetric" relations
if not self.self_relations and a == b:
continue
elif not self.nested_relations and (a in b or b in a):
continue
elif not self.symmetric_relations and ai > bi:
continue
# Assemble candidate arguments
for i, arg_name in enumerate(self.candidate_class.__argnames__):
candidate_args[arg_name + '_id'] = args[i][1].id
candidate_args[arg_name + '_cid'] = entity_cids[args[i][1].id]
# Checking for existence
if check_for_existing:
q = select([self.candidate_class.id])
for key, value in iteritems(candidate_args):
q = q.where(getattr(self.candidate_class, key) == value)
candidate_id = self.session.execute(q).first()
if candidate_id is not None:
continue
# Add Candidate to session
yield self.candidate_class(**candidate_args)
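# --- Hedged illustration (not part of the original module) ---
# A standalone sketch of the n-gram enumeration Ngrams.apply performs,
# using plain (char_start, char_end) tuples instead of TemporarySpan so it
# runs without any database session.
def _char_ngrams(words, offsets, n_max=3):
    """Yield character-offset spans, longest n-grams first."""
    L = len(offsets)
    for l in range(n_max, 0, -1):
        for i in range(L - l + 1):
            start = offsets[i]
            end = offsets[i + l - 1] + len(words[i + l - 1]) - 1
            yield (start, end)
# words/offsets as a Sentence context would provide them:
# list(_char_ngrams(["Barack", "Obama"], [0, 7], n_max=2))
# -> [(0, 11), (0, 5), (7, 11)]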
| 46.638889
| 155
| 0.602814
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
from collections import defaultdict
from copy import deepcopy
from itertools import product
import re
from sqlalchemy.sql import select
from .models import Candidate, TemporarySpan, Sentence
from .udf import UDF, UDFRunner
QUEUE_COLLECT_TIMEOUT = 5
class CandidateExtractor(UDFRunner):
def __init__(self, candidate_class, cspaces, matchers, self_relations=False, nested_relations=False, symmetric_relations=False):
super(CandidateExtractor, self).__init__(CandidateExtractorUDF,
candidate_class=candidate_class,
cspaces=cspaces,
matchers=matchers,
self_relations=self_relations,
nested_relations=nested_relations,
symmetric_relations=symmetric_relations)
def apply(self, xs, split=0, **kwargs):
super(CandidateExtractor, self).apply(xs, split=split, **kwargs)
def clear(self, session, split, **kwargs):
session.query(Candidate).filter(Candidate.split == split).delete()
class CandidateExtractorUDF(UDF):
def __init__(self, candidate_class, cspaces, matchers, self_relations, nested_relations, symmetric_relations, **kwargs):
self.candidate_class = candidate_class
self.candidate_spaces = cspaces if isinstance(cspaces, (list, tuple)) else [cspaces]
self.matchers = matchers if isinstance(matchers, (list, tuple)) else [matchers]
self.nested_relations = nested_relations
self.self_relations = self_relations
self.symmetric_relations = symmetric_relations
if len(self.candidate_spaces) != len(self.matchers):
raise ValueError("Mismatched arity of candidate space and matcher.")
else:
self.arity = len(self.candidate_spaces)
self.candidate_spaces = list(map(deepcopy, self.candidate_spaces))
# Preallocates internal data structures
self.child_context_sets = [None] * self.arity
for i in range(self.arity):
self.child_context_sets[i] = set()
super(CandidateExtractorUDF, self).__init__(**kwargs)
def apply(self, context, clear, split, **kwargs):
# Generate TemporaryContexts that are children of the context using the candidate_space and filtered
# by the Matcher
for i in range(self.arity):
self.child_context_sets[i].clear()
for tc in self.matchers[i].apply(self.candidate_spaces[i].apply(context)):
tc.load_id_or_insert(self.session)
self.child_context_sets[i].add(tc)
# Generates and persists candidates
extracted = set()
candidate_args = {'split': split}
for args in product(*[enumerate(child_contexts) for child_contexts in self.child_context_sets]):
# TODO: Make this work for higher-order relations
if self.arity == 2:
ai, a = args[0]
bi, b = args[1]
# Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
# "symmetric" relations. For symmetric relations, if mentions are of the same type, maintain
# their order in the sentence.
if not self.self_relations and a == b:
continue
elif not self.nested_relations and (a in b or b in a):
continue
elif not self.symmetric_relations and ((b, a) in extracted or
(self.matchers[0] == self.matchers[1] and a.char_start > b.char_start)):
continue
# Keep track of extracted
extracted.add((a,b))
# Assemble candidate arguments
for i, arg_name in enumerate(self.candidate_class.__argnames__):
candidate_args[arg_name + '_id'] = args[i][1].id
# Checking for existence
if not clear:
q = select([self.candidate_class.id])
for key, value in iteritems(candidate_args):
q = q.where(getattr(self.candidate_class, key) == value)
candidate_id = self.session.execute(q).first()
if candidate_id is not None:
continue
# Add Candidate to session
yield self.candidate_class(**candidate_args)
class CandidateSpace(object):
def __init__(self):
pass
def apply(self, x):
raise NotImplementedError()
class Ngrams(CandidateSpace):
def __init__(self, n_max=5, split_tokens=('-', '/')):
CandidateSpace.__init__(self)
self.n_max = n_max
self.split_rgx = r'('+r'|'.join(split_tokens)+r')' if split_tokens and len(split_tokens) > 0 else None
def apply(self, context):
# These are the character offset--**relative to the sentence start**--for each _token_
offsets = context.char_offsets
# Loop over all n-grams in **reverse** order (to facilitate longest-match semantics)
L = len(offsets)
seen = set()
for l in range(1, self.n_max+1)[::-1]:
for i in range(L-l+1):
w = context.words[i+l-1]
start = offsets[i]
end = offsets[i+l-1] + len(w) - 1
ts = TemporarySpan(char_start=start, char_end=end, sentence=context)
if ts not in seen:
seen.add(ts)
yield ts
# Check for split
# NOTE: For simplicity, we only split single tokens right now!
if l == 1 and self.split_rgx is not None and end - start > 0:
m = re.search(self.split_rgx, context.text[start-offsets[0]:end-offsets[0]+1])
if m is not None and l < self.n_max + 1:
ts1 = TemporarySpan(char_start=start, char_end=start + m.start(1) - 1, sentence=context)
if ts1 not in seen:
seen.add(ts1)
                            yield ts1
ts2 = TemporarySpan(char_start=start + m.end(1), char_end=end, sentence=context)
if ts2 not in seen:
seen.add(ts2)
yield ts2
class PretaggedCandidateExtractor(UDFRunner):
def __init__(self, candidate_class, entity_types, self_relations=False,
nested_relations=False, symmetric_relations=True, entity_sep='~@~'):
super(PretaggedCandidateExtractor, self).__init__(
PretaggedCandidateExtractorUDF, candidate_class=candidate_class,
entity_types=entity_types, self_relations=self_relations,
nested_relations=nested_relations, entity_sep=entity_sep,
symmetric_relations=symmetric_relations,
)
def apply(self, xs, split=0, **kwargs):
super(PretaggedCandidateExtractor, self).apply(xs, split=split, **kwargs)
def clear(self, session, split, **kwargs):
session.query(Candidate).filter(Candidate.split == split).delete()
class PretaggedCandidateExtractorUDF(UDF):
def __init__(self, candidate_class, entity_types, self_relations=False, nested_relations=False, symmetric_relations=False, entity_sep='~@~', **kwargs):
self.candidate_class = candidate_class
self.entity_types = entity_types
self.arity = len(entity_types)
self.self_relations = self_relations
self.nested_relations = nested_relations
self.symmetric_relations = symmetric_relations
self.entity_sep = entity_sep
super(PretaggedCandidateExtractorUDF, self).__init__(**kwargs)
def apply(self, context, clear, split, check_for_existing=True, **kwargs):
# For now, just handle Sentences
if not isinstance(context, Sentence):
            raise NotImplementedError("%s is currently only implemented for Sentence contexts." % type(self).__name__)
# Do a first pass to collect all mentions by entity type / cid
entity_idxs = dict((et, defaultdict(list)) for et in set(self.entity_types))
L = len(context.words)
for i in range(L):
if context.entity_types[i] is not None:
ets = context.entity_types[i].split(self.entity_sep)
cids = context.entity_cids[i].split(self.entity_sep)
for et, cid in zip(ets, cids):
if et in entity_idxs:
entity_idxs[et][cid].append(i)
# Form entity Spans
entity_spans = defaultdict(list)
entity_cids = {}
for et, cid_idxs in iteritems(entity_idxs):
for cid, idxs in iteritems(entity_idxs[et]):
while len(idxs) > 0:
i = idxs.pop(0)
char_start = context.char_offsets[i]
char_end = char_start + len(context.words[i]) - 1
while len(idxs) > 0 and idxs[0] == i + 1:
i = idxs.pop(0)
char_end = context.char_offsets[i] + len(context.words[i]) - 1
# Insert / load temporary span, also store map to entity CID
tc = TemporarySpan(char_start=char_start, char_end=char_end, sentence=context)
tc.load_id_or_insert(self.session)
entity_cids[tc.id] = cid
entity_spans[et].append(tc)
# Generates and persists candidates
candidate_args = {'split' : split}
for args in product(*[enumerate(entity_spans[et]) for et in self.entity_types]):
# TODO: Make this work for higher-order relations
if self.arity == 2:
ai, a = args[0]
bi, b = args[1]
# Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
# "symmetric" relations
if not self.self_relations and a == b:
continue
elif not self.nested_relations and (a in b or b in a):
continue
elif not self.symmetric_relations and ai > bi:
continue
# Assemble candidate arguments
for i, arg_name in enumerate(self.candidate_class.__argnames__):
candidate_args[arg_name + '_id'] = args[i][1].id
candidate_args[arg_name + '_cid'] = entity_cids[args[i][1].id]
# Checking for existence
if check_for_existing:
q = select([self.candidate_class.id])
for key, value in iteritems(candidate_args):
q = q.where(getattr(self.candidate_class, key) == value)
candidate_id = self.session.execute(q).first()
if candidate_id is not None:
continue
# Add Candidate to session
yield self.candidate_class(**candidate_args)
| true
| true
|
790607d45455f8cdf20fe8f993a75c221959ed7e
| 5,944
|
py
|
Python
|
asposewordscloud/models/error_details.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
asposewordscloud/models/error_details.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
asposewordscloud/models/error_details.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="error_details.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
import json
class ErrorDetails(object):
"""The error details.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error_date_time': 'datetime',
'request_id': 'str'
}
attribute_map = {
'error_date_time': 'ErrorDateTime',
'request_id': 'RequestId'
}
def __init__(self, error_date_time=None, request_id=None): # noqa: E501
"""ErrorDetails - a model defined in Swagger""" # noqa: E501
self._error_date_time = None
self._request_id = None
self.discriminator = None
if error_date_time is not None:
self.error_date_time = error_date_time
if request_id is not None:
self.request_id = request_id
@property
def error_date_time(self):
"""Gets the error_date_time of this ErrorDetails. # noqa: E501
Error datetime. # noqa: E501
:return: The error_date_time of this ErrorDetails. # noqa: E501
:rtype: datetime
"""
return self._error_date_time
@error_date_time.setter
def error_date_time(self, error_date_time):
"""Sets the error_date_time of this ErrorDetails.
Error datetime. # noqa: E501
:param error_date_time: The error_date_time of this ErrorDetails. # noqa: E501
:type: datetime
"""
self._error_date_time = error_date_time
@property
def request_id(self):
"""Gets the request_id of this ErrorDetails. # noqa: E501
The request id. # noqa: E501
:return: The request_id of this ErrorDetails. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this ErrorDetails.
The request id. # noqa: E501
:param request_id: The request_id of this ErrorDetails. # noqa: E501
:type: str
"""
self._request_id = request_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[self.attribute_map[attr]] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[self.attribute_map[attr]] = value.to_dict()
elif isinstance(value, dict):
result[self.attribute_map[attr]] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[self.attribute_map[attr]] = value
return json.dumps(result)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
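# --- Hedged usage sketch (not part of the generated model) ---
# attribute_map drives serialization: snake_case attributes come back
# under their Swagger (PascalCase) keys in to_json().
if __name__ == "__main__":
    details = ErrorDetails(request_id='abc-123')
    print(details.to_dict())  # {'error_date_time': None, 'request_id': 'abc-123'}
    print(details.to_json())  # {"ErrorDateTime": null, "RequestId": "abc-123"}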
| 33.772727
| 87
| 0.580585
|
import pprint
import re
import six
import json
class ErrorDetails(object):
swagger_types = {
'error_date_time': 'datetime',
'request_id': 'str'
}
attribute_map = {
'error_date_time': 'ErrorDateTime',
'request_id': 'RequestId'
}
def __init__(self, error_date_time=None, request_id=None):
self._error_date_time = None
self._request_id = None
self.discriminator = None
if error_date_time is not None:
self.error_date_time = error_date_time
if request_id is not None:
self.request_id = request_id
@property
def error_date_time(self):
return self._error_date_time
@error_date_time.setter
def error_date_time(self, error_date_time):
self._error_date_time = error_date_time
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, request_id):
self._request_id = request_id
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[self.attribute_map[attr]] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[self.attribute_map[attr]] = value.to_dict()
elif isinstance(value, dict):
result[self.attribute_map[attr]] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[self.attribute_map[attr]] = value
return json.dumps(result)
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ErrorDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
7906091591a1d85069c2da57aabf2e329ff7c1d8
| 2,161
|
py
|
Python
|
src/main/python/ecir2019_ccrf/generate_runs.py
|
kasys-lab/anserini
|
a31d386e23d399da8d2841d45b9e500f71fe1c9b
|
[
"Apache-2.0"
] | 626
|
2019-04-22T03:34:05.000Z
|
2022-03-31T03:56:05.000Z
|
src/main/python/ecir2019_ccrf/generate_runs.py
|
kasys-lab/anserini
|
a31d386e23d399da8d2841d45b9e500f71fe1c9b
|
[
"Apache-2.0"
] | 1,001
|
2019-04-22T12:35:59.000Z
|
2022-03-31T01:47:49.000Z
|
src/main/python/ecir2019_ccrf/generate_runs.py
|
kasys-lab/anserini
|
a31d386e23d399da8d2841d45b9e500f71fe1c9b
|
[
"Apache-2.0"
] | 290
|
2019-04-21T22:34:34.000Z
|
2022-03-27T16:59:13.000Z
|
import argparse
import logging
import json
import os
def submission(origin_file, topics, runtag, output_file):
with open(output_file, 'a') as fout, open(origin_file, 'r') as fin:
for line in fin:
data = line.strip().split(' ')
if data[0] in topics:
continue
data[-1] = runtag
fout.write(' '.join(data) + '\n')
def ensemble(folder, ratio, clf_list, runtag, output):
ensemble_dict = {}
for clf in clf_list:
with open('{}/{}/rerank_{}.txt'.format(folder, clf, ratio), 'r') as f:
for line in f:
data = line.split()
topic, docid, score = data[0], data[2], float(data[4])
if topic not in ensemble_dict:
ensemble_dict[topic] = {}
if docid not in ensemble_dict[topic]:
ensemble_dict[topic][docid] = 0
ensemble_dict[topic][docid] += score
with open(output, 'w') as f:
for topic in ensemble_dict:
for rank, (docid, score) in enumerate(sorted(ensemble_dict[topic].items(),
key=lambda x: -x[1])):
f.write('{} Q0 {} {} {} {}\n'.format(topic, docid, rank + 1, score, runtag))
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, help='config file', required=True)
args = parser.parse_args()
config_file = args.config
# Load configuration
with open(config_file) as f:
config = json.load(f)
model_directory = os.path.join(config['working_directory'], 'models')
assert os.path.isdir(model_directory)
for run in config['runs']:
runtag = run['runtag']
weight = run['weight']
output = os.path.join(config['working_directory'], run['output'])
logging.info(f'Preparing run for {runtag}')
ensemble(model_directory, weight, run['classifiers'], runtag, output)
submission(config['target']['run'], config['topics'], runtag, output)
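# --- Hedged illustration (not part of the original script) ---
# ensemble() is plain score fusion: per (topic, docid), sum the reranker
# scores across classifiers, then rank descending. The same arithmetic
# without any files (helper name is ours):
def _fuse(runs):
    """runs: iterable of {(topic, docid): score} dicts -> fused dict."""
    fused = {}
    for run in runs:
        for key, score in run.items():
            fused[key] = fused.get(key, 0.0) + score
    return fused
# _fuse([{('t1', 'd1'): 0.4}, {('t1', 'd1'): 0.5, ('t1', 'd2'): 0.3}])
# -> {('t1', 'd1'): 0.9, ('t1', 'd2'): 0.3}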
| 36.016667
| 92
| 0.574734
|
import argparse
import logging
import json
import os
def submission(origin_file, topics, runtag, output_file):
with open(output_file, 'a') as fout, open(origin_file, 'r') as fin:
for line in fin:
data = line.strip().split(' ')
if data[0] in topics:
continue
data[-1] = runtag
fout.write(' '.join(data) + '\n')
def ensemble(folder, ratio, clf_list, runtag, output):
ensemble_dict = {}
for clf in clf_list:
with open('{}/{}/rerank_{}.txt'.format(folder, clf, ratio), 'r') as f:
for line in f:
data = line.split()
topic, docid, score = data[0], data[2], float(data[4])
if topic not in ensemble_dict:
ensemble_dict[topic] = {}
if docid not in ensemble_dict[topic]:
ensemble_dict[topic][docid] = 0
ensemble_dict[topic][docid] += score
with open(output, 'w') as f:
for topic in ensemble_dict:
for rank, (docid, score) in enumerate(sorted(ensemble_dict[topic].items(),
key=lambda x: -x[1])):
f.write('{} Q0 {} {} {} {}\n'.format(topic, docid, rank + 1, score, runtag))
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, help='config file', required=True)
args = parser.parse_args()
config_file = args.config
with open(config_file) as f:
config = json.load(f)
model_directory = os.path.join(config['working_directory'], 'models')
assert os.path.isdir(model_directory)
for run in config['runs']:
runtag = run['runtag']
weight = run['weight']
output = os.path.join(config['working_directory'], run['output'])
logging.info(f'Preparing run for {runtag}')
ensemble(model_directory, weight, run['classifiers'], runtag, output)
submission(config['target']['run'], config['topics'], runtag, output)
| true
| true
|
79060a0ace72893ccbef8058f2e2b755b342b47a
| 694
|
py
|
Python
|
src/pretix/base/migrations/0105_auto_20190112_1512.py
|
fabm3n/pretix
|
520fb620888d5c434665a6a4a33cb2ab22dd42c7
|
[
"Apache-2.0"
] | 1,248
|
2015-04-24T13:32:06.000Z
|
2022-03-29T07:01:36.000Z
|
src/pretix/base/migrations/0105_auto_20190112_1512.py
|
fabm3n/pretix
|
520fb620888d5c434665a6a4a33cb2ab22dd42c7
|
[
"Apache-2.0"
] | 2,113
|
2015-02-18T18:58:16.000Z
|
2022-03-31T11:12:32.000Z
|
src/pretix/base/migrations/0105_auto_20190112_1512.py
|
fabm3n/pretix
|
520fb620888d5c434665a6a4a33cb2ab22dd42c7
|
[
"Apache-2.0"
] | 453
|
2015-05-13T09:29:06.000Z
|
2022-03-24T13:39:16.000Z
|
# Generated by Django 2.1 on 2019-01-12 15:12
import django.db.models.deletion
from django.db import migrations, models
import pretix.base.models.fields
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0104_auto_20181114_1526'),
]
operations = [
migrations.AddField(
model_name='invoiceaddress',
name='beneficiary',
field=models.TextField(blank=True, verbose_name='Beneficiary'),
),
migrations.AddField(
model_name='invoice',
name='invoice_to_beneficiary',
field=models.TextField(blank=True, null=True, verbose_name='Beneficiary'),
),
]
| 25.703704
| 86
| 0.635447
|
import django.db.models.deletion
from django.db import migrations, models
import pretix.base.models.fields
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0104_auto_20181114_1526'),
]
operations = [
migrations.AddField(
model_name='invoiceaddress',
name='beneficiary',
field=models.TextField(blank=True, verbose_name='Beneficiary'),
),
migrations.AddField(
model_name='invoice',
name='invoice_to_beneficiary',
field=models.TextField(blank=True, null=True, verbose_name='Beneficiary'),
),
]
| true
| true
|
79060aa623d051989c5194e74c7e2b5719228d8d
| 14,028
|
py
|
Python
|
scanpy/_settings.py
|
gamazeps/scanpy
|
a1949935dc4b45b64ddad1a53c2a7679395cf2ed
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/_settings.py
|
gamazeps/scanpy
|
a1949935dc4b45b64ddad1a53c2a7679395cf2ed
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/_settings.py
|
gamazeps/scanpy
|
a1949935dc4b45b64ddad1a53c2a7679395cf2ed
|
[
"BSD-3-Clause"
] | null | null | null |
import inspect
import sys
from enum import IntEnum
from pathlib import Path
from time import time
from logging import getLevelName
from typing import Tuple, Union, Any, List, Iterable, TextIO, Optional
from . import logging
from .logging import _set_log_level, _set_log_file, RootLogger
_VERBOSITY_TO_LOGLEVEL = {
'error': 'ERROR',
'warning': 'WARNING',
'info': 'INFO',
'hint': 'HINT',
'debug': 'DEBUG',
}
# Python 3.7 ensures iteration order
for v, level in enumerate(list(_VERBOSITY_TO_LOGLEVEL.values())):
_VERBOSITY_TO_LOGLEVEL[v] = level
class Verbosity(IntEnum):
error = 0
warn = 1
info = 2
hint = 3
debug = 4
@property
def level(self) -> int:
# getLevelName(str) returns the int level…
return getLevelName(_VERBOSITY_TO_LOGLEVEL[self])
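# --- Hedged illustration (not part of the original module) ---
# getLevelName is bidirectional: passing a level *name* returns the numeric
# level, which is what Verbosity.level relies on. For the standard levels:
assert getLevelName('WARNING') == 30
assert Verbosity.warn.level == 30  # 'warning' -> 'WARNING' -> 30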
def _type_check(var: Any, varname: str, types: Union[type, Tuple[type, ...]]):
if isinstance(var, types):
return
if isinstance(types, type):
possible_types_str = types.__name__
else:
type_names = [t.__name__ for t in types]
possible_types_str = "{} or {}".format(
", ".join(type_names[:-1]), type_names[-1]
)
raise TypeError(f"{varname} must be of type {possible_types_str}")
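# Example of the check above (hypothetical values): _type_check(1, "n_jobs", int)
# returns silently, while _type_check("1", "n_jobs", (int, float)) raises
# TypeError("n_jobs must be of type int or float").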
class ScanpyConfig:
"""Config manager for scanpy.
"""
def __init__(
self,
*,
verbosity: str = "warning",
plot_suffix: str = "",
file_format_data: str = "h5ad",
file_format_figs: str = "pdf",
autosave: bool = False,
autoshow: bool = True,
writedir: Union[str, Path] = "./write/",
cachedir: Union[str, Path] = "./cache/",
datasetdir: Union[str, Path] = "./data/",
figdir: Union[str, Path] = "./figures/",
max_memory=15,
n_jobs=1,
logfile: Union[str, Path, None] = None,
categories_to_ignore: Iterable[str] = ("N/A", "dontknow", "no_gate", "?"),
_frameon: bool = True,
_vector_friendly: bool = False,
_low_resolution_warning: bool = True,
):
# logging
self._root_logger = RootLogger(logging.INFO) # level will be replaced
self.logfile = logfile
self.verbosity = verbosity
# rest
self.plot_suffix = plot_suffix
self.file_format_data = file_format_data
self.file_format_figs = file_format_figs
self.autosave = autosave
self.autoshow = autoshow
self.writedir = writedir
self.cachedir = cachedir
self.datasetdir = datasetdir
self.figdir = figdir
self.max_memory = max_memory
self.n_jobs = n_jobs
self.categories_to_ignore = categories_to_ignore
self._frameon = _frameon
"""bool: See set_figure_params."""
self._vector_friendly = _vector_friendly
"""Set to true if you want to include pngs in svgs and pdfs."""
self._low_resolution_warning = _low_resolution_warning
"""Print warning when saving a figure with low resolution."""
self._start = time()
"""Time when the settings module is first imported."""
self._previous_time = self._start
"""Variable for timing program parts."""
self._previous_memory_usage = -1
"""Stores the previous memory usage."""
@property
def verbosity(self) -> Verbosity:
"""
Verbosity level (default `warning`)
Level 0: only show 'error' messages.
Level 1: also show 'warning' messages.
Level 2: also show 'info' messages.
Level 3: also show 'hint' messages.
Level 4: also show very detailed progress for 'debug'ging.
"""
return self._verbosity
@verbosity.setter
def verbosity(self, verbosity: Union[Verbosity, int, str]):
verbosity_str_options = [
v for v in _VERBOSITY_TO_LOGLEVEL
if isinstance(v, str)
]
if isinstance(verbosity, Verbosity):
self._verbosity = verbosity
elif isinstance(verbosity, int):
self._verbosity = Verbosity(verbosity)
elif isinstance(verbosity, str):
verbosity = verbosity.lower()
if verbosity not in verbosity_str_options:
raise ValueError(
f"Cannot set verbosity to {verbosity}. "
f"Accepted string values are: {verbosity_str_options}"
)
else:
self._verbosity = Verbosity(verbosity_str_options.index(verbosity))
else:
_type_check(verbosity, "verbosity", (str, int))
_set_log_level(self, _VERBOSITY_TO_LOGLEVEL[self._verbosity])
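    # Equivalent ways to set the verbosity (a usage sketch of the setter above):
    #   settings.verbosity = Verbosity.hint
    #   settings.verbosity = 3
    #   settings.verbosity = 'hint'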
@property
def plot_suffix(self) -> str:
"""Global suffix that is appended to figure filenames.
"""
return self._plot_suffix
@plot_suffix.setter
def plot_suffix(self, plot_suffix: str):
_type_check(plot_suffix, "plot_suffix", str)
self._plot_suffix = plot_suffix
@property
def file_format_data(self) -> str:
"""File format for saving AnnData objects.
        Allowed are 'txt' and 'csv' (comma-separated values) for exporting, and
        'h5ad' (HDF5) for lossless saving.
"""
return self._file_format_data
@file_format_data.setter
def file_format_data(self, file_format: str):
_type_check(file_format, "file_format_data", str)
file_format_options = {"txt", "csv", "h5ad"}
if file_format not in file_format_options:
raise ValueError(
f"Cannot set file_format_data to {file_format}. "
f"Must be one of {file_format_options}"
)
self._file_format_data = file_format
@property
def file_format_figs(self) -> str:
"""File format for saving figures.
For example 'png', 'pdf' or 'svg'. Many other formats work as well (see
`matplotlib.pyplot.savefig`).
"""
return self._file_format_figs
@file_format_figs.setter
def file_format_figs(self, figure_format: str):
_type_check(figure_format, "figure_format_data", str)
self._file_format_figs = figure_format
@property
def autosave(self) -> bool:
"""\
Automatically save figures in :attr:`~scanpy._settings.ScanpyConfig.figdir` (default `False`).
Do not show plots/figures interactively.
"""
return self._autosave
@autosave.setter
def autosave(self, autosave: bool):
_type_check(autosave, "autosave", bool)
self._autosave = autosave
@property
def autoshow(self) -> bool:
"""\
Automatically show figures if `autosave == False` (default `True`).
There is no need to call the matplotlib pl.show() in this case.
"""
return self._autoshow
@autoshow.setter
def autoshow(self, autoshow: bool):
_type_check(autoshow, "autoshow", bool)
self._autoshow = autoshow
@property
def writedir(self) -> Path:
"""\
Directory where the function scanpy.write writes to by default.
"""
return self._writedir
@writedir.setter
def writedir(self, writedir: Union[str, Path]):
_type_check(writedir, "writedir", (str, Path))
self._writedir = Path(writedir)
@property
def cachedir(self) -> Path:
"""\
Directory for cache files (default `'./cache/'`).
"""
return self._cachedir
@cachedir.setter
def cachedir(self, cachedir: Union[str, Path]):
_type_check(cachedir, "cachedir", (str, Path))
self._cachedir = Path(cachedir)
@property
def datasetdir(self) -> Path:
"""\
Directory for example :mod:`~scanpy.datasets` (default `'./data/'`).
"""
return self._datasetdir
@datasetdir.setter
def datasetdir(self, datasetdir: Union[str, Path]):
_type_check(datasetdir, "datasetdir", (str, Path))
self._datasetdir = Path(datasetdir).resolve()
@property
def figdir(self) -> Path:
"""\
Directory for saving figures (default `'./figures/'`).
"""
return self._figdir
@figdir.setter
def figdir(self, figdir: Union[str, Path]):
_type_check(figdir, "figdir", (str, Path))
self._figdir = Path(figdir)
@property
def max_memory(self) -> Union[int, float]:
"""\
Maximal memory usage in Gigabyte.
        This limit is currently not strictly enforced.
"""
return self._max_memory
@max_memory.setter
def max_memory(self, max_memory: Union[int, float]):
_type_check(max_memory, "max_memory", (int, float))
self._max_memory = max_memory
@property
def n_jobs(self) -> int:
"""\
Default number of jobs/ CPUs to use for parallel computing.
"""
return self._n_jobs
@n_jobs.setter
def n_jobs(self, n_jobs: int):
_type_check(n_jobs, "n_jobs", int)
self._n_jobs = n_jobs
@property
def logpath(self) -> Optional[Path]:
"""\
The file path `logfile` was set to.
"""
return self._logpath
@logpath.setter
def logpath(self, logpath: Union[str, Path, None]):
_type_check(logpath, "logfile", (str, Path))
        # opening the file here routes the assignment through the
        # file-object branch of the logfile setter below
self.logfile = Path(logpath).open('a')
self._logpath = Path(logpath)
@property
def logfile(self) -> TextIO:
"""\
The open file to write logs to.
Set it to a :class:`~pathlib.Path` or :class:`str` to open a new one.
The default `None` corresponds to :obj:`sys.stdout` in jupyter notebooks
and to :obj:`sys.stderr` otherwise.
For backwards compatibility, setting it to `''` behaves like setting it to `None`.
"""
return self._logfile
@logfile.setter
def logfile(self, logfile: Union[str, Path, TextIO, None]):
if not hasattr(logfile, 'write') and logfile:
self.logpath = logfile
else: # file object
if not logfile: # None or ''
logfile = sys.stdout if self._is_run_from_ipython() else sys.stderr
self._logfile = logfile
self._logpath = None
_set_log_file(self)
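    # Usage sketch: `settings.logfile = 'scanpy.log'` routes through the logpath
    # setter above and opens the file in append mode, while
    # `settings.logfile = None` falls back to sys.stdout (under IPython)
    # or sys.stderr.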
@property
def categories_to_ignore(self) -> List[str]:
"""\
Categories that are omitted in plotting etc.
"""
return self._categories_to_ignore
@categories_to_ignore.setter
def categories_to_ignore(self, categories_to_ignore: Iterable[str]):
categories_to_ignore = list(categories_to_ignore)
for i, cat in enumerate(categories_to_ignore):
_type_check(cat, f"categories_to_ignore[{i}]", str)
self._categories_to_ignore = categories_to_ignore
# --------------------------------------------------------------------------------
# Functions
# --------------------------------------------------------------------------------
def set_figure_params(
self,
scanpy: bool = True,
dpi: int = 80,
dpi_save: int = 150,
frameon: bool = True,
vector_friendly: bool = True,
fontsize: int = 14,
color_map: Optional[str] = None,
format: Union[str, Iterable[str]] = "pdf",
transparent: bool = False,
ipython_format: str = "png2x",
):
"""\
Set resolution/size, styling and format of figures.
Parameters
----------
scanpy
Init default values for :obj:`matplotlib.rcParams` suited for Scanpy.
dpi
Resolution of rendered figures - this influences the size of figures in notebooks.
dpi_save
Resolution of saved figures. This should typically be higher to achieve
publication quality.
frameon
Add frames and axes labels to scatter plots.
vector_friendly
Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`.
fontsize
Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`.
color_map
Convenience method for setting the default color map. Ignored if `scanpy=False`.
format: {`'png'`, `'pdf'`, `'svg'`, etc.}, optional (default: `'pdf'`)
This sets the default format for saving figures: `file_format_figs`.
transparent
        Save figures with transparent background. Sets
`rcParams['savefig.transparent']`.
ipython_format
Only concerns the notebook/IPython environment; see
:func:`~IPython.display.set_matplotlib_formats` for details.
"""
try:
import IPython
if isinstance(ipython_format, str):
ipython_format = [ipython_format]
IPython.display.set_matplotlib_formats(*ipython_format)
except Exception:
pass
from matplotlib import rcParams
self._vector_friendly = vector_friendly
self.file_format_figs = format
if dpi is not None:
rcParams["figure.dpi"] = dpi
if dpi_save is not None:
rcParams["savefig.dpi"] = dpi_save
if transparent is not None:
rcParams["savefig.transparent"] = transparent
if scanpy:
from .plotting._rcmod import set_rcParams_scanpy
set_rcParams_scanpy(fontsize=fontsize, color_map=color_map)
self._frameon = frameon
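    # Usage sketch (a hypothetical notebook setup using the parameters above):
    #   settings.set_figure_params(dpi=100, frameon=False, format='svg')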
@staticmethod
def _is_run_from_ipython():
"""Determines whether run from Ipython.
Only affects progress bars.
"""
try:
__IPYTHON__
return True
except NameError:
return False
def __str__(self) -> str:
return '\n'.join(
f'{k} = {v!r}'
for k, v in inspect.getmembers(self)
if not k.startswith("_") and not k == 'getdoc'
)
settings = ScanpyConfig()
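# Usage sketch: this module-level singleton is the object scanpy exposes as
# `scanpy.settings`, e.g.:
#   settings.verbosity = 'hint'
#   settings.figdir = './figures/'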
| 32.397229
| 102
| 0.597662
|
import inspect
import sys
from enum import IntEnum
from pathlib import Path
from time import time
from logging import getLevelName
from typing import Tuple, Union, Any, List, Iterable, TextIO, Optional
from . import logging
from .logging import _set_log_level, _set_log_file, RootLogger
_VERBOSITY_TO_LOGLEVEL = {
'error': 'ERROR',
'warning': 'WARNING',
'info': 'INFO',
'hint': 'HINT',
'debug': 'DEBUG',
}
for v, level in enumerate(list(_VERBOSITY_TO_LOGLEVEL.values())):
_VERBOSITY_TO_LOGLEVEL[v] = level
class Verbosity(IntEnum):
error = 0
warn = 1
info = 2
hint = 3
debug = 4
@property
def level(self) -> int:
return getLevelName(_VERBOSITY_TO_LOGLEVEL[self])
def _type_check(var: Any, varname: str, types: Union[type, Tuple[type, ...]]):
if isinstance(var, types):
return
if isinstance(types, type):
possible_types_str = types.__name__
else:
type_names = [t.__name__ for t in types]
possible_types_str = "{} or {}".format(
", ".join(type_names[:-1]), type_names[-1]
)
raise TypeError(f"{varname} must be of type {possible_types_str}")
class ScanpyConfig:
def __init__(
self,
*,
verbosity: str = "warning",
plot_suffix: str = "",
file_format_data: str = "h5ad",
file_format_figs: str = "pdf",
autosave: bool = False,
autoshow: bool = True,
writedir: Union[str, Path] = "./write/",
cachedir: Union[str, Path] = "./cache/",
datasetdir: Union[str, Path] = "./data/",
figdir: Union[str, Path] = "./figures/",
max_memory=15,
n_jobs=1,
logfile: Union[str, Path, None] = None,
categories_to_ignore: Iterable[str] = ("N/A", "dontknow", "no_gate", "?"),
_frameon: bool = True,
_vector_friendly: bool = False,
_low_resolution_warning: bool = True,
):
self._root_logger = RootLogger(logging.INFO)
self.logfile = logfile
self.verbosity = verbosity
self.plot_suffix = plot_suffix
self.file_format_data = file_format_data
self.file_format_figs = file_format_figs
self.autosave = autosave
self.autoshow = autoshow
self.writedir = writedir
self.cachedir = cachedir
self.datasetdir = datasetdir
self.figdir = figdir
self.max_memory = max_memory
self.n_jobs = n_jobs
self.categories_to_ignore = categories_to_ignore
self._frameon = _frameon
self._vector_friendly = _vector_friendly
self._low_resolution_warning = _low_resolution_warning
self._start = time()
self._previous_time = self._start
self._previous_memory_usage = -1
@property
def verbosity(self) -> Verbosity:
return self._verbosity
@verbosity.setter
def verbosity(self, verbosity: Union[Verbosity, int, str]):
verbosity_str_options = [
v for v in _VERBOSITY_TO_LOGLEVEL
if isinstance(v, str)
]
if isinstance(verbosity, Verbosity):
self._verbosity = verbosity
elif isinstance(verbosity, int):
self._verbosity = Verbosity(verbosity)
elif isinstance(verbosity, str):
verbosity = verbosity.lower()
if verbosity not in verbosity_str_options:
raise ValueError(
f"Cannot set verbosity to {verbosity}. "
f"Accepted string values are: {verbosity_str_options}"
)
else:
self._verbosity = Verbosity(verbosity_str_options.index(verbosity))
else:
_type_check(verbosity, "verbosity", (str, int))
_set_log_level(self, _VERBOSITY_TO_LOGLEVEL[self._verbosity])
@property
def plot_suffix(self) -> str:
return self._plot_suffix
@plot_suffix.setter
def plot_suffix(self, plot_suffix: str):
_type_check(plot_suffix, "plot_suffix", str)
self._plot_suffix = plot_suffix
@property
def file_format_data(self) -> str:
return self._file_format_data
@file_format_data.setter
def file_format_data(self, file_format: str):
_type_check(file_format, "file_format_data", str)
file_format_options = {"txt", "csv", "h5ad"}
if file_format not in file_format_options:
raise ValueError(
f"Cannot set file_format_data to {file_format}. "
f"Must be one of {file_format_options}"
)
self._file_format_data = file_format
@property
def file_format_figs(self) -> str:
return self._file_format_figs
@file_format_figs.setter
def file_format_figs(self, figure_format: str):
_type_check(figure_format, "figure_format_data", str)
self._file_format_figs = figure_format
@property
def autosave(self) -> bool:
return self._autosave
@autosave.setter
def autosave(self, autosave: bool):
_type_check(autosave, "autosave", bool)
self._autosave = autosave
@property
def autoshow(self) -> bool:
return self._autoshow
@autoshow.setter
def autoshow(self, autoshow: bool):
_type_check(autoshow, "autoshow", bool)
self._autoshow = autoshow
@property
def writedir(self) -> Path:
return self._writedir
@writedir.setter
def writedir(self, writedir: Union[str, Path]):
_type_check(writedir, "writedir", (str, Path))
self._writedir = Path(writedir)
@property
def cachedir(self) -> Path:
return self._cachedir
@cachedir.setter
def cachedir(self, cachedir: Union[str, Path]):
_type_check(cachedir, "cachedir", (str, Path))
self._cachedir = Path(cachedir)
@property
def datasetdir(self) -> Path:
return self._datasetdir
@datasetdir.setter
def datasetdir(self, datasetdir: Union[str, Path]):
_type_check(datasetdir, "datasetdir", (str, Path))
self._datasetdir = Path(datasetdir).resolve()
@property
def figdir(self) -> Path:
return self._figdir
@figdir.setter
def figdir(self, figdir: Union[str, Path]):
_type_check(figdir, "figdir", (str, Path))
self._figdir = Path(figdir)
@property
def max_memory(self) -> Union[int, float]:
return self._max_memory
@max_memory.setter
def max_memory(self, max_memory: Union[int, float]):
_type_check(max_memory, "max_memory", (int, float))
self._max_memory = max_memory
@property
def n_jobs(self) -> int:
return self._n_jobs
@n_jobs.setter
def n_jobs(self, n_jobs: int):
_type_check(n_jobs, "n_jobs", int)
self._n_jobs = n_jobs
@property
def logpath(self) -> Optional[Path]:
return self._logpath
@logpath.setter
def logpath(self, logpath: Union[str, Path, None]):
_type_check(logpath, "logfile", (str, Path))
self.logfile = Path(logpath).open('a')
self._logpath = Path(logpath)
@property
def logfile(self) -> TextIO:
return self._logfile
@logfile.setter
def logfile(self, logfile: Union[str, Path, TextIO, None]):
if not hasattr(logfile, 'write') and logfile:
self.logpath = logfile
else:
if not logfile:
logfile = sys.stdout if self._is_run_from_ipython() else sys.stderr
self._logfile = logfile
self._logpath = None
_set_log_file(self)
@property
def categories_to_ignore(self) -> List[str]:
return self._categories_to_ignore
@categories_to_ignore.setter
def categories_to_ignore(self, categories_to_ignore: Iterable[str]):
categories_to_ignore = list(categories_to_ignore)
for i, cat in enumerate(categories_to_ignore):
_type_check(cat, f"categories_to_ignore[{i}]", str)
self._categories_to_ignore = categories_to_ignore
def set_figure_params(
self,
scanpy: bool = True,
dpi: int = 80,
dpi_save: int = 150,
frameon: bool = True,
vector_friendly: bool = True,
fontsize: int = 14,
color_map: Optional[str] = None,
format: Union[str, Iterable[str]] = "pdf",
transparent: bool = False,
ipython_format: str = "png2x",
):
try:
import IPython
if isinstance(ipython_format, str):
ipython_format = [ipython_format]
IPython.display.set_matplotlib_formats(*ipython_format)
except Exception:
pass
from matplotlib import rcParams
self._vector_friendly = vector_friendly
self.file_format_figs = format
if dpi is not None:
rcParams["figure.dpi"] = dpi
if dpi_save is not None:
rcParams["savefig.dpi"] = dpi_save
if transparent is not None:
rcParams["savefig.transparent"] = transparent
if scanpy:
from .plotting._rcmod import set_rcParams_scanpy
set_rcParams_scanpy(fontsize=fontsize, color_map=color_map)
self._frameon = frameon
@staticmethod
def _is_run_from_ipython():
try:
__IPYTHON__
return True
except NameError:
return False
def __str__(self) -> str:
return '\n'.join(
f'{k} = {v!r}'
for k, v in inspect.getmembers(self)
if not k.startswith("_") and not k == 'getdoc'
)
settings = ScanpyConfig()
| true
| true
|
79060b9bb3f6d7260b99d143dbf8615f9dd467fe
| 36,469
|
py
|
Python
|
pyiron_atomistics/vasp/outcar.py
|
pyiron/pyiron_atomistic
|
0cd4c910806f44dfc829ddd642e323efcf7e36d5
|
[
"BSD-3-Clause"
] | 14
|
2021-01-18T10:03:56.000Z
|
2022-03-01T20:59:35.000Z
|
pyiron_atomistics/vasp/outcar.py
|
pyiron/pyiron_atomistics
|
0cd4c910806f44dfc829ddd642e323efcf7e36d5
|
[
"BSD-3-Clause"
] | 569
|
2018-04-12T06:37:14.000Z
|
2022-03-31T18:06:27.000Z
|
pyiron_atomistics/vasp/outcar.py
|
pyiron/pyiron_atomistic
|
0cd4c910806f44dfc829ddd642e323efcf7e36d5
|
[
"BSD-3-Clause"
] | 6
|
2018-10-23T09:48:48.000Z
|
2022-02-13T12:13:00.000Z
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from collections import OrderedDict
import numpy as np
import warnings
import scipy.constants
import re
__author__ = "Sudarsan Surendralal"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Sudarsan Surendralal"
__email__ = "surendralal@mpie.de"
__status__ = "production"
__date__ = "Sep 1, 2017"
KBAR_TO_EVA = (
scipy.constants.physical_constants["joule-electron volt relationship"][0] / 1e22
)
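# Worked conversion behind the constant above: 1 kbar = 1e8 Pa = 1e8 J/m^3;
# with 1 J ≈ 6.2415e18 eV and 1 m^3 = 1e30 Å^3, this gives
# 1 kbar ≈ 6.2415e18 / 1e22 eV/Å^3 ≈ 6.24e-4 eV/Å^3.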
class Outcar(object):
"""
    This class is used to parse VASP OUTCAR files.
Attributes:
parse_dict (dict): A dictionary with all the useful quantities parsed from an OUTCAR file after from_file() is
executed
"""
def __init__(self):
self.parse_dict = dict()
def from_file(self, filename="OUTCAR"):
"""
Parse and store relevant quantities from the OUTCAR file into parse_dict.
Args:
filename (str): Filename of the OUTCAR file to parse
"""
with open(filename, "r") as f:
lines = f.readlines()
energies = self.get_total_energies(filename=filename, lines=lines)
energies_int = self.get_energy_without_entropy(filename=filename, lines=lines)
energies_zero = self.get_energy_sigma_0(filename=filename, lines=lines)
scf_energies = self.get_all_total_energies(filename=filename, lines=lines)
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
forces = self.get_forces(filename=filename, lines=lines, n_atoms=n_atoms)
positions = self.get_positions(filename=filename, lines=lines, n_atoms=n_atoms)
cells = self.get_cells(filename=filename, lines=lines)
steps = self.get_steps(filename=filename, lines=lines)
temperatures = self.get_temperatures(filename=filename, lines=lines)
time = self.get_time(filename=filename, lines=lines)
fermi_level = self.get_fermi_level(filename=filename, lines=lines)
scf_moments = self.get_dipole_moments(filename=filename, lines=lines)
kin_energy_error = self.get_kinetic_energy_error(filename=filename, lines=lines)
stresses = self.get_stresses(filename=filename, si_unit=False, lines=lines)
n_elect = self.get_nelect(filename=filename, lines=lines)
e_fermi_list, vbm_list, cbm_list = self.get_band_properties(filename=filename, lines=lines)
elastic_constants = self.get_elastic_constants(filename=filename, lines=lines)
try:
irreducible_kpoints = self.get_irreducible_kpoints(
filename=filename, lines=lines
)
except ValueError:
print("irreducible kpoints not parsed !")
irreducible_kpoints = None
magnetization, final_magmom_lst = self.get_magnetization(
filename=filename, lines=lines
)
broyden_mixing = self.get_broyden_mixing_mesh(filename=filename, lines=lines)
self.parse_dict["energies"] = energies
self.parse_dict["energies_int"] = energies_int
self.parse_dict["energies_zero"] = energies_zero
self.parse_dict["scf_energies"] = scf_energies
self.parse_dict["forces"] = forces
self.parse_dict["positions"] = positions
self.parse_dict["cells"] = cells
self.parse_dict["steps"] = steps
self.parse_dict["temperatures"] = temperatures
self.parse_dict["time"] = time
self.parse_dict["fermi_level"] = fermi_level
self.parse_dict["scf_dipole_moments"] = scf_moments
self.parse_dict["kin_energy_error"] = kin_energy_error
self.parse_dict["stresses"] = stresses
self.parse_dict["irreducible_kpoints"] = irreducible_kpoints
self.parse_dict["magnetization"] = magnetization
self.parse_dict["final_magmoms"] = final_magmom_lst
self.parse_dict["broyden_mixing"] = broyden_mixing
self.parse_dict["n_elect"] = n_elect
self.parse_dict["e_fermi_list"] = e_fermi_list
self.parse_dict["vbm_list"] = vbm_list
self.parse_dict["cbm_list"] = cbm_list
self.parse_dict["elastic_constants"] = elastic_constants
try:
self.parse_dict["pressures"] = (
np.average(stresses[:, 0:3], axis=1) * KBAR_TO_EVA
)
except IndexError:
self.parse_dict["pressures"] = np.zeros(len(steps))
def to_hdf(self, hdf, group_name="outcar"):
"""
Store output in an HDF5 file
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
"""
with hdf.open(group_name) as hdf5_output:
for key in self.parse_dict.keys():
hdf5_output[key] = self.parse_dict[key]
def to_hdf_minimal(self, hdf, group_name="outcar"):
"""
Store minimal output in an HDF5 file (output unique to OUTCAR)
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
"""
unique_quantities = [
"kin_energy_error",
"broyden_mixing",
"stresses",
"irreducible_kpoints",
]
with hdf.open(group_name) as hdf5_output:
for key in self.parse_dict.keys():
if key in unique_quantities:
hdf5_output[key] = self.parse_dict[key]
def from_hdf(self, hdf, group_name="outcar"):
"""
Load output from an HDF5 file
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
"""
with hdf.open(group_name) as hdf5_output:
for key in hdf5_output.list_nodes():
self.parse_dict[key] = hdf5_output[key]
def get_positions_and_forces(self, filename="OUTCAR", lines=None, n_atoms=None):
"""
Gets the forces and positions for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
[positions, forces] (sequence)
numpy.ndarray: A Nx3xM array of positions in $\AA$
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
"""
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=True,
force_flag=True,
)
def get_positions(self, filename="OUTCAR", lines=None, n_atoms=None):
"""
Gets the positions for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
numpy.ndarray: A Nx3xM array of positions in $\AA$
where N is the number of atoms and M is the number of time steps
"""
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=True,
force_flag=False,
)
def get_forces(self, filename="OUTCAR", lines=None, n_atoms=None):
"""
Gets the forces for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
"""
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=False,
force_flag=True,
)
def get_cells(self, filename="OUTCAR", lines=None):
"""
Gets the cell size and shape for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 3x3xM array of the cell shape in $\AA$
where M is the number of time steps
"""
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="VOLUME and BASIS-vectors are now :"
)
return self._get_cells_praser(lines=lines, trigger_indices=trigger_indices)
@staticmethod
def get_stresses(filename="OUTCAR", lines=None, si_unit=True):
"""
Args:
filename (str): Input filename
lines (list/None): lines read from the file
si_unit (bool): True SI units are used
Returns:
numpy.ndarray: An array of stress values
"""
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FORCE on cell =-STRESS in cart. coord. units (eV):",
)
pullay_stress_lst = []
for j in trigger_indices:
try:
if si_unit:
pullay_stress_lst.append(
[float(l) for l in lines[j + 13].split()[1:7]]
)
else:
pullay_stress_lst.append(
[float(l) for l in lines[j + 14].split()[2:8]]
)
except ValueError:
if si_unit:
pullay_stress_lst.append([float("NaN")] * 6)
else:
pullay_stress_lst.append([float("NaN")] * 6)
return np.array(pullay_stress_lst)
@staticmethod
def get_irreducible_kpoints(
filename="OUTCAR", reciprocal=True, weight=True, planewaves=True, lines=None
):
"""
Function to extract the irreducible kpoints from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
reciprocal (bool): Get either the reciprocal or the cartesian coordinates
weight (bool): Get the weight assigned to the irreducible kpoints
planewaves (bool): Get the planewaves assigned to the irreducible kpoints
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of k-points
"""
kpoint_lst = []
weight_lst = []
planewaves_lst = []
trigger_number_str = "Subroutine IBZKPT returns following result:"
trigger_plane_waves_str = "k-point 1 :"
trigger_number = 0
trigger_plane_waves = 0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger_number_str in line:
trigger_number = int(i)
elif planewaves:
if trigger_plane_waves_str in line:
trigger_plane_waves = int(i)
number_irr_kpoints = int(lines[trigger_number + 3].split()[1])
if reciprocal:
trigger_start = trigger_number + 7
else:
trigger_start = trigger_number + 10 + number_irr_kpoints
for line in lines[trigger_start : trigger_start + number_irr_kpoints]:
line = line.strip()
line = _clean_line(line)
kpoint_lst.append([float(l) for l in line.split()[0:3]])
if weight:
weight_lst.append(float(line.split()[3]))
if planewaves and trigger_plane_waves != 0:
for line in lines[
trigger_plane_waves : trigger_plane_waves + number_irr_kpoints
]:
line = line.strip()
line = _clean_line(line)
planewaves_lst.append(float(line.split()[-1]))
if weight and planewaves:
return np.array(kpoint_lst), np.array(weight_lst), np.array(planewaves_lst)
elif weight:
return np.array(kpoint_lst), np.array(weight_lst)
elif planewaves:
return np.array(kpoint_lst), np.array(planewaves_lst)
else:
return np.array(kpoint_lst)
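    # Return combinations for the flags above: k-points alone, (k-points,
    # weights), (k-points, planewaves), or (k-points, weights, planewaves)
    # when both `weight` and `planewaves` are True.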
@staticmethod
def get_total_energies(filename="OUTCAR", lines=None):
"""
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
"""
def get_total_energies_from_line(line):
return float(_clean_line(line.strip()).split()[-2])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[get_total_energies_from_line(lines[j + 2]) for j in trigger_indices]
)
@staticmethod
def get_energy_without_entropy(filename="OUTCAR", lines=None):
"""
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
"""
def get_energy_without_entropy_from_line(line):
return float(_clean_line(line.strip()).split()[3])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[
get_energy_without_entropy_from_line(lines[j + 4])
for j in trigger_indices
]
)
@staticmethod
def get_energy_sigma_0(filename="OUTCAR", lines=None):
"""
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
"""
def get_energy_sigma_0_from_line(line):
return float(_clean_line(line.strip()).split()[-1])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[get_energy_sigma_0_from_line(lines[j + 4]) for j in trigger_indices]
)
@staticmethod
def get_all_total_energies(filename="OUTCAR", lines=None):
"""
Gets the energy at every electronic step
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
            list: A list of energies for every electronic step at every ionic step
"""
ionic_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
electronic_trigger = "free energy TOTEN ="
scf_energies = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_energies = list()
for i, line in enumerate(lines):
line = line.strip()
if ionic_trigger in line:
scf_energies.append(np.array(istep_energies))
istep_energies = list()
if electronic_trigger in line:
line = _clean_line(line)
ene = float(line.split()[-2])
istep_energies.append(ene)
return scf_energies
@staticmethod
def get_magnetization(filename="OUTCAR", lines=None):
"""
Gets the magnetization
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
            list: A list with the magnetization values
"""
ionic_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
electronic_trigger = "eigenvalue-minimisations"
nion_trigger = "NIONS ="
mag_lst = list()
local_spin_trigger = False
n_atoms = None
mag_dict = dict()
mag_dict["x"] = list()
mag_dict["y"] = list()
mag_dict["z"] = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_energies = list()
final_magmom_lst = list()
for i, line in enumerate(lines):
line = line.strip()
if ionic_trigger in line:
mag_lst.append(np.array(istep_energies))
istep_energies = list()
if "Atomic Wigner-Seitz radii" in line:
local_spin_trigger = True
if electronic_trigger in line:
try:
line = lines[i + 2].split("magnetization")[-1]
if line != " \n":
spin_str_lst = line.split()
spin_str_len = len(spin_str_lst)
if spin_str_len == 1:
ene = float(line)
elif spin_str_len == 3:
ene = [
float(spin_str_lst[0]),
float(spin_str_lst[1]),
float(spin_str_lst[2]),
]
else:
warnings.warn("Unrecognized spin configuration.")
return mag_lst, final_magmom_lst
istep_energies.append(ene)
except ValueError:
warnings.warn("Something went wrong in parsing the magnetization")
if n_atoms is None:
if nion_trigger in line:
n_atoms = int(line.split(nion_trigger)[-1])
if local_spin_trigger:
try:
for ind_dir, direc in enumerate(["x", "y", "z"]):
if "magnetization ({})".format(direc) in line:
mag_dict[direc].append(
[
float(lines[i + 4 + atom_index].split()[-1])
for atom_index in range(n_atoms)
]
)
except ValueError:
warnings.warn(
"Something went wrong in parsing the magnetic moments"
)
if len(mag_dict["x"]) > 0:
if len(mag_dict["y"]) == 0:
final_mag = np.array(mag_dict["x"])
else:
n_ionic_steps = np.array(mag_dict["x"]).shape[0]
final_mag = np.abs(np.zeros((n_ionic_steps, n_atoms, 3)))
final_mag[:, :, 0] = np.array(mag_dict["x"])
final_mag[:, :, 1] = np.array(mag_dict["y"])
final_mag[:, :, 2] = np.array(mag_dict["z"])
final_magmom_lst = final_mag.tolist()
return mag_lst, final_magmom_lst
@staticmethod
def get_broyden_mixing_mesh(filename="OUTCAR", lines=None):
"""
Gets the Broyden mixing mesh size
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
int: Mesh size
"""
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="gives a total of "
)
if len(trigger_indices) > 0:
line_ngx = lines[trigger_indices[0] - 2]
else:
warnings.warn(
"Unable to parse the Broyden mixing mesh. Returning 0 instead"
)
return 0
        # Strip all letters and spaces, then split on '=' to isolate the mesh dimensions
str_list = re.sub(
r"[a-zA-Z]", r"", line_ngx.replace(" ", "").replace("\n", "")
).split("=")
return np.prod([int(val) for val in str_list[1:]])
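    # Example of the parsing above on a hypothetical mesh line
    # "dimension x,y,z NGX = 24 NGY = 24 NGZ = 30": stripping spaces and letters
    # leaves ",,=24=24=30"; splitting on '=' and dropping the first element
    # gives ['24', '24', '30'], so np.prod yields 17280.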
@staticmethod
def get_temperatures(filename="OUTCAR", lines=None):
"""
Gets the temperature at each ionic step (applicable for MD)
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of temperatures in Kelvin
"""
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="kin. lattice EKIN_LAT= "
)
temperatures = []
if len(trigger_indices) > 0:
for j in trigger_indices:
line = lines[j].strip()
line = _clean_line(line)
temperatures.append(float(line.split()[-2]))
else:
temperatures = np.zeros(
len(
_get_trigger(
lines=lines,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
return_lines=False,
)
)
)
return np.array(temperatures)
@staticmethod
def get_steps(filename="OUTCAR", lines=None):
"""
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: Steps during the simulation
"""
nblock_trigger = "NBLOCK ="
trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
trigger_indices = list()
        read_nblock = None  # set once NBLOCK has been parsed
n_block = 1
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger in line:
trigger_indices.append(i)
            if read_nblock is None:
                if nblock_trigger in line:
                    line = _clean_line(line)
                    n_block = int(line.split(nblock_trigger)[-1])
                    read_nblock = n_block  # stop scanning once NBLOCK is found
        return n_block * np.arange(len(trigger_indices))
def get_time(self, filename="OUTCAR", lines=None):
"""
Time after each simulation step (for MD)
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of time values in fs
"""
potim_trigger = "POTIM ="
        read_potim = None  # set once POTIM has been parsed
potim = 1.0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
            if read_potim is None:
                if potim_trigger in line:
                    line = _clean_line(line)
                    potim = float(line.split(potim_trigger)[-1].split()[0])
                    read_potim = potim  # stop scanning once POTIM is found
        return potim * self.get_steps(filename=filename, lines=lines)
@staticmethod
def get_kinetic_energy_error(filename="OUTCAR", lines=None):
"""
Get the kinetic energy error
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
float: The kinetic energy error in eV
"""
trigger = "kinetic energy error for atom="
e_kin_err = list()
n_species_list = list()
nion_trigger = "ions per type ="
tot_kin_error = 0.0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger in line:
e_kin_err.append(float(line.split()[5]))
if nion_trigger in line:
n_species_list = [
float(val) for val in line.split(nion_trigger)[-1].strip().split()
]
if len(n_species_list) > 0 and len(n_species_list) == len(e_kin_err):
tot_kin_error = np.sum(np.array(n_species_list) * np.array(e_kin_err))
return tot_kin_error
@staticmethod
def get_fermi_level(filename="OUTCAR", lines=None):
"""
Getting the Fermi-level (Kohn_Sham) from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
float: The Kohn-Sham Fermi level in eV
"""
trigger = "E-fermi :"
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=trigger
)
if len(trigger_indices) != 0:
try:
return float(lines[trigger_indices[-1]].split(trigger)[-1].split()[0])
except ValueError:
return
else:
return
@staticmethod
def get_dipole_moments(filename="OUTCAR", lines=None):
"""
Get the electric dipole moment at every electronic step
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
list: A list of dipole moments in (eA) for each electronic step
"""
moment_trigger = "dipolmoment"
istep_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
dip_moms = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_mom = list()
for i, line in enumerate(lines):
line = line.strip()
if istep_trigger in line:
dip_moms.append(np.array(istep_mom))
istep_mom = list()
if moment_trigger in line:
line = _clean_line(line)
mom = np.array([float(val) for val in line.split()[1:4]])
istep_mom.append(mom)
return dip_moms
@staticmethod
def get_nelect(filename="OUTCAR", lines=None):
"""
Returns the number of electrons in the simulation
Args:
filename (str): OUTCAR filename
lines (list/None): lines read from the file
Returns:
float: The number of electrons in the simulation
"""
nelect_trigger = "NELECT"
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if nelect_trigger in line:
return float(line.split()[2])
@staticmethod
def get_number_of_atoms(filename="OUTCAR", lines=None):
"""
Returns the number of ions in the simulation
Args:
filename (str): OUTCAR filename
lines (list/None): lines read from the file
Returns:
int: The number of ions in the simulation
"""
ions_trigger = "NIONS ="
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=ions_trigger
)
if len(trigger_indices) != 0:
return int(lines[trigger_indices[0]].split(ions_trigger)[-1])
else:
raise ValueError()
@staticmethod
def get_band_properties(filename="OUTCAR", lines=None):
fermi_trigger = "E-fermi"
fermi_trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=fermi_trigger
)
fermi_level_list = list()
vbm_level_dict = OrderedDict()
cbm_level_dict = OrderedDict()
for ind in fermi_trigger_indices:
fermi_level_list.append(float(lines[ind].strip().split()[2]))
band_trigger = "band No. band energies occupation"
is_spin_polarized = False
for n, ind in enumerate(fermi_trigger_indices):
if n == len(fermi_trigger_indices) - 1:
trigger_indices, lines_new = _get_trigger(
lines=lines[ind:-1], filename=filename, trigger=band_trigger
)
else:
trigger_indices, lines_new = _get_trigger(
lines=lines[ind:fermi_trigger_indices[n+1]], filename=filename, trigger=band_trigger
)
band_data = list()
for ind in trigger_indices:
if "spin component" in lines_new[ind-3]:
is_spin_polarized = True
for line in lines_new[ind+1:]:
data = line.strip().split()
if len(data) != 3:
break
band_data.append([float(d) for d in data[1:]])
if is_spin_polarized:
                midpoint = len(band_data) // 2
                band_data_per_spin = [band_data[:midpoint], band_data[midpoint:]]
else:
band_data_per_spin = [band_data]
for spin, band_data in enumerate(band_data_per_spin):
if spin in cbm_level_dict.keys():
pass
else:
cbm_level_dict[spin] = list()
if spin in vbm_level_dict.keys():
pass
else:
vbm_level_dict[spin] = list()
if len(band_data) > 0:
band_energy, band_occ = [np.array(band_data)[:, i] for i in range(2)]
args = np.argsort(band_energy)
band_occ = band_occ[args]
band_energy = band_energy[args]
cbm_bool = np.abs(band_occ) < 1e-6
if any(cbm_bool):
cbm_level_dict[spin].append(band_energy[np.abs(band_occ) < 1e-6][0])
else:
cbm_level_dict[spin].append(band_energy[-1])
# If spin channel is completely empty, setting vbm=cbm
if all(cbm_bool):
vbm_level_dict[spin].append(cbm_level_dict[spin][-1])
else:
vbm_level_dict[spin].append(band_energy[~cbm_bool][-1])
        return (
            np.array(fermi_level_list),
            np.array(list(vbm_level_dict.values())),
            np.array(list(cbm_level_dict.values())),
        )
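    # Shape sketch of the return values: the Fermi levels form a (n_blocks,)
    # array, while vbm and cbm are (n_spin, n_blocks), where n_blocks counts
    # the "E-fermi" occurrences and n_spin is 1, or 2 for spin-polarized output.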
@staticmethod
def get_elastic_constants(filename="OUTCAR", lines=None):
lines = _get_lines_from_file(filename=filename, lines=lines)
trigger_indices = _get_trigger(lines=lines, filename=filename, trigger="TOTAL ELASTIC MODULI (kBar)", return_lines=False)
if len(trigger_indices) != 1:
return None
else:
start_index = trigger_indices[0] + 3
end_index = start_index + 6
elastic_constants = []
for line in lines[start_index:end_index]:
elastic_constants.append(line.split()[1:])
elastic_GPa = np.array(elastic_constants, dtype=float) / 10
return elastic_GPa
@staticmethod
def _get_positions_and_forces_parser(
lines, trigger_indices, n_atoms, pos_flag=True, force_flag=True
):
"""
        Parser to get the forces and/or positions for every ionic step from the OUTCAR file
Args:
lines (list): lines read from the file
trigger_indices (list): list of line indices where the trigger was found.
n_atoms (int): number of atoms
pos_flag (bool): parse position
force_flag (bool): parse forces
Returns:
[positions, forces] (sequence)
numpy.ndarray: A Nx3xM array of positions in $\AA$
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
"""
positions = []
forces = []
for j in trigger_indices:
pos = []
force = []
for line in lines[j + 2 : j + n_atoms + 2]:
line = line.strip()
line = _clean_line(line)
if pos_flag:
pos.append([float(l) for l in line.split()[0:3]])
if force_flag:
force.append([float(l) for l in line.split()[3:]])
forces.append(force)
positions.append(pos)
if pos_flag and force_flag:
return np.array(positions), np.array(forces)
elif pos_flag:
return np.array(positions)
elif force_flag:
return np.array(forces)
@staticmethod
def _get_cells_praser(lines, trigger_indices):
"""
Parser to get the cell size and shape for every ionic step from the OUTCAR file
Args:
lines (list): lines read from the file
trigger_indices (list): list of line indices where the trigger was found.
Returns:
numpy.ndarray: A 3x3xM array of the cell shape in $\AA$
where M is the number of time steps
"""
cells = []
try:
for j in trigger_indices:
cell = []
for line in lines[j + 5: j + 8]:
line = line.strip()
line = _clean_line(line)
cell.append([float(l) for l in line.split()[0:3]])
cells.append(cell)
return np.array(cells)
except ValueError:
warnings.warn("Unable to parse the cells from the OUTCAR file")
return
def _clean_line(line):
return line.replace("-", " -")
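# Example of the fix above: _clean_line("0.5-0.5-0.5") -> "0.5 -0.5 -0.5", so
# negative numbers fused together in fixed-width OUTCAR columns split correctly.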
def _get_trigger(trigger, filename=None, lines=None, return_lines=True):
"""
Find the lines where a specific trigger appears.
Args:
trigger (str): string pattern to search for
lines (list/None): list of lines
filename (str/None): file to read lines from
Returns:
        list: indices of the lines where the trigger string was found, and the list of lines
"""
lines = _get_lines_from_file(filename=filename, lines=lines)
trigger_indicies = [i for i, line in enumerate(lines) if trigger in line.strip()]
if return_lines:
return trigger_indicies, lines
else:
return trigger_indicies
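# Example (hypothetical lines):
#   _get_trigger("E-fermi", lines=["x\n", " E-fermi : 5.1\n"])
# returns ([1], lines); with return_lines=False only the index list [1] is returned.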
def _get_lines_from_file(filename, lines=None):
"""
    If lines is None, read the lines from the file given by filename.
    Args:
        filename (str): file to read lines from
        lines (list/None): list of lines
Returns:
list: list of lines
"""
if lines is None:
with open(filename, "r") as f:
lines = f.readlines()
return lines
| 37.365779
| 129
| 0.56681
|
from collections import OrderedDict
import numpy as np
import warnings
import scipy.constants
import re
__author__ = "Sudarsan Surendralal"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Sudarsan Surendralal"
__email__ = "surendralal@mpie.de"
__status__ = "production"
__date__ = "Sep 1, 2017"
KBAR_TO_EVA = (
scipy.constants.physical_constants["joule-electron volt relationship"][0] / 1e22
)
class Outcar(object):
def __init__(self):
self.parse_dict = dict()
def from_file(self, filename="OUTCAR"):
with open(filename, "r") as f:
lines = f.readlines()
energies = self.get_total_energies(filename=filename, lines=lines)
energies_int = self.get_energy_without_entropy(filename=filename, lines=lines)
energies_zero = self.get_energy_sigma_0(filename=filename, lines=lines)
scf_energies = self.get_all_total_energies(filename=filename, lines=lines)
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
forces = self.get_forces(filename=filename, lines=lines, n_atoms=n_atoms)
positions = self.get_positions(filename=filename, lines=lines, n_atoms=n_atoms)
cells = self.get_cells(filename=filename, lines=lines)
steps = self.get_steps(filename=filename, lines=lines)
temperatures = self.get_temperatures(filename=filename, lines=lines)
time = self.get_time(filename=filename, lines=lines)
fermi_level = self.get_fermi_level(filename=filename, lines=lines)
scf_moments = self.get_dipole_moments(filename=filename, lines=lines)
kin_energy_error = self.get_kinetic_energy_error(filename=filename, lines=lines)
stresses = self.get_stresses(filename=filename, si_unit=False, lines=lines)
n_elect = self.get_nelect(filename=filename, lines=lines)
e_fermi_list, vbm_list, cbm_list = self.get_band_properties(filename=filename, lines=lines)
elastic_constants = self.get_elastic_constants(filename=filename, lines=lines)
try:
irreducible_kpoints = self.get_irreducible_kpoints(
filename=filename, lines=lines
)
except ValueError:
print("irreducible kpoints not parsed !")
irreducible_kpoints = None
magnetization, final_magmom_lst = self.get_magnetization(
filename=filename, lines=lines
)
broyden_mixing = self.get_broyden_mixing_mesh(filename=filename, lines=lines)
self.parse_dict["energies"] = energies
self.parse_dict["energies_int"] = energies_int
self.parse_dict["energies_zero"] = energies_zero
self.parse_dict["scf_energies"] = scf_energies
self.parse_dict["forces"] = forces
self.parse_dict["positions"] = positions
self.parse_dict["cells"] = cells
self.parse_dict["steps"] = steps
self.parse_dict["temperatures"] = temperatures
self.parse_dict["time"] = time
self.parse_dict["fermi_level"] = fermi_level
self.parse_dict["scf_dipole_moments"] = scf_moments
self.parse_dict["kin_energy_error"] = kin_energy_error
self.parse_dict["stresses"] = stresses
self.parse_dict["irreducible_kpoints"] = irreducible_kpoints
self.parse_dict["magnetization"] = magnetization
self.parse_dict["final_magmoms"] = final_magmom_lst
self.parse_dict["broyden_mixing"] = broyden_mixing
self.parse_dict["n_elect"] = n_elect
self.parse_dict["e_fermi_list"] = e_fermi_list
self.parse_dict["vbm_list"] = vbm_list
self.parse_dict["cbm_list"] = cbm_list
self.parse_dict["elastic_constants"] = elastic_constants
try:
self.parse_dict["pressures"] = (
np.average(stresses[:, 0:3], axis=1) * KBAR_TO_EVA
)
except IndexError:
self.parse_dict["pressures"] = np.zeros(len(steps))
def to_hdf(self, hdf, group_name="outcar"):
with hdf.open(group_name) as hdf5_output:
for key in self.parse_dict.keys():
hdf5_output[key] = self.parse_dict[key]
def to_hdf_minimal(self, hdf, group_name="outcar"):
unique_quantities = [
"kin_energy_error",
"broyden_mixing",
"stresses",
"irreducible_kpoints",
]
with hdf.open(group_name) as hdf5_output:
for key in self.parse_dict.keys():
if key in unique_quantities:
hdf5_output[key] = self.parse_dict[key]
def from_hdf(self, hdf, group_name="outcar"):
with hdf.open(group_name) as hdf5_output:
for key in hdf5_output.list_nodes():
self.parse_dict[key] = hdf5_output[key]
def get_positions_and_forces(self, filename="OUTCAR", lines=None, n_atoms=None):
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=True,
force_flag=True,
)
def get_positions(self, filename="OUTCAR", lines=None, n_atoms=None):
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=True,
force_flag=False,
)
def get_forces(self, filename="OUTCAR", lines=None, n_atoms=None):
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=False,
force_flag=True,
)
def get_cells(self, filename="OUTCAR", lines=None):
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="VOLUME and BASIS-vectors are now :"
)
return self._get_cells_praser(lines=lines, trigger_indices=trigger_indices)
@staticmethod
def get_stresses(filename="OUTCAR", lines=None, si_unit=True):
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FORCE on cell =-STRESS in cart. coord. units (eV):",
)
pullay_stress_lst = []
for j in trigger_indices:
try:
if si_unit:
pullay_stress_lst.append(
[float(l) for l in lines[j + 13].split()[1:7]]
)
else:
pullay_stress_lst.append(
[float(l) for l in lines[j + 14].split()[2:8]]
)
except ValueError:
if si_unit:
pullay_stress_lst.append([float("NaN")] * 6)
else:
pullay_stress_lst.append([float("NaN")] * 6)
return np.array(pullay_stress_lst)
@staticmethod
def get_irreducible_kpoints(
filename="OUTCAR", reciprocal=True, weight=True, planewaves=True, lines=None
):
kpoint_lst = []
weight_lst = []
planewaves_lst = []
trigger_number_str = "Subroutine IBZKPT returns following result:"
trigger_plane_waves_str = "k-point 1 :"
trigger_number = 0
trigger_plane_waves = 0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger_number_str in line:
trigger_number = int(i)
elif planewaves:
if trigger_plane_waves_str in line:
trigger_plane_waves = int(i)
number_irr_kpoints = int(lines[trigger_number + 3].split()[1])
if reciprocal:
trigger_start = trigger_number + 7
else:
trigger_start = trigger_number + 10 + number_irr_kpoints
for line in lines[trigger_start : trigger_start + number_irr_kpoints]:
line = line.strip()
line = _clean_line(line)
kpoint_lst.append([float(l) for l in line.split()[0:3]])
if weight:
weight_lst.append(float(line.split()[3]))
if planewaves and trigger_plane_waves != 0:
for line in lines[
trigger_plane_waves : trigger_plane_waves + number_irr_kpoints
]:
line = line.strip()
line = _clean_line(line)
planewaves_lst.append(float(line.split()[-1]))
if weight and planewaves:
return np.array(kpoint_lst), np.array(weight_lst), np.array(planewaves_lst)
elif weight:
return np.array(kpoint_lst), np.array(weight_lst)
elif planewaves:
return np.array(kpoint_lst), np.array(planewaves_lst)
else:
return np.array(kpoint_lst)
@staticmethod
def get_total_energies(filename="OUTCAR", lines=None):
def get_total_energies_from_line(line):
return float(_clean_line(line.strip()).split()[-2])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[get_total_energies_from_line(lines[j + 2]) for j in trigger_indices]
)
@staticmethod
def get_energy_without_entropy(filename="OUTCAR", lines=None):
def get_energy_without_entropy_from_line(line):
return float(_clean_line(line.strip()).split()[3])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[
get_energy_without_entropy_from_line(lines[j + 4])
for j in trigger_indices
]
)
@staticmethod
def get_energy_sigma_0(filename="OUTCAR", lines=None):
def get_energy_sigma_0_from_line(line):
return float(_clean_line(line.strip()).split()[-1])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[get_energy_sigma_0_from_line(lines[j + 4]) for j in trigger_indices]
)
@staticmethod
def get_all_total_energies(filename="OUTCAR", lines=None):
ionic_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
electronic_trigger = "free energy TOTEN ="
scf_energies = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_energies = list()
for i, line in enumerate(lines):
line = line.strip()
if ionic_trigger in line:
scf_energies.append(np.array(istep_energies))
istep_energies = list()
if electronic_trigger in line:
line = _clean_line(line)
ene = float(line.split()[-2])
istep_energies.append(ene)
return scf_energies
@staticmethod
def get_magnetization(filename="OUTCAR", lines=None):
ionic_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
electronic_trigger = "eigenvalue-minimisations"
nion_trigger = "NIONS ="
mag_lst = list()
local_spin_trigger = False
n_atoms = None
mag_dict = dict()
mag_dict["x"] = list()
mag_dict["y"] = list()
mag_dict["z"] = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_energies = list()
final_magmom_lst = list()
for i, line in enumerate(lines):
line = line.strip()
if ionic_trigger in line:
mag_lst.append(np.array(istep_energies))
istep_energies = list()
if "Atomic Wigner-Seitz radii" in line:
local_spin_trigger = True
if electronic_trigger in line:
try:
line = lines[i + 2].split("magnetization")[-1]
if line != " \n":
spin_str_lst = line.split()
spin_str_len = len(spin_str_lst)
if spin_str_len == 1:
ene = float(line)
elif spin_str_len == 3:
ene = [
float(spin_str_lst[0]),
float(spin_str_lst[1]),
float(spin_str_lst[2]),
]
else:
warnings.warn("Unrecognized spin configuration.")
return mag_lst, final_magmom_lst
istep_energies.append(ene)
except ValueError:
warnings.warn("Something went wrong in parsing the magnetization")
if n_atoms is None:
if nion_trigger in line:
n_atoms = int(line.split(nion_trigger)[-1])
if local_spin_trigger:
try:
for ind_dir, direc in enumerate(["x", "y", "z"]):
if "magnetization ({})".format(direc) in line:
mag_dict[direc].append(
[
float(lines[i + 4 + atom_index].split()[-1])
for atom_index in range(n_atoms)
]
)
except ValueError:
warnings.warn(
"Something went wrong in parsing the magnetic moments"
)
if len(mag_dict["x"]) > 0:
if len(mag_dict["y"]) == 0:
final_mag = np.array(mag_dict["x"])
else:
n_ionic_steps = np.array(mag_dict["x"]).shape[0]
final_mag = np.abs(np.zeros((n_ionic_steps, n_atoms, 3)))
final_mag[:, :, 0] = np.array(mag_dict["x"])
final_mag[:, :, 1] = np.array(mag_dict["y"])
final_mag[:, :, 2] = np.array(mag_dict["z"])
final_magmom_lst = final_mag.tolist()
return mag_lst, final_magmom_lst
@staticmethod
def get_broyden_mixing_mesh(filename="OUTCAR", lines=None):
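        # The FFT mesh dimensions are reported two lines above the
        # "gives a total of" line; strip the letters and take NGX*NGY*NGZ.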
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="gives a total of "
)
if len(trigger_indices) > 0:
line_ngx = lines[trigger_indices[0] - 2]
else:
warnings.warn(
"Unable to parse the Broyden mixing mesh. Returning 0 instead"
)
return 0
str_list = re.sub(
r"[a-zA-Z]", r"", line_ngx.replace(" ", "").replace("\n", "")
).split("=")
return np.prod([int(val) for val in str_list[1:]])
@staticmethod
def get_temperatures(filename="OUTCAR", lines=None):
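        # The temperature is the second-to-last token of each
        # "kin. lattice EKIN_LAT=" line; non-MD runs fall back to zeros.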
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="kin. lattice EKIN_LAT= "
)
temperatures = []
if len(trigger_indices) > 0:
for j in trigger_indices:
line = lines[j].strip()
line = _clean_line(line)
temperatures.append(float(line.split()[-2]))
else:
temperatures = np.zeros(
len(
_get_trigger(
lines=lines,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
return_lines=False,
)
)
)
return np.array(temperatures)
@staticmethod
def get_steps(filename="OUTCAR", lines=None):
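        # One index per ionic step; when NBLOCK > 1 VASP only writes every
        # NBLOCK-th configuration, so the indices are scaled accordingly.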
nblock_trigger = "NBLOCK ="
trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
trigger_indices = list()
read_nblock = True
n_block = 1
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger in line:
trigger_indices.append(i)
            if read_nblock:
                if nblock_trigger in line:
                    line = _clean_line(line)
                    # NBLOCK is typically followed by ";   KBLOCK = ..." on
                    # the same line, so keep only the first token.
                    n_block = int(
                        line.split(nblock_trigger)[-1].strip().split()[0].rstrip(";")
                    )
                    read_nblock = False
        # one step index per ionic step, scaled by NBLOCK
        return n_block * np.arange(len(trigger_indices))
def get_time(self, filename="OUTCAR", lines=None):
potim_trigger = "POTIM ="
read_potim = True
potim = 1.0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
            if read_potim:
                if potim_trigger in line:
                    line = _clean_line(line)
                    # POTIM is the first number after the trigger
                    potim = float(line.split(potim_trigger)[-1].split()[0])
                    read_potim = False
return potim * self.get_steps(filename)
@staticmethod
def get_kinetic_energy_error(filename="OUTCAR", lines=None):
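        # Weight the per-species kinetic-energy error by the number of ions
        # of that species and sum over species.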
trigger = "kinetic energy error for atom="
e_kin_err = list()
n_species_list = list()
nion_trigger = "ions per type ="
tot_kin_error = 0.0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger in line:
e_kin_err.append(float(line.split()[5]))
if nion_trigger in line:
n_species_list = [
float(val) for val in line.split(nion_trigger)[-1].strip().split()
]
if len(n_species_list) > 0 and len(n_species_list) == len(e_kin_err):
tot_kin_error = np.sum(np.array(n_species_list) * np.array(e_kin_err))
return tot_kin_error
@staticmethod
def get_fermi_level(filename="OUTCAR", lines=None):
trigger = "E-fermi :"
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=trigger
)
if len(trigger_indices) != 0:
try:
return float(lines[trigger_indices[-1]].split(trigger)[-1].split()[0])
except ValueError:
return
else:
return
@staticmethod
def get_dipole_moments(filename="OUTCAR", lines=None):
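        # Collect the three-component "dipolmoment" vectors, grouped by
        # ionic step.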
moment_trigger = "dipolmoment"
istep_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
dip_moms = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_mom = list()
for i, line in enumerate(lines):
line = line.strip()
if istep_trigger in line:
dip_moms.append(np.array(istep_mom))
istep_mom = list()
if moment_trigger in line:
line = _clean_line(line)
mom = np.array([float(val) for val in line.split()[1:4]])
istep_mom.append(mom)
return dip_moms
@staticmethod
def get_nelect(filename="OUTCAR", lines=None):
nelect_trigger = "NELECT"
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if nelect_trigger in line:
return float(line.split()[2])
@staticmethod
def get_number_of_atoms(filename="OUTCAR", lines=None):
ions_trigger = "NIONS ="
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=ions_trigger
)
if len(trigger_indices) != 0:
return int(lines[trigger_indices[0]].split(ions_trigger)[-1])
else:
            raise ValueError("'{}' not found in {}".format(ions_trigger, filename))
@staticmethod
def get_band_properties(filename="OUTCAR", lines=None):
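        # For each reported Fermi level, scan the band table(s) that follow
        # for the valence-band maximum (highest occupied eigenvalue) and the
        # conduction-band minimum (lowest unoccupied eigenvalue), per spin.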
fermi_trigger = "E-fermi"
fermi_trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=fermi_trigger
)
fermi_level_list = list()
vbm_level_dict = OrderedDict()
cbm_level_dict = OrderedDict()
for ind in fermi_trigger_indices:
fermi_level_list.append(float(lines[ind].strip().split()[2]))
band_trigger = "band No. band energies occupation"
is_spin_polarized = False
for n, ind in enumerate(fermi_trigger_indices):
if n == len(fermi_trigger_indices) - 1:
trigger_indices, lines_new = _get_trigger(
lines=lines[ind:-1], filename=filename, trigger=band_trigger
)
            else:
                trigger_indices, lines_new = _get_trigger(
                    lines=lines[ind:fermi_trigger_indices[n + 1]],
                    filename=filename,
                    trigger=band_trigger,
                )
band_data = list()
for ind in trigger_indices:
if "spin component" in lines_new[ind-3]:
is_spin_polarized = True
for line in lines_new[ind+1:]:
data = line.strip().split()
if len(data) != 3:
break
band_data.append([float(d) for d in data[1:]])
if is_spin_polarized:
                half = len(band_data) // 2
                band_data_per_spin = [band_data[:half], band_data[half:]]
else:
band_data_per_spin = [band_data]
for spin, band_data in enumerate(band_data_per_spin):
            if spin not in cbm_level_dict:
                cbm_level_dict[spin] = list()
            if spin not in vbm_level_dict:
                vbm_level_dict[spin] = list()
if len(band_data) > 0:
band_energy, band_occ = [np.array(band_data)[:, i] for i in range(2)]
args = np.argsort(band_energy)
band_occ = band_occ[args]
band_energy = band_energy[args]
cbm_bool = np.abs(band_occ) < 1e-6
if any(cbm_bool):
                    cbm_level_dict[spin].append(band_energy[cbm_bool][0])
else:
cbm_level_dict[spin].append(band_energy[-1])
if all(cbm_bool):
vbm_level_dict[spin].append(cbm_level_dict[spin][-1])
else:
vbm_level_dict[spin].append(band_energy[~cbm_bool][-1])
        return (
            np.array(fermi_level_list),
            np.array(list(vbm_level_dict.values())),
            np.array(list(cbm_level_dict.values())),
        )
@staticmethod
def get_elastic_constants(filename="OUTCAR", lines=None):
lines = _get_lines_from_file(filename=filename, lines=lines)
        trigger_indices = _get_trigger(
            lines=lines,
            filename=filename,
            trigger="TOTAL ELASTIC MODULI (kBar)",
            return_lines=False,
        )
if len(trigger_indices) != 1:
return None
else:
start_index = trigger_indices[0] + 3
end_index = start_index + 6
elastic_constants = []
for line in lines[start_index:end_index]:
elastic_constants.append(line.split()[1:])
elastic_GPa = np.array(elastic_constants, dtype=float) / 10
return elastic_GPa
@staticmethod
def _get_positions_and_forces_parser(
lines, trigger_indices, n_atoms, pos_flag=True, force_flag=True
):
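        # The per-atom position/force rows start two lines below each
        # trigger line (the line in between is a separator).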
positions = []
forces = []
for j in trigger_indices:
pos = []
force = []
for line in lines[j + 2 : j + n_atoms + 2]:
line = line.strip()
line = _clean_line(line)
if pos_flag:
pos.append([float(l) for l in line.split()[0:3]])
if force_flag:
force.append([float(l) for l in line.split()[3:]])
forces.append(force)
positions.append(pos)
if pos_flag and force_flag:
return np.array(positions), np.array(forces)
elif pos_flag:
return np.array(positions)
elif force_flag:
return np.array(forces)
@staticmethod
def _get_cells_praser(lines, trigger_indices):
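        # The three lattice vectors are printed five lines below each
        # trigger line.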
cells = []
try:
for j in trigger_indices:
cell = []
for line in lines[j + 5: j + 8]:
line = line.strip()
line = _clean_line(line)
cell.append([float(l) for l in line.split()[0:3]])
cells.append(cell)
return np.array(cells)
except ValueError:
warnings.warn("Unable to parse the cells from the OUTCAR file")
return
def _clean_line(line):
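    # Insert a space before every "-" so numbers fused by fixed-width
    # Fortran output (e.g. "1.23-4.56") split into separate tokens.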
return line.replace("-", " -")
def _get_trigger(trigger, filename=None, lines=None, return_lines=True):
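    # Return the indices of every line containing the trigger string,
    # optionally together with the lines themselves.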
lines = _get_lines_from_file(filename=filename, lines=lines)
    trigger_indices = [i for i, line in enumerate(lines) if trigger in line.strip()]
    if return_lines:
        return trigger_indices, lines
    else:
        return trigger_indices
def _get_lines_from_file(filename, lines=None):
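    # Read the file only when the caller did not already supply its lines.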
if lines is None:
with open(filename, "r") as f:
lines = f.readlines()
return lines
| true
| true
|
79060c598a010e9b185e6e1a60ba2bc854aec0bf
| 2,282
|
py
|
Python
|
venv/Lib/site-packages/astroid/brain/brain_nose.py
|
professorbee/randomplushmiku
|
b2db186a5d081da0cb00b8c73dee9eff6047b1f1
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/astroid/brain/brain_nose.py
|
professorbee/randomplushmiku
|
b2db186a5d081da0cb00b8c73dee9eff6047b1f1
|
[
"MIT"
] | 1
|
2021-04-12T16:20:40.000Z
|
2021-04-12T16:20:40.000Z
|
venv/Lib/site-packages/astroid/brain/brain_nose.py
|
professorbee/randomplushmiku
|
b2db186a5d081da0cb00b8c73dee9eff6047b1f1
|
[
"MIT"
] | 1
|
2021-04-12T15:52:04.000Z
|
2021-04-12T15:52:04.000Z
|
# Copyright (c) 2015-2016, 2018, 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Hooks for nose library."""
import re
import textwrap
import astroid
import astroid.builder
_BUILDER = astroid.builder.AstroidBuilder(astroid.MANAGER)
def _pep8(name, caps=re.compile("([A-Z])")):
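    # Prefix each capital letter with "_" and lowercase it, e.g.
    # "assertEqual" -> "assert_equal".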
return caps.sub(lambda m: "_" + m.groups()[0].lower(), name)
def _nose_tools_functions():
"""Get an iterator of names and bound methods."""
module = _BUILDER.string_build(
textwrap.dedent(
"""
import unittest
class Test(unittest.TestCase):
pass
a = Test()
"""
)
)
try:
case = next(module["a"].infer())
except astroid.InferenceError:
return
for method in case.methods():
if method.name.startswith("assert") and "_" not in method.name:
pep8_name = _pep8(method.name)
yield pep8_name, astroid.BoundMethod(method, case)
if method.name == "assertEqual":
# nose also exports assert_equals.
yield "assert_equals", astroid.BoundMethod(method, case)
def _nose_tools_transform(node):
for method_name, method in _nose_tools_functions():
node.locals[method_name] = [method]
def _nose_tools_trivial_transform():
"""Custom transform for the nose.tools module."""
stub = _BUILDER.string_build("""__all__ = []""")
all_entries = ["ok_", "eq_"]
for pep8_name, method in _nose_tools_functions():
all_entries.append(pep8_name)
stub[pep8_name] = method
# Update the __all__ variable, since nose.tools
# does this manually with .append.
all_assign = stub["__all__"].parent
all_object = astroid.List(all_entries)
all_object.parent = all_assign
all_assign.value = all_object
return stub
astroid.register_module_extender(
astroid.MANAGER, "nose.tools.trivial", _nose_tools_trivial_transform
)
astroid.MANAGER.register_transform(
astroid.Module, _nose_tools_transform, lambda n: n.name == "nose.tools"
)
| 28.886076
| 85
| 0.678791
|
import re
import textwrap
import astroid
import astroid.builder
_BUILDER = astroid.builder.AstroidBuilder(astroid.MANAGER)
def _pep8(name, caps=re.compile("([A-Z])")):
return caps.sub(lambda m: "_" + m.groups()[0].lower(), name)
def _nose_tools_functions():
module = _BUILDER.string_build(
textwrap.dedent(
"""
import unittest
class Test(unittest.TestCase):
pass
a = Test()
"""
)
)
try:
case = next(module["a"].infer())
except astroid.InferenceError:
return
for method in case.methods():
if method.name.startswith("assert") and "_" not in method.name:
pep8_name = _pep8(method.name)
yield pep8_name, astroid.BoundMethod(method, case)
if method.name == "assertEqual":
yield "assert_equals", astroid.BoundMethod(method, case)
def _nose_tools_transform(node):
for method_name, method in _nose_tools_functions():
node.locals[method_name] = [method]
def _nose_tools_trivial_transform():
stub = _BUILDER.string_build("""__all__ = []""")
all_entries = ["ok_", "eq_"]
for pep8_name, method in _nose_tools_functions():
all_entries.append(pep8_name)
stub[pep8_name] = method
all_assign = stub["__all__"].parent
all_object = astroid.List(all_entries)
all_object.parent = all_assign
all_assign.value = all_object
return stub
astroid.register_module_extender(
astroid.MANAGER, "nose.tools.trivial", _nose_tools_trivial_transform
)
astroid.MANAGER.register_transform(
astroid.Module, _nose_tools_transform, lambda n: n.name == "nose.tools"
)
| true
| true
|
79060de957eed5903e574c1856b858a23543a8ff
| 2,004
|
py
|
Python
|
encodings/cp1026.py
|
theclashingfritz/Cog-Invasion-Online-Dump
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
[
"Apache-2.0"
] | 1
|
2020-03-12T16:44:10.000Z
|
2020-03-12T16:44:10.000Z
|
encodings/cp1026.py
|
theclashingfritz/Cog-Invasion-Online-Dump
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
[
"Apache-2.0"
] | null | null | null |
encodings/cp1026.py
|
theclashingfritz/Cog-Invasion-Online-Dump
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
[
"Apache-2.0"
] | null | null | null |
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: encodings.cp1026
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='cp1026', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = u'\x00\x01\x02\x03\x9c\t\x86\x7f\x97\x8d\x8e\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x9d\x85\x08\x87\x18\x19\x92\x8f\x1c\x1d\x1e\x1f\x80\x81\x82\x83\x84\n\x17\x1b\x88\x89\x8a\x8b\x8c\x05\x06\x07\x90\x91\x16\x93\x94\x95\x96\x04\x98\x99\x9a\x9b\x14\x15\x9e\x1a \xa0\xe2\xe4\xe0\xe1\xe3\xe5{\xf1\xc7.<(+!&\xe9\xea\xeb\xe8\xed\xee\xef\xec\xdf\u011e\u0130*);^-/\xc2\xc4\xc0\xc1\xc3\xc5[\xd1\u015f,%_>?\xf8\xc9\xca\xcb\xc8\xcd\xce\xcf\xcc\u0131:\xd6\u015e\'=\xdc\xd8abcdefghi\xab\xbb}`\xa6\xb1\xb0jklmnopqr\xaa\xba\xe6\xb8\xc6\xa4\xb5\xf6stuvwxyz\xa1\xbf]$@\xae\xa2\xa3\xa5\xb7\xa9\xa7\xb6\xbc\xbd\xbe\xac|\xaf\xa8\xb4\xd7\xe7ABCDEFGHI\xad\xf4~\xf2\xf3\xf5\u011fJKLMNOPQR\xb9\xfb\\\xf9\xfa\xff\xfc\xf7STUVWXYZ\xb2\xd4#\xd2\xd3\xd50123456789\xb3\xdb"\xd9\xda\x9f'
encoding_table = codecs.charmap_build(decoding_table)
| 48.878049
| 767
| 0.749501
|
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='cp1026', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = u'\x00\x01\x02\x03\x9c\t\x86\x7f\x97\x8d\x8e\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x9d\x85\x08\x87\x18\x19\x92\x8f\x1c\x1d\x1e\x1f\x80\x81\x82\x83\x84\n\x17\x1b\x88\x89\x8a\x8b\x8c\x05\x06\x07\x90\x91\x16\x93\x94\x95\x96\x04\x98\x99\x9a\x9b\x14\x15\x9e\x1a \xa0\xe2\xe4\xe0\xe1\xe3\xe5{\xf1\xc7.<(+!&\xe9\xea\xeb\xe8\xed\xee\xef\xec\xdf\u011e\u0130*);^-/\xc2\xc4\xc0\xc1\xc3\xc5[\xd1\u015f,%_>?\xf8\xc9\xca\xcb\xc8\xcd\xce\xcf\xcc\u0131:\xd6\u015e\'=\xdc\xd8abcdefghi\xab\xbb}`\xa6\xb1\xb0jklmnopqr\xaa\xba\xe6\xb8\xc6\xa4\xb5\xf6stuvwxyz\xa1\xbf]$@\xae\xa2\xa3\xa5\xb7\xa9\xa7\xb6\xbc\xbd\xbe\xac|\xaf\xa8\xb4\xd7\xe7ABCDEFGHI\xad\xf4~\xf2\xf3\xf5\u011fJKLMNOPQR\xb9\xfb\\\xf9\xfa\xff\xfc\xf7STUVWXYZ\xb2\xd4#\xd2\xd3\xd50123456789\xb3\xdb"\xd9\xda\x9f'
encoding_table = codecs.charmap_build(decoding_table)
| true
| true
|
79060ed31e804094d5d1064d217406f21e567529
| 3,079
|
py
|
Python
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/show_domain_quota_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/show_domain_quota_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/show_domain_quota_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowDomainQuotaResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'quotas': 'QuotaResult'
}
attribute_map = {
'quotas': 'quotas'
}
def __init__(self, quotas=None):
"""ShowDomainQuotaResponse - a model defined in huaweicloud sdk"""
super(ShowDomainQuotaResponse, self).__init__()
self._quotas = None
self.discriminator = None
if quotas is not None:
self.quotas = quotas
@property
def quotas(self):
"""Gets the quotas of this ShowDomainQuotaResponse.
:return: The quotas of this ShowDomainQuotaResponse.
:rtype: QuotaResult
"""
return self._quotas
@quotas.setter
def quotas(self, quotas):
"""Sets the quotas of this ShowDomainQuotaResponse.
:param quotas: The quotas of this ShowDomainQuotaResponse.
:type: QuotaResult
"""
self._quotas = quotas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowDomainQuotaResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.247788
| 79
| 0.560247
|
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowDomainQuotaResponse(SdkResponse):
sensitive_list = []
openapi_types = {
'quotas': 'QuotaResult'
}
attribute_map = {
'quotas': 'quotas'
}
def __init__(self, quotas=None):
super(ShowDomainQuotaResponse, self).__init__()
self._quotas = None
self.discriminator = None
if quotas is not None:
self.quotas = quotas
@property
def quotas(self):
return self._quotas
@quotas.setter
def quotas(self, quotas):
self._quotas = quotas
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ShowDomainQuotaResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
7906101b49d86dab3593f5151edccbf90eb5e00f
| 422
|
py
|
Python
|
src/states/state.py
|
Iain530/do-you-have-the-guts2018
|
2307a9cf9f6bb9d3cce987491f5db4511ea0b1a1
|
[
"MIT"
] | 1
|
2018-10-15T13:35:41.000Z
|
2018-10-15T13:35:41.000Z
|
src/states/state.py
|
Iain530/do-you-have-the-guts2018
|
2307a9cf9f6bb9d3cce987491f5db4511ea0b1a1
|
[
"MIT"
] | null | null | null |
src/states/state.py
|
Iain530/do-you-have-the-guts2018
|
2307a9cf9f6bb9d3cce987491f5db4511ea0b1a1
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from status import Status
class State(ABC):
def __init__(self, turret_controls, body_controls, status: Status):
self.turret_controls = turret_controls
self.body_controls = body_controls
self.status = status
@abstractmethod
def perform(self):
pass
@abstractmethod
def calculate_priority(self, is_current_state: bool):
pass
| 23.444444
| 71
| 0.699052
|
from abc import ABC, abstractmethod
from status import Status
class State(ABC):
def __init__(self, turret_controls, body_controls, status: Status):
self.turret_controls = turret_controls
self.body_controls = body_controls
self.status = status
@abstractmethod
def perform(self):
pass
@abstractmethod
def calculate_priority(self, is_current_state: bool):
pass
| true
| true
|
79061661a375ff2405ed7b25c287a48301ff5e33
| 1,420
|
py
|
Python
|
plugin/nodes/flavour.py
|
MSO4SC/cloudify-im-plugin
|
b8e6dfeb9a7902a38f602735780390a256fb72b7
|
[
"Apache-2.0"
] | 1
|
2018-09-24T12:04:29.000Z
|
2018-09-24T12:04:29.000Z
|
plugin/nodes/flavour.py
|
victorsndvg/cloudify-im-extension
|
b8e6dfeb9a7902a38f602735780390a256fb72b7
|
[
"Apache-2.0"
] | 6
|
2018-11-22T14:38:26.000Z
|
2021-08-02T08:01:31.000Z
|
plugin/nodes/flavour.py
|
victorsndvg/cloudify-im-extension
|
b8e6dfeb9a7902a38f602735780390a256fb72b7
|
[
"Apache-2.0"
] | 1
|
2018-12-09T17:45:13.000Z
|
2018-12-09T17:45:13.000Z
|
from cloudify import ctx
from cloudify.state import ctx_parameters as inputs
from cloudify.decorators import operation
from cloudify.exceptions import *
from plugin.nodes.utils import *
def build_radl_flavour(config):
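    # Render the flavour section of the RADL document from the
    # type/cores/memory entries of the plugin config.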
ctx.logger.debug('{0} Infrastructure Manager deployment info:'.format(get_log_indentation()))
increase_log_indentation()
type = get_child(dictionary=config, key='type', required=True)
cores = get_child(dictionary=config, key='cores', required=True)
memory = get_child(dictionary=config, key='memory', required=True)
flavour_radl = \
" instance_type = '" + str(type) + "' and \n" + \
" cpu.count = " + str(cores) + " and \n" + \
" memory.size = " + str(memory) + " and \n"
decrease_log_indentation()
return flavour_radl
@operation
def configure(config, simulate, **kwargs):
if (not simulate):
reset_log_indentation()
ctx.logger.debug('{0} Configure operation: Begin'.format(get_log_indentation()))
increase_log_indentation()
radl = get_child(ctx.instance.runtime_properties, key='settings')
if not radl:
radl = create_child(ctx.instance.runtime_properties, key='settings', value={})
radl_network = create_child(radl, key='flavour', value=build_radl_flavour(config))
decrease_log_indentation()
ctx.logger.debug('{0} Configure operation: End'.format(get_log_indentation()))
| 39.444444
| 97
| 0.697887
|
from cloudify import ctx
from cloudify.state import ctx_parameters as inputs
from cloudify.decorators import operation
from cloudify.exceptions import *
from plugin.nodes.utils import *
def build_radl_flavour(config):
ctx.logger.debug('{0} Infrastructure Manager deployment info:'.format(get_log_indentation()))
increase_log_indentation()
type = get_child(dictionary=config, key='type', required=True)
cores = get_child(dictionary=config, key='cores', required=True)
memory = get_child(dictionary=config, key='memory', required=True)
flavour_radl = \
" instance_type = '" + str(type) + "' and \n" + \
" cpu.count = " + str(cores) + " and \n" + \
" memory.size = " + str(memory) + " and \n"
decrease_log_indentation()
return flavour_radl
@operation
def configure(config, simulate, **kwargs):
if (not simulate):
reset_log_indentation()
ctx.logger.debug('{0} Configure operation: Begin'.format(get_log_indentation()))
increase_log_indentation()
radl = get_child(ctx.instance.runtime_properties, key='settings')
if not radl:
radl = create_child(ctx.instance.runtime_properties, key='settings', value={})
radl_network = create_child(radl, key='flavour', value=build_radl_flavour(config))
decrease_log_indentation()
ctx.logger.debug('{0} Configure operation: End'.format(get_log_indentation()))
| true
| true
|
7906185726810a877740790b9a75afec09e2b587
| 840
|
py
|
Python
|
bundle/vim-python-mode/pymode/utils.py
|
ninegrid/dotfiles-vim
|
4604f8a2e114cb2e98d5d79f2f41048c4f564b02
|
[
"Unlicense"
] | null | null | null |
bundle/vim-python-mode/pymode/utils.py
|
ninegrid/dotfiles-vim
|
4604f8a2e114cb2e98d5d79f2f41048c4f564b02
|
[
"Unlicense"
] | null | null | null |
bundle/vim-python-mode/pymode/utils.py
|
ninegrid/dotfiles-vim
|
4604f8a2e114cb2e98d5d79f2f41048c4f564b02
|
[
"Unlicense"
] | 1
|
2020-10-01T18:51:49.000Z
|
2020-10-01T18:51:49.000Z
|
""" Pymode utils. """
import os.path
import sys
import threading
import warnings
from contextlib import contextmanager
import vim # noqa
from ._compat import StringIO, PY2
DEBUG = int(vim.eval('g:pymode_debug'))
warnings.filterwarnings('ignore')
@contextmanager
def silence_stderr():
""" Redirect stderr. """
if DEBUG:
yield
else:
with threading.Lock():
stderr = sys.stderr
sys.stderr = StringIO()
yield
with threading.Lock():
sys.stderr = stderr
def patch_paths():
""" Function description. """
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs'))
if PY2:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs2'))
else:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs3'))
| 20
| 76
| 0.630952
|
import os.path
import sys
import threading
import warnings
from contextlib import contextmanager
import vim
from ._compat import StringIO, PY2
DEBUG = int(vim.eval('g:pymode_debug'))
warnings.filterwarnings('ignore')
@contextmanager
def silence_stderr():
if DEBUG:
yield
else:
with threading.Lock():
stderr = sys.stderr
sys.stderr = StringIO()
yield
with threading.Lock():
sys.stderr = stderr
def patch_paths():
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs'))
if PY2:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs2'))
else:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs3'))
| true
| true
|
79061a43cf623b03f0da9d879e66018df245c279
| 299
|
py
|
Python
|
quiz/fake_db.py
|
KelstonClub/quiz
|
5f6fca87ca21c376937f50f00e1d3ff2fbe3425a
|
[
"MIT"
] | null | null | null |
quiz/fake_db.py
|
KelstonClub/quiz
|
5f6fca87ca21c376937f50f00e1d3ff2fbe3425a
|
[
"MIT"
] | null | null | null |
quiz/fake_db.py
|
KelstonClub/quiz
|
5f6fca87ca21c376937f50f00e1d3ff2fbe3425a
|
[
"MIT"
] | null | null | null |
# Fake database to get the pygame running.
import random
questions = ["Question 1?", "Question 2?", "Question 3?", "Question 4?"]
answers = ["Answer 1", "Answer 2", "Answer 3", "Answer 4"]
def get_question():
return(random.choice(questions))
def get_answer():
return(random.choice(answers))
| 27.181818
| 72
| 0.685619
|
import random
questions = ["Question 1?", "Question 2?", "Question 3?", "Question 4?"]
answers = ["Answer 1", "Answer 2", "Answer 3", "Answer 4"]
def get_question():
return(random.choice(questions))
def get_answer():
return(random.choice(answers))
| true
| true
|
79061a791d8df7a895d17b47ead6715c4f26a761
| 3,096
|
py
|
Python
|
pynet/configure.py
|
claireguichon/pynet
|
92706375e61fb5cb523548303b7d04769c9de134
|
[
"CECILL-B"
] | 8
|
2020-06-23T16:30:52.000Z
|
2021-07-27T15:07:18.000Z
|
pynet/configure.py
|
claireguichon/pynet
|
92706375e61fb5cb523548303b7d04769c9de134
|
[
"CECILL-B"
] | 8
|
2019-12-18T17:28:47.000Z
|
2021-02-12T09:10:58.000Z
|
pynet/configure.py
|
claireguichon/pynet
|
92706375e61fb5cb523548303b7d04769c9de134
|
[
"CECILL-B"
] | 18
|
2019-08-19T14:17:48.000Z
|
2021-12-20T03:56:39.000Z
|
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
This module checks that all the dependencies are installed properly.
"""
# System import
import logging
import importlib
import distutils
# Package import
from .info import __version__
from .info import REQUIRES
from .info import LICENSE
from .info import AUTHOR
from .utils import logo
# Global parameters
MAP = {
"progressbar2": "progressbar",
"scikit-learn": "sklearn",
"Pillow": "PIL",
"scikit-image": "skimage"
}
logger = logging.getLogger("pynet")
def _check_python_versions():
""" Check that all the Python dependencies are satisfied.
A dependency is expected to be formatted as follows:
        <mod_name>==<mod_min_version>
        <mod_name>>=<mod_min_version>
Returns
-------
versions: dict with 2-uplet
the minimum required version and the installed version for each module.
'?' means no package found.
"""
versions = {}
logger.debug("Checking install dependencies:")
logger.debug("Declared dependencies:\n{0}".format(REQUIRES))
for dependency in REQUIRES:
if ">=" in dependency:
operator = ">="
elif "==" in dependency:
operator = "=="
else:
raise ValueError("'{0}' dependency no formatted correctly.".format(
dependency))
mod_name, mod_min_version = dependency.split(operator)
if mod_name in MAP:
mod_name = MAP[mod_name]
logger.debug(" {0} {1} {2}.".format(
mod_name, operator, mod_min_version))
try:
mod_install_version = importlib.import_module(mod_name).__version__
except:
mod_install_version = "?"
logger.debug(" found {0}...".format(mod_install_version))
versions[mod_name] = (operator + mod_min_version, mod_install_version)
logger.debug("Check done.")
return versions
def info():
""" Dispaly some usefull information about the package.
Returns
-------
info: str
package information.
"""
logger.debug("Check module metadata & dependencies:")
logger.debug(" dependencies.")
dependencies = "Dependencies: \n\n"
dependencies_info = _check_python_versions()
for name, (min_version, install_version) in dependencies_info.items():
dependencies += "{0:15s}: {1:9s} - required | {2:9s} installed".format(
name, min_version, install_version)
dependencies += "\n"
logger.debug(" metadata.")
version = "Package version: {0}\n\n".format(__version__)
license = "License: {0}\n\n".format(LICENSE)
authors = "Authors: \n{0}\n".format(AUTHOR)
return logo() + "\n\n" + version + license + authors + dependencies
| 31.917526
| 79
| 0.614664
| true
| true
|
|
79061a8a45becdc178cac2e4723c05454bde9073
| 9,781
|
py
|
Python
|
test/pybind_test/din_fp32_2gpu.py
|
Chunshuizhao/HugeCTR
|
085b2e8ad2abaee5578e7bf43b8394d0b8473b58
|
[
"Apache-2.0"
] | null | null | null |
test/pybind_test/din_fp32_2gpu.py
|
Chunshuizhao/HugeCTR
|
085b2e8ad2abaee5578e7bf43b8394d0b8473b58
|
[
"Apache-2.0"
] | null | null | null |
test/pybind_test/din_fp32_2gpu.py
|
Chunshuizhao/HugeCTR
|
085b2e8ad2abaee5578e7bf43b8394d0b8473b58
|
[
"Apache-2.0"
] | null | null | null |
import hugectr
import sys
solver = hugectr.CreateSolver(max_eval_batches = 1,
batchsize_eval = 4096,
batchsize = 64,
lr = 0.001,
vvgpu = [[0,1]],
repeat_dataset = True,
i64_input_key = True,
use_cuda_graph = True)
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Parquet,
source = ["./din_data/train/_file_list.txt"],
eval_source = "./din_data/valid/_file_list.txt",
check_type = hugectr.Check_t.Non,
slot_size_array = [192403, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 63001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 801])
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
update_type = hugectr.Update_t.Global,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 0.000000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
dense_dim = 0, dense_name = "dense",
data_reader_sparse_param_array =
[hugectr.DataReaderSparseParam("UserID", 1, True, 1),
hugectr.DataReaderSparseParam("GoodID", 1, True, 11),
hugectr.DataReaderSparseParam("CateID", 1, True, 11)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 28,
embedding_vec_size = 18,
combiner = "sum",
sparse_embedding_name = "sparse_embedding_user",
bottom_name = "UserID",
optimizer = optimizer))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 24,
embedding_vec_size = 18,
combiner = "sum",
sparse_embedding_name = "sparse_embedding_good",
bottom_name = "GoodID",
optimizer = optimizer))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 10,
embedding_vec_size = 18,
combiner = "sum",
sparse_embedding_name = "sparse_embedding_cate",
bottom_name = "CateID",
optimizer = optimizer))
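# Attention unit: score each history item against the candidate item via
# subtraction, element-wise product and concatenation, followed by a small MLP.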
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.FusedReshapeConcat,
bottom_names = ["sparse_embedding_good", "sparse_embedding_cate"],
top_names = ["FusedReshapeConcat_item_his_em", "FusedReshapeConcat_item"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["FusedReshapeConcat_item"],
top_names = ["item1", "item2"],
ranges=[(0,36),(0, 36)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["FusedReshapeConcat_item_his_em"],
top_names = ["item_his1", "item_his2", "item_his3", "item_his4", "item_his5"],
ranges=[(0,36),(0, 36),(0, 36), (0, 36), (0, 36)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Scale,
bottom_names = ["item1"],
top_names = ["Scale_item"],
axis = 1, factor = 10))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["Scale_item"],
top_names = ["Scale_item1", "Scale_item2", "Scale_item3"],
ranges=[(0,36),(0, 36),(0, 36)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Sub,
bottom_names = ["Scale_item1", "item_his1"],
top_names = ["sub_ih"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.DotProduct,
bottom_names = ["Scale_item2", "item_his2"],
top_names = ["DotProduct_i"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["Scale_item3", "item_his3", "sub_ih", "DotProduct_i"],
top_names = ["concat_i_h"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat_i_h"],
top_names = ["fc_att_i2"],
num_output=40))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["fc_att_i2"],
top_names = ["fc_att_i3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["fc_att_i3"],
top_names = ["reshape_score"],
leading_dim=10))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Softmax,
bottom_names = ["reshape_score"],
top_names = ["softmax_att_i"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Scale,
bottom_names = ["softmax_att_i"],
top_names = ["Scale_i"],
axis = 0, factor = 36))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["item_his4"],
top_names = ["reshape_item_his"],
leading_dim=360))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.DotProduct,
bottom_names = ["Scale_i", "reshape_item_his"],
top_names = ["DotProduct_ih"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReduceSum,
bottom_names = ["DotProduct_ih"],
top_names = ["reduce_ih"],
axis = 1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["item_his5"],
top_names = ["reshape_his"],
leading_dim=36,
time_step =10))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReduceMean,
bottom_names = ["reshape_his"],
top_names = ["reduce_item_his"],
axis = 1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["reduce_item_his"],
top_names = ["reshape_reduce_item_his"],
leading_dim=36))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["sparse_embedding_user"],
top_names = ["reshape_user"],
leading_dim=18))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["reshape_user", "reshape_reduce_item_his", "reduce_ih", "item2"],
top_names = ["concat_din_i"]))
# build_fcn_net
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat_din_i"],
top_names = ["fc_din_i1"],
num_output=200))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.PReLU_Dice,
bottom_names = ["fc_din_i1"],
top_names = ["dice_1"],
elu_alpha=0.2, eps=1e-8))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dice_1"],
top_names = ["fc_din_i2"],
num_output=80))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.PReLU_Dice,
bottom_names = ["fc_din_i2"],
top_names = ["dice_2"],
elu_alpha=0.2, eps=1e-8))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dice_2"],
top_names = ["fc3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names = ["fc3", "label"],
top_names = ["loss"]))
model.compile()
model.summary()
model.fit(max_iter = 6000, display = 1000, eval_interval = 1000, snapshot = 2000000, snapshot_prefix = "din")
model.eval()
metrics = model.get_eval_metrics()
print("[HUGECTR][INFO] iter: {}, metrics: {}".format(iter, metrics[0][1]))
if metrics[0][1] <0.8:
raise RuntimeError("Cannot reach the AUC threshold {}".format(0.8))
sys.exit(1)
else:
print("Successfully reach the AUC threshold {}".format(metrics[0][1]))
| 58.568862
| 134
| 0.510991
|
import hugectr
import sys
solver = hugectr.CreateSolver(max_eval_batches = 1,
batchsize_eval = 4096,
batchsize = 64,
lr = 0.001,
vvgpu = [[0,1]],
repeat_dataset = True,
i64_input_key = True,
use_cuda_graph = True)
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Parquet,
source = ["./din_data/train/_file_list.txt"],
eval_source = "./din_data/valid/_file_list.txt",
check_type = hugectr.Check_t.Non,
slot_size_array = [192403, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 63001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 801])
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
update_type = hugectr.Update_t.Global,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 0.000000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
dense_dim = 0, dense_name = "dense",
data_reader_sparse_param_array =
[hugectr.DataReaderSparseParam("UserID", 1, True, 1),
hugectr.DataReaderSparseParam("GoodID", 1, True, 11),
hugectr.DataReaderSparseParam("CateID", 1, True, 11)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 28,
embedding_vec_size = 18,
combiner = "sum",
sparse_embedding_name = "sparse_embedding_user",
bottom_name = "UserID",
optimizer = optimizer))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 24,
embedding_vec_size = 18,
combiner = "sum",
sparse_embedding_name = "sparse_embedding_good",
bottom_name = "GoodID",
optimizer = optimizer))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 10,
embedding_vec_size = 18,
combiner = "sum",
sparse_embedding_name = "sparse_embedding_cate",
bottom_name = "CateID",
optimizer = optimizer))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.FusedReshapeConcat,
bottom_names = ["sparse_embedding_good", "sparse_embedding_cate"],
top_names = ["FusedReshapeConcat_item_his_em", "FusedReshapeConcat_item"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["FusedReshapeConcat_item"],
top_names = ["item1", "item2"],
ranges=[(0,36),(0, 36)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["FusedReshapeConcat_item_his_em"],
top_names = ["item_his1", "item_his2", "item_his3", "item_his4", "item_his5"],
ranges=[(0,36),(0, 36),(0, 36), (0, 36), (0, 36)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Scale,
bottom_names = ["item1"],
top_names = ["Scale_item"],
axis = 1, factor = 10))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["Scale_item"],
top_names = ["Scale_item1", "Scale_item2", "Scale_item3"],
ranges=[(0,36),(0, 36),(0, 36)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Sub,
bottom_names = ["Scale_item1", "item_his1"],
top_names = ["sub_ih"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.DotProduct,
bottom_names = ["Scale_item2", "item_his2"],
top_names = ["DotProduct_i"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["Scale_item3", "item_his3", "sub_ih", "DotProduct_i"],
top_names = ["concat_i_h"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat_i_h"],
top_names = ["fc_att_i2"],
num_output=40))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["fc_att_i2"],
top_names = ["fc_att_i3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["fc_att_i3"],
top_names = ["reshape_score"],
leading_dim=10))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Softmax,
bottom_names = ["reshape_score"],
top_names = ["softmax_att_i"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Scale,
bottom_names = ["softmax_att_i"],
top_names = ["Scale_i"],
axis = 0, factor = 36))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["item_his4"],
top_names = ["reshape_item_his"],
leading_dim=360))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.DotProduct,
bottom_names = ["Scale_i", "reshape_item_his"],
top_names = ["DotProduct_ih"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReduceSum,
bottom_names = ["DotProduct_ih"],
top_names = ["reduce_ih"],
axis = 1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["item_his5"],
top_names = ["reshape_his"],
leading_dim=36,
time_step =10))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReduceMean,
bottom_names = ["reshape_his"],
top_names = ["reduce_item_his"],
axis = 1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["reduce_item_his"],
top_names = ["reshape_reduce_item_his"],
leading_dim=36))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["sparse_embedding_user"],
top_names = ["reshape_user"],
leading_dim=18))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["reshape_user", "reshape_reduce_item_his", "reduce_ih", "item2"],
top_names = ["concat_din_i"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat_din_i"],
top_names = ["fc_din_i1"],
num_output=200))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.PReLU_Dice,
bottom_names = ["fc_din_i1"],
top_names = ["dice_1"],
elu_alpha=0.2, eps=1e-8))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dice_1"],
top_names = ["fc_din_i2"],
num_output=80))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.PReLU_Dice,
bottom_names = ["fc_din_i2"],
top_names = ["dice_2"],
elu_alpha=0.2, eps=1e-8))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dice_2"],
top_names = ["fc3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names = ["fc3", "label"],
top_names = ["loss"]))
model.compile()
model.summary()
model.fit(max_iter = 6000, display = 1000, eval_interval = 1000, snapshot = 2000000, snapshot_prefix = "din")
model.eval()
metrics = model.get_eval_metrics()
print("[HUGECTR][INFO] iter: {}, metrics: {}".format(iter, metrics[0][1]))
if metrics[0][1] <0.8:
raise RuntimeError("Cannot reach the AUC threshold {}".format(0.8))
sys.exit(1)
else:
print("Successfully reach the AUC threshold {}".format(metrics[0][1]))
| true
| true
|
79061ab0da5df82f3ca2c2a7c29643e97fa15df4
| 1,338
|
py
|
Python
|
dataloader.py
|
manhph2211/Pytorch-Fb-Classification
|
cf5f9c0b356635020ff245c255d971e450d203fb
|
[
"MIT"
] | 1
|
2021-02-06T06:17:26.000Z
|
2021-02-06T06:17:26.000Z
|
dataloader.py
|
manhph2211/Pytorch-Fb-Classification
|
cf5f9c0b356635020ff245c255d971e450d203fb
|
[
"MIT"
] | null | null | null |
dataloader.py
|
manhph2211/Pytorch-Fb-Classification
|
cf5f9c0b356635020ff245c255d971e450d203fb
|
[
"MIT"
] | null | null | null |
import torch
import torchvision
from torchvision import transforms, utils, datasets
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from sklearn.metrics import classification_report, confusion_matrix
def makeDataSet(IMAGE_SHAPE=300, DATA_PATH='./data_after_splitting/'):
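    # Resize, tensorize and normalize both splits with mean/std 0.5,
    # mapping pixel values into [-1, 1].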
image_transforms = {
"train": transforms.Compose([
transforms.Resize((IMAGE_SHAPE, IMAGE_SHAPE)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
]),
"val": transforms.Compose([
transforms.Resize((IMAGE_SHAPE, IMAGE_SHAPE)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
])
}
train_dataset = datasets.ImageFolder(root = DATA_PATH + "train",
transform = image_transforms["train"]
)
val_dataset = datasets.ImageFolder(root = DATA_PATH + "val",
transform = image_transforms["val"]
)
train_dataloader = DataLoader(train_dataset, batch_size=4, num_workers=2, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=4, num_workers=2, shuffle=True)
return train_dataloader,val_dataloader
| 37.166667
| 88
| 0.608371
|
import torch
import torchvision
from torchvision import transforms, utils, datasets
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from sklearn.metrics import classification_report, confusion_matrix
def makeDataSet(IMAGE_SHAPE=300, DATA_PATH='./data_after_splitting/'):
image_transforms = {
"train": transforms.Compose([
transforms.Resize((IMAGE_SHAPE, IMAGE_SHAPE)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
]),
"val": transforms.Compose([
transforms.Resize((IMAGE_SHAPE, IMAGE_SHAPE)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
])
}
train_dataset = datasets.ImageFolder(root = DATA_PATH + "train",
transform = image_transforms["train"]
)
val_dataset = datasets.ImageFolder(root = DATA_PATH + "val",
transform = image_transforms["val"]
)
train_dataloader = DataLoader(train_dataset, batch_size=4, num_workers=2, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=4, num_workers=2, shuffle=True)
return train_dataloader,val_dataloader
| true
| true
|
79061b04e3ff23b56ce503eda27551352aefbd9c
| 645
|
py
|
Python
|
Apps/phdigitalshadows/dsapi/model/infrastructure_ssl.py
|
mattsayar-splunk/phantom-apps
|
b719b78ded609ae3cbd62d7d2cc317db1a613d3b
|
[
"Apache-2.0"
] | 1
|
2021-01-18T16:56:55.000Z
|
2021-01-18T16:56:55.000Z
|
Apps/phdigitalshadows/dsapi/model/infrastructure_ssl.py
|
mattsayar-splunk/phantom-apps
|
b719b78ded609ae3cbd62d7d2cc317db1a613d3b
|
[
"Apache-2.0"
] | null | null | null |
Apps/phdigitalshadows/dsapi/model/infrastructure_ssl.py
|
mattsayar-splunk/phantom-apps
|
b719b78ded609ae3cbd62d7d2cc317db1a613d3b
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2017 Digital Shadows Ltd.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
from ds_model import DSModel
class InfrastructureSSL(DSModel):
def __init__(self, id, payload):
self._id = id
self._payload = payload
@property
def id(self):
return self._id
@property
def payload(self):
return self._payload
def __str__(self):
return 'InfrastructureSSL[id={}, payload={}]'.format(self.id, self.payload)
@classmethod
def from_json(cls, json):
cast = DSModel.cast
return cls(cast(json.get('id'), long), json)
| 20.806452
| 83
| 0.634109
|
from ds_model import DSModel
class InfrastructureSSL(DSModel):
def __init__(self, id, payload):
self._id = id
self._payload = payload
@property
def id(self):
return self._id
@property
def payload(self):
return self._payload
def __str__(self):
return 'InfrastructureSSL[id={}, payload={}]'.format(self.id, self.payload)
@classmethod
def from_json(cls, json):
cast = DSModel.cast
return cls(cast(json.get('id'), long), json)
| true
| true
|
79061bf16478dfd4e21ce7aac8b9426486943061
| 589
|
py
|
Python
|
setup.py
|
jacobyxu/squirrel-and-friends
|
9fbd41953dd3b388fafa0fa963dfe6e59afef162
|
[
"MIT"
] | 2
|
2020-08-09T15:13:44.000Z
|
2020-09-04T21:44:23.000Z
|
setup.py
|
JacobXPX/squirrel-and-friends
|
9fbd41953dd3b388fafa0fa963dfe6e59afef162
|
[
"MIT"
] | 1
|
2021-11-10T19:43:40.000Z
|
2021-11-10T19:43:40.000Z
|
setup.py
|
JacobXPX/squirrel-and-friends
|
9fbd41953dd3b388fafa0fa963dfe6e59afef162
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name="squirrel-and-friends",
version="0.1",
packages=find_packages(),
install_requires=[
"emoji==0.5.4", "nltk==3.5", "pyspellchecker==0.5.4",
"numerizer==0.1.5", "lightgbm==2.3.1",
"albumentations==0.5.2", "opencv-python==4.5.1.48",
"opencv-python-headless==4.5.1.48",
"torch==1.7.1", "imgaug==0.4.0",
"numpy==1.19.5", "pandas==0.25.1",
"tensorboard==2.4.1", "tensorboard-plugin-wit==1.8.0",
"tensorflow-estimator==2.4.0", "tensorflow-gpu==2.4.1"
]
)
| 32.722222
| 62
| 0.568761
|
from setuptools import setup, find_packages
setup(
name="squirrel-and-friends",
version="0.1",
packages=find_packages(),
install_requires=[
"emoji==0.5.4", "nltk==3.5", "pyspellchecker==0.5.4",
"numerizer==0.1.5", "lightgbm==2.3.1",
"albumentations==0.5.2", "opencv-python==4.5.1.48",
"opencv-python-headless==4.5.1.48",
"torch==1.7.1", "imgaug==0.4.0",
"numpy==1.19.5", "pandas==0.25.1",
"tensorboard==2.4.1", "tensorboard-plugin-wit==1.8.0",
"tensorflow-estimator==2.4.0", "tensorflow-gpu==2.4.1"
]
)
| true
| true
|
79061ccb351da6bf7c8eeede8615af4ae5543246
| 459
|
py
|
Python
|
{{cookiecutter.project_slug}}/app/api/database/execute/user_information.py
|
khanh41/fastapi-mongodb-base-project
|
3ac2f2424cf0e4e35766cfd44431e5402f845e76
|
[
"MIT"
] | 3
|
2021-11-13T04:27:34.000Z
|
2022-02-13T14:52:07.000Z
|
{{cookiecutter.project_slug}}/app/api/database/execute/user_information.py
|
dhuynguyen94/base-code-fastapi-mongodb
|
58ee6fac498597f45ecd0dae703f4ab78226ce7c
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/app/api/database/execute/user_information.py
|
dhuynguyen94/base-code-fastapi-mongodb
|
58ee6fac498597f45ecd0dae703f4ab78226ce7c
|
[
"MIT"
] | 4
|
2021-11-13T04:27:43.000Z
|
2022-01-28T19:32:16.000Z
|
from app.api.database.connect import user_information_collection
from app.api.database.execute.base_execute import BaseExecute
from app.api.helpers.convert_model2dict import user_information_helper
class ExerciseTrainerExecute(BaseExecute):
def __init__(self, data_collection, data_helper):
super().__init__(data_collection, data_helper)
user_information_execute = ExerciseTrainerExecute(user_information_collection, user_information_helper)
| 35.307692
| 103
| 0.849673
|
from app.api.database.connect import user_information_collection
from app.api.database.execute.base_execute import BaseExecute
from app.api.helpers.convert_model2dict import user_information_helper
class ExerciseTrainerExecute(BaseExecute):
def __init__(self, data_collection, data_helper):
super().__init__(data_collection, data_helper)
user_information_execute = ExerciseTrainerExecute(user_information_collection, user_information_helper)
| true
| true
|
79061cdc88fb7edb224662802b500c1aba1fad72
| 10,991
|
py
|
Python
|
CountMillionCharacter.py
|
onepseudoxy/Python
|
2c22205b10e53e7e49d6ad1ce3e12ff2190285e3
|
[
"MIT"
] | null | null | null |
CountMillionCharacter.py
|
onepseudoxy/Python
|
2c22205b10e53e7e49d6ad1ce3e12ff2190285e3
|
[
"MIT"
] | null | null | null |
CountMillionCharacter.py
|
onepseudoxy/Python
|
2c22205b10e53e7e49d6ad1ce3e12ff2190285e3
|
[
"MIT"
] | null | null | null |
"""
Simple million word count program.
The main idea is that Python pairs each word
with the number of times
that word appears in the triple quoted string.
Credit to William J. Turkel and Adam Crymble for the word
frequency code used below. I just merged the two ideas.
"""
wordstring = '''SCENE I. Yorkshire. Gaultree Forest.
Enter the ARCHBISHOP OF YORK, MOWBRAY, LORD HASTINGS, and others
ARCHBISHOP OF YORK
What is this forest call'd?
HASTINGS
'Tis Gaultree Forest, an't shall please your grace.
ARCHBISHOP OF YORK
Here stand, my lords; and send discoverers forth
To know the numbers of our enemies.
HASTINGS
We have sent forth already.
ARCHBISHOP OF YORK
'Tis well done.
My friends and brethren in these great affairs,
I must acquaint you that I have received
New-dated letters from Northumberland;
Their cold intent, tenor and substance, thus:
Here doth he wish his person, with such powers
As might hold sortance with his quality,
The which he could not levy; whereupon
He is retired, to ripe his growing fortunes,
To Scotland: and concludes in hearty prayers
That your attempts may overlive the hazard
And fearful melting of their opposite.
MOWBRAY
Thus do the hopes we have in him touch ground
And dash themselves to pieces.
Enter a Messenger
HASTINGS
Now, what news?
Messenger
West of this forest, scarcely off a mile,
In goodly form comes on the enemy;
And, by the ground they hide, I judge their number
Upon or near the rate of thirty thousand.
MOWBRAY
The just proportion that we gave them out
Let us sway on and face them in the field.
ARCHBISHOP OF YORK
What well-appointed leader fronts us here?
Enter WESTMORELAND
MOWBRAY
I think it is my Lord of Westmoreland.
WESTMORELAND
Health and fair greeting from our general,
The prince, Lord John and Duke of Lancaster.
ARCHBISHOP OF YORK
Say on, my Lord of Westmoreland, in peace:
What doth concern your coming?
WESTMORELAND
Then, my lord,
Unto your grace do I in chief address
The substance of my speech. If that rebellion
Came like itself, in base and abject routs,
Led on by bloody youth, guarded with rags,
And countenanced by boys and beggary,
I say, if damn'd commotion so appear'd,
In his true, native and most proper shape,
You, reverend father, and these noble lords
Had not been here, to dress the ugly form
Of base and bloody insurrection
With your fair honours. You, lord archbishop,
Whose see is by a civil peace maintained,
Whose beard the silver hand of peace hath touch'd,
Whose learning and good letters peace hath tutor'd,
Whose white investments figure innocence,
The dove and very blessed spirit of peace,
Wherefore do you so ill translate ourself
Out of the speech of peace that bears such grace,
Into the harsh and boisterous tongue of war;
Turning your books to graves, your ink to blood,
Your pens to lances and your tongue divine
To a trumpet and a point of war?
ARCHBISHOP OF YORK
Wherefore do I this? so the question stands.
Briefly to this end: we are all diseased,
And with our surfeiting and wanton hours
Have brought ourselves into a burning fever,
And we must bleed for it; of which disease
Our late king, Richard, being infected, died.
But, my most noble Lord of Westmoreland,
I take not on me here as a physician,
Nor do I as an enemy to peace
Troop in the throngs of military men;
But rather show awhile like fearful war,
To diet rank minds sick of happiness
And purge the obstructions which begin to stop
Our very veins of life. Hear me more plainly.
I have in equal balance justly weigh'd
What wrongs our arms may do, what wrongs we suffer,
And find our griefs heavier than our offences.
We see which way the stream of time doth run,
And are enforced from our most quiet there
By the rough torrent of occasion;
And have the summary of all our griefs,
When time shall serve, to show in articles;
Which long ere this we offer'd to the king,
And might by no suit gain our audience:
When we are wrong'd and would unfold our griefs,
We are denied access unto his person
Even by those men that most have done us wrong.
The dangers of the days but newly gone,
Whose memory is written on the earth
With yet appearing blood, and the examples
Of every minute's instance, present now,
Hath put us in these ill-beseeming arms,
Not to break peace or any branch of it,
But to establish here a peace indeed,
Concurring both in name and quality.
WESTMORELAND
When ever yet was your appeal denied?
Wherein have you been galled by the king?
What peer hath been suborn'd to grate on you,
That you should seal this lawless bloody book
Of forged rebellion with a seal divine
And consecrate commotion's bitter edge?
ARCHBISHOP OF YORK
My brother general, the commonwealth,
To brother born an household cruelty,
I make my quarrel in particular.
WESTMORELAND
There is no need of any such redress;
Or if there were, it not belongs to you.
MOWBRAY
Why not to him in part, and to us all
That feel the bruises of the days before,
And suffer the condition of these times
To lay a heavy and unequal hand
Upon our honours?
WESTMORELAND
O, my good Lord Mowbray,
Construe the times to their necessities,
And you shall say indeed, it is the time,
And not the king, that doth you injuries.
Yet for your part, it not appears to me
Either from the king or in the present time
That you should have an inch of any ground
To build a grief on: were you not restored
To all the Duke of Norfolk's signories,
Your noble and right well remember'd father's?
MOWBRAY
What thing, in honour, had my father lost,
That need to be revived and breathed in me?
The king that loved him, as the state stood then,
Was force perforce compell'd to banish him:
And then that Harry Bolingbroke and he,
Being mounted and both roused in their seats,
Their neighing coursers daring of the spur,
Their armed staves in charge, their beavers down,
Their eyes of fire sparking through sights of steel
And the loud trumpet blowing them together,
Then, then, when there was nothing could have stay'd
My father from the breast of Bolingbroke,
O when the king did throw his warder down,
His own life hung upon the staff he threw;
Then threw he down himself and all their lives
That by indictment and by dint of sword
Have since miscarried under Bolingbroke.
WESTMORELAND
You speak, Lord Mowbray, now you know not what.
The Earl of Hereford was reputed then
In England the most valiant gentleman:
Who knows on whom fortune would then have smiled?
But if your father had been victor there,
He ne'er had borne it out of Coventry:
For all the country in a general voice
Cried hate upon him; and all their prayers and love
Were set on Hereford, whom they doted on
And bless'd and graced indeed, more than the king.
But this is mere digression from my purpose.
Here come I from our princely general
To know your griefs; to tell you from his grace
That he will give you audience; and wherein
It shall appear that your demands are just,
You shall enjoy them, every thing set off
That might so much as think you enemies.
MOWBRAY
But he hath forced us to compel this offer;
And it proceeds from policy, not love.
WESTMORELAND
Mowbray, you overween to take it so;
This offer comes from mercy, not from fear:
For, lo! within a ken our army lies,
Upon mine honour, all too confident
To give admittance to a thought of fear.
Our battle is more full of names than yours,
Our men more perfect in the use of arms,
Our armour all as strong, our cause the best;
Then reason will our heart should be as good
Say you not then our offer is compell'd.
MOWBRAY
Well, by my will we shall admit no parley.
WESTMORELAND
That argues but the shame of your offence:
A rotten case abides no handling.
HASTINGS
Hath the Prince John a full commission,
In very ample virtue of his father,
To hear and absolutely to determine
Of what conditions we shall stand upon?
WESTMORELAND
That is intended in the general's name:
I muse you make so slight a question.
ARCHBISHOP OF YORK
Then take, my Lord of Westmoreland, this schedule,
For this contains our general grievances:
Each several article herein redress'd,
All members of our cause, both here and hence,
That are insinew'd to this action,
Acquitted by a true substantial form
And present execution of our wills
To us and to our purposes confined,
We come within our awful banks again
And knit our powers to the arm of peace.
WESTMORELAND
This will I show the general. Please you, lords,
In sight of both our battles we may meet;
And either end in peace, which God so frame!
Or to the place of difference call the swords
Which must decide it.
ARCHBISHOP OF YORK
My lord, we will do so.
Exit WESTMORELAND
MOWBRAY
There is a thing within my bosom tells me
That no conditions of our peace can stand.
HASTINGS
Fear you not that: if we can make our peace
Upon such large terms and so absolute
As our conditions shall consist upon,
Our peace shall stand as firm as rocky mountains.
MOWBRAY
Yea, but our valuation shall be such
That every slight and false-derived cause,
Yea, every idle, nice and wanton reason
Shall to the king taste of this action;
That, were our royal faiths martyrs in love,
We shall be winnow'd with so rough a wind
That even our corn shall seem as light as chaff
And good from bad find no partition.
ARCHBISHOP OF YORK
No, no, my lord. Note this; the king is weary
Of dainty and such picking grievances:
For he hath found to end one doubt by death
Revives two greater in the heirs of life,
And therefore will he wipe his tables clean
And keep no tell-tale to his memory
That may repeat and history his loss
To new remembrance; for full well he knows
He cannot so precisely weed this land
As his misdoubts present occasion:
His foes are so enrooted with his friends
That, plucking to unfix an enemy,
He doth unfasten so and shake a friend:
So that this land, like an offensive wife
That hath enraged him on to offer strokes,
As he is striking, holds his infant up
And hangs resolved correction in the arm
That was uprear'd to execution.
HASTINGS
Besides, the king hath wasted all his rods
On late offenders, that he now doth lack
The very instruments of chastisement:
So that his power, like to a fangless lion,
May offer, but not hold.
ARCHBISHOP OF YORK
'Tis very true:
And therefore be assured, my good lord marshal,
If we do now make our atonement well,
Our peace will, like a broken limb united,
Grow stronger for the breaking.
MOWBRAY
Be it so.
Here is return'd my Lord of Westmoreland.
Re-enter WESTMORELAND
WESTMORELAND
The prince is here at hand: pleaseth your lordship
To meet his grace just distance 'tween our armies.
MOWBRAY
Your grace of York, in God's name then, set forward.
ARCHBISHOP OF YORK
Before, and greet his grace: my lord, we come.
Exeunt'''
wordlist = wordstring.split()
wordfreq = [wordlist.count(w) for w in wordlist]
print("String\n {} \n".format(wordstring))
print("List\n {} \n".format(str(wordlist)))
print("Frequencies\n {} \n".format(str(wordfreq)))
print("Pairs\n {}".format(str(dict(zip(wordlist, wordfreq)))))
print("Edit I made to show how to pull from IntellijIdea")
print("Adding my two cents here")
| 36.154605
| 64
| 0.786189
|
wordstring = '''SCENE I. Yorkshire. Gaultree Forest.
Enter the ARCHBISHOP OF YORK, MOWBRAY, LORD HASTINGS, and others
ARCHBISHOP OF YORK
What is this forest call'd?
HASTINGS
'Tis Gaultree Forest, an't shall please your grace.
ARCHBISHOP OF YORK
Here stand, my lords; and send discoverers forth
To know the numbers of our enemies.
HASTINGS
We have sent forth already.
ARCHBISHOP OF YORK
'Tis well done.
My friends and brethren in these great affairs,
I must acquaint you that I have received
New-dated letters from Northumberland;
Their cold intent, tenor and substance, thus:
Here doth he wish his person, with such powers
As might hold sortance with his quality,
The which he could not levy; whereupon
He is retired, to ripe his growing fortunes,
To Scotland: and concludes in hearty prayers
That your attempts may overlive the hazard
And fearful melting of their opposite.
MOWBRAY
Thus do the hopes we have in him touch ground
And dash themselves to pieces.
Enter a Messenger
HASTINGS
Now, what news?
Messenger
West of this forest, scarcely off a mile,
In goodly form comes on the enemy;
And, by the ground they hide, I judge their number
Upon or near the rate of thirty thousand.
MOWBRAY
The just proportion that we gave them out
Let us sway on and face them in the field.
ARCHBISHOP OF YORK
What well-appointed leader fronts us here?
Enter WESTMORELAND
MOWBRAY
I think it is my Lord of Westmoreland.
WESTMORELAND
Health and fair greeting from our general,
The prince, Lord John and Duke of Lancaster.
ARCHBISHOP OF YORK
Say on, my Lord of Westmoreland, in peace:
What doth concern your coming?
WESTMORELAND
Then, my lord,
Unto your grace do I in chief address
The substance of my speech. If that rebellion
Came like itself, in base and abject routs,
Led on by bloody youth, guarded with rags,
And countenanced by boys and beggary,
I say, if damn'd commotion so appear'd,
In his true, native and most proper shape,
You, reverend father, and these noble lords
Had not been here, to dress the ugly form
Of base and bloody insurrection
With your fair honours. You, lord archbishop,
Whose see is by a civil peace maintained,
Whose beard the silver hand of peace hath touch'd,
Whose learning and good letters peace hath tutor'd,
Whose white investments figure innocence,
The dove and very blessed spirit of peace,
Wherefore do you so ill translate ourself
Out of the speech of peace that bears such grace,
Into the harsh and boisterous tongue of war;
Turning your books to graves, your ink to blood,
Your pens to lances and your tongue divine
To a trumpet and a point of war?
ARCHBISHOP OF YORK
Wherefore do I this? so the question stands.
Briefly to this end: we are all diseased,
And with our surfeiting and wanton hours
Have brought ourselves into a burning fever,
And we must bleed for it; of which disease
Our late king, Richard, being infected, died.
But, my most noble Lord of Westmoreland,
I take not on me here as a physician,
Nor do I as an enemy to peace
Troop in the throngs of military men;
But rather show awhile like fearful war,
To diet rank minds sick of happiness
And purge the obstructions which begin to stop
Our very veins of life. Hear me more plainly.
I have in equal balance justly weigh'd
What wrongs our arms may do, what wrongs we suffer,
And find our griefs heavier than our offences.
We see which way the stream of time doth run,
And are enforced from our most quiet there
By the rough torrent of occasion;
And have the summary of all our griefs,
When time shall serve, to show in articles;
Which long ere this we offer'd to the king,
And might by no suit gain our audience:
When we are wrong'd and would unfold our griefs,
We are denied access unto his person
Even by those men that most have done us wrong.
The dangers of the days but newly gone,
Whose memory is written on the earth
With yet appearing blood, and the examples
Of every minute's instance, present now,
Hath put us in these ill-beseeming arms,
Not to break peace or any branch of it,
But to establish here a peace indeed,
Concurring both in name and quality.
WESTMORELAND
When ever yet was your appeal denied?
Wherein have you been galled by the king?
What peer hath been suborn'd to grate on you,
That you should seal this lawless bloody book
Of forged rebellion with a seal divine
And consecrate commotion's bitter edge?
ARCHBISHOP OF YORK
My brother general, the commonwealth,
To brother born an household cruelty,
I make my quarrel in particular.
WESTMORELAND
There is no need of any such redress;
Or if there were, it not belongs to you.
MOWBRAY
Why not to him in part, and to us all
That feel the bruises of the days before,
And suffer the condition of these times
To lay a heavy and unequal hand
Upon our honours?
WESTMORELAND
O, my good Lord Mowbray,
Construe the times to their necessities,
And you shall say indeed, it is the time,
And not the king, that doth you injuries.
Yet for your part, it not appears to me
Either from the king or in the present time
That you should have an inch of any ground
To build a grief on: were you not restored
To all the Duke of Norfolk's signories,
Your noble and right well remember'd father's?
MOWBRAY
What thing, in honour, had my father lost,
That need to be revived and breathed in me?
The king that loved him, as the state stood then,
Was force perforce compell'd to banish him:
And then that Harry Bolingbroke and he,
Being mounted and both roused in their seats,
Their neighing coursers daring of the spur,
Their armed staves in charge, their beavers down,
Their eyes of fire sparking through sights of steel
And the loud trumpet blowing them together,
Then, then, when there was nothing could have stay'd
My father from the breast of Bolingbroke,
O when the king did throw his warder down,
His own life hung upon the staff he threw;
Then threw he down himself and all their lives
That by indictment and by dint of sword
Have since miscarried under Bolingbroke.
WESTMORELAND
You speak, Lord Mowbray, now you know not what.
The Earl of Hereford was reputed then
In England the most valiant gentleman:
Who knows on whom fortune would then have smiled?
But if your father had been victor there,
He ne'er had borne it out of Coventry:
For all the country in a general voice
Cried hate upon him; and all their prayers and love
Were set on Hereford, whom they doted on
And bless'd and graced indeed, more than the king.
But this is mere digression from my purpose.
Here come I from our princely general
To know your griefs; to tell you from his grace
That he will give you audience; and wherein
It shall appear that your demands are just,
You shall enjoy them, every thing set off
That might so much as think you enemies.
MOWBRAY
But he hath forced us to compel this offer;
And it proceeds from policy, not love.
WESTMORELAND
Mowbray, you overween to take it so;
This offer comes from mercy, not from fear:
For, lo! within a ken our army lies,
Upon mine honour, all too confident
To give admittance to a thought of fear.
Our battle is more full of names than yours,
Our men more perfect in the use of arms,
Our armour all as strong, our cause the best;
Then reason will our heart should be as good
Say you not then our offer is compell'd.
MOWBRAY
Well, by my will we shall admit no parley.
WESTMORELAND
That argues but the shame of your offence:
A rotten case abides no handling.
HASTINGS
Hath the Prince John a full commission,
In very ample virtue of his father,
To hear and absolutely to determine
Of what conditions we shall stand upon?
WESTMORELAND
That is intended in the general's name:
I muse you make so slight a question.
ARCHBISHOP OF YORK
Then take, my Lord of Westmoreland, this schedule,
For this contains our general grievances:
Each several article herein redress'd,
All members of our cause, both here and hence,
That are insinew'd to this action,
Acquitted by a true substantial form
And present execution of our wills
To us and to our purposes confined,
We come within our awful banks again
And knit our powers to the arm of peace.
WESTMORELAND
This will I show the general. Please you, lords,
In sight of both our battles we may meet;
And either end in peace, which God so frame!
Or to the place of difference call the swords
Which must decide it.
ARCHBISHOP OF YORK
My lord, we will do so.
Exit WESTMORELAND
MOWBRAY
There is a thing within my bosom tells me
That no conditions of our peace can stand.
HASTINGS
Fear you not that: if we can make our peace
Upon such large terms and so absolute
As our conditions shall consist upon,
Our peace shall stand as firm as rocky mountains.
MOWBRAY
Yea, but our valuation shall be such
That every slight and false-derived cause,
Yea, every idle, nice and wanton reason
Shall to the king taste of this action;
That, were our royal faiths martyrs in love,
We shall be winnow'd with so rough a wind
That even our corn shall seem as light as chaff
And good from bad find no partition.
ARCHBISHOP OF YORK
No, no, my lord. Note this; the king is weary
Of dainty and such picking grievances:
For he hath found to end one doubt by death
Revives two greater in the heirs of life,
And therefore will he wipe his tables clean
And keep no tell-tale to his memory
That may repeat and history his loss
To new remembrance; for full well he knows
He cannot so precisely weed this land
As his misdoubts present occasion:
His foes are so enrooted with his friends
That, plucking to unfix an enemy,
He doth unfasten so and shake a friend:
So that this land, like an offensive wife
That hath enraged him on to offer strokes,
As he is striking, holds his infant up
And hangs resolved correction in the arm
That was uprear'd to execution.
HASTINGS
Besides, the king hath wasted all his rods
On late offenders, that he now doth lack
The very instruments of chastisement:
So that his power, like to a fangless lion,
May offer, but not hold.
ARCHBISHOP OF YORK
'Tis very true:
And therefore be assured, my good lord marshal,
If we do now make our atonement well,
Our peace will, like a broken limb united,
Grow stronger for the breaking.
MOWBRAY
Be it so.
Here is return'd my Lord of Westmoreland.
Re-enter WESTMORELAND
WESTMORELAND
The prince is here at hand: pleaseth your lordship
To meet his grace just distance 'tween our armies.
MOWBRAY
Your grace of York, in God's name then, set forward.
ARCHBISHOP OF YORK
Before, and greet his grace: my lord, we come.
Exeunt'''
wordlist = wordstring.split()
wordfreq = [wordlist.count(w) for w in wordlist]
print("String\n {} \n".format(wordstring))
print("List\n {} \n".format(str(wordlist)))
print("Frequencies\n {} \n".format(str(wordfreq)))
print("Pairs\n {}".format(str(dict(zip(wordlist, wordfreq)))))
print("Edit I made to show how to pull from IntellijIdea")
print("Adding my two cents here")
| true
| true
|
79061d383cf4c2a7f5a62388e67973f2bc64b30b
| 215
|
py
|
Python
|
frappe/patches/v7_0/rename_newsletter_list_to_email_group.py
|
chentaoz/frappe
|
ee3c4943bf6177ad3b410cdb0d802af486751a65
|
[
"MIT"
] | 5
|
2017-09-12T15:56:31.000Z
|
2022-03-09T13:50:21.000Z
|
frappe/patches/v7_0/rename_newsletter_list_to_email_group.py
|
chentaoz/frappe
|
ee3c4943bf6177ad3b410cdb0d802af486751a65
|
[
"MIT"
] | 212
|
2017-08-16T13:03:18.000Z
|
2020-10-06T12:26:21.000Z
|
frappe/patches/v7_0/rename_newsletter_list_to_email_group.py
|
chentaoz/frappe
|
ee3c4943bf6177ad3b410cdb0d802af486751a65
|
[
"MIT"
] | 14
|
2020-11-04T11:22:44.000Z
|
2022-02-01T20:59:37.000Z
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.rename_doc('DocType', 'Newsletter List', 'Email Group')
frappe.rename_doc('DocType', 'Newsletter List Subscriber', 'Email Group Member')
| 35.833333
| 81
| 0.781395
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.rename_doc('DocType', 'Newsletter List', 'Email Group')
frappe.rename_doc('DocType', 'Newsletter List Subscriber', 'Email Group Member')
| true
| true
|
79061d639c6464c7a7fdf5d79ff1a55b2471022c
| 2,691
|
py
|
Python
|
openpyxl/pivot/tests/test_record.py
|
hfutxqd/openpyxl
|
50d6e37e0592aac63bc1ffeaf7b13e3b863bb066
|
[
"MIT"
] | null | null | null |
openpyxl/pivot/tests/test_record.py
|
hfutxqd/openpyxl
|
50d6e37e0592aac63bc1ffeaf7b13e3b863bb066
|
[
"MIT"
] | null | null | null |
openpyxl/pivot/tests/test_record.py
|
hfutxqd/openpyxl
|
50d6e37e0592aac63bc1ffeaf7b13e3b863bb066
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2010-2019 openpyxl
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyxl.packaging.manifest import Manifest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
from .test_fields import (
Index,
Number,
Text,
)
@pytest.fixture
def Record():
from ..record import Record
return Record
class TestRecord:
def test_ctor(self, Record, Number, Text, Index):
n = [Number(v=1), Number(v=25)]
s = [Text(v="2014-03-24")]
x = [Index(), Index(), Index()]
fields = n + s + x
field = Record(_fields=fields)
xml = tostring(field.to_tree())
expected = """
<r>
<n v="1"/>
<n v="25"/>
<s v="2014-03-24"/>
<x v="0"/>
<x v="0"/>
<x v="0"/>
</r>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Record, Number, Text, Index):
src = """
<r>
<n v="1"/>
<x v="0"/>
<s v="2014-03-24"/>
<x v="0"/>
<n v="25"/>
<x v="0"/>
</r>
"""
node = fromstring(src)
n = [Number(v=1), Number(v=25)]
s = [Text(v="2014-03-24")]
x = [Index(), Index(), Index()]
fields = [
Number(v=1),
Index(),
Text(v="2014-03-24"),
Index(),
Number(v=25),
Index(),
]
field = Record.from_tree(node)
assert field == Record(_fields=fields)
@pytest.fixture
def RecordList():
from ..record import RecordList
return RecordList
class TestRecordList:
def test_ctor(self, RecordList):
cache = RecordList()
xml = tostring(cache.to_tree())
expected = """
<pivotCacheRecords xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
count="0" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, RecordList):
src = """
<pivotCacheRecords count="0" />
"""
node = fromstring(src)
cache = RecordList.from_tree(node)
assert cache == RecordList()
def test_write(self, RecordList):
out = BytesIO()
archive = ZipFile(out, mode="w")
manifest = Manifest()
records = RecordList()
xml = tostring(records.to_tree())
records._write(archive, manifest)
manifest.append(records)
assert archive.namelist() == [records.path[1:]]
assert manifest.find(records.mime_type)
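# Editor's sketch (not one of the original tests): the serialize/parse
# round-trip pattern every test above follows, written once generically
# using the tostring/fromstring helpers imported at the top of this file.
def _roundtrip(obj):
    xml = tostring(obj.to_tree())
    return type(obj).from_tree(fromstring(xml))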
| 23.814159
| 92
| 0.528056
|
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyxl.packaging.manifest import Manifest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
from .test_fields import (
Index,
Number,
Text,
)
@pytest.fixture
def Record():
from ..record import Record
return Record
class TestRecord:
def test_ctor(self, Record, Number, Text, Index):
n = [Number(v=1), Number(v=25)]
s = [Text(v="2014-03-24")]
x = [Index(), Index(), Index()]
fields = n + s + x
field = Record(_fields=fields)
xml = tostring(field.to_tree())
expected = """
<r>
<n v="1"/>
<n v="25"/>
<s v="2014-03-24"/>
<x v="0"/>
<x v="0"/>
<x v="0"/>
</r>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Record, Number, Text, Index):
src = """
<r>
<n v="1"/>
<x v="0"/>
<s v="2014-03-24"/>
<x v="0"/>
<n v="25"/>
<x v="0"/>
</r>
"""
node = fromstring(src)
n = [Number(v=1), Number(v=25)]
s = [Text(v="2014-03-24")]
x = [Index(), Index(), Index()]
fields = [
Number(v=1),
Index(),
Text(v="2014-03-24"),
Index(),
Number(v=25),
Index(),
]
field = Record.from_tree(node)
assert field == Record(_fields=fields)
@pytest.fixture
def RecordList():
from ..record import RecordList
return RecordList
class TestRecordList:
def test_ctor(self, RecordList):
cache = RecordList()
xml = tostring(cache.to_tree())
expected = """
<pivotCacheRecords xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
count="0" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, RecordList):
src = """
<pivotCacheRecords count="0" />
"""
node = fromstring(src)
cache = RecordList.from_tree(node)
assert cache == RecordList()
def test_write(self, RecordList):
out = BytesIO()
archive = ZipFile(out, mode="w")
manifest = Manifest()
records = RecordList()
xml = tostring(records.to_tree())
records._write(archive, manifest)
manifest.append(records)
assert archive.namelist() == [records.path[1:]]
assert manifest.find(records.mime_type)
| true
| true
|
79061e1cfc2a540f18c2cab349a182091fd21bb6
| 1,686
|
py
|
Python
|
lib/django-1.4/django/contrib/gis/tests/geoapp/models.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/django-1.4/django/contrib/gis/tests/geoapp/models.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/django-1.4/django/contrib/gis/tests/geoapp/models.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import mysql, spatialite
# MySQL spatial indices can't handle NULL geometries.
null_flag = not mysql
class Country(models.Model):
name = models.CharField(max_length=30)
mpoly = models.MultiPolygonField() # SRID, by default, is 4326
objects = models.GeoManager()
def __unicode__(self): return self.name
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.name
# This is an inherited model from City
class PennsylvaniaCity(City):
county = models.CharField(max_length=30)
founded = models.DateTimeField(null=True)
objects = models.GeoManager() # TODO: This should be implicitly inherited.
class State(models.Model):
name = models.CharField(max_length=30)
poly = models.PolygonField(null=null_flag) # Allowing NULL geometries here.
objects = models.GeoManager()
def __unicode__(self): return self.name
class Track(models.Model):
name = models.CharField(max_length=30)
line = models.LineStringField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class Truth(models.Model):
val = models.BooleanField()
objects = models.GeoManager()
if not spatialite:
class Feature(models.Model):
name = models.CharField(max_length=20)
geom = models.GeometryField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class MinusOneSRID(models.Model):
geom = models.PointField(srid=-1) # Minus one SRID.
objects = models.GeoManager()
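# Editor's sketch (not part of the original test models): how these models
# are typically queried. GeoManager enables the spatial lookups below; the
# coordinates are illustrative and a configured spatial backend is assumed.
def _example_spatial_queries():
    from django.contrib.gis.geos import Point
    from django.contrib.gis.measure import D
    pnt = Point(-95.36, 29.76)  # lon/lat in the default SRID, 4326
    countries = Country.objects.filter(mpoly__contains=pnt)
    nearby = City.objects.filter(point__distance_lte=(pnt, D(km=50)))
    return countries, nearby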
| 33.058824
| 79
| 0.716489
|
from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import mysql, spatialite
null_flag = not mysql
class Country(models.Model):
name = models.CharField(max_length=30)
mpoly = models.MultiPolygonField() # SRID, by default, is 4326
objects = models.GeoManager()
def __unicode__(self): return self.name
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.name
# This is an inherited model from City
class PennsylvaniaCity(City):
county = models.CharField(max_length=30)
founded = models.DateTimeField(null=True)
objects = models.GeoManager() # TODO: This should be implicitly inherited.
class State(models.Model):
name = models.CharField(max_length=30)
poly = models.PolygonField(null=null_flag) # Allowing NULL geometries here.
objects = models.GeoManager()
def __unicode__(self): return self.name
class Track(models.Model):
name = models.CharField(max_length=30)
line = models.LineStringField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class Truth(models.Model):
val = models.BooleanField()
objects = models.GeoManager()
if not spatialite:
class Feature(models.Model):
name = models.CharField(max_length=20)
geom = models.GeometryField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class MinusOneSRID(models.Model):
geom = models.PointField(srid=-1) # Minus one SRID.
objects = models.GeoManager()
| true
| true
|
79061ee7a253f642eccc4cffd1d7cb39c45bf4ae
| 2,056
|
py
|
Python
|
syft/execution/translation/torchscript.py
|
NicoSerranoP/PySyft
|
87fcd566c46fce4c16d363c94396dd26bd82a016
|
[
"Apache-2.0"
] | null | null | null |
syft/execution/translation/torchscript.py
|
NicoSerranoP/PySyft
|
87fcd566c46fce4c16d363c94396dd26bd82a016
|
[
"Apache-2.0"
] | null | null | null |
syft/execution/translation/torchscript.py
|
NicoSerranoP/PySyft
|
87fcd566c46fce4c16d363c94396dd26bd82a016
|
[
"Apache-2.0"
] | 1
|
2021-09-04T16:27:41.000Z
|
2021-09-04T16:27:41.000Z
|
from torch import jit
from syft.execution.placeholder import PlaceHolder
from syft.execution.translation.abstract import AbstractPlanTranslator
class PlanTranslatorTorchscript(AbstractPlanTranslator):
"""Performs translation from 'list of ops' Plan into torchscript Plan"""
def __init__(self, plan):
super().__init__(plan)
def translate(self):
translation_plan = self.plan.copy()
translation_plan.forward = None
args = translation_plan.create_dummy_args()
# jit.trace clones input args and can change their type, so we have to skip types check
# TODO see if type check can be made less strict,
# e.g. tensor/custom tensor/nn.Parameter could be considered same type
translation_plan.validate_input_types = False
# To avoid storing Plan state tensors in torchscript, they will be sent as parameters
# we trace wrapper func, which accepts state parameters as last arg
# and sets them into the Plan before executing the Plan
def wrap_stateful_plan(*args):
role = translation_plan.role
state = args[-1]
if 0 < len(role.state.state_placeholders) == len(state) and isinstance(
state, (list, tuple)
):
state_placeholders = tuple(
role.placeholders[ph.id.value] for ph in role.state.state_placeholders
)
PlaceHolder.instantiate_placeholders(role.state.state_placeholders, state)
PlaceHolder.instantiate_placeholders(state_placeholders, state)
return translation_plan(*args[:-1])
plan_params = translation_plan.parameters()
if len(plan_params) > 0:
torchscript_plan = jit.trace(wrap_stateful_plan, (*args, plan_params))
else:
torchscript_plan = jit.trace(translation_plan, args)
self.plan.torchscript = torchscript_plan
return self.plan
def remove(self):
self.plan.torchscript = None
return self.plan
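# Editor's sketch (not part of the original translator): the bare jit.trace
# pattern the class above builds on, shown on a plain function with made-up
# tensor shapes. Tracing records the ops executed for the example inputs.
def _example_trace():
    import torch
    def f(x, w):
        return x @ w
    traced = jit.trace(f, (torch.randn(2, 3), torch.randn(3, 1)))
    return traced  # a traced callable with a recorded torchscript graph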
| 38.792453
| 95
| 0.664397
|
from torch import jit
from syft.execution.placeholder import PlaceHolder
from syft.execution.translation.abstract import AbstractPlanTranslator
class PlanTranslatorTorchscript(AbstractPlanTranslator):
def __init__(self, plan):
super().__init__(plan)
def translate(self):
translation_plan = self.plan.copy()
translation_plan.forward = None
args = translation_plan.create_dummy_args()
translation_plan.validate_input_types = False
def wrap_stateful_plan(*args):
role = translation_plan.role
state = args[-1]
if 0 < len(role.state.state_placeholders) == len(state) and isinstance(
state, (list, tuple)
):
state_placeholders = tuple(
role.placeholders[ph.id.value] for ph in role.state.state_placeholders
)
PlaceHolder.instantiate_placeholders(role.state.state_placeholders, state)
PlaceHolder.instantiate_placeholders(state_placeholders, state)
return translation_plan(*args[:-1])
plan_params = translation_plan.parameters()
if len(plan_params) > 0:
torchscript_plan = jit.trace(wrap_stateful_plan, (*args, plan_params))
else:
torchscript_plan = jit.trace(translation_plan, args)
self.plan.torchscript = torchscript_plan
return self.plan
def remove(self):
self.plan.torchscript = None
return self.plan
| true
| true
|
79061fa2e93a400e914d6565ebe19b7e30f0efe1
| 9,885
|
py
|
Python
|
test/fuzz/test_runner.py
|
BlockMechanic/crown
|
e6b1873ca79c484a3621e503eb8ce464f85dd2c7
|
[
"MIT"
] | 1
|
2021-10-12T05:27:56.000Z
|
2021-10-12T05:27:56.000Z
|
test/fuzz/test_runner.py
|
BlockMechanic/crown
|
e6b1873ca79c484a3621e503eb8ce464f85dd2c7
|
[
"MIT"
] | 15
|
2022-01-14T09:13:52.000Z
|
2022-03-21T09:40:29.000Z
|
test/fuzz/test_runner.py
|
BlockMechanic/crown
|
e6b1873ca79c484a3621e503eb8ce464f85dd2c7
|
[
"MIT"
] | 2
|
2021-10-12T05:39:32.000Z
|
2022-01-03T10:41:04.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Crown Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run fuzz test targets.
"""
from concurrent.futures import ThreadPoolExecutor, as_completed
import argparse
import configparser
import logging
import os
import subprocess
import sys
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='''Run the fuzz targets with all inputs from the seed_dir once.''',
)
parser.add_argument(
"-l",
"--loglevel",
dest="loglevel",
default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console.",
)
parser.add_argument(
'--valgrind',
action='store_true',
help='If true, run fuzzing binaries under the valgrind memory error detector',
)
parser.add_argument(
'-x',
'--exclude',
help="A comma-separated list of targets to exclude",
)
parser.add_argument(
'--par',
'-j',
type=int,
default=4,
help='How many targets to merge or execute in parallel.',
)
parser.add_argument(
'seed_dir',
help='The seed corpus to run on (must contain subfolders for each fuzz target).',
)
parser.add_argument(
'target',
nargs='*',
help='The target(s) to run. Default is to run all targets.',
)
parser.add_argument(
'--m_dir',
help='Merge inputs from this directory into the seed_dir. Needs /target subdirectory.',
)
parser.add_argument(
'-g',
'--generate',
action='store_true',
help='Create new corpus seeds (or extend the existing ones) by running'
' the given targets for a finite number of times. Outputs them to'
' the passed seed_dir.'
)
args = parser.parse_args()
# Set up logging
logging.basicConfig(
format='%(message)s',
level=int(args.loglevel) if args.loglevel.isdigit() else args.loglevel.upper(),
)
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
if not config["components"].getboolean("ENABLE_FUZZ"):
logging.error("Must have fuzz targets built")
sys.exit(1)
# Build list of tests
test_list_all = parse_test_list(makefile=os.path.join(config["environment"]["SRCDIR"], 'src', 'Makefile.test.include'))
if not test_list_all:
logging.error("No fuzz targets found")
sys.exit(1)
logging.debug("{} fuzz target(s) found: {}".format(len(test_list_all), " ".join(sorted(test_list_all))))
args.target = args.target or test_list_all # By default run all
test_list_error = list(set(args.target).difference(set(test_list_all)))
if test_list_error:
logging.error("Unknown fuzz targets selected: {}".format(test_list_error))
test_list_selection = list(set(test_list_all).intersection(set(args.target)))
if not test_list_selection:
logging.error("No fuzz targets selected")
if args.exclude:
for excluded_target in args.exclude.split(","):
if excluded_target not in test_list_selection:
logging.error("Target \"{}\" not found in current target list.".format(excluded_target))
continue
test_list_selection.remove(excluded_target)
test_list_selection.sort()
logging.info("{} of {} detected fuzz target(s) selected: {}".format(len(test_list_selection), len(test_list_all), " ".join(test_list_selection)))
if not args.generate:
test_list_seedless = []
for t in test_list_selection:
corpus_path = os.path.join(args.seed_dir, t)
if not os.path.exists(corpus_path) or len(os.listdir(corpus_path)) == 0:
test_list_seedless.append(t)
test_list_seedless.sort()
if test_list_seedless:
logging.info(
"Fuzzing harnesses lacking a seed corpus: {}".format(
" ".join(test_list_seedless)
)
)
logging.info("Please consider adding a fuzz seed corpus at https://github.com/crown-core/qa-assets")
try:
help_output = subprocess.run(
args=[
os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', test_list_selection[0]),
'-help=1',
],
timeout=20,
check=True,
stderr=subprocess.PIPE,
universal_newlines=True,
).stderr
if "libFuzzer" not in help_output:
logging.error("Must be built with libFuzzer")
sys.exit(1)
except subprocess.TimeoutExpired:
logging.error("subprocess timed out: Currently only libFuzzer is supported")
sys.exit(1)
with ThreadPoolExecutor(max_workers=args.par) as fuzz_pool:
if args.generate:
return generate_corpus_seeds(
fuzz_pool=fuzz_pool,
build_dir=config["environment"]["BUILDDIR"],
seed_dir=args.seed_dir,
targets=test_list_selection,
)
if args.m_dir:
merge_inputs(
fuzz_pool=fuzz_pool,
corpus=args.seed_dir,
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
merge_dir=args.m_dir,
)
return
run_once(
fuzz_pool=fuzz_pool,
corpus=args.seed_dir,
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
use_valgrind=args.valgrind,
)
def generate_corpus_seeds(*, fuzz_pool, build_dir, seed_dir, targets):
"""Generates new corpus seeds.
Run {targets} without input, and outputs the generated corpus seeds to
{seed_dir}.
"""
logging.info("Generating corpus seeds to {}".format(seed_dir))
def job(command):
logging.debug("Running '{}'\n".format(" ".join(command)))
logging.debug("Command '{}' output:\n'{}'\n".format(
' '.join(command),
subprocess.run(command, check=True, stderr=subprocess.PIPE,
universal_newlines=True).stderr
))
futures = []
for target in targets:
target_seed_dir = os.path.join(seed_dir, target)
os.makedirs(target_seed_dir, exist_ok=True)
command = [
os.path.join(build_dir, "src", "test", "fuzz", target),
"-runs=100000",
target_seed_dir,
]
futures.append(fuzz_pool.submit(job, command))
for future in as_completed(futures):
future.result()
def merge_inputs(*, fuzz_pool, corpus, test_list, build_dir, merge_dir):
logging.info("Merge the inputs in the passed dir into the seed_dir. Passed dir {}".format(merge_dir))
jobs = []
for t in test_list:
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-merge=1',
'-use_value_profile=1', # Also done by oss-fuzz https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487
os.path.join(corpus, t),
os.path.join(merge_dir, t),
]
os.makedirs(os.path.join(corpus, t), exist_ok=True)
os.makedirs(os.path.join(merge_dir, t), exist_ok=True)
def job(t, args):
output = 'Run {} with args {}\n'.format(t, " ".join(args))
output += subprocess.run(args, check=True, stderr=subprocess.PIPE, universal_newlines=True).stderr
logging.debug(output)
jobs.append(fuzz_pool.submit(job, t, args))
for future in as_completed(jobs):
future.result()
def run_once(*, fuzz_pool, corpus, test_list, build_dir, use_valgrind):
jobs = []
for t in test_list:
corpus_path = os.path.join(corpus, t)
os.makedirs(corpus_path, exist_ok=True)
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-runs=1',
corpus_path,
]
if use_valgrind:
args = ['valgrind', '--quiet', '--error-exitcode=1'] + args
def job(t, args):
output = 'Run {} with args {}'.format(t, args)
result = subprocess.run(args, stderr=subprocess.PIPE, universal_newlines=True)
output += result.stderr
return output, result
jobs.append(fuzz_pool.submit(job, t, args))
for future in as_completed(jobs):
output, result = future.result()
logging.debug(output)
try:
result.check_returncode()
except subprocess.CalledProcessError as e:
if e.stdout:
logging.info(e.stdout)
if e.stderr:
logging.info(e.stderr)
logging.info("Target \"{}\" failed with exit code {}".format(" ".join(result.args), e.returncode))
sys.exit(1)
def parse_test_list(makefile):
with open(makefile, encoding='utf-8') as makefile_test:
test_list_all = []
read_targets = False
for line in makefile_test.readlines():
line = line.strip().replace('test/fuzz/', '').replace(' \\', '')
if read_targets:
if not line:
break
test_list_all.append(line)
continue
if line == 'FUZZ_TARGETS =':
read_targets = True
return test_list_all
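# Editor's sketch (not an original part of this runner): exercising
# parse_test_list() on a hypothetical Makefile.test.include excerpt to show
# the shape it expects (targets accumulate until the first blank line).
def _example_parse_test_list():
    import tempfile
    excerpt = (
        'FUZZ_TARGETS = \\\n'
        '  test/fuzz/addition_overflow \\\n'
        '  test/fuzz/asmap\n'
        '\n'
        'OTHER_VAR = 1\n'
    )
    with tempfile.NamedTemporaryFile('w', suffix='.include',
                                     delete=False, encoding='utf-8') as f:
        f.write(excerpt)
    return parse_test_list(f.name)  # -> ['addition_overflow', 'asmap']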
if __name__ == '__main__':
main()
| 35.053191
| 180
| 0.600202
|
from concurrent.futures import ThreadPoolExecutor, as_completed
import argparse
import configparser
import logging
import os
import subprocess
import sys
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='''Run the fuzz targets with all inputs from the seed_dir once.''',
)
parser.add_argument(
"-l",
"--loglevel",
dest="loglevel",
default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console.",
)
parser.add_argument(
'--valgrind',
action='store_true',
help='If true, run fuzzing binaries under the valgrind memory error detector',
)
parser.add_argument(
'-x',
'--exclude',
help="A comma-separated list of targets to exclude",
)
parser.add_argument(
'--par',
'-j',
type=int,
default=4,
help='How many targets to merge or execute in parallel.',
)
parser.add_argument(
'seed_dir',
help='The seed corpus to run on (must contain subfolders for each fuzz target).',
)
parser.add_argument(
'target',
nargs='*',
help='The target(s) to run. Default is to run all targets.',
)
parser.add_argument(
'--m_dir',
help='Merge inputs from this directory into the seed_dir. Needs /target subdirectory.',
)
parser.add_argument(
'-g',
'--generate',
action='store_true',
help='Create new corpus seeds (or extend the existing ones) by running'
' the given targets for a finite number of times. Outputs them to'
' the passed seed_dir.'
)
args = parser.parse_args()
logging.basicConfig(
format='%(message)s',
level=int(args.loglevel) if args.loglevel.isdigit() else args.loglevel.upper(),
)
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
if not config["components"].getboolean("ENABLE_FUZZ"):
logging.error("Must have fuzz targets built")
sys.exit(1)
test_list_all = parse_test_list(makefile=os.path.join(config["environment"]["SRCDIR"], 'src', 'Makefile.test.include'))
if not test_list_all:
logging.error("No fuzz targets found")
sys.exit(1)
logging.debug("{} fuzz target(s) found: {}".format(len(test_list_all), " ".join(sorted(test_list_all))))
args.target = args.target or test_list_all
test_list_error = list(set(args.target).difference(set(test_list_all)))
if test_list_error:
logging.error("Unknown fuzz targets selected: {}".format(test_list_error))
test_list_selection = list(set(test_list_all).intersection(set(args.target)))
if not test_list_selection:
logging.error("No fuzz targets selected")
if args.exclude:
for excluded_target in args.exclude.split(","):
if excluded_target not in test_list_selection:
logging.error("Target \"{}\" not found in current target list.".format(excluded_target))
continue
test_list_selection.remove(excluded_target)
test_list_selection.sort()
logging.info("{} of {} detected fuzz target(s) selected: {}".format(len(test_list_selection), len(test_list_all), " ".join(test_list_selection)))
if not args.generate:
test_list_seedless = []
for t in test_list_selection:
corpus_path = os.path.join(args.seed_dir, t)
if not os.path.exists(corpus_path) or len(os.listdir(corpus_path)) == 0:
test_list_seedless.append(t)
test_list_seedless.sort()
if test_list_seedless:
logging.info(
"Fuzzing harnesses lacking a seed corpus: {}".format(
" ".join(test_list_seedless)
)
)
logging.info("Please consider adding a fuzz seed corpus at https://github.com/crown-core/qa-assets")
try:
help_output = subprocess.run(
args=[
os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', test_list_selection[0]),
'-help=1',
],
timeout=20,
check=True,
stderr=subprocess.PIPE,
universal_newlines=True,
).stderr
if "libFuzzer" not in help_output:
logging.error("Must be built with libFuzzer")
sys.exit(1)
except subprocess.TimeoutExpired:
logging.error("subprocess timed out: Currently only libFuzzer is supported")
sys.exit(1)
with ThreadPoolExecutor(max_workers=args.par) as fuzz_pool:
if args.generate:
return generate_corpus_seeds(
fuzz_pool=fuzz_pool,
build_dir=config["environment"]["BUILDDIR"],
seed_dir=args.seed_dir,
targets=test_list_selection,
)
if args.m_dir:
merge_inputs(
fuzz_pool=fuzz_pool,
corpus=args.seed_dir,
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
merge_dir=args.m_dir,
)
return
run_once(
fuzz_pool=fuzz_pool,
corpus=args.seed_dir,
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
use_valgrind=args.valgrind,
)
def generate_corpus_seeds(*, fuzz_pool, build_dir, seed_dir, targets):
logging.info("Generating corpus seeds to {}".format(seed_dir))
def job(command):
logging.debug("Running '{}'\n".format(" ".join(command)))
logging.debug("Command '{}' output:\n'{}'\n".format(
' '.join(command),
subprocess.run(command, check=True, stderr=subprocess.PIPE,
universal_newlines=True).stderr
))
futures = []
for target in targets:
target_seed_dir = os.path.join(seed_dir, target)
os.makedirs(target_seed_dir, exist_ok=True)
command = [
os.path.join(build_dir, "src", "test", "fuzz", target),
"-runs=100000",
target_seed_dir,
]
futures.append(fuzz_pool.submit(job, command))
for future in as_completed(futures):
future.result()
def merge_inputs(*, fuzz_pool, corpus, test_list, build_dir, merge_dir):
logging.info("Merge the inputs in the passed dir into the seed_dir. Passed dir {}".format(merge_dir))
jobs = []
for t in test_list:
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-merge=1',
'-use_value_profile=1',
os.path.join(corpus, t),
os.path.join(merge_dir, t),
]
os.makedirs(os.path.join(corpus, t), exist_ok=True)
os.makedirs(os.path.join(merge_dir, t), exist_ok=True)
def job(t, args):
output = 'Run {} with args {}\n'.format(t, " ".join(args))
output += subprocess.run(args, check=True, stderr=subprocess.PIPE, universal_newlines=True).stderr
logging.debug(output)
jobs.append(fuzz_pool.submit(job, t, args))
for future in as_completed(jobs):
future.result()
def run_once(*, fuzz_pool, corpus, test_list, build_dir, use_valgrind):
jobs = []
for t in test_list:
corpus_path = os.path.join(corpus, t)
os.makedirs(corpus_path, exist_ok=True)
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-runs=1',
corpus_path,
]
if use_valgrind:
args = ['valgrind', '--quiet', '--error-exitcode=1'] + args
def job(t, args):
output = 'Run {} with args {}'.format(t, args)
result = subprocess.run(args, stderr=subprocess.PIPE, universal_newlines=True)
output += result.stderr
return output, result
jobs.append(fuzz_pool.submit(job, t, args))
for future in as_completed(jobs):
output, result = future.result()
logging.debug(output)
try:
result.check_returncode()
except subprocess.CalledProcessError as e:
if e.stdout:
logging.info(e.stdout)
if e.stderr:
logging.info(e.stderr)
logging.info("Target \"{}\" failed with exit code {}".format(" ".join(result.args), e.returncode))
sys.exit(1)
def parse_test_list(makefile):
with open(makefile, encoding='utf-8') as makefile_test:
test_list_all = []
read_targets = False
for line in makefile_test.readlines():
line = line.strip().replace('test/fuzz/', '').replace(' \\', '')
if read_targets:
if not line:
break
test_list_all.append(line)
continue
if line == 'FUZZ_TARGETS =':
read_targets = True
return test_list_all
if __name__ == '__main__':
main()
| true
| true
|
7906217d435e300f49b2b6ec9acfa86053ad1df5
| 89
|
py
|
Python
|
ubfcore/apps.py
|
himasnhu1/example
|
27db7941c5f7bd16ffb407654818012e43d82f7e
|
[
"MIT"
] | null | null | null |
ubfcore/apps.py
|
himasnhu1/example
|
27db7941c5f7bd16ffb407654818012e43d82f7e
|
[
"MIT"
] | null | null | null |
ubfcore/apps.py
|
himasnhu1/example
|
27db7941c5f7bd16ffb407654818012e43d82f7e
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class UbfCoreConfig(AppConfig):
name = 'ubfcore'
| 14.833333
| 33
| 0.752809
|
from django.apps import AppConfig
class UbfCoreConfig(AppConfig):
name = 'ubfcore'
| true
| true
|
790621cd53ec2386a1f3b9413a40673274cc76fd
| 2,184
|
py
|
Python
|
usr/examples/09-Feature-Detection/keypoints.py
|
ermay12/openmv
|
ed1cd12026b8bd7363b835f5c1b90e5d3d710151
|
[
"MIT"
] | 1
|
2018-02-27T09:23:51.000Z
|
2018-02-27T09:23:51.000Z
|
usr/examples/09-Feature-Detection/keypoints.py
|
guohuijiang1234/openmv
|
9c3e9109ec1a2b68bb34107557945bfa379d3a0e
|
[
"MIT"
] | null | null | null |
usr/examples/09-Feature-Detection/keypoints.py
|
guohuijiang1234/openmv
|
9c3e9109ec1a2b68bb34107557945bfa379d3a0e
|
[
"MIT"
] | null | null | null |
# Object tracking with keypoints example.
# Show the camera an object and then run the script. A set of keypoints will be extracted
# once and then tracked in the following frames. If you want a new set of keypoints re-run
# the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints.
import sensor, time, image
# Reset sensor
sensor.reset()
# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False, value=100)
def draw_keypoints(img, kpts):
print(kpts)
img.draw_keypoints(kpts)
img = sensor.snapshot()
time.sleep(1000)
kpts1 = None
# NOTE: uncomment to load a keypoints descriptor from file
#kpts1 = image.load_descriptor("/desc.orb")
#img = sensor.snapshot()
#draw_keypoints(img, kpts1)
clock = time.clock()
while (True):
clock.tick()
img = sensor.snapshot()
if (kpts1 == None):
# NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
draw_keypoints(img, kpts1)
else:
# NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract
# keypoints from the first scale only, which will match one of the scales in the first descriptor.
kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)
if (kpts2):
match = image.match_descriptor(kpts1, kpts2, threshold=85)
if (match.count()>10):
# If we have at least n "good matches"
# Draw bounding rectangle and cross.
img.draw_rectangle(match.rect())
img.draw_cross(match.cx(), match.cy(), size=10)
print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta()))
# NOTE: uncomment if you want to draw the keypoints
#img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)
# Draw FPS
img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
| 37.655172
| 106
| 0.687729
|
import sensor, time, image
sensor.reset()
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False, value=100)
def draw_keypoints(img, kpts):
print(kpts)
img.draw_keypoints(kpts)
img = sensor.snapshot()
time.sleep(1000)
kpts1 = None
clock = time.clock()
while (True):
clock.tick()
img = sensor.snapshot()
if (kpts1 == None):
kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
draw_keypoints(img, kpts1)
else:
kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)
if (kpts2):
match = image.match_descriptor(kpts1, kpts2, threshold=85)
if (match.count()>10):
img.draw_rectangle(match.rect())
img.draw_cross(match.cx(), match.cy(), size=10)
print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta()))
img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
| true
| true
|
7906251dbf3c4f92a779bbac39f599cf597effec
| 17,720
|
py
|
Python
|
mne/tests/test_report.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | null | null | null |
mne/tests/test_report.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 23
|
2017-09-12T11:08:26.000Z
|
2019-10-04T11:11:29.000Z
|
mne/tests/test_report.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 3
|
2019-01-28T13:48:00.000Z
|
2019-07-10T16:02:11.000Z
|
# -*- coding: utf-8 -*-
# Authors: Mainak Jas <mainak@neuro.hut.fi>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import copy
import glob
import os
import os.path as op
import shutil
import numpy as np
from numpy.testing import assert_equal
import pytest
from matplotlib import pyplot as plt
from mne import Epochs, read_events, read_evokeds
from mne.io import read_raw_fif
from mne.datasets import testing
from mne.report import Report, open_report, _ReportScraper
from mne.utils import (_TempDir, requires_mayavi, requires_nibabel, Bunch,
run_tests_if_main, traits_test, requires_h5py)
from mne.viz import plot_alignment
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(data_dir, 'SSS', 'test_move_anon_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
def _get_example_figures():
"""Create two example figures."""
fig1 = plt.plot([1, 2], [1, 2])[0].figure
fig2 = plt.plot([3, 4], [3, 4])[0].figure
return [fig1, fig2]
@pytest.mark.slowtest
@testing.requires_testing_data
def test_render_report():
"""Test rendering -*.fif files for mne report."""
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for a, b in [[raw_fname, raw_fname_new],
[ms_fname, ms_fname_new],
[event_fname, event_fname_new],
[cov_fname, cov_fname_new],
[fwd_fname, fwd_fname_new],
[inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
# create and add -epo.fif and -ave.fif files
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
# Speed it up by picking channels
raw = read_raw_fif(raw_fname_new, preload=True)
raw.pick_channels(['MEG 0111', 'MEG 0121'])
raw.del_proj()
epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
epochs.save(epochs_fname, overwrite=True)
# This can take forever (stall Travis), so let's make it fast
# Also, make sure crop range is wide enough to avoid rendering bug
epochs.average().crop(0.1, 0.2).save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
assert repr(report)
# Check correct paths and filenames
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert (op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert (''.join(report.html).find(op.basename(fname)) != -1)
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
assert_equal(len(report.fnames), len(report))
# Check saving functionality
report.data_path = tempdir
fname = op.join(tempdir, 'report.html')
report.save(fname=fname, open_browser=False)
assert (op.isfile(fname))
with open(fname, 'rb') as fid:
html = fid.read().decode('utf-8')
assert '(MaxShield on)' in html
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving same report to new filename
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert (op.isfile(op.join(tempdir, 'report2.html')))
# Check overwriting file
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
assert (op.isfile(op.join(tempdir, 'report.html')))
# Check pattern matching with multiple patterns
pattern = ['*raw.fif', '*eve.fif']
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, pattern=pattern)
assert (repr(report))
fnames = glob.glob(op.join(tempdir, '*.raw')) + \
glob.glob(op.join(tempdir, '*.raw'))
for fname in fnames:
assert (op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert (''.join(report.html).find(op.basename(fname)) != -1)
pytest.raises(ValueError, Report, image_format='foo')
pytest.raises(ValueError, Report, image_format=None)
# SVG rendering
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
image_format='svg')
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
# ndarray support smoke test
report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
with pytest.raises(TypeError, match='Each fig must be a'):
report.add_figs_to_section('foo', 'caption', 'section')
with pytest.raises(TypeError, match='Each fig must be a'):
report.add_figs_to_section(['foo'], 'caption', 'section')
@testing.requires_testing_data
def test_report_raw_psd_and_date():
"""Test report raw PSD and DATE_NONE functionality."""
with pytest.raises(TypeError, match='dict'):
Report(raw_psd='foo')
tempdir = _TempDir()
raw = read_raw_fif(raw_fname).crop(0, 1.).load_data()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
raw.save(raw_fname_new)
report = Report(raw_psd=True)
report.parse_folder(data_path=tempdir, render_bem=False,
on_error='raise')
assert isinstance(report.html, list)
assert 'PSD' in ''.join(report.html)
assert 'GMT' in ''.join(report.html)
# DATE_NONE functionality
report = Report()
raw.anonymize()
raw.save(raw_fname_new, overwrite=True)
report.parse_folder(data_path=tempdir, render_bem=False,
on_error='raise')
assert isinstance(report.html, list)
assert 'GMT' not in ''.join(report.html)
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_render_add_sections():
"""Test adding figures/images to section."""
tempdir = _TempDir()
report = Report(subjects_dir=subjects_dir)
# Check add_figs_to_section functionality
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, # test non-list input
captions=['evoked response'], scale=1.2,
image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
captions='H')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=0, image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=1e-10, image_format='svg')
# need to recreate because calls above change size
fig = plt.plot([1, 2], [1, 2])[0].figure
# Check add_images_to_section with png
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
pytest.raises(ValueError, report.add_images_to_section,
fnames=[img_fname, img_fname], captions='H')
pytest.raises(ValueError, report.add_images_to_section,
fnames=['foobar.xxx'], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
fig = plot_alignment(evoked.info, trans_fname, subject='sample',
subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, # test non-list input
captions='random image', scale=1.2)
assert (repr(report))
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_mayavi
@traits_test
@requires_nibabel()
def test_render_mri():
"""Test rendering MRI for mne report."""
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for a, b in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*')
report.save(op.join(tempdir, 'report.html'), open_browser=False)
assert repr(report)
report.add_bem_to_section('sample', caption='extra', section='foo',
subjects_dir=subjects_dir, decim=30)
report.save(op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
"""Test rendering MRI without BEM for mne report."""
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=tempdir)
report.parse_folder(tempdir, render_bem=False)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
"""Test adding html str to mne report."""
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
caption, section = 'html', 'html_section'
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index('report_' + section)
html_compare = report.html[idx]
assert (html in html_compare)
assert (repr(report))
def test_add_slider_to_section():
"""Test adding a slider with a series of images to mne report."""
tempdir = _TempDir()
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = _get_example_figures()
report.add_slider_to_section(figs, section=section, title='my title')
assert report.fnames[0] == 'my title-#-report_slider_section-#-custom'
report.save(op.join(tempdir, 'report.html'), open_browser=False)
pytest.raises(NotImplementedError, report.add_slider_to_section,
[figs, figs])
pytest.raises(ValueError, report.add_slider_to_section, figs, ['wug'])
pytest.raises(TypeError, report.add_slider_to_section, figs, 'wug')
# need at least 2
pytest.raises(ValueError, report.add_slider_to_section, figs[:1], 'wug')
# Smoke test that SVG w/unicode can be added
report = Report()
fig, ax = plt.subplots()
ax.set_xlabel(u'μ')
report.add_slider_to_section([fig] * 2, image_format='svg')
def test_validate_input():
"""Test Report input validation."""
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.',
'Second letter of the alphabet',
'Third letter of the alphabet']
pytest.raises(ValueError, report._validate_input, items, captions[:-1],
section, comments=None)
pytest.raises(ValueError, report._validate_input, items, captions, section,
comments=comments[:-1])
values = report._validate_input(items, captions, section, comments=None)
items_new, captions_new, comments_new = values
assert_equal(len(comments_new), len(items))
@requires_h5py
def test_open_report():
"""Test the open_report function."""
tempdir = _TempDir()
hdf5 = op.join(tempdir, 'report.h5')
# Test creating a new report through the open_report function
fig1 = _get_example_figures()[0]
with open_report(hdf5, subjects_dir=subjects_dir) as report:
assert report.subjects_dir == subjects_dir
assert report._fname == hdf5
report.add_figs_to_section(figs=fig1, captions=['evoked response'])
# Exiting the context block should have triggered saving to HDF5
assert op.exists(hdf5)
# Load the HDF5 version of the report and check equivalence
report2 = open_report(hdf5)
assert report2._fname == hdf5
assert report2.subjects_dir == report.subjects_dir
assert report2.html == report.html
assert report2.__getstate__() == report.__getstate__()
assert '_fname' not in report2.__getstate__()
# Check parameters when loading a report
pytest.raises(ValueError, open_report, hdf5, foo='bar') # non-existing
pytest.raises(ValueError, open_report, hdf5, subjects_dir='foo')
open_report(hdf5, subjects_dir=subjects_dir) # This should work
# Check that the context manager doesn't swallow exceptions
with pytest.raises(ZeroDivisionError):
with open_report(hdf5, subjects_dir=subjects_dir) as report:
1 / 0
def test_remove():
"""Test removing figures from a report."""
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'figure1', 'mysection')
r.add_slider_to_section([fig1, fig2], title='figure1',
section='othersection')
r.add_figs_to_section(fig2, 'figure1', 'mysection')
r.add_figs_to_section(fig2, 'figure2', 'mysection')
# Test removal by caption
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1')
assert removed_index == 2
assert len(r2.html) == 3
assert r2.html[0] == r.html[0]
assert r2.html[1] == r.html[1]
assert r2.html[2] == r.html[3]
# Test restricting to section
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1', section='othersection')
assert removed_index == 1
assert len(r2.html) == 3
assert r2.html[0] == r.html[0]
assert r2.html[1] == r.html[2]
assert r2.html[2] == r.html[3]
# Test removal of empty sections
r2 = copy.deepcopy(r)
r2.remove(caption='figure1', section='othersection')
assert r2.sections == ['mysection']
assert r2._sectionvars == {'mysection': 'report_mysection'}
def test_add_or_replace():
"""Test replacing existing figures in a report."""
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'othersection')
r.add_figs_to_section(fig2, 'nonduplicate', 'mysection')
# By default, replace=False, so all figures should be there
assert len(r.html) == 4
old_r = copy.deepcopy(r)
# Re-add fig1 with replace=True, it should overwrite the last occurrence of
# fig1 in section 'mysection'.
r.add_figs_to_section(fig2, 'duplicate', 'mysection', replace=True)
assert len(r.html) == 4
assert r.html[1] != old_r.html[1] # This figure should have changed
# All other figures should be the same
assert r.html[0] == old_r.html[0]
assert r.html[2] == old_r.html[2]
assert r.html[3] == old_r.html[3]
def test_scraper(tmpdir):
"""Test report scraping."""
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'a', 'mysection')
r.add_figs_to_section(fig2, 'b', 'mysection')
# Mock a Sphinx + sphinx_gallery config
app = Bunch(builder=Bunch(srcdir=str(tmpdir),
outdir=op.join(str(tmpdir), '_build', 'html')))
scraper = _ReportScraper()
scraper.app = app
gallery_conf = dict(src_dir=app.builder.srcdir, builder_name='html')
img_fname = op.join(app.builder.srcdir, 'auto_examples', 'images',
'sg_img.png')
target_file = op.join(app.builder.srcdir, 'auto_examples', 'sg.py')
os.makedirs(op.dirname(img_fname))
os.makedirs(app.builder.outdir)
block_vars = dict(image_path_iterator=(img for img in [img_fname]),
example_globals=dict(a=1), target_file=target_file)
# Nothing yet
block = None
rst = scraper(block, block_vars, gallery_conf)
assert rst == ''
# Still nothing
block_vars['example_globals']['r'] = r
rst = scraper(block, block_vars, gallery_conf)
# Once it's saved, add it
assert rst == ''
fname = op.join(str(tmpdir), 'my_html.html')
r.save(fname, open_browser=False)
rst = scraper(block, block_vars, gallery_conf)
out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html')
assert not op.isfile(out_html)
os.makedirs(op.join(app.builder.outdir, 'auto_examples'))
scraper.copyfiles()
assert op.isfile(out_html)
assert rst.count('"') == 6
assert "<iframe" in rst
assert op.isfile(img_fname.replace('png', 'svg'))
run_tests_if_main()
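# A minimal end-to-end sketch of the Report workflow these tests exercise
# (figure and file names here are hypothetical, not taken from the suite):
#     report = Report()
#     fig = plt.plot([1, 2], [1, 2])[0].figure
#     report.add_figs_to_section(figs=fig, captions='demo', section='demo')
#     report.save('demo_report.html', open_browser=False, overwrite=True)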
| 39.116998
| 79
| 0.66772
|
import copy
import glob
import os
import os.path as op
import shutil
import numpy as np
from numpy.testing import assert_equal
import pytest
from matplotlib import pyplot as plt
from mne import Epochs, read_events, read_evokeds
from mne.io import read_raw_fif
from mne.datasets import testing
from mne.report import Report, open_report, _ReportScraper
from mne.utils import (_TempDir, requires_mayavi, requires_nibabel, Bunch,
run_tests_if_main, traits_test, requires_h5py)
from mne.viz import plot_alignment
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(data_dir, 'SSS', 'test_move_anon_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
def _get_example_figures():
fig1 = plt.plot([1, 2], [1, 2])[0].figure
fig2 = plt.plot([3, 4], [3, 4])[0].figure
return [fig1, fig2]
@pytest.mark.slowtest
@testing.requires_testing_data
def test_render_report():
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for a, b in [[raw_fname, raw_fname_new],
[ms_fname, ms_fname_new],
[event_fname, event_fname_new],
[cov_fname, cov_fname_new],
[fwd_fname, fwd_fname_new],
[inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
raw = read_raw_fif(raw_fname_new, preload=True)
raw.pick_channels(['MEG 0111', 'MEG 0121'])
raw.del_proj()
epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
epochs.save(epochs_fname, overwrite=True)
# Also, make sure crop range is wide enough to avoid rendering bug
epochs.average().crop(0.1, 0.2).save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
assert repr(report)
# Check correct paths and filenames
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert (op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert (''.join(report.html).find(op.basename(fname)) != -1)
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
assert_equal(len(report.fnames), len(report))
# Check saving functionality
report.data_path = tempdir
fname = op.join(tempdir, 'report.html')
report.save(fname=fname, open_browser=False)
assert (op.isfile(fname))
with open(fname, 'rb') as fid:
html = fid.read().decode('utf-8')
assert '(MaxShield on)' in html
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving same report to new filename
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert (op.isfile(op.join(tempdir, 'report2.html')))
# Check overwriting file
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
assert (op.isfile(op.join(tempdir, 'report.html')))
# Check pattern matching with multiple patterns
pattern = ['*raw.fif', '*eve.fif']
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, pattern=pattern)
assert (repr(report))
fnames = glob.glob(op.join(tempdir, '*.raw')) + \
glob.glob(op.join(tempdir, '*.raw'))
for fname in fnames:
assert (op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert (''.join(report.html).find(op.basename(fname)) != -1)
pytest.raises(ValueError, Report, image_format='foo')
pytest.raises(ValueError, Report, image_format=None)
# SVG rendering
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
image_format='svg')
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
# ndarray support smoke test
report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
with pytest.raises(TypeError, match='Each fig must be a'):
report.add_figs_to_section('foo', 'caption', 'section')
with pytest.raises(TypeError, match='Each fig must be a'):
report.add_figs_to_section(['foo'], 'caption', 'section')
@testing.requires_testing_data
def test_report_raw_psd_and_date():
with pytest.raises(TypeError, match='dict'):
Report(raw_psd='foo')
tempdir = _TempDir()
raw = read_raw_fif(raw_fname).crop(0, 1.).load_data()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
raw.save(raw_fname_new)
report = Report(raw_psd=True)
report.parse_folder(data_path=tempdir, render_bem=False,
on_error='raise')
assert isinstance(report.html, list)
assert 'PSD' in ''.join(report.html)
assert 'GMT' in ''.join(report.html)
# DATE_NONE functionality
report = Report()
raw.anonymize()
raw.save(raw_fname_new, overwrite=True)
report.parse_folder(data_path=tempdir, render_bem=False,
on_error='raise')
assert isinstance(report.html, list)
assert 'GMT' not in ''.join(report.html)
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_render_add_sections():
tempdir = _TempDir()
report = Report(subjects_dir=subjects_dir)
# Check add_figs_to_section functionality
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, # test non-list input
captions=['evoked response'], scale=1.2,
image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
captions='H')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=0, image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=1e-10, image_format='svg')
# need to recreate because calls above change size
fig = plt.plot([1, 2], [1, 2])[0].figure
# Check add_images_to_section with png
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
pytest.raises(ValueError, report.add_images_to_section,
fnames=[img_fname, img_fname], captions='H')
pytest.raises(ValueError, report.add_images_to_section,
fnames=['foobar.xxx'], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
fig = plot_alignment(evoked.info, trans_fname, subject='sample',
subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, # test non-list input
captions='random image', scale=1.2)
assert (repr(report))
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_mayavi
@traits_test
@requires_nibabel()
def test_render_mri():
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for a, b in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*')
report.save(op.join(tempdir, 'report.html'), open_browser=False)
assert repr(report)
report.add_bem_to_section('sample', caption='extra', section='foo',
subjects_dir=subjects_dir, decim=30)
report.save(op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=tempdir)
report.parse_folder(tempdir, render_bem=False)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
caption, section = 'html', 'html_section'
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index('report_' + section)
html_compare = report.html[idx]
assert (html in html_compare)
assert (repr(report))
def test_add_slider_to_section():
tempdir = _TempDir()
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = _get_example_figures()
report.add_slider_to_section(figs, section=section, title='my title')
    assert report.fnames[0] == 'my title-#-report_slider_section-#-custom'
    report.save(op.join(tempdir, 'report.html'), open_browser=False)
pytest.raises(NotImplementedError, report.add_slider_to_section,
[figs, figs])
pytest.raises(ValueError, report.add_slider_to_section, figs, ['wug'])
pytest.raises(TypeError, report.add_slider_to_section, figs, 'wug')
# need at least 2
pytest.raises(ValueError, report.add_slider_to_section, figs[:1], 'wug')
# Smoke test that SVG w/unicode can be added
report = Report()
fig, ax = plt.subplots()
ax.set_xlabel(u'μ')
report.add_slider_to_section([fig] * 2, image_format='svg')
def test_validate_input():
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.',
'Second letter of the alphabet',
'Third letter of the alphabet']
pytest.raises(ValueError, report._validate_input, items, captions[:-1],
section, comments=None)
pytest.raises(ValueError, report._validate_input, items, captions, section,
comments=comments[:-1])
values = report._validate_input(items, captions, section, comments=None)
items_new, captions_new, comments_new = values
assert_equal(len(comments_new), len(items))
@requires_h5py
def test_open_report():
tempdir = _TempDir()
hdf5 = op.join(tempdir, 'report.h5')
# Test creating a new report through the open_report function
fig1 = _get_example_figures()[0]
with open_report(hdf5, subjects_dir=subjects_dir) as report:
assert report.subjects_dir == subjects_dir
assert report._fname == hdf5
report.add_figs_to_section(figs=fig1, captions=['evoked response'])
# Exiting the context block should have triggered saving to HDF5
assert op.exists(hdf5)
# Load the HDF5 version of the report and check equivalence
report2 = open_report(hdf5)
assert report2._fname == hdf5
assert report2.subjects_dir == report.subjects_dir
assert report2.html == report.html
assert report2.__getstate__() == report.__getstate__()
assert '_fname' not in report2.__getstate__()
# Check parameters when loading a report
pytest.raises(ValueError, open_report, hdf5, foo='bar') # non-existing
pytest.raises(ValueError, open_report, hdf5, subjects_dir='foo')
open_report(hdf5, subjects_dir=subjects_dir) # This should work
# Check that the context manager doesn't swallow exceptions
with pytest.raises(ZeroDivisionError):
with open_report(hdf5, subjects_dir=subjects_dir) as report:
1 / 0
def test_remove():
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'figure1', 'mysection')
r.add_slider_to_section([fig1, fig2], title='figure1',
section='othersection')
r.add_figs_to_section(fig2, 'figure1', 'mysection')
r.add_figs_to_section(fig2, 'figure2', 'mysection')
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1')
assert removed_index == 2
assert len(r2.html) == 3
assert r2.html[0] == r.html[0]
assert r2.html[1] == r.html[1]
assert r2.html[2] == r.html[3]
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1', section='othersection')
assert removed_index == 1
assert len(r2.html) == 3
assert r2.html[0] == r.html[0]
assert r2.html[1] == r.html[2]
assert r2.html[2] == r.html[3]
r2 = copy.deepcopy(r)
r2.remove(caption='figure1', section='othersection')
assert r2.sections == ['mysection']
assert r2._sectionvars == {'mysection': 'report_mysection'}
def test_add_or_replace():
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'othersection')
r.add_figs_to_section(fig2, 'nonduplicate', 'mysection')
assert len(r.html) == 4
old_r = copy.deepcopy(r)
r.add_figs_to_section(fig2, 'duplicate', 'mysection', replace=True)
assert len(r.html) == 4
assert r.html[1] != old_r.html[1]
assert r.html[0] == old_r.html[0]
assert r.html[2] == old_r.html[2]
assert r.html[3] == old_r.html[3]
def test_scraper(tmpdir):
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'a', 'mysection')
r.add_figs_to_section(fig2, 'b', 'mysection')
app = Bunch(builder=Bunch(srcdir=str(tmpdir),
outdir=op.join(str(tmpdir), '_build', 'html')))
scraper = _ReportScraper()
scraper.app = app
gallery_conf = dict(src_dir=app.builder.srcdir, builder_name='html')
img_fname = op.join(app.builder.srcdir, 'auto_examples', 'images',
'sg_img.png')
target_file = op.join(app.builder.srcdir, 'auto_examples', 'sg.py')
os.makedirs(op.dirname(img_fname))
os.makedirs(app.builder.outdir)
block_vars = dict(image_path_iterator=(img for img in [img_fname]),
example_globals=dict(a=1), target_file=target_file)
block = None
rst = scraper(block, block_vars, gallery_conf)
assert rst == ''
block_vars['example_globals']['r'] = r
rst = scraper(block, block_vars, gallery_conf)
assert rst == ''
fname = op.join(str(tmpdir), 'my_html.html')
r.save(fname, open_browser=False)
rst = scraper(block, block_vars, gallery_conf)
out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html')
assert not op.isfile(out_html)
os.makedirs(op.join(app.builder.outdir, 'auto_examples'))
scraper.copyfiles()
assert op.isfile(out_html)
assert rst.count('"') == 6
assert "<iframe" in rst
assert op.isfile(img_fname.replace('png', 'svg'))
run_tests_if_main()
| true
| true
|
7906259d219aba7b385e6547c88660c4dd2930d4
| 286
|
py
|
Python
|
mysite/urls.py
|
feraco/shifting-morals
|
115b80ca82d0715db49e593e1463a449ded0477c
|
[
"MIT"
] | 1
|
2018-06-27T17:58:45.000Z
|
2018-06-27T17:58:45.000Z
|
mysite/urls.py
|
feraco/shifting-morals
|
115b80ca82d0715db49e593e1463a449ded0477c
|
[
"MIT"
] | null | null | null |
mysite/urls.py
|
feraco/shifting-morals
|
115b80ca82d0715db49e593e1463a449ded0477c
|
[
"MIT"
] | 1
|
2019-03-21T12:56:24.000Z
|
2019-03-21T12:56:24.000Z
|
from django.urls import include, path
from django.contrib import admin
from django.views.generic import RedirectView
urlpatterns = [
path('polls/', include('polls.urls')),
path('admin/', admin.site.urls),
]
urlpatterns += [
path('', RedirectView.as_view(url='/polls/')),
]
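# Note: RedirectView issues a temporary (302) redirect by default; a sketch
# of opting into a permanent (301) redirect instead would be:
#     path('', RedirectView.as_view(url='/polls/', permanent=True)),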
| 23.833333
| 50
| 0.695804
|
from django.urls import include, path
from django.contrib import admin
from django.views.generic import RedirectView
urlpatterns = [
path('polls/', include('polls.urls')),
path('admin/', admin.site.urls),
]
urlpatterns += [
path('', RedirectView.as_view(url='/polls/')),
]
| true
| true
|
7906261835a6f0e5ae6c914ccb779bd44c37c2d2
| 6,356
|
py
|
Python
|
main.py
|
LucaZancato/stric
|
8daeeca48b8d0b2db8156e7f1c66c0956c133353
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
LucaZancato/stric
|
8daeeca48b8d0b2db8156e7f1c66c0956c133353
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
LucaZancato/stric
|
8daeeca48b8d0b2db8156e7f1c66c0956c133353
|
[
"Apache-2.0"
] | null | null | null |
import hydra
import os
import logging
import json
import numpy as np
import torch
import matplotlib.pyplot as plt
from collections import defaultdict
import json
from IPython import embed
# from AD_models import AD_Time_Series
# from AD_utils import AD_report, AD_dataset, plot_AD_dataset, AD_preprocessing
# import T_models, A_models
import stric.datasets as datasets
import stric.detection_models.time_series_models as models
import stric.detection_models.detector_models as detectors
from stric.detection_models.time_series_models.stric import InterpretableTCNFading
import stric.detection_models.detector_models.likelihood_ratio_estimators as likelihood_ratio_estimators
from stric.detection_models.detector_models.base_detector import Detector
@hydra.main(config_name="config/config_interpretable_model")
def main(cfg):
data_path = os.path.join(hydra.utils.get_original_cwd(), 'data')
dataset = datasets.__dict__[cfg.dataset.info.name](
past_len=cfg.t_model.info.memory_length,
fut_len=cfg.t_model.info.pred_length,
data_path=data_path,
dataset_subset=cfg.dataset.info.subname,
dataset_index=cfg.dataset.info.index,
normalize=cfg.dataset.preprocessing.normalize,
)
linear_kernel_sizes = cfg.t_model.info.linear_kernel_sizes
interpretable_kernel_sizes = cfg.t_model.info.memory_length if linear_kernel_sizes is None else linear_kernel_sizes
############# Trend parameters ################
HP_lams = np.logspace(8, 10, cfg.t_model.info.num_trends_filters) # Range of values of regularization parameter for HP filter (regulates the regularity of the trend component)
    HP_Ts = [interpretable_kernel_sizes] * cfg.t_model.info.num_trends_filters # Length of the HP filter (here we could choose large numbers if we want to increase the memory of the HP filter)
############# Periodic part parameters ################
theta = np.random.uniform(2 * np.pi / 20, 2 * np.pi / 10, cfg.t_model.info.n_periodic_poles).reshape(-1, 1)
r = np.random.uniform(1, 1, cfg.t_model.info.n_periodic_poles).reshape(-1, 1)
purely_periodic_poles = np.concatenate((r, theta), 1)
############# Linear part parameters ################
real_poles = np.random.uniform(-1, 1, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
theta = np.random.uniform(2 * np.pi / 20, 2 * np.pi / 10, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
r = np.random.uniform(0, 1, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
complex_poles = np.concatenate((r, theta), 1)
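    # Reading of the pole arrays above (inferred from the code, not from the
    # stric docs): each (r, theta) row encodes a pole by magnitude and angle,
    # so r == 1 pins the purely periodic poles to the unit circle, while the
    # complex poles drawn with r in [0, 1) are allowed to decay.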
model = InterpretableTCNFading(data=dataset, test_portion=cfg.t_model.info.test_portion,
memory_length=cfg.t_model.info.memory_length, pred_length=cfg.t_model.info.pred_length,
input_channels=dataset.n_timeseries, output_channels=dataset.n_timeseries,
linear_kernel_sizes=interpretable_kernel_sizes,
HP_lams=HP_lams, HP_Ts=HP_Ts,
purely_periodic_poles=purely_periodic_poles,
real_poles=real_poles,
complex_poles=complex_poles,
num_channels_TCN=cfg.t_model.info.num_channels_TCN,
kernel_size_TCN=cfg.t_model.info.kernel_size_TCN,
dropout_TCN=cfg.t_model.info.dropout_TCN,
learnable_filters=False, random_init=False,
).to(cfg.device)
model.train_model(bs=cfg.t_model.info.bs, lr=cfg.t_model.info.lr, epochs=cfg.t_model.info.epochs)
# To visualize predictions per time-series (this plots all the available time-series)
model.visualize(save=cfg.save_images)
# Test predictive performance of the trained_model: see prediction errors across time-series for training and test
ind = 4
train_residuals, test_residuals = model.get_residuals(ind=ind)
# Save results
predictions_logs = defaultdict(list)
predictions_logs['train_residuals'] = train_residuals.tolist()
predictions_logs['test_residuals'] = test_residuals.tolist()
predictions_logs['train_residuals_stds'] = train_residuals.std(0).tolist()
predictions_logs['test_residuals_stds'] = test_residuals.std(0).tolist()
predictions_logs['train_residuals_stds_mean'] = train_residuals.std(0).mean().item()
predictions_logs['test_residuals_stds_mean'] = test_residuals.std(0).mean().item()
with open('predictions_logs.json', 'w') as file:
json.dump(predictions_logs, file)
    # Plot interpretable decomposition
_ = model.get_components(ind=None, save=cfg.save_images)
# Anomaly detection
    ####### Detector's HPs ########
kernel_length_scale = cfg.a_model.info.kernel_length_scale * test_residuals.std()
kernel_type = cfg.a_model.info.kernel_type
kernel_hps = {'length_scales': torch.tensor(kernel_length_scale), 'train_length_scales': False,
'scale_factor': torch.tensor(1.), 'train_scale_factor': False}
ones = np.ones(dataset.n_timeseries)
    ####### Detector's HPs ########
a_model = Detector(test_residuals, detectors.__dict__[cfg.a_model.type],
cfg.a_model.info.kernel_type, kernel_hps, win_length=cfg.a_model.info.k, n=cfg.a_model.info.n,
device=cfg.device)
a_model.fit()
log_lik = a_model.get_future_log_lik()
a_labels = a_model.get_anomaly_labels(cfg.a_model.info.threshold * ones)
a_model.visualize_anomaly_scores(save=cfg.save_images)
a_model.visualize_anomaly_labels(thresholds=cfg.a_model.info.threshold * ones, save=cfg.save_images)
# Save results
anomaly_logs = defaultdict(list)
anomaly_logs['log_lik'] = log_lik.tolist()
anomaly_logs['a_labels'] = a_labels.tolist()
with open('anomaly_logs.json', 'w') as file:
json.dump(anomaly_logs, file)
if __name__ == "__main__":
main()
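# Hydra builds cfg from config/config_interpretable_model; any field used
# above can be overridden from the command line with dotted paths, e.g.
# (values hypothetical):
#     python main.py device=cpu t_model.info.epochs=10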
| 48.892308
| 193
| 0.65922
|
import hydra
import os
import logging
import json
import numpy as np
import torch
import matplotlib.pyplot as plt
from collections import defaultdict
import json
from IPython import embed
import stric.datasets as datasets
import stric.detection_models.time_series_models as models
import stric.detection_models.detector_models as detectors
from stric.detection_models.time_series_models.stric import InterpretableTCNFading
import stric.detection_models.detector_models.likelihood_ratio_estimators as likelihood_ratio_estimators
from stric.detection_models.detector_models.base_detector import Detector
@hydra.main(config_name="config/config_interpretable_model")
def main(cfg):
data_path = os.path.join(hydra.utils.get_original_cwd(), 'data')
dataset = datasets.__dict__[cfg.dataset.info.name](
past_len=cfg.t_model.info.memory_length,
fut_len=cfg.t_model.info.pred_length,
data_path=data_path,
dataset_subset=cfg.dataset.info.subname,
dataset_index=cfg.dataset.info.index,
normalize=cfg.dataset.preprocessing.normalize,
)
linear_kernel_sizes = cfg.t_model.info.linear_kernel_sizes
interpretable_kernel_sizes = cfg.t_model.info.memory_length if linear_kernel_sizes is None else linear_kernel_sizes
    HP_lams = np.logspace(8, 10, cfg.t_model.info.num_trends_filters)
    HP_Ts = [interpretable_kernel_sizes] * cfg.t_model.info.num_trends_filters
    theta = np.random.uniform(2 * np.pi / 20, 2 * np.pi / 10, cfg.t_model.info.n_periodic_poles).reshape(-1, 1)
    r = np.random.uniform(1, 1, cfg.t_model.info.n_periodic_poles).reshape(-1, 1)
    purely_periodic_poles = np.concatenate((r, theta), 1)
    real_poles = np.random.uniform(-1, 1, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
    theta = np.random.uniform(2 * np.pi / 20, 2 * np.pi / 10, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
    r = np.random.uniform(0, 1, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
    complex_poles = np.concatenate((r, theta), 1)
    model = InterpretableTCNFading(data=dataset, test_portion=cfg.t_model.info.test_portion,
                                   memory_length=cfg.t_model.info.memory_length, pred_length=cfg.t_model.info.pred_length,
                                   input_channels=dataset.n_timeseries, output_channels=dataset.n_timeseries,
                                   linear_kernel_sizes=interpretable_kernel_sizes,
                                   HP_lams=HP_lams, HP_Ts=HP_Ts,
                                   purely_periodic_poles=purely_periodic_poles,
                                   real_poles=real_poles,
                                   complex_poles=complex_poles,
                                   num_channels_TCN=cfg.t_model.info.num_channels_TCN,
                                   kernel_size_TCN=cfg.t_model.info.kernel_size_TCN,
                                   dropout_TCN=cfg.t_model.info.dropout_TCN,
                                   learnable_filters=False, random_init=False,
                                   ).to(cfg.device)
    model.train_model(bs=cfg.t_model.info.bs, lr=cfg.t_model.info.lr, epochs=cfg.t_model.info.epochs)
    model.visualize(save=cfg.save_images)
    ind = 4
    train_residuals, test_residuals = model.get_residuals(ind=ind)
    predictions_logs = defaultdict(list)
    predictions_logs['train_residuals'] = train_residuals.tolist()
predictions_logs['test_residuals'] = test_residuals.tolist()
predictions_logs['train_residuals_stds'] = train_residuals.std(0).tolist()
predictions_logs['test_residuals_stds'] = test_residuals.std(0).tolist()
predictions_logs['train_residuals_stds_mean'] = train_residuals.std(0).mean().item()
predictions_logs['test_residuals_stds_mean'] = test_residuals.std(0).mean().item()
with open('predictions_logs.json', 'w') as file:
json.dump(predictions_logs, file)
_ = model.get_components(ind=None, save=cfg.save_images)
    kernel_length_scale = cfg.a_model.info.kernel_length_scale * test_residuals.std()
    kernel_type = cfg.a_model.info.kernel_type
    kernel_hps = {'length_scales': torch.tensor(kernel_length_scale), 'train_length_scales': False,
'scale_factor': torch.tensor(1.), 'train_scale_factor': False}
ones = np.ones(dataset.n_timeseries)
    a_model = Detector(test_residuals, detectors.__dict__[cfg.a_model.type],
cfg.a_model.info.kernel_type, kernel_hps, win_length=cfg.a_model.info.k, n=cfg.a_model.info.n,
device=cfg.device)
a_model.fit()
log_lik = a_model.get_future_log_lik()
a_labels = a_model.get_anomaly_labels(cfg.a_model.info.threshold * ones)
a_model.visualize_anomaly_scores(save=cfg.save_images)
a_model.visualize_anomaly_labels(thresholds=cfg.a_model.info.threshold * ones, save=cfg.save_images)
anomaly_logs = defaultdict(list)
anomaly_logs['log_lik'] = log_lik.tolist()
anomaly_logs['a_labels'] = a_labels.tolist()
with open('anomaly_logs.json', 'w') as file:
json.dump(anomaly_logs, file)
if __name__ == "__main__":
main()
| true
| true
|
79062624af2b18168386d973f0244b1f3a54dea5
| 1,058
|
py
|
Python
|
bent/weakprime.py
|
rgc-retired/math_puzzles
|
0f96fc0f4d53f9ece53fb7af02c037067f710fac
|
[
"MIT"
] | null | null | null |
bent/weakprime.py
|
rgc-retired/math_puzzles
|
0f96fc0f4d53f9ece53fb7af02c037067f710fac
|
[
"MIT"
] | null | null | null |
bent/weakprime.py
|
rgc-retired/math_puzzles
|
0f96fc0f4d53f9ece53fb7af02c037067f710fac
|
[
"MIT"
] | null | null | null |
import sympy
from sympy import *
def check_weak_prime(n):
if not isprime(n):
return(False)
digits=[int(i) for i in str(n)]
    # For each digit position, try every other digit value; if any
    # substitution yields a prime, then this is not a weak prime.
for position in range(len(digits)):
digits2=[i for i in digits]
for j in range(10):
if j != digits[position]:
digits2[position]=j
m=0
for i in digits2:
m=10*m+i
if isprime(m):
return(False)
return(True)
def search_palindromic_weak_prime(nlow,nhigh):
n=nlow
if not isprime(n):
n=nextprime(n)
while(n<nhigh):
if check_weak_prime(n):
print("Weak prime = ",n)
n2=int(str(n)[::-1])
if check_weak_prime(n2):
print("Solution found:")
print(" n = ",n)
print(" n2 = ",n2)
return True
n=nextprime(n)
return False
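# Example driver (range is hypothetical): scan the six-digit primes for a
# weak prime whose digit reversal is also a weak prime.
#     search_palindromic_weak_prime(100000, 1000000)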
| 28.594595
| 65
| 0.503781
|
import sympy
from sympy import *
def check_weak_prime(n):
if not isprime(n):
return(False)
digits=[int(i) for i in str(n)]
for position in range(len(digits)):
digits2=[i for i in digits]
for j in range(10):
if j != digits[position]:
digits2[position]=j
m=0
for i in digits2:
m=10*m+i
if isprime(m):
return(False)
return(True)
def search_palindromic_weak_prime(nlow,nhigh):
n=nlow
if not isprime(n):
n=nextprime(n)
while(n<nhigh):
if check_weak_prime(n):
print("Weak prime = ",n)
n2=int(str(n)[::-1])
if check_weak_prime(n2):
print("Solution found:")
print(" n = ",n)
print(" n2 = ",n2)
return True
n=nextprime(n)
return False
| true
| true
|
790627cc943ea78dc54d4eb3460a6686dc664b7d
| 1,708
|
py
|
Python
|
neighbourhood/migrations/0004_auto_20220103_1315.py
|
Maryan23/MyHood
|
338d76399cbdeded96d2ed3b19928146322cb705
|
[
"MIT"
] | null | null | null |
neighbourhood/migrations/0004_auto_20220103_1315.py
|
Maryan23/MyHood
|
338d76399cbdeded96d2ed3b19928146322cb705
|
[
"MIT"
] | null | null | null |
neighbourhood/migrations/0004_auto_20220103_1315.py
|
Maryan23/MyHood
|
338d76399cbdeded96d2ed3b19928146322cb705
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2022-01-03 10:15
import cloudinary.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('neighbourhood', '0003_auto_20211222_2324'),
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, null=True)),
('created_on', models.DateTimeField(auto_now_add=True, null=True)),
('updated_on', models.DateTimeField(auto_now=True, null=True)),
],
),
migrations.RemoveField(
model_name='profile',
name='name',
),
migrations.AddField(
model_name='neighbourhood',
name='description',
field=models.TextField(max_length=200, null=True),
),
migrations.AddField(
model_name='neighbourhood',
name='hood_image',
field=cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name='hood_image'),
),
migrations.AddField(
model_name='neighbourhood',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.location'),
),
migrations.AddField(
model_name='profile',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.location'),
),
]
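# This auto-generated migration is applied like any other, e.g.:
#     python manage.py migrate neighbourhood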
| 34.857143
| 121
| 0.600117
|
import cloudinary.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('neighbourhood', '0003_auto_20211222_2324'),
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, null=True)),
('created_on', models.DateTimeField(auto_now_add=True, null=True)),
('updated_on', models.DateTimeField(auto_now=True, null=True)),
],
),
migrations.RemoveField(
model_name='profile',
name='name',
),
migrations.AddField(
model_name='neighbourhood',
name='description',
field=models.TextField(max_length=200, null=True),
),
migrations.AddField(
model_name='neighbourhood',
name='hood_image',
field=cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name='hood_image'),
),
migrations.AddField(
model_name='neighbourhood',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.location'),
),
migrations.AddField(
model_name='profile',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.location'),
),
]
| true
| true
|
790628d2b0fdee6504fb07b1936ed470c3b9c782
| 14,257
|
py
|
Python
|
lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py
|
tiwaria1/llvm
|
616a396db0610ae0c1992361af005a869ef81897
|
[
"Apache-2.0"
] | 1
|
2020-09-10T01:00:18.000Z
|
2020-09-10T01:00:18.000Z
|
lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py
|
coolstar/llvm-project
|
e21ccdd5b5667de50de65ee8903a89a21020e89a
|
[
"Apache-2.0"
] | null | null | null |
lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py
|
coolstar/llvm-project
|
e21ccdd5b5667de50de65ee8903a89a21020e89a
|
[
"Apache-2.0"
] | null | null | null |
from lldbsuite.test.lldbtest import *
import os
import vscode
class VSCodeTestCaseBase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
def create_debug_adaptor(self):
'''Create the Visual Studio Code debug adaptor'''
self.assertTrue(os.path.exists(self.lldbVSCodeExec),
'lldb-vscode must exist')
log_file_path = self.getBuildArtifact('vscode.txt')
self.vscode = vscode.DebugAdaptor(
executable=self.lldbVSCodeExec, init_commands=self.setUpCommands(),
log_file=log_file_path)
def build_and_create_debug_adaptor(self):
self.build()
self.create_debug_adaptor()
def set_source_breakpoints(self, source_path, lines, condition=None,
hitCondition=None):
'''Sets source breakpoints and returns an array of strings containing
the breakpoint IDs ("1", "2") for each breakpoint that was set.
'''
response = self.vscode.request_setBreakpoints(
source_path, lines, condition=condition, hitCondition=hitCondition)
if response is None:
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
breakpoint_ids.append('%i' % (breakpoint['id']))
return breakpoint_ids
def set_function_breakpoints(self, functions, condition=None,
hitCondition=None):
'''Sets breakpoints by function name given an array of function names
and returns an array of strings containing the breakpoint IDs
("1", "2") for each breakpoint that was set.
'''
response = self.vscode.request_setFunctionBreakpoints(
functions, condition=condition, hitCondition=hitCondition)
if response is None:
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
breakpoint_ids.append('%i' % (breakpoint['id']))
return breakpoint_ids
def verify_breakpoint_hit(self, breakpoint_ids):
'''Wait for the process we are debugging to stop, and verify we hit
any breakpoint location in the "breakpoint_ids" array.
"breakpoint_ids" should be a list of breakpoint ID strings
(["1", "2"]). The return value from self.set_source_breakpoints()
or self.set_function_breakpoints() can be passed to this function'''
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if 'body' in stopped_event:
body = stopped_event['body']
if 'reason' not in body:
continue
if body['reason'] != 'breakpoint':
continue
if 'description' not in body:
continue
# Descriptions for breakpoints will be in the form
# "breakpoint 1.1", so look for any description that matches
# ("breakpoint 1.") in the description field as verification
# that one of the breakpoint locations was hit. VSCode doesn't
# allow breakpoints to have multiple locations, but LLDB does.
# So when looking at the description we just want to make sure
# the right breakpoint matches and not worry about the actual
# location.
description = body['description']
print("description: %s" % (description))
for breakpoint_id in breakpoint_ids:
match_desc = 'breakpoint %s.' % (breakpoint_id)
if match_desc in description:
return
self.assertTrue(False, "breakpoint not hit")
def verify_exception_breakpoint_hit(self, filter_label):
'''Wait for the process we are debugging to stop, and verify the stop
reason is 'exception' and that the description matches
'filter_label'
'''
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if 'body' in stopped_event:
body = stopped_event['body']
if 'reason' not in body:
continue
if body['reason'] != 'exception':
continue
if 'description' not in body:
continue
description = body['description']
if filter_label == description:
return True
return False
def verify_commands(self, flavor, output, commands):
self.assertTrue(output and len(output) > 0, "expect console output")
lines = output.splitlines()
prefix = '(lldb) '
for cmd in commands:
found = False
for line in lines:
if line.startswith(prefix) and cmd in line:
found = True
break
self.assertTrue(found,
"verify '%s' found in console output for '%s'" % (
cmd, flavor))
def get_dict_value(self, d, key_path):
        '''Verify each key in the key_path array is contained in each
           dictionary within "d". Assert if any key isn't in the
           corresponding dictionary. This is handy for grabbing values from a
           VS Code response dictionary, like getting
response['body']['stackFrames']
'''
value = d
for key in key_path:
if key in value:
value = value[key]
else:
self.assertTrue(key in value,
'key "%s" from key_path "%s" not in "%s"' % (
key, key_path, d))
return value
def get_stackFrames_and_totalFramesCount(self, threadId=None, startFrame=None,
levels=None, dump=False):
response = self.vscode.request_stackTrace(threadId=threadId,
startFrame=startFrame,
levels=levels,
dump=dump)
if response:
stackFrames = self.get_dict_value(response, ['body', 'stackFrames'])
totalFrames = self.get_dict_value(response, ['body', 'totalFrames'])
self.assertTrue(totalFrames > 0,
'verify totalFrames count is provided by extension that supports '
'async frames loading')
return (stackFrames, totalFrames)
return (None, 0)
def get_stackFrames(self, threadId=None, startFrame=None, levels=None,
dump=False):
(stackFrames, totalFrames) = self.get_stackFrames_and_totalFramesCount(
threadId=threadId,
startFrame=startFrame,
levels=levels,
dump=dump)
return stackFrames
def get_source_and_line(self, threadId=None, frameIndex=0):
stackFrames = self.get_stackFrames(threadId=threadId,
startFrame=frameIndex,
levels=1)
if stackFrames is not None:
stackFrame = stackFrames[0]
if 'source' in stackFrame:
source = stackFrame['source']
if 'path' in source:
if 'line' in stackFrame:
return (source['path'], stackFrame['line'])
return ('', 0)
def get_stdout(self, timeout=0.0):
return self.vscode.get_output('stdout', timeout=timeout)
def get_console(self, timeout=0.0):
return self.vscode.get_output('console', timeout=timeout)
def get_local_as_int(self, name, threadId=None):
value = self.vscode.get_local_variable_value(name, threadId=threadId)
if value.startswith('0x'):
return int(value, 16)
elif value.startswith('0'):
return int(value, 8)
else:
return int(value)
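    # Note on get_local_as_int above: a leading '0x' is parsed as hex and a
    # bare leading '0' as octal, so a zero-padded decimal string would be
    # read as octal; presumably this matches how lldb-vscode formats values.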
def set_local(self, name, value, id=None):
'''Set a top level local variable only.'''
return self.vscode.request_setVariable(1, name, str(value), id=id)
def set_global(self, name, value, id=None):
'''Set a top level global variable only.'''
return self.vscode.request_setVariable(2, name, str(value), id=id)
def stepIn(self, threadId=None, waitForStop=True):
self.vscode.request_stepIn(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def stepOver(self, threadId=None, waitForStop=True):
self.vscode.request_next(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def stepOut(self, threadId=None, waitForStop=True):
self.vscode.request_stepOut(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def continue_to_next_stop(self):
self.vscode.request_continue()
return self.vscode.wait_for_stopped()
def continue_to_breakpoints(self, breakpoint_ids):
self.vscode.request_continue()
self.verify_breakpoint_hit(breakpoint_ids)
def continue_to_exception_breakpoint(self, filter_label):
self.vscode.request_continue()
self.assertTrue(self.verify_exception_breakpoint_hit(filter_label),
'verify we got "%s"' % (filter_label))
def continue_to_exit(self, exitCode=0):
self.vscode.request_continue()
stopped_events = self.vscode.wait_for_stopped()
self.assertEquals(len(stopped_events), 1,
"stopped_events = {}".format(stopped_events))
self.assertEquals(stopped_events[0]['event'], 'exited',
'make sure program ran to completion')
self.assertEquals(stopped_events[0]['body']['exitCode'], exitCode,
'exitCode == %i' % (exitCode))
def attach(self, program=None, pid=None, waitFor=None, trace=None,
initCommands=None, preRunCommands=None, stopCommands=None,
exitCommands=None, attachCommands=None, coreFile=None):
'''Build the default Makefile target, create the VSCode debug adaptor,
and attach to the process.
'''
# Make sure we disconnect and terminate the VSCode debug adaptor even
# if we throw an exception during the test case.
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Initialize and launch the program
self.vscode.request_initialize()
response = self.vscode.request_attach(
program=program, pid=pid, waitFor=waitFor, trace=trace,
initCommands=initCommands, preRunCommands=preRunCommands,
stopCommands=stopCommands, exitCommands=exitCommands,
attachCommands=attachCommands, coreFile=coreFile)
if not (response and response['success']):
self.assertTrue(response['success'],
'attach failed (%s)' % (response['message']))
def launch(self, program=None, args=None, cwd=None, env=None,
stopOnEntry=False, disableASLR=True,
disableSTDIO=False, shellExpandArguments=False,
trace=False, initCommands=None, preRunCommands=None,
stopCommands=None, exitCommands=None,sourcePath=None,
debuggerRoot=None, launchCommands=None, sourceMap=None):
        '''Send a launch request to vscode
'''
# Make sure we disconnect and terminate the VSCode debug adapter,
# if we throw an exception during the test case
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Initialize and launch the program
self.vscode.request_initialize()
response = self.vscode.request_launch(
program,
args=args,
cwd=cwd,
env=env,
stopOnEntry=stopOnEntry,
disableASLR=disableASLR,
disableSTDIO=disableSTDIO,
shellExpandArguments=shellExpandArguments,
trace=trace,
initCommands=initCommands,
preRunCommands=preRunCommands,
stopCommands=stopCommands,
exitCommands=exitCommands,
sourcePath=sourcePath,
debuggerRoot=debuggerRoot,
launchCommands=launchCommands,
sourceMap=sourceMap)
if not (response and response['success']):
self.assertTrue(response['success'],
'launch failed (%s)' % (response['message']))
def build_and_launch(self, program, args=None, cwd=None, env=None,
stopOnEntry=False, disableASLR=True,
disableSTDIO=False, shellExpandArguments=False,
trace=False, initCommands=None, preRunCommands=None,
stopCommands=None, exitCommands=None,
sourcePath=None, debuggerRoot=None):
'''Build the default Makefile target, create the VSCode debug adaptor,
and launch the process.
'''
self.build_and_create_debug_adaptor()
self.assertTrue(os.path.exists(program), 'executable must exist')
self.launch(program, args, cwd, env, stopOnEntry, disableASLR,
disableSTDIO, shellExpandArguments, trace,
initCommands, preRunCommands, stopCommands, exitCommands,
sourcePath, debuggerRoot)
| 44.139319
| 86
| 0.58694
|
from lldbsuite.test.lldbtest import *
import os
import vscode
class VSCodeTestCaseBase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
def create_debug_adaptor(self):
self.assertTrue(os.path.exists(self.lldbVSCodeExec),
'lldb-vscode must exist')
log_file_path = self.getBuildArtifact('vscode.txt')
self.vscode = vscode.DebugAdaptor(
executable=self.lldbVSCodeExec, init_commands=self.setUpCommands(),
log_file=log_file_path)
def build_and_create_debug_adaptor(self):
self.build()
self.create_debug_adaptor()
def set_source_breakpoints(self, source_path, lines, condition=None,
hitCondition=None):
response = self.vscode.request_setBreakpoints(
source_path, lines, condition=condition, hitCondition=hitCondition)
if response is None:
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
breakpoint_ids.append('%i' % (breakpoint['id']))
return breakpoint_ids
def set_function_breakpoints(self, functions, condition=None,
hitCondition=None):
response = self.vscode.request_setFunctionBreakpoints(
functions, condition=condition, hitCondition=hitCondition)
if response is None:
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
breakpoint_ids.append('%i' % (breakpoint['id']))
return breakpoint_ids
def verify_breakpoint_hit(self, breakpoint_ids):
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if 'body' in stopped_event:
body = stopped_event['body']
if 'reason' not in body:
continue
if body['reason'] != 'breakpoint':
continue
if 'description' not in body:
continue
# allow breakpoints to have multiple locations, but LLDB does.
# So when looking at the description we just want to make sure
# the right breakpoint matches and not worry about the actual
# location.
description = body['description']
print("description: %s" % (description))
for breakpoint_id in breakpoint_ids:
match_desc = 'breakpoint %s.' % (breakpoint_id)
if match_desc in description:
return
self.assertTrue(False, "breakpoint not hit")
def verify_exception_breakpoint_hit(self, filter_label):
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if 'body' in stopped_event:
body = stopped_event['body']
if 'reason' not in body:
continue
if body['reason'] != 'exception':
continue
if 'description' not in body:
continue
description = body['description']
if filter_label == description:
return True
return False
def verify_commands(self, flavor, output, commands):
self.assertTrue(output and len(output) > 0, "expect console output")
lines = output.splitlines()
prefix = '(lldb) '
for cmd in commands:
found = False
for line in lines:
if line.startswith(prefix) and cmd in line:
found = True
break
self.assertTrue(found,
"verify '%s' found in console output for '%s'" % (
cmd, flavor))
def get_dict_value(self, d, key_path):
value = d
for key in key_path:
if key in value:
value = value[key]
else:
self.assertTrue(key in value,
'key "%s" from key_path "%s" not in "%s"' % (
key, key_path, d))
return value
def get_stackFrames_and_totalFramesCount(self, threadId=None, startFrame=None,
levels=None, dump=False):
response = self.vscode.request_stackTrace(threadId=threadId,
startFrame=startFrame,
levels=levels,
dump=dump)
if response:
stackFrames = self.get_dict_value(response, ['body', 'stackFrames'])
totalFrames = self.get_dict_value(response, ['body', 'totalFrames'])
self.assertTrue(totalFrames > 0,
'verify totalFrames count is provided by extension that supports '
'async frames loading')
return (stackFrames, totalFrames)
return (None, 0)
def get_stackFrames(self, threadId=None, startFrame=None, levels=None,
dump=False):
(stackFrames, totalFrames) = self.get_stackFrames_and_totalFramesCount(
threadId=threadId,
startFrame=startFrame,
levels=levels,
dump=dump)
return stackFrames
def get_source_and_line(self, threadId=None, frameIndex=0):
stackFrames = self.get_stackFrames(threadId=threadId,
startFrame=frameIndex,
levels=1)
if stackFrames is not None:
stackFrame = stackFrames[0]
if 'source' in stackFrame:
source = stackFrame['source']
if 'path' in source:
if 'line' in stackFrame:
return (source['path'], stackFrame['line'])
return ('', 0)
def get_stdout(self, timeout=0.0):
return self.vscode.get_output('stdout', timeout=timeout)
def get_console(self, timeout=0.0):
return self.vscode.get_output('console', timeout=timeout)
def get_local_as_int(self, name, threadId=None):
value = self.vscode.get_local_variable_value(name, threadId=threadId)
if value.startswith('0x'):
return int(value, 16)
elif value.startswith('0'):
return int(value, 8)
else:
return int(value)
def set_local(self, name, value, id=None):
return self.vscode.request_setVariable(1, name, str(value), id=id)
def set_global(self, name, value, id=None):
return self.vscode.request_setVariable(2, name, str(value), id=id)
def stepIn(self, threadId=None, waitForStop=True):
self.vscode.request_stepIn(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def stepOver(self, threadId=None, waitForStop=True):
self.vscode.request_next(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def stepOut(self, threadId=None, waitForStop=True):
self.vscode.request_stepOut(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def continue_to_next_stop(self):
self.vscode.request_continue()
return self.vscode.wait_for_stopped()
def continue_to_breakpoints(self, breakpoint_ids):
self.vscode.request_continue()
self.verify_breakpoint_hit(breakpoint_ids)
def continue_to_exception_breakpoint(self, filter_label):
self.vscode.request_continue()
self.assertTrue(self.verify_exception_breakpoint_hit(filter_label),
'verify we got "%s"' % (filter_label))
def continue_to_exit(self, exitCode=0):
self.vscode.request_continue()
stopped_events = self.vscode.wait_for_stopped()
self.assertEquals(len(stopped_events), 1,
"stopped_events = {}".format(stopped_events))
self.assertEquals(stopped_events[0]['event'], 'exited',
'make sure program ran to completion')
self.assertEquals(stopped_events[0]['body']['exitCode'], exitCode,
'exitCode == %i' % (exitCode))
def attach(self, program=None, pid=None, waitFor=None, trace=None,
initCommands=None, preRunCommands=None, stopCommands=None,
exitCommands=None, attachCommands=None, coreFile=None):
# Make sure we disconnect and terminate the VSCode debug adaptor even
# if we throw an exception during the test case.
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Initialize and launch the program
self.vscode.request_initialize()
response = self.vscode.request_attach(
program=program, pid=pid, waitFor=waitFor, trace=trace,
initCommands=initCommands, preRunCommands=preRunCommands,
stopCommands=stopCommands, exitCommands=exitCommands,
attachCommands=attachCommands, coreFile=coreFile)
if not (response and response['success']):
self.assertTrue(response['success'],
'attach failed (%s)' % (response['message']))
def launch(self, program=None, args=None, cwd=None, env=None,
stopOnEntry=False, disableASLR=True,
disableSTDIO=False, shellExpandArguments=False,
trace=False, initCommands=None, preRunCommands=None,
stopCommands=None, exitCommands=None,sourcePath=None,
debuggerRoot=None, launchCommands=None, sourceMap=None):
        # Make sure we disconnect and terminate the VSCode debug adapter
        # even if we throw an exception during the test case.
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Initialize and launch the program
self.vscode.request_initialize()
response = self.vscode.request_launch(
program,
args=args,
cwd=cwd,
env=env,
stopOnEntry=stopOnEntry,
disableASLR=disableASLR,
disableSTDIO=disableSTDIO,
shellExpandArguments=shellExpandArguments,
trace=trace,
initCommands=initCommands,
preRunCommands=preRunCommands,
stopCommands=stopCommands,
exitCommands=exitCommands,
sourcePath=sourcePath,
debuggerRoot=debuggerRoot,
launchCommands=launchCommands,
sourceMap=sourceMap)
if not (response and response['success']):
self.assertTrue(response['success'],
'launch failed (%s)' % (response['message']))
def build_and_launch(self, program, args=None, cwd=None, env=None,
stopOnEntry=False, disableASLR=True,
disableSTDIO=False, shellExpandArguments=False,
trace=False, initCommands=None, preRunCommands=None,
stopCommands=None, exitCommands=None,
sourcePath=None, debuggerRoot=None):
self.build_and_create_debug_adaptor()
self.assertTrue(os.path.exists(program), 'executable must exist')
self.launch(program, args, cwd, env, stopOnEntry, disableASLR,
disableSTDIO, shellExpandArguments, trace,
initCommands, preRunCommands, stopCommands, exitCommands,
sourcePath, debuggerRoot)
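    # Hedged usage sketch: how the helpers above are typically chained in
    # a test. `set_source_breakpoints` is assumed to be defined elsewhere
    # in this harness; it is not part of the excerpt above.
    #
    #     def test_example(self):
    #         self.build_and_launch(program)
    #         breakpoint_ids = self.set_source_breakpoints(source, [line])
    #         self.continue_to_breakpoints(breakpoint_ids)
    #         frames = self.get_stackFrames()
    #         self.continue_to_exit()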
| true
| true
|
79062920dfebee87226a14a6e3e4aaf3535fe385
| 1,075
|
py
|
Python
|
server/server.py
|
dave-cz/esp32_power_meter
|
649c2020be587b2d57d40dd3c201feec3596c2a0
|
[
"MIT"
] | 1
|
2022-01-13T17:21:55.000Z
|
2022-01-13T17:21:55.000Z
|
server/server.py
|
dave-cz/esp32_power_meter
|
649c2020be587b2d57d40dd3c201feec3596c2a0
|
[
"MIT"
] | null | null | null |
server/server.py
|
dave-cz/esp32_power_meter
|
649c2020be587b2d57d40dd3c201feec3596c2a0
|
[
"MIT"
] | null | null | null |
import logging
import pandas as pd
from flask import Flask, request
from gevent.pywsgi import WSGIServer
from time import sleep
from func import rms, meas_to_influx, rms_to_influx, config
logger = logging.getLogger(config['log_name'])
logger.setLevel(logging.INFO)
h_stream = logging.StreamHandler()
h_stream.setLevel(logging.INFO)
logger.addHandler(h_stream)
app = Flask(__name__)
@app.post('/save')
def save():
headers = request.headers
if 'X-API-KEY' not in headers or headers['X-API-KEY'] != config['api_key']:
sleep(5)
return '', 401
data = request.json
dt = pd.Timestamp(data['dt'])
s_data, power = rms(data['payload'], data['ticks'], dt)
if power < 0:
logger.error(data)
return '', 204
if power < 100:
return str(power)
# print(s_data)
# print(power)
rms_to_influx(power, dt)
meas_to_influx(s_data)
return str(power)
if __name__ == '__main__':
# app.run(host=config['url'], port=config['port'])
WSGIServer((config['url'], config['port']), app).serve_forever()
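# Hedged client sketch for POST /save; the field names mirror the
# handler above, but the host, key, and payload values are illustrative:
#
#     import requests
#     requests.post('http://<host>:<port>/save',
#                   headers={'X-API-KEY': '<api_key>'},
#                   json={'dt': '2022-01-13T17:21:55',
#                         'payload': [...], 'ticks': [...]})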
| 22.87234
| 79
| 0.664186
|
import logging
import pandas as pd
from flask import Flask, request
from gevent.pywsgi import WSGIServer
from time import sleep
from func import rms, meas_to_influx, rms_to_influx, config
logger = logging.getLogger(config['log_name'])
logger.setLevel(logging.INFO)
h_stream = logging.StreamHandler()
h_stream.setLevel(logging.INFO)
logger.addHandler(h_stream)
app = Flask(__name__)
@app.post('/save')
def save():
headers = request.headers
if 'X-API-KEY' not in headers or headers['X-API-KEY'] != config['api_key']:
sleep(5)
return '', 401
data = request.json
dt = pd.Timestamp(data['dt'])
s_data, power = rms(data['payload'], data['ticks'], dt)
if power < 0:
logger.error(data)
return '', 204
if power < 100:
return str(power)
rms_to_influx(power, dt)
meas_to_influx(s_data)
return str(power)
if __name__ == '__main__':
WSGIServer((config['url'], config['port']), app).serve_forever()
| true
| true
|
790629a1290c8a43454251bb08563665adc9b9b5
| 599
|
py
|
Python
|
python-dsa/Section4/graph_adj_list.py
|
vermuz/mani-professional-notes
|
896328e81e376bc113553c81d38ad6c1781b8e0b
|
[
"CC-BY-3.0"
] | 26
|
2018-06-28T05:32:20.000Z
|
2021-11-08T13:12:41.000Z
|
Section4/graph_adj_list.py
|
khiemspdt/Python-Data-Structures-and-Algorithms-v-
|
3540693eb18313cbc9d65dad8232357dd351b3a9
|
[
"MIT"
] | null | null | null |
Section4/graph_adj_list.py
|
khiemspdt/Python-Data-Structures-and-Algorithms-v-
|
3540693eb18313cbc9d65dad8232357dd351b3a9
|
[
"MIT"
] | 31
|
2018-05-10T21:31:21.000Z
|
2022-02-14T12:38:08.000Z
|
# Undirected Graph from demo represented as Adjacency List
graph = {
"a": [("b", 7), ("c", 9), ("f", 14)],
"b": [("a", 7), ("c", 10), ("d", 15)],
"c": [("a", 9), ("b", 10), ("d", 11), ("f", 2)],
"d": [("b", 15), ("c", 11), ("e", 6)],
"e": [("d", 6), ("f", 9)],
"f": [("a", 14), ("c", 2), ("e", 9)],
}
def find_vertices():
return graph.keys()
def find_edges():
edges = []
for v in graph:
for e in graph[v]:
edges.append((v, e[0], e[1]))
return edges
print("Vertices: {}".format(find_vertices()))
print("Edges: {}".format(find_edges()))
| 24.958333
| 58
| 0.435726
|
graph = {
"a": [("b", 7), ("c", 9), ("f", 14)],
"b": [("a", 7), ("c", 10), ("d", 15)],
"c": [("a", 9), ("b", 10), ("d", 11), ("f", 2)],
"d": [("b", 15), ("c", 11), ("e", 6)],
"e": [("d", 6), ("f", 9)],
"f": [("a", 14), ("c", 2), ("e", 9)],
}
def find_vertices():
return graph.keys()
def find_edges():
edges = []
for v in graph:
for e in graph[v]:
edges.append((v, e[0], e[1]))
return edges
print("Vertices: {}".format(find_vertices()))
print("Edges: {}".format(find_edges()))
| true
| true
|
79062a4a296cfdcd7b9893d2ae317bf782d4b55e
| 10,790
|
py
|
Python
|
test/functional/wallet_import_rescan.py
|
XziimP/bitcoinV
|
38980aff8a8be63b338bbe83ea9896107104fc60
|
[
"MIT"
] | 128
|
2015-01-20T22:21:27.000Z
|
2021-09-17T04:40:56.000Z
|
test/functional/wallet_import_rescan.py
|
ccyanxyz/bitcoin
|
9dd6bbba613d7462afdb6276c4002bc183478528
|
[
"MIT"
] | 162
|
2015-02-23T00:45:54.000Z
|
2021-11-10T09:51:47.000Z
|
test/functional/wallet_import_rescan.py
|
ccyanxyz/bitcoin
|
9dd6bbba613d7462afdb6276c4002bc183478528
|
[
"MIT"
] | 168
|
2015-01-13T13:54:38.000Z
|
2022-01-24T23:04:06.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.address import AddressType
from test_framework.util import (
connect_nodes,
assert_equal,
set_node_times,
)
import collections
from decimal import Decimal
import enum
import itertools
import random
Call = enum.Enum("Call", "single multiaddress multiscript")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data address_type rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def do_import(self, timestamp):
"""Call one key import RPC."""
rescan = self.rescan == Rescan.yes
assert_equal(self.address["solvable"], True)
assert_equal(self.address["isscript"], self.address_type == AddressType.p2sh_segwit)
assert_equal(self.address["iswitness"], self.address_type == AddressType.bech32)
if self.address["isscript"]:
assert_equal(self.address["embedded"]["isscript"], False)
assert_equal(self.address["embedded"]["iswitness"], True)
if self.call == Call.single:
if self.data == Data.address:
response = self.node.importaddress(address=self.address["address"], label=self.label, rescan=rescan)
elif self.data == Data.pub:
response = self.node.importpubkey(pubkey=self.address["pubkey"], label=self.label, rescan=rescan)
elif self.data == Data.priv:
response = self.node.importprivkey(privkey=self.key, label=self.label, rescan=rescan)
assert_equal(response, None)
elif self.call in (Call.multiaddress, Call.multiscript):
request = {
"scriptPubKey": {
"address": self.address["address"]
} if self.call == Call.multiaddress else self.address["scriptPubKey"],
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}
if self.address_type == AddressType.p2sh_segwit and self.data != Data.address:
# We need solving data when providing a pubkey or privkey as data
request.update({"redeemscript": self.address['embedded']['scriptPubKey']})
response = self.node.importmulti(
requests=[request],
options={"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)},
)
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmation_height=None):
"""Verify that listtransactions/listreceivedbyaddress return expected values."""
txs = self.node.listtransactions(label=self.label, count=10000, include_watchonly=True)
current_height = self.node.getblockcount()
assert_equal(len(txs), self.expected_txs)
addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address'])
if self.expected_txs:
assert_equal(len(addresses[0]["txids"]), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["label"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], 1 + current_height - confirmation_height)
assert_equal("trusted" not in tx, True)
address, = [ad for ad in addresses if txid in ad["txids"]]
assert_equal(address["address"], self.address["address"])
assert_equal(address["amount"], self.expected_balance)
assert_equal(address["confirmations"], 1 + current_height - confirmation_height)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(address["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in address, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, AddressType, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
AMOUNT_DUST = 0.00000546
def get_rand_amount():
r = random.uniform(AMOUNT_DUST, 1)
return Decimal(str(round(r, 8)))
class ImportRescanTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
self.extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
# Import keys with pruning disabled
self.start_nodes(extra_args=[[]] * self.num_nodes)
for n in self.nodes:
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
self.stop_nodes()
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress(
label=variant.label,
address_type=variant.address_type.value,
))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = get_rand_amount()
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
self.nodes[0].generate(1) # Generate one block for each send
variant.confirmation_height = self.nodes[0].getblockcount()
variant.timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
# Generate a block further in the future (past the rescan window).
assert_equal(self.nodes[0].getrawmempool(), [])
set_node_times(
self.nodes,
self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] + TIMESTAMP_WINDOW + 1,
)
self.nodes[0].generate(1)
self.sync_all()
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
self.log.info('Run import for variant {}'.format(variant))
expect_rescan = variant.rescan == Rescan.yes
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(variant.timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, variant.confirmation_height)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = get_rand_amount()
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
self.nodes[0].generate(1) # Generate one block for each send
variant.confirmation_height = self.nodes[0].getblockcount()
assert_equal(self.nodes[0].getrawmempool(), [])
self.sync_all()
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
self.log.info('Run check for variant {}'.format(variant))
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, variant.confirmation_height)
if __name__ == "__main__":
ImportRescanTest().main()
| 46.913043
| 126
| 0.66469
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.address import AddressType
from test_framework.util import (
connect_nodes,
assert_equal,
set_node_times,
)
import collections
from decimal import Decimal
import enum
import itertools
import random
Call = enum.Enum("Call", "single multiaddress multiscript")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data address_type rescan prune")):
def do_import(self, timestamp):
rescan = self.rescan == Rescan.yes
assert_equal(self.address["solvable"], True)
assert_equal(self.address["isscript"], self.address_type == AddressType.p2sh_segwit)
assert_equal(self.address["iswitness"], self.address_type == AddressType.bech32)
if self.address["isscript"]:
assert_equal(self.address["embedded"]["isscript"], False)
assert_equal(self.address["embedded"]["iswitness"], True)
if self.call == Call.single:
if self.data == Data.address:
response = self.node.importaddress(address=self.address["address"], label=self.label, rescan=rescan)
elif self.data == Data.pub:
response = self.node.importpubkey(pubkey=self.address["pubkey"], label=self.label, rescan=rescan)
elif self.data == Data.priv:
response = self.node.importprivkey(privkey=self.key, label=self.label, rescan=rescan)
assert_equal(response, None)
elif self.call in (Call.multiaddress, Call.multiscript):
request = {
"scriptPubKey": {
"address": self.address["address"]
} if self.call == Call.multiaddress else self.address["scriptPubKey"],
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}
if self.address_type == AddressType.p2sh_segwit and self.data != Data.address:
request.update({"redeemscript": self.address['embedded']['scriptPubKey']})
response = self.node.importmulti(
requests=[request],
options={"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)},
)
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmation_height=None):
txs = self.node.listtransactions(label=self.label, count=10000, include_watchonly=True)
current_height = self.node.getblockcount()
assert_equal(len(txs), self.expected_txs)
addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address'])
if self.expected_txs:
assert_equal(len(addresses[0]["txids"]), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["label"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], 1 + current_height - confirmation_height)
assert_equal("trusted" not in tx, True)
address, = [ad for ad in addresses if txid in ad["txids"]]
assert_equal(address["address"], self.address["address"])
assert_equal(address["amount"], self.expected_balance)
assert_equal(address["confirmations"], 1 + current_height - confirmation_height)
if self.data != Data.priv:
assert_equal(address["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in address, True)
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, AddressType, Rescan, (False, True))]
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
TIMESTAMP_WINDOW = 2 * 60 * 60
AMOUNT_DUST = 0.00000546
def get_rand_amount():
r = random.uniform(AMOUNT_DUST, 1)
return Decimal(str(round(r, 8)))
class ImportRescanTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
self.extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes(extra_args=[[]] * self.num_nodes)
for n in self.nodes:
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
self.stop_nodes()
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress(
label=variant.label,
address_type=variant.address_type.value,
))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = get_rand_amount()
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
self.nodes[0].generate(1)
variant.confirmation_height = self.nodes[0].getblockcount()
variant.timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
assert_equal(self.nodes[0].getrawmempool(), [])
set_node_times(
self.nodes,
self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] + TIMESTAMP_WINDOW + 1,
)
self.nodes[0].generate(1)
self.sync_all()
for variant in IMPORT_VARIANTS:
self.log.info('Run import for variant {}'.format(variant))
expect_rescan = variant.rescan == Rescan.yes
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(variant.timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, variant.confirmation_height)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = get_rand_amount()
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
self.nodes[0].generate(1)
variant.confirmation_height = self.nodes[0].getblockcount()
assert_equal(self.nodes[0].getrawmempool(), [])
self.sync_all()
for variant in IMPORT_VARIANTS:
self.log.info('Run check for variant {}'.format(variant))
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, variant.confirmation_height)
if __name__ == "__main__":
ImportRescanTest().main()
| true
| true
|
79062ac560580fcfe662156bf70930d38c1109dc
| 1,178
|
py
|
Python
|
avltree/AVLNode.py
|
gpk2000/avl-db
|
11003e26f8114a3a70e75c952c2464ae0ed29cc5
|
[
"MIT"
] | 1
|
2021-06-15T05:22:19.000Z
|
2021-06-15T05:22:19.000Z
|
avltree/AVLNode.py
|
gpk2000/avl-db
|
11003e26f8114a3a70e75c952c2464ae0ed29cc5
|
[
"MIT"
] | null | null | null |
avltree/AVLNode.py
|
gpk2000/avl-db
|
11003e26f8114a3a70e75c952c2464ae0ed29cc5
|
[
"MIT"
] | null | null | null |
class NoNodeData(Exception):
pass
class AVLNode(object):
def __init__(self, key=None, value=None) -> None:
"""Initializes the AVL Node.
Args:
            key (optional): key of the node. Defaults to None.
            value (optional): value stored under the key. Defaults to None.
"""
super().__init__()
self.key = key
self.value = value
self.left = None
self.right = None
self.height = 1
def __str__(self) -> str:
"""Prints single AVL Node to stdout
Raises:
NoNodeData: If no data is present in the node
Returns:
str: output string
"""
if self.key:
out = "data: {0}\nleft: {1}\nright: {2}\n".format(
(self.key, self.value), self.left.__str__(), self.right.__str__())
return out
raise NoNodeData
def get_key(self) -> str:
"""returns the key of the node
Returns:
str: the key in (key, value) pair
"""
return self.key
def get_value(self) -> str:
"""returns the value of the key
Returns:
str: the value in (key, value) pair
"""
return self.value
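# Hedged usage sketch (insertion/rotation logic lives elsewhere in this
# package; only node construction and accessors are shown):
#
#     root = AVLNode("k1", "v1")
#     root.left = AVLNode("k0", "v0")
#     print(root.get_key(), root.get_value())  # -> k1 v1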
| 23.56
| 82
| 0.516129
|
class NoNodeData(Exception):
pass
class AVLNode(object):
def __init__(self, key=None, value=None) -> None:
super().__init__()
self.key = key
self.value = value
self.left = None
self.right = None
self.height = 1
def __str__(self) -> str:
if self.key:
out = "data: {0}\nleft: {1}\nright: {2}\n".format(
(self.key, self.value), self.left.__str__(), self.right.__str__())
return out
raise NoNodeData
def get_key(self) -> str:
return self.key
def get_value(self) -> str:
return self.value
| true
| true
|
79062ad443583a58e8c07a4e627fcfa37486aab4
| 7,486
|
py
|
Python
|
libs/networks/resnet_dilation.py
|
Kinpzz/RCRNet-Pytorch
|
8d9f0fe0c7ad651db7578b2d96741de11036ef82
|
[
"MIT"
] | 67
|
2019-11-22T14:50:09.000Z
|
2021-12-21T21:57:55.000Z
|
libs/networks/resnet_dilation.py
|
Kinpzz/RCRNet-Pytorch
|
8d9f0fe0c7ad651db7578b2d96741de11036ef82
|
[
"MIT"
] | 6
|
2019-12-03T14:03:57.000Z
|
2021-10-10T11:25:30.000Z
|
libs/networks/resnet_dilation.py
|
Kinpzz/RCRNet-Pytorch
|
8d9f0fe0c7ad651db7578b2d96741de11036ef82
|
[
"MIT"
] | 15
|
2019-10-24T08:14:50.000Z
|
2021-09-24T05:56:16.000Z
|
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvison resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
elif output_stride == 16:
stride = [1, 2, 2, 1]
dilation = [1, 1, 1, 2]
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
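# Hedged usage sketch: `output_stride` is a required constructor argument
# of this dilated variant, and only 8 or 16 are handled above; the example
# tensor shape is illustrative:
#
#     import torch
#     model = resnet50(pretrained=False, output_stride=16)
#     x = torch.randn(1, 3, 224, 224)
#     logits = model(x)  # shape (1, 1000)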
| 33.419643
| 102
| 0.599386
|
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
elif output_stride == 16:
stride = [1, 2, 2, 1]
dilation = [1, 1, 1, 2]
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
| true
| true
|
79062b674ad6bafbff9ed9db57e157595c6fdec5
| 1,626
|
py
|
Python
|
plot_battery.py
|
rjmendez/lifepo4_bms
|
7561b50d3ff6551a65cf9d10c8f4bffeeb34db34
|
[
"Unlicense"
] | 1
|
2019-09-25T19:06:37.000Z
|
2019-09-25T19:06:37.000Z
|
plot_battery.py
|
rjmendez/lifepo4_bms
|
7561b50d3ff6551a65cf9d10c8f4bffeeb34db34
|
[
"Unlicense"
] | null | null | null |
plot_battery.py
|
rjmendez/lifepo4_bms
|
7561b50d3ff6551a65cf9d10c8f4bffeeb34db34
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
all_raw = open(sys.argv[1], 'r')
# init empty lists
cell0v = []
cell1v = []
cell2v = []
cell3v = []
totalv = []
# Process data into lists
for line in all_raw:
if 'voltage cell 0: ' in line:
try:
cell0v.append(float(line.replace('voltage cell 0: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage cell 1: ' in line:
try:
cell1v.append(float(line.replace('voltage cell 1: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage cell 2: ' in line:
try:
cell2v.append(float(line.replace('voltage cell 2: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage cell 3: ' in line:
try:
cell3v.append(float(line.replace('voltage cell 3: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage total: ' in line:
try:
totalv.append(float(line.replace('voltage total: ', '')[:-4]))
except:
print('Malformed data: ' + line)
# Write images
# Total voltage of pack
plt.figure(figsize=(15, 15))
plt.tight_layout()
plt.plot(totalv)
plt.savefig(sys.argv[1]+'_total_voltage.png')
plt.clf()
# Cells
plt.figure(figsize=(15, 15))
plt.tight_layout()
plt.plot(cell0v, color='blue')
plt.plot(cell1v, color='red')
plt.plot(cell2v, color='green')
plt.plot(cell3v, color='cyan')
plt.xlabel('C0 = blue C1 = red C2 = green C3 = cyan')
plt.savefig(sys.argv[1]+'_cell_voltage.png')
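# Usage sketch: the script takes the log path as its only argument and
# writes the two PNGs next to it:
#
#     python plot_battery.py battery.log
#     # -> battery.log_total_voltage.png, battery.log_cell_voltage.png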
| 29.035714
| 75
| 0.587946
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
all_raw = open(sys.argv[1], 'r')
cell0v = []
cell1v = []
cell2v = []
cell3v = []
totalv = []
for line in all_raw:
if 'voltage cell 0: ' in line:
try:
cell0v.append(float(line.replace('voltage cell 0: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage cell 1: ' in line:
try:
cell1v.append(float(line.replace('voltage cell 1: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage cell 2: ' in line:
try:
cell2v.append(float(line.replace('voltage cell 2: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage cell 3: ' in line:
try:
cell3v.append(float(line.replace('voltage cell 3: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage total: ' in line:
try:
totalv.append(float(line.replace('voltage total: ', '')[:-4]))
except:
print('Malformed data: ' + line)
plt.figure(figsize=(15, 15))
plt.tight_layout()
plt.plot(totalv)
plt.savefig(sys.argv[1]+'_total_voltage.png')
plt.clf()
plt.figure(figsize=(15, 15))
plt.tight_layout()
plt.plot(cell0v, color='blue')
plt.plot(cell1v, color='red')
plt.plot(cell2v, color='green')
plt.plot(cell3v, color='cyan')
plt.xlabel('C0 = blue C1 = red C2 = green C3 = cyan')
plt.savefig(sys.argv[1]+'_cell_voltage.png')
| true
| true
|
79062bd264064197ab9e1975ef3a6b3f090ed903
| 1,961
|
py
|
Python
|
config/settings/local.py
|
megcunningham/django-diesel
|
72016c4e1405cf8aa6227823d112974acd8133b8
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/local.py
|
megcunningham/django-diesel
|
72016c4e1405cf8aa6227823d112974acd8133b8
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/local.py
|
megcunningham/django-diesel
|
72016c4e1405cf8aa6227823d112974acd8133b8
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!gjwrp$!ldm&fccwk7-bwajlwga)m)!js+pouvnhnxb9+^nbwbw')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| 31.126984
| 110
| 0.500765
|
from .common import *
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!gjwrp$!ldm&fccwk7-bwajlwga)m)!js+pouvnhnxb9+^nbwbw')
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
INSTALLED_APPS += ('django_extensions', )
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
| true
| true
|
79062be30a913ab25a06164b9800864bb79d5e79
| 333
|
py
|
Python
|
tests/test-failinfo_refcount.py
|
lwllvyb/libfiu-hack
|
a41612d78fbce5e2a33745837c2ec735cc22fd6e
|
[
"MIT"
] | null | null | null |
tests/test-failinfo_refcount.py
|
lwllvyb/libfiu-hack
|
a41612d78fbce5e2a33745837c2ec735cc22fd6e
|
[
"MIT"
] | null | null | null |
tests/test-failinfo_refcount.py
|
lwllvyb/libfiu-hack
|
a41612d78fbce5e2a33745837c2ec735cc22fd6e
|
[
"MIT"
] | null | null | null |
"""
Test that we keep references to failinfo as needed.
"""
import fiu
# Object we'll use for failinfo
finfo = [1, 2, 3]
fiu.enable('p1', failinfo = finfo)
assert fiu.fail('p1')
assert fiu.failinfo('p1') is finfo
finfo_id = id(finfo)
del finfo
assert fiu.failinfo('p1') == [1, 2, 3]
assert id(fiu.failinfo('p1')) == finfo_id
| 15.136364
| 51
| 0.666667
|
import fiu
finfo = [1, 2, 3]
fiu.enable('p1', failinfo = finfo)
assert fiu.fail('p1')
assert fiu.failinfo('p1') is finfo
finfo_id = id(finfo)
del finfo
assert fiu.failinfo('p1') == [1, 2, 3]
assert id(fiu.failinfo('p1')) == finfo_id
| true
| true
|
79062c09d1ba2bad2a4c0e85b0e3ae49054c928c
| 2,208
|
py
|
Python
|
AB/pythonfunctions/search/elasticsearch/client/monitoring.py
|
PatrickJD/AWS
|
c7f976c0c5795ac43803ac201dbb57d584308bb0
|
[
"MIT"
] | null | null | null |
AB/pythonfunctions/search/elasticsearch/client/monitoring.py
|
PatrickJD/AWS
|
c7f976c0c5795ac43803ac201dbb57d584308bb0
|
[
"MIT"
] | null | null | null |
AB/pythonfunctions/search/elasticsearch/client/monitoring.py
|
PatrickJD/AWS
|
c7f976c0c5795ac43803ac201dbb57d584308bb0
|
[
"MIT"
] | null | null | null |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
class MonitoringClient(NamespacedClient):
@query_params("interval", "system_api_version", "system_id")
def bulk(self, body, doc_type=None, params=None, headers=None):
"""
Used by the monitoring features to send monitoring data.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.10/monitor-elasticsearch-cluster.html>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg body: The operation definition and data (action-data
pairs), separated by newlines
:arg doc_type: Default document type for items which don't
provide one
:arg interval: Collection interval (e.g., '10s' or '10000ms') of
the payload
:arg system_api_version: API Version of the monitored system
:arg system_id: Identifier of the monitored system
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
body = _bulk_body(self.transport.serializer, body)
return self.transport.perform_request(
"POST",
_make_path("_monitoring", doc_type, "bulk"),
params=params,
headers=headers,
body=body,
)
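# Hedged usage sketch: this namespaced client is reached via the
# `monitoring` attribute of an Elasticsearch client instance; the bulk
# payload below is illustrative only:
#
#     from elasticsearch import Elasticsearch
#     es = Elasticsearch()
#     es.monitoring.bulk(
#         body=[{"index": {"_type": "cluster_stats"}},
#               {"timestamp": "2021-01-01T00:00:00Z"}],
#         system_id="logstash", system_api_version="7", interval="10s")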
| 40.888889
| 108
| 0.685688
|
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
class MonitoringClient(NamespacedClient):
@query_params("interval", "system_api_version", "system_id")
def bulk(self, body, doc_type=None, params=None, headers=None):
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
body = _bulk_body(self.transport.serializer, body)
return self.transport.perform_request(
"POST",
_make_path("_monitoring", doc_type, "bulk"),
params=params,
headers=headers,
body=body,
)
| true
| true
|
79062cc22e761815e110c4f9c4667a88a215871a
| 1,044
|
py
|
Python
|
gen/pb_python/flyteidl/service/flyteadmin/test/test_admin_pager_duty_notification.py
|
SmritiSatyanV/flyteidl
|
e8a29e0deb437d9e7086f9e90b72362cd26000a2
|
[
"Apache-2.0"
] | 13
|
2019-08-05T22:02:36.000Z
|
2020-07-05T06:21:14.000Z
|
gen/pb_python/flyteidl/service/flyteadmin/test/test_admin_pager_duty_notification.py
|
SmritiSatyanV/flyteidl
|
e8a29e0deb437d9e7086f9e90b72362cd26000a2
|
[
"Apache-2.0"
] | 70
|
2021-02-01T22:14:27.000Z
|
2022-03-29T12:56:06.000Z
|
gen/pb_python/flyteidl/service/flyteadmin/test/test_admin_pager_duty_notification.py
|
SmritiSatyanV/flyteidl
|
e8a29e0deb437d9e7086f9e90b72362cd26000a2
|
[
"Apache-2.0"
] | 22
|
2021-02-01T16:13:28.000Z
|
2022-02-25T08:15:29.000Z
|
# coding: utf-8
"""
flyteidl/service/admin.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import flyteadmin
from flyteadmin.models.admin_pager_duty_notification import AdminPagerDutyNotification # noqa: E501
from flyteadmin.rest import ApiException
class TestAdminPagerDutyNotification(unittest.TestCase):
"""AdminPagerDutyNotification unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAdminPagerDutyNotification(self):
"""Test AdminPagerDutyNotification"""
# FIXME: construct object with mandatory attributes with example values
# model = flyteadmin.models.admin_pager_duty_notification.AdminPagerDutyNotification() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.463415
| 119
| 0.737548
|
from __future__ import absolute_import
import unittest
import flyteadmin
from flyteadmin.models.admin_pager_duty_notification import AdminPagerDutyNotification
from flyteadmin.rest import ApiException
class TestAdminPagerDutyNotification(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testAdminPagerDutyNotification(self):
        pass
if __name__ == '__main__':
unittest.main()
| true
| true
|