hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fed947f3b45976f09b22c7d3b90930f13ee6efbd | 30,788 | py | Python | zerver/views/registration.py | kaustubh-nair/zulip | fb96407607c1f42b350980ad13af20b884750606 | [
"Apache-2.0"
] | 1 | 2020-08-24T08:26:03.000Z | 2020-08-24T08:26:03.000Z | zerver/views/registration.py | kaustubh-nair/zulip | fb96407607c1f42b350980ad13af20b884750606 | [
"Apache-2.0"
] | null | null | null | zerver/views/registration.py | kaustubh-nair/zulip | fb96407607c1f42b350980ad13af20b884750606 | [
"Apache-2.0"
] | null | null | null | import logging
import smtplib
import urllib
from typing import Dict, List, Optional
from django.conf import settings
from django.contrib.auth import authenticate, get_backends
from django.core import validators
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils.translation import ugettext as _
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
ConfirmationKeyException,
RealmCreationKey,
create_confirmation_link,
get_object_from_key,
render_confirmation_key_error,
validate_key,
)
from zerver.context_processors import get_realm_from_request, login_context
from zerver.decorator import do_login, require_post
from zerver.forms import (
FindMyTeamForm,
HomepageForm,
RealmCreationForm,
RealmRedirectForm,
RegistrationForm,
)
from zerver.lib.actions import (
bulk_add_subscriptions,
do_activate_user,
do_change_full_name,
do_change_password,
do_create_realm,
do_create_user,
do_set_user_display_setting,
lookup_default_stream_groups,
)
from zerver.lib.create_user import get_role_for_new_user
from zerver.lib.email_validation import email_allowed_for_realm, validate_email_not_already_in_realm
from zerver.lib.onboarding import send_initial_realm_messages, setup_realm_internal_bots
from zerver.lib.pysa import mark_sanitized
from zerver.lib.send_email import FromAddress, send_email
from zerver.lib.sessions import get_expirable_session_var
from zerver.lib.subdomains import get_subdomain, is_root_domain_available
from zerver.lib.timezone import get_all_timezones
from zerver.lib.url_encoding import add_query_to_redirect_url
from zerver.lib.users import get_accounts_for_email
from zerver.lib.zephyr import compute_mit_user_fullname
from zerver.models import (
DisposableEmailError,
DomainNotAllowedForRealmError,
EmailContainsPlusError,
MultiuseInvite,
Realm,
Stream,
UserProfile,
get_default_stream_groups,
get_realm,
get_source_profile,
get_user_by_delivery_email,
name_changes_disabled,
)
from zerver.views.auth import (
create_preregistration_user,
finish_desktop_flow,
finish_mobile_flow,
get_safe_redirect_to,
redirect_and_log_into_subdomain,
redirect_to_deactivation_notice,
)
from zproject.backends import (
ExternalAuthResult,
ZulipLDAPAuthBackend,
ZulipLDAPExceptionNoMatchingLDAPUser,
any_social_backend_enabled,
email_auth_enabled,
email_belongs_to_ldap,
ldap_auth_enabled,
password_auth_enabled,
)
def check_prereg_key_and_redirect(request: HttpRequest, confirmation_key: str) -> HttpResponse:
    """Validate a preregistration confirmation link and show the interstitial.

    Renders an error page when the key is unknown, of an unexpected type,
    revoked, or expired; otherwise renders the template that forwards the
    user (carrying the key) to /accounts/register.
    """
    valid_types = (Confirmation.USER_REGISTRATION, Confirmation.INVITATION,
                   Confirmation.REALM_CREATION)
    confirmation = Confirmation.objects.filter(confirmation_key=confirmation_key).first()
    if confirmation is None or confirmation.type not in valid_types:
        error = ConfirmationKeyException(ConfirmationKeyException.DOES_NOT_EXIST)
        return render_confirmation_key_error(request, error)
    if confirmation.content_object.status == confirmation_settings.STATUS_REVOKED:
        return render(request, "zerver/confirmation_link_expired_error.html")
    try:
        # Check validity/expiry without activating the object yet.
        get_object_from_key(confirmation_key, confirmation.type, activate_object=False)
    except ConfirmationKeyException as exception:
        return render_confirmation_key_error(request, exception)
    # confirm_preregistrationuser.html just extracts the confirmation_key
    # (and GET parameters) and redirects to /accounts/register, so that the
    # user can enter their information on a cleaner URL.
    context = {'key': confirmation_key,
               'full_name': request.GET.get("full_name", None)}
    return render(request, 'confirmation/confirm_preregistrationuser.html',
                  context=context)
@require_post
def accounts_register(request: HttpRequest) -> HttpResponse:
    """Process the registration form POST (/accounts/register).

    Reached from the confirmation-link interstitial (see
    check_prereg_key_and_redirect).  Looks up the PreregistrationUser via
    the posted `key`, re-validates the email against the target realm,
    builds/validates the RegistrationForm (pre-filling the full name from
    LDAP, Hesiod, or the invitation where possible), and on a valid form
    creates (or activates) the account -- including creating a brand-new
    realm when prereg_user.realm_creation is set -- and logs the user in.
    On an invalid or incomplete form, re-renders zerver/register.html.
    """
    try:
        key = request.POST.get('key', default='')
        confirmation = Confirmation.objects.get(confirmation_key=key)
    except Confirmation.DoesNotExist:
        return render(request, "zerver/confirmation_link_expired_error.html")
    prereg_user = confirmation.content_object
    if prereg_user.status == confirmation_settings.STATUS_REVOKED:
        return render(request, "zerver/confirmation_link_expired_error.html")
    email = prereg_user.email
    realm_creation = prereg_user.realm_creation
    password_required = prereg_user.password_required
    role = get_role_for_new_user(prereg_user.invited_as, realm_creation)
    try:
        validators.validate_email(email)
    except ValidationError:
        return render(request, "zerver/invalid_email.html", context={"invalid_email": True})
    if realm_creation:
        # For creating a new realm, there is no existing realm or domain
        realm = None
    else:
        # The confirmation link must be used on the subdomain of the realm
        # it was created for.
        if get_subdomain(request) != prereg_user.realm.string_id:
            return render_confirmation_key_error(
                request, ConfirmationKeyException(ConfirmationKeyException.DOES_NOT_EXIST))
        realm = prereg_user.realm
        try:
            email_allowed_for_realm(email, realm)
        except DomainNotAllowedForRealmError:
            return render(request, "zerver/invalid_email.html",
                          context={"realm_name": realm.name, "closed_domain": True})
        except DisposableEmailError:
            return render(request, "zerver/invalid_email.html",
                          context={"realm_name": realm.name, "disposable_emails_not_allowed": True})
        except EmailContainsPlusError:
            return render(request, "zerver/invalid_email.html",
                          context={"realm_name": realm.name, "email_contains_plus": True})
        if realm.deactivated:
            # The user is trying to register for a deactivated realm. Advise them to
            # contact support.
            return redirect_to_deactivation_notice()
        try:
            validate_email_not_already_in_realm(realm, email)
        except ValidationError:
            # Already registered in this realm: send them to login instead.
            view_url = reverse('django.contrib.auth.views.login')
            redirect_url = add_query_to_redirect_url(view_url, 'email=' + urllib.parse.quote_plus(email))
            return HttpResponseRedirect(redirect_url)
    name_validated = False
    full_name = None
    require_ldap_password = False
    if request.POST.get('from_confirmation'):
        # First hit after clicking the confirmation link: build a (possibly
        # pre-filled) form rather than processing a user submission.
        try:
            del request.session['authenticated_full_name']
        except KeyError:
            pass
        ldap_full_name = None
        if settings.POPULATE_PROFILE_VIA_LDAP:
            # If the user can be found in LDAP, we'll take the full name from the directory,
            # and further down create a form pre-filled with it.
            for backend in get_backends():
                if isinstance(backend, LDAPBackend):
                    try:
                        ldap_username = backend.django_to_ldap_username(email)
                    except ZulipLDAPExceptionNoMatchingLDAPUser:
                        logging.warning("New account email %s could not be found in LDAP", email)
                        break
                    # Note that this `ldap_user` object is not a
                    # `ZulipLDAPUser` with a `Realm` attached, so
                    # calling `.populate_user()` on it will crash.
                    # This is OK, since we're just accessing this user
                    # to extract its name.
                    #
                    # TODO: We should potentially be accessing this
                    # user to sync its initial avatar and custom
                    # profile fields as well, if we indeed end up
                    # creating a user account through this flow,
                    # rather than waiting until `manage.py
                    # sync_ldap_user_data` runs to populate it.
                    ldap_user = _LDAPUser(backend, ldap_username)
                    try:
                        ldap_full_name = backend.get_mapped_name(ldap_user)
                    except TypeError:
                        break
                    # Check whether this is ZulipLDAPAuthBackend,
                    # which is responsible for authentication and
                    # requires that LDAP accounts enter their LDAP
                    # password to register, or ZulipLDAPUserPopulator,
                    # which just populates UserProfile fields (no auth).
                    require_ldap_password = isinstance(backend, ZulipLDAPAuthBackend)
                    break
        if ldap_full_name:
            # We don't use initial= here, because if the form is
            # complete (that is, no additional fields need to be
            # filled out by the user) we want the form to validate,
            # so they can be directly registered without having to
            # go through this interstitial.
            form = RegistrationForm({'full_name': ldap_full_name},
                                    realm_creation=realm_creation)
            request.session['authenticated_full_name'] = ldap_full_name
            name_validated = True
        elif realm is not None and realm.is_zephyr_mirror_realm:
            # For MIT users, we can get an authoritative name from Hesiod.
            # Technically we should check that this is actually an MIT
            # realm, but we can cross that bridge if we ever get a non-MIT
            # zephyr mirroring realm.
            hesiod_name = compute_mit_user_fullname(email)
            form = RegistrationForm(
                initial={'full_name': hesiod_name if "@" not in hesiod_name else ""},
                realm_creation=realm_creation)
            name_validated = True
        elif prereg_user.full_name:
            if prereg_user.full_name_validated:
                request.session['authenticated_full_name'] = prereg_user.full_name
                name_validated = True
                form = RegistrationForm({'full_name': prereg_user.full_name},
                                        realm_creation=realm_creation)
            else:
                form = RegistrationForm(initial={'full_name': prereg_user.full_name},
                                        realm_creation=realm_creation)
        elif 'full_name' in request.POST:
            form = RegistrationForm(
                initial={'full_name': request.POST.get('full_name')},
                realm_creation=realm_creation,
            )
        else:
            form = RegistrationForm(realm_creation=realm_creation)
    else:
        # Actual form submission by the user.
        postdata = request.POST.copy()
        if name_changes_disabled(realm):
            # If we populate profile information via LDAP and we have a
            # verified name from you on file, use that. Otherwise, fall
            # back to the full name in the request.
            try:
                postdata.update({'full_name': request.session['authenticated_full_name']})
                name_validated = True
            except KeyError:
                pass
        form = RegistrationForm(postdata, realm_creation=realm_creation)
    if not (password_auth_enabled(realm) and password_required):
        form['password'].field.required = False
    if form.is_valid():
        if password_auth_enabled(realm) and form['password'].field.required:
            password = form.cleaned_data['password']
        else:
            # If the user wasn't prompted for a password when
            # completing the authentication form (because they're
            # signing up with SSO and no password is required), set
            # the password field to `None` (Which causes Django to
            # create an unusable password).
            password = None
        if realm_creation:
            string_id = form.cleaned_data['realm_subdomain']
            realm_name = form.cleaned_data['realm_name']
            realm = do_create_realm(string_id, realm_name)
            setup_realm_internal_bots(realm)
        # At this point we have a realm either way (existing or just created).
        assert(realm is not None)
        full_name = form.cleaned_data['full_name']
        default_stream_group_names = request.POST.getlist('default_stream_group')
        default_stream_groups = lookup_default_stream_groups(default_stream_group_names, realm)
        timezone = ""
        if 'timezone' in request.POST and request.POST['timezone'] in get_all_timezones():
            timezone = request.POST['timezone']
        if 'source_realm' in request.POST and request.POST["source_realm"] != "on":
            # Import settings/avatar from the user's account in another realm.
            source_profile = get_source_profile(email, request.POST["source_realm"])
        else:
            source_profile = None
        if not realm_creation:
            try:
                existing_user_profile: Optional[UserProfile] = get_user_by_delivery_email(email, realm)
            except UserProfile.DoesNotExist:
                existing_user_profile = None
        else:
            existing_user_profile = None
        user_profile: Optional[UserProfile] = None
        return_data: Dict[str, bool] = {}
        if ldap_auth_enabled(realm):
            # If the user was authenticated using an external SSO
            # mechanism like Google or GitHub auth, then authentication
            # will have already been done before creating the
            # PreregistrationUser object with password_required=False, and
            # so we don't need to worry about passwords.
            #
            # If instead the realm is using EmailAuthBackend, we will
            # set their password above.
            #
            # But if the realm is using LDAPAuthBackend, we need to verify
            # their LDAP password (which will, as a side effect, create
            # the user account) here using authenticate.
            # prereg_user.realm_creation carries the information about whether
            # we're in realm creation mode, and the ldap flow will handle
            # that and create the user with the appropriate parameters.
            user_profile = authenticate(request=request,
                                        username=email,
                                        password=password,
                                        realm=realm,
                                        prereg_user=prereg_user,
                                        return_data=return_data)
            if user_profile is None:
                can_use_different_backend = email_auth_enabled(realm) or any_social_backend_enabled(realm)
                if settings.LDAP_APPEND_DOMAIN:
                    # In LDAP_APPEND_DOMAIN configurations, we don't allow making a non-ldap account
                    # if the email matches the ldap domain.
                    can_use_different_backend = can_use_different_backend and (
                        not email_belongs_to_ldap(realm, email))
                if return_data.get("no_matching_ldap_user") and can_use_different_backend:
                    # If both the LDAP and Email or Social auth backends are
                    # enabled, and there's no matching user in the LDAP
                    # directory then the intent is to create a user in the
                    # realm with their email outside the LDAP organization
                    # (with e.g. a password stored in the Zulip database,
                    # not LDAP). So we fall through and create the new
                    # account.
                    pass
                else:
                    # TODO: This probably isn't going to give a
                    # user-friendly error message, but it doesn't
                    # particularly matter, because the registration form
                    # is hidden for most users.
                    view_url = reverse('django.contrib.auth.views.login')
                    query = 'email=' + urllib.parse.quote_plus(email)
                    redirect_url = add_query_to_redirect_url(view_url, query)
                    return HttpResponseRedirect(redirect_url)
            elif not realm_creation:
                # Since we'll have created a user, we now just log them in.
                return login_and_go_to_home(request, user_profile)
            else:
                # With realm_creation=True, we're going to return further down,
                # after finishing up the creation process.
                pass
        if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
            # Reactivate a placeholder "mirror dummy" account in place
            # instead of creating a fresh UserProfile.
            user_profile = existing_user_profile
            do_activate_user(user_profile, acting_user=user_profile)
            do_change_password(user_profile, password)
            do_change_full_name(user_profile, full_name, user_profile)
            do_set_user_display_setting(user_profile, 'timezone', timezone)
            # TODO: When we clean up the `do_activate_user` code path,
            # make it respect invited_as_admin / is_realm_admin.
        if user_profile is None:
            user_profile = do_create_user(email, password, realm, full_name,
                                          prereg_user=prereg_user,
                                          role=role,
                                          tos_version=settings.TOS_VERSION,
                                          timezone=timezone,
                                          newsletter_data={"IP": request.META['REMOTE_ADDR']},
                                          default_stream_groups=default_stream_groups,
                                          source_profile=source_profile,
                                          realm_creation=realm_creation,
                                          acting_user=None)
        if realm_creation:
            bulk_add_subscriptions([realm.signup_notifications_stream], [user_profile])
            send_initial_realm_messages(realm)
            # Because for realm creation, registration happens on the
            # root domain, we need to log them into the subdomain for
            # their new realm.
            return redirect_and_log_into_subdomain(ExternalAuthResult(user_profile=user_profile,
                                                                      data_dict={'is_realm_creation': True}))
        # This dummy_backend check below confirms the user is
        # authenticating to the correct subdomain.
        auth_result = authenticate(username=user_profile.delivery_email,
                                   realm=realm,
                                   return_data=return_data,
                                   use_dummy_backend=True)
        if return_data.get('invalid_subdomain'):
            # By construction, this should never happen.
            logging.error(
                "Subdomain mismatch in registration %s: %s",
                realm.subdomain, user_profile.delivery_email,
            )
            return redirect('/')
        return login_and_go_to_home(request, auth_result)
    return render(
        request,
        'zerver/register.html',
        context={'form': form,
                 'email': email,
                 'key': key,
                 'full_name': request.session.get('authenticated_full_name', None),
                 'lock_name': name_validated and name_changes_disabled(realm),
                 # password_auth_enabled is normally set via our context processor,
                 # but for the registration form, there is no logged in user yet, so
                 # we have to set it here.
                 'creating_new_team': realm_creation,
                 'password_required': password_auth_enabled(realm) and password_required,
                 'require_ldap_password': require_ldap_password,
                 'password_auth_enabled': password_auth_enabled(realm),
                 'root_domain_available': is_root_domain_available(),
                 'default_stream_groups': [] if realm is None else get_default_stream_groups(realm),
                 'accounts': get_accounts_for_email(email),
                 'MAX_REALM_NAME_LENGTH': str(Realm.MAX_REALM_NAME_LENGTH),
                 'MAX_NAME_LENGTH': str(UserProfile.MAX_NAME_LENGTH),
                 'MAX_PASSWORD_LENGTH': str(form.MAX_PASSWORD_LENGTH),
                 'MAX_REALM_SUBDOMAIN_LENGTH': str(Realm.MAX_REALM_SUBDOMAIN_LENGTH),
                 },
    )
def login_and_go_to_home(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    """Log the user in and send them to the home page, honoring any
    pending mobile/desktop OTP flow stashed in the session."""
    # Both session vars are fetched (and deleted) up front, regardless of
    # which flow, if any, we end up finishing.
    mobile_flow_otp = get_expirable_session_var(
        request.session, 'registration_mobile_flow_otp', delete=True)
    desktop_flow_otp = get_expirable_session_var(
        request.session, 'registration_desktop_flow_otp', delete=True)
    if mobile_flow_otp is not None:
        return finish_mobile_flow(request, user_profile, mobile_flow_otp)
    if desktop_flow_otp is not None:
        return finish_desktop_flow(request, user_profile, desktop_flow_otp)
    do_login(request, user_profile)
    # Using 'mark_sanitized' to work around false positive where Pysa thinks
    # that 'user_profile' is user-controlled
    home_url = mark_sanitized(user_profile.realm.uri) + reverse('zerver.views.home.home')
    return HttpResponseRedirect(home_url)
def prepare_activation_url(email: str, request: HttpRequest,
                           realm_creation: bool=False,
                           streams: Optional[List[Stream]]=None,
                           invited_as: Optional[int]=None) -> str:
    """
    Create a PreregistrationUser for the given e-mail address and return
    the confirmation-link URL the user must visit to complete their
    registration (or realm creation).
    """
    prereg_user = create_preregistration_user(email, request, realm_creation)
    if streams is not None:
        prereg_user.streams.set(streams)
    if invited_as is not None:
        prereg_user.invited_as = invited_as
        prereg_user.save()
    confirmation_type = (Confirmation.REALM_CREATION if realm_creation
                         else Confirmation.USER_REGISTRATION)
    activation_url = create_confirmation_link(prereg_user, confirmation_type)
    if settings.DEVELOPMENT and realm_creation:
        # In development, stash the key in the session so tooling/tests
        # can retrieve it without parsing outbound email.
        confirmation_key = activation_url.split('/')[-1]
        request.session['confirmation_key'] = {'confirmation_key': confirmation_key}
    return activation_url
def send_confirm_registration_email(email: str, activation_url: str, language: str,
                                    realm: Optional[Realm]=None) -> None:
    """Email the registration-confirmation link to the given address."""
    send_email(
        'zerver/emails/confirm_registration',
        to_emails=[email],
        from_address=FromAddress.tokenized_no_reply_address(),
        language=language,
        context={'activate_url': activation_url},
        realm=realm,
    )
def redirect_to_email_login_url(email: str) -> HttpResponseRedirect:
    """Redirect to the login page, flagging the email as already registered."""
    query = 'already_registered=' + urllib.parse.quote_plus(email)
    login_url = reverse('django.contrib.auth.views.login')
    return HttpResponseRedirect(add_query_to_redirect_url(login_url, query))
def create_realm(request: HttpRequest, creation_key: Optional[str]=None) -> HttpResponse:
    """Handle the "create a new organization" flow.

    GET renders the realm-creation form.  POST validates it and sends a
    confirmation email -- or, when a presumed-email-valid command-line key
    was supplied, redirects straight to the activation URL.
    """
    try:
        key_record = validate_key(creation_key)
    except RealmCreationKey.Invalid:
        return render(request, "zerver/realm_creation_failed.html",
                      context={'message': _('The organization creation link has expired'
                                            ' or is not valid.')})
    # Without OPEN_REALM_CREATION, a valid creation key is mandatory.
    if not settings.OPEN_REALM_CREATION and key_record is None:
        return render(request, "zerver/realm_creation_failed.html",
                      context={'message': _('New organization creation disabled')})
    # When settings.OPEN_REALM_CREATION is enabled, anyone can create a new realm,
    # with a few restrictions on their email address.
    if request.method != 'POST':
        form = RealmCreationForm()
    else:
        form = RealmCreationForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data['email']
            activation_url = prepare_activation_url(email, request, realm_creation=True)
            if key_record is not None and key_record.presume_email_valid:
                # The user has a token created from the server command line;
                # skip confirming the email is theirs, taking their word for it.
                # This is essential on first install if the admin hasn't stopped
                # to configure outbound email up front, or it isn't working yet.
                key_record.delete()
                return HttpResponseRedirect(activation_url)
            try:
                send_confirm_registration_email(email, activation_url, request.LANGUAGE_CODE)
            except smtplib.SMTPException as e:
                logging.error('Error in create_realm: %s', str(e))
                return HttpResponseRedirect("/config-error/smtp")
            if key_record is not None:
                key_record.delete()
            return HttpResponseRedirect(reverse('new_realm_send_confirm', kwargs={'email': email}))
    return render(request,
                  'zerver/create_realm.html',
                  context={'form': form, 'current_url': request.get_full_path},
                  )
def accounts_home(request: HttpRequest, multiuse_object_key: str="",
                  multiuse_object: Optional[MultiuseInvite]=None) -> HttpResponse:
    """Serve the signup page for the current realm.

    GET renders the signup form; POST validates the email and, on
    success, sends the confirmation email.  When `multiuse_object` is
    provided (a multiuse invitation), its realm, streams, and invited-as
    role override the subdomain-derived defaults.
    """
    try:
        realm = get_realm(get_subdomain(request))
    except Realm.DoesNotExist:
        # No realm on this subdomain; help the user find their organization.
        return HttpResponseRedirect(reverse('zerver.views.registration.find_account'))
    if realm.deactivated:
        return redirect_to_deactivation_notice()
    from_multiuse_invite = False
    streams_to_subscribe = None
    invited_as = None
    if multiuse_object:
        # Multiuse invitations carry their own realm/streams/role.
        realm = multiuse_object.realm
        streams_to_subscribe = multiuse_object.streams.all()
        from_multiuse_invite = True
        invited_as = multiuse_object.invited_as
    if request.method == 'POST':
        form = HomepageForm(request.POST, realm=realm, from_multiuse_invite=from_multiuse_invite)
        if form.is_valid():
            email = form.cleaned_data['email']
            activation_url = prepare_activation_url(email, request, streams=streams_to_subscribe,
                                                    invited_as=invited_as)
            try:
                send_confirm_registration_email(email, activation_url, request.LANGUAGE_CODE, realm=realm)
            except smtplib.SMTPException as e:
                logging.error('Error in accounts_home: %s', str(e))
                return HttpResponseRedirect("/config-error/smtp")
            return HttpResponseRedirect(reverse('signup_send_confirm', kwargs={'email': email}))
        # Form was invalid; if the reason is that the address is already
        # registered, send the user to login instead of re-showing the form.
        # NOTE(review): raw request.POST['email'] would raise (500) if the
        # POST lacked an 'email' field entirely -- presumably HomepageForm
        # always has one; confirm against the form definition.
        email = request.POST['email']
        try:
            validate_email_not_already_in_realm(realm, email)
        except ValidationError:
            return redirect_to_email_login_url(email)
    else:
        form = HomepageForm(realm=realm)
    context = login_context(request)
    context.update({'form': form, 'current_url': request.get_full_path,
                    'multiuse_object_key': multiuse_object_key,
                    'from_multiuse_invite': from_multiuse_invite})
    return render(request, 'zerver/accounts_home.html', context=context)
def accounts_home_from_multiuse_invite(request: HttpRequest, confirmation_key: str) -> HttpResponse:
    """Serve the signup page for a multiuse invitation link.

    An invalid/expired key is tolerated (multiuse_object stays None) when
    the realm does not require invitations; this is required for oAuth2.
    """
    try:
        multiuse_object = get_object_from_key(confirmation_key, Confirmation.MULTIUSE_INVITE)
    except ConfirmationKeyException as exc:
        multiuse_object = None
        realm = get_realm_from_request(request)
        if realm is None or realm.invite_required:
            return render_confirmation_key_error(request, exc)
    return accounts_home(request, multiuse_object_key=confirmation_key,
                         multiuse_object=multiuse_object)
def generate_204(request: HttpRequest) -> HttpResponse:
    """Return an empty HTTP 204 (No Content) response."""
    response = HttpResponse(content=None, status=204)
    return response
def find_account(request: HttpRequest) -> HttpResponse:
    """Help a user find the Zulip organizations their email belongs to.

    POST: email each matching active, non-bot account in a non-deactivated
    realm a "find your team" message, then redirect back with the
    submitted addresses in the query string.  GET: show the form, echoing
    any previously submitted addresses that pass email validation.
    """
    from zerver.context_processors import common_context
    url = reverse('zerver.views.registration.find_account')
    emails: List[str] = []
    if request.method == 'POST':
        form = FindMyTeamForm(request.POST)
        if form.is_valid():
            emails = form.cleaned_data['emails']
            # Django doesn't support __iexact__in lookup with EmailField, so we have
            # to use Qs to get around that without needing to do multiple queries.
            emails_q = Q()
            for email in emails:
                emails_q |= Q(delivery_email__iexact=email)
            matching_users = UserProfile.objects.filter(
                emails_q, is_active=True, is_bot=False, realm__deactivated=False)
            for user in matching_users:
                context = common_context(user)
                context.update({'email': user.delivery_email})
                send_email('zerver/emails/find_team', to_user_ids=[user.id], context=context,
                           from_address=FromAddress.SUPPORT)
            # Note: Show all the emails in the result otherwise this
            # feature can be used to ascertain which email addresses
            # are associated with Zulip.
            data = urllib.parse.urlencode({'emails': ','.join(emails)})
            return redirect(add_query_to_redirect_url(url, data))
    else:
        form = FindMyTeamForm()
        result = request.GET.get('emails')
        # The below validation is perhaps unnecessary, in that we
        # shouldn't get able to get here with an invalid email unless
        # the user hand-edits the URLs.
        if result:
            for email in result.split(','):
                try:
                    validators.validate_email(email)
                except ValidationError:
                    continue
                emails.append(email)
    return render(request,
                  'zerver/find_account.html',
                  context={'form': form, 'current_url': lambda: url,
                           'emails': emails})
def realm_redirect(request: HttpRequest) -> HttpResponse:
    """Show a form asking for an organization's subdomain and, on a valid
    POST, redirect the user to that realm (honoring a safe ?next= target)."""
    if request.method != 'POST':
        form = RealmRedirectForm()
    else:
        form = RealmRedirectForm(request.POST)
        if form.is_valid():
            realm = get_realm(form.cleaned_data['subdomain'])
            next_param = request.GET.get("next", "")
            return HttpResponseRedirect(get_safe_redirect_to(next_param, realm.uri))
    return render(request, 'zerver/realm_redirect.html', context={'form': form})
| 46.790274 | 109 | 0.638625 |
58c07acc7c0d516797a1ef890db449ab8c9f9ef7 | 11,327 | py | Python | zhusuan/distributions/base.py | ycguo028/zhusuan | 244536d93c55e486a3587e53229f0a7e1b19bef0 | [
"MIT"
] | 4 | 2017-05-23T20:18:41.000Z | 2020-03-03T15:00:53.000Z | zhusuan/distributions/base.py | ycguo028/zhusuan | 244536d93c55e486a3587e53229f0a7e1b19bef0 | [
"MIT"
] | null | null | null | zhusuan/distributions/base.py | ycguo028/zhusuan | 244536d93c55e486a3587e53229f0a7e1b19bef0 | [
"MIT"
] | 2 | 2018-11-27T02:43:22.000Z | 2019-11-23T18:27:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
from zhusuan.utils import add_name_scope
__all__ = [
'Distribution',
]
class Distribution(object):
"""
The :class:`Distribution` class is the base class for various probabilistic
distributions which support batch inputs, generating batches of samples and
evaluate probabilities at batches of given values.
The typical input shape for a :class:`Distribution` is like
``batch_shape + input_shape``. where ``input_shape`` represents the shape
of non-batch input parameter, :attr:`batch_shape` represents how many
independent inputs are fed into the distribution.
Samples generated are of shape
``([n_samples]+ )batch_shape + value_shape``. The first additional axis
is omitted only when passed `n_samples` is None (by default), in which
case one sample is generated. :attr:`value_shape` is the non-batch value
shape of the distribution. For a univariate distribution, its
:attr:`value_shape` is [].
There are cases where a batch of random variables are grouped into a
single event so that their probabilities should be computed together. This
is achieved by setting `group_event_ndims` argument, which defaults to 0.
The last `group_event_ndims` number of axes in :attr:`batch_shape` are
grouped into a single event. For example,
``Normal(..., group_event_ndims=1)`` will set the last axis of its
:attr:`batch_shape` to a single event, i.e., a multivariate Normal with
identity covariance matrix.
When evaluating probabilities at given values, the given Tensor should be
broadcastable to shape ``(... + )batch_shape + value_shape``. The returned
Tensor has shape ``(... + )batch_shape[:-group_event_ndims]``.
.. seealso::
:doc:`/concepts`
For both, the parameter `dtype` represents type of samples. For discrete,
can be set by user. For continuous, automatically determined from parameter
types.
The value type of `prob` and `log_prob` will be `param_dtype` which is
deduced from the parameter(s) when initializating. And `dtype` must be
among `int16`, `int32`, `int64`, `float16`, `float32` and `float64`.
When two or more parameters are tensors and they have different type,
`TypeError` will be raised.
:param dtype: The value type of samples from the distribution.
:param param_dtype: The parameter(s) type of the distribution.
:param is_continuous: Whether the distribution is continuous.
:param is_reparameterized: A bool. Whether the gradients of samples can
and are allowed to propagate back into inputs, using the
reparametrization trick from (Kingma, 2013).
:param group_event_ndims: A 0-D `int32` Tensor representing the number of
dimensions in :attr:`batch_shape` (counted from the end) that are
grouped into a single event, so that their probabilities are calculated
together. Default is 0, which means a single value is an event.
See above for more detailed explanation.
"""
    def __init__(self,
                 dtype,
                 param_dtype,
                 is_continuous,
                 is_reparameterized,
                 group_event_ndims=0):
        """Initialize the base distribution; see the class docstring for
        the meaning of each argument."""
        self._dtype = dtype
        self._param_dtype = param_dtype
        self._is_continuous = is_continuous
        self._is_reparameterized = is_reparameterized
        if isinstance(group_event_ndims, int):
            # Python int: validate eagerly.
            if group_event_ndims < 0:
                raise ValueError("group_event_ndims must be non-negative.")
            self._group_event_ndims = group_event_ndims
        else:
            # Tensor (or convertible): validation has to happen at graph
            # execution time, via assert ops wired in as control
            # dependencies of the stored value.
            group_event_ndims = tf.convert_to_tensor(
                group_event_ndims, tf.int32)
            _assert_rank_op = tf.assert_rank(
                group_event_ndims, 0,
                message="group_event_ndims should be a scalar (0-D Tensor).")
            _assert_nonnegative_op = tf.assert_greater_equal(
                group_event_ndims, 0,
                message="group_event_ndims must be non-negative.")
            with tf.control_dependencies([_assert_rank_op,
                                          _assert_nonnegative_op]):
                self._group_event_ndims = tf.identity(group_event_ndims)
    @property
    def dtype(self):
        """The value type of samples drawn from the distribution."""
        return self._dtype
    @property
    def param_dtype(self):
        """The type of the distribution's parameter(s), deduced at init."""
        return self._param_dtype
    @property
    def is_continuous(self):
        """Whether the distribution is continuous (vs. discrete)."""
        return self._is_continuous
    @property
    def is_reparameterized(self):
        """
        Whether the gradients of samples can and are allowed to propagate
        back into inputs, using the reparametrization trick from
        (Kingma, 2013).
        """
        return self._is_reparameterized
    @property
    def group_event_ndims(self):
        """
        The number of dimensions in :attr:`batch_shape` (counted from the
        end) that are grouped into a single event, so that their
        probabilities are calculated together.  Either a Python int or a
        0-D `int32` Tensor, as passed at construction.  See `Distribution`
        for a more detailed explanation.
        """
        return self._group_event_ndims
    @property
    def value_shape(self):
        """
        The non-batch value shape of a distribution, as a 1-D `int32`
        Tensor. For batch inputs, the shape of a generated sample is
        ``batch_shape + value_shape``.
        """
        # Prefer the statically known shape when fully defined; otherwise
        # fall back to the dynamic Tensor computed by the subclass.
        static_value_shape = self.get_value_shape()
        if static_value_shape.is_fully_defined():
            return tf.convert_to_tensor(static_value_shape, dtype=tf.int32)
        return self._value_shape()
    def _value_shape(self):
        """
        Private method for subclasses to rewrite the :attr:`value_shape`
        property (the dynamic-shape fallback).
        """
        raise NotImplementedError()
    def get_value_shape(self):
        """
        Static :attr:`value_shape`, computed without running any graph ops.
        :return: A `TensorShape` instance (possibly partially defined).
        """
        return self._get_value_shape()
    def _get_value_shape(self):
        """
        Private method for subclasses to rewrite the :meth:`get_value_shape`
        method (the static-shape computation).
        """
        raise NotImplementedError()
    @property
    def batch_shape(self):
        """
        The shape showing how many independent inputs (which we call
        batches) are fed into the distribution, as a 1-D `int32` Tensor.
        For batch inputs, the shape of a generated sample is
        ``batch_shape + value_shape``.
        We borrow this concept from `tf.contrib.distributions`.
        """
        # Prefer the statically known shape when fully defined; otherwise
        # fall back to the dynamic Tensor computed by the subclass.
        static_batch_shape = self.get_batch_shape()
        if static_batch_shape.is_fully_defined():
            return tf.convert_to_tensor(static_batch_shape, dtype=tf.int32)
        return self._batch_shape()
def _batch_shape(self):
"""
Private method for subclasses to rewrite the :attr:`batch_shape`
property.
"""
raise NotImplementedError()
def get_batch_shape(self):
    """Static :attr:`batch_shape`.

    :return: A `TensorShape` instance.
    """
    return self._get_batch_shape()
def _get_batch_shape(self):
"""
Private method for subclasses to rewrite the :meth:`get_batch_shape`
method.
"""
raise NotImplementedError()
@add_name_scope
def sample(self, n_samples=None):
    """
    sample(n_samples=None)

    Return samples from the distribution. When `n_samples` is None (by
    default), one sample of shape ``batch_shape + value_shape`` is
    generated. For a scalar `n_samples`, the returned Tensor has a new
    sample dimension with size `n_samples` inserted at ``axis=0``, i.e.,
    the shape of samples is ``[n_samples] + batch_shape + value_shape``.

    :param n_samples: A 0-D `int32` Tensor or None. How many independent
        samples to draw from the distribution.

    :return: A Tensor of samples.
    """
    if n_samples is None:
        # Draw a single sample, then drop the leading sample axis so the
        # result has shape ``batch_shape + value_shape``.
        samples = self._sample(n_samples=1)
        return tf.squeeze(samples, axis=0)
    elif isinstance(n_samples, int):
        # A Python int needs no graph-level rank validation.
        return self._sample(n_samples)
    else:
        # Tensor input: validate at graph-execution time that it is a
        # scalar before sampling; the control dependency forces the
        # assertion to run first.
        n_samples = tf.convert_to_tensor(n_samples, dtype=tf.int32)
        _assert_rank_op = tf.assert_rank(
            n_samples, 0,
            message="n_samples should be a scalar (0-D Tensor).")
        with tf.control_dependencies([_assert_rank_op]):
            samples = self._sample(n_samples)
        return samples
def _sample(self, n_samples):
"""
Private method for subclasses to rewrite the :meth:`sample` method.
"""
raise NotImplementedError()
def _check_input_shape(self, given):
    """Convert `given` to a tensor of this distribution's dtype and, when
    all static shapes are known, verify it can broadcast to
    ``batch_shape + value_shape``.  Returns the converted tensor.
    """
    given = tf.convert_to_tensor(given, dtype=self.dtype)
    err_msg = "The given argument should be able to broadcast to " \
              "match batch_shape + value_shape of the distribution."
    # Only validate statically when every shape involved is known;
    # otherwise the check is deferred to runtime ops downstream.
    if (given.get_shape() and self.get_batch_shape() and
            self.get_value_shape()):
        static_sample_shape = tf.TensorShape(
            self.get_batch_shape().as_list() +
            self.get_value_shape().as_list())
        try:
            tf.broadcast_static_shape(given.get_shape(),
                                      static_sample_shape)
        except ValueError:
            # Re-raise with the concrete shapes for easier debugging.
            raise ValueError(
                err_msg + " ({} vs. {} + {})".format(
                    given.get_shape(), self.get_batch_shape(),
                    self.get_value_shape()))
    return given
@add_name_scope
def log_prob(self, given):
    """
    log_prob(given)

    Compute log probability density (mass) function at `given` value.

    :param given: A Tensor. The value at which to evaluate log probability
        density (mass) function. Must be able to broadcast to have a shape
        of ``(... + )batch_shape + value_shape``.

    :return: A Tensor of shape
        ``(... + )batch_shape[:-group_event_ndims]``.
    """
    given = self._check_input_shape(given)
    log_p = self._log_prob(given)
    # Sum log densities over the last `group_event_ndims` batch axes so
    # grouped events contribute a single joint log-probability.
    return tf.reduce_sum(log_p, tf.range(-self._group_event_ndims, 0))
@add_name_scope
def prob(self, given):
    """
    prob(given)

    Compute probability density (mass) function at `given` value.

    :param given: A Tensor. The value at which to evaluate probability
        density (mass) function. Must be able to broadcast to have a shape
        of ``(... + )batch_shape + value_shape``.

    :return: A Tensor of shape
        ``(... + )batch_shape[:-group_event_ndims]``.
    """
    given = self._check_input_shape(given)
    p = self._prob(given)
    # Multiply densities over the last `group_event_ndims` batch axes,
    # the probability-space counterpart of the sum in log_prob().
    return tf.reduce_prod(p, tf.range(-self._group_event_ndims, 0))
def _log_prob(self, given):
"""
Private method for subclasses to rewrite the :meth:`log_prob` method.
"""
raise NotImplementedError()
def _prob(self, given):
"""
Private method for subclasses to rewrite the :meth:`prob` method.
"""
raise NotImplementedError()
| 37.382838 | 79 | 0.632648 |
d931996119597182cc91df651148a269f7a8541f | 7,601 | py | Python | dvc/fs/fsspec_wrapper.py | discdiver/dvc | d4861d535ab868e59a5b602846ef70ebe6526b75 | [
"Apache-2.0"
] | 1 | 2022-03-16T13:27:40.000Z | 2022-03-16T13:27:40.000Z | dvc/fs/fsspec_wrapper.py | InEase/dvc | e60ab00f5126912ca58da8df39ce6277fb30023d | [
"Apache-2.0"
] | null | null | null | dvc/fs/fsspec_wrapper.py | InEase/dvc | e60ab00f5126912ca58da8df39ce6277fb30023d | [
"Apache-2.0"
] | null | null | null | import os
import shutil
from typing import IO, TYPE_CHECKING, Any, Dict, Iterator, Optional, overload
from funcy import cached_property
from tqdm.utils import CallbackIOWrapper
from ._callback import DEFAULT_CALLBACK
from .base import FileSystem
FSPath = str
AnyFSPath = str
if TYPE_CHECKING:
from typing_extensions import Literal
# An info() entry, might evolve to a TypedDict
# in the future (e.g for properly type 'size' etc).
Entry = Dict[str, Any]
# pylint: disable=no-member
class FSSpecWrapper(FileSystem):
    """Adapter exposing an fsspec filesystem through DVC's FileSystem API.

    Subclasses provide the concrete fsspec instance via the :attr:`fs`
    cached property and per-protocol credentials via
    :meth:`_prepare_credentials`.
    """

    TRAVERSE_PREFIX_LEN = 2

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Never reuse cached fsspec instances across wrappers: each wrapper
        # may carry different credentials.
        self.fs_args = {"skip_instance_cache": True}
        self.fs_args.update(self._prepare_credentials(**kwargs))

    @staticmethod
    def _get_kwargs_from_urls(urlpath: str) -> "Dict[str, Any]":
        """Extract connection options (host, username, ...) from a URL,
        dropping the location-only keys."""
        from fsspec.utils import infer_storage_options

        options = infer_storage_options(urlpath)
        options.pop("path", None)
        options.pop("protocol", None)
        return options

    @cached_property
    def fs(self):
        """Concrete fsspec filesystem; must be provided by subclasses."""
        raise NotImplementedError

    def _prepare_credentials(
        self, **config: Dict[str, Any]  # pylint: disable=unused-argument
    ) -> Dict[str, Any]:
        """Prepare the arguments for authentication to the
        host filesystem"""
        return {}

    def _isdir(self, path: AnyFSPath) -> bool:
        """Raw directory check; may raise FileNotFoundError."""
        return self.fs.isdir(path)

    def isdir(self, path: AnyFSPath) -> bool:
        """Like :meth:`_isdir`, but a missing path is simply not a dir."""
        try:
            return self._isdir(path)
        except FileNotFoundError:
            return False

    def isfile(self, path: AnyFSPath) -> bool:
        """True when `path` exists and is not a directory."""
        try:
            return not self._isdir(path)
        except FileNotFoundError:
            return False

    def is_empty(self, path: AnyFSPath) -> bool:
        """True for an empty directory or a zero-length file."""
        entry = self.info(path)
        if entry["type"] == "directory":
            return not self.fs.ls(path)
        return entry["size"] == 0

    def open(
        self,
        path: AnyFSPath,
        mode: str = "r",
        encoding: Optional[str] = None,
        **kwargs,
    ) -> "IO":  # pylint: disable=arguments-differ
        """Open `path` on the underlying filesystem.

        BUG FIX: `encoding` and the extra keyword arguments used to be
        silently dropped, so text-mode opens always used the platform
        default encoding. They are now forwarded to fsspec's open().
        """
        if encoding is not None:
            kwargs["encoding"] = encoding
        return self.fs.open(path, mode=mode, **kwargs)

    def checksum(self, path: AnyFSPath) -> str:
        """Return the filesystem-specific checksum of `path`."""
        return self.fs.checksum(path)

    def copy(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
        """Server-side copy, creating the destination parent first."""
        self.makedirs(self.path.parent(to_info))
        self.fs.copy(from_info, to_info)

    def exists(self, path: AnyFSPath) -> bool:
        return self.fs.exists(path)

    @overload
    def ls(
        self, path: AnyFSPath, detail: "Literal[True]"
    ) -> "Iterator[Entry]":
        ...

    @overload
    def ls(self, path: AnyFSPath, detail: "Literal[False]") -> Iterator[str]:
        ...

    def ls(self, path, detail=False):
        """Yield direct children of `path` (names or info dicts)."""
        yield from self.fs.ls(path, detail=detail)

    def find(self, path, prefix=None):
        """Yield all files under `path` recursively.

        `prefix` is accepted for interface compatibility and ignored here;
        ObjectFSWrapper overrides this with prefix-aware traversal.
        """
        yield from self.fs.find(path)

    def move(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
        self.fs.move(from_info, to_info)

    def remove(self, path: AnyFSPath) -> None:
        self.fs.rm_file(path)

    def info(self, path: AnyFSPath) -> "Entry":
        return self.fs.info(path)

    def makedirs(self, path: AnyFSPath, **kwargs) -> None:
        self.fs.makedirs(path, exist_ok=kwargs.pop("exist_ok", True))

    def put_file(
        self,
        from_file: AnyFSPath,
        to_info: AnyFSPath,
        callback: Any = DEFAULT_CALLBACK,
        **kwargs,
    ) -> None:
        """Upload a local file, then drop the stale parent-listing cache."""
        self.fs.put_file(from_file, to_info, callback=callback, **kwargs)
        self.fs.invalidate_cache(self.path.parent(to_info))

    def get_file(
        self,
        from_info: AnyFSPath,
        to_info: AnyFSPath,
        callback: Any = DEFAULT_CALLBACK,
        **kwargs,
    ) -> None:
        """Download a remote file to a local path."""
        self.fs.get_file(from_info, to_info, callback=callback, **kwargs)

    def upload_fobj(self, fobj: IO, to_info: AnyFSPath, **kwargs) -> None:
        """Stream an open file object to `to_info` in chunks."""
        self.makedirs(self.path.parent(to_info))
        with self.open(to_info, "wb") as fdest:
            shutil.copyfileobj(
                fobj,
                fdest,
                length=getattr(fdest, "blocksize", None),  # type: ignore
            )
# pylint: disable=abstract-method
class ObjectFSWrapper(FSSpecWrapper):
    """FSSpecWrapper specialization for key/value object storages, where
    "directories" are just key prefixes (or zero-byte marker objects),
    not real filesystem entries."""

    TRAVERSE_PREFIX_LEN = 3

    def makedirs(self, path: AnyFSPath, **kwargs) -> None:
        # For object storages make this method a no-op. The original
        # fs.makedirs() method will only check if the bucket exists
        # and create if it doesn't though we don't want to support
        # that behavior, and the check will cost some time so we'll
        # simply ignore all mkdir()/makedirs() calls.
        return None

    def _isdir(self, path: AnyFSPath) -> bool:
        # Directory in object storages are interpreted differently
        # among different fsspec providers, so this logic is a temporary
        # measure for us to adapt as of now. It checks whether it is a
        # directory (as in a prefix with contents) or whether it is an empty
        # file where it's name ends with a forward slash
        entry = self.info(path)
        return entry["type"] == "directory" or (
            entry["size"] == 0
            and entry["type"] == "file"
            and entry["name"].endswith("/")
        )

    def find(self, path, prefix=None):
        """Yield all object keys under `path`; when `prefix` is truthy the
        last path component is treated as a key prefix instead of a
        directory name."""
        if prefix:
            with_prefix = self.path.parent(path)
            files = self.fs.find(with_prefix, prefix=self.path.parts(path)[-1])
        else:
            with_prefix = path
            files = self.fs.find(path)

        # When calling find() on a file, it returns the same file in a list.
        # For object-based storages, the same behavior applies to empty
        # directories since they are represented as files. This condition
        # checks whether we should yield an empty list (if it is an empty
        # directory) or just yield the file itself.
        if len(files) == 1 and files[0] == with_prefix and self.isdir(path):
            return None

        yield from files
# pylint: disable=arguments-differ
class NoDirectoriesMixin:
    """Mixin for filesystems exposing a flat namespace: every path acts as
    a file, and directory-style traversal is unsupported."""

    def isdir(self, *args, **kwargs):
        """A flat filesystem has no directories."""
        return False

    def isfile(self, *args, **kwargs):
        """Every path is considered a file."""
        return True

    def find(self, *args, **kwargs):
        """Recursive search is meaningless without directories."""
        raise NotImplementedError

    def walk(self, *args, **kwargs):
        """Tree walking is meaningless without directories."""
        raise NotImplementedError

    def ls(self, *args, **kwargs):
        """Directory listing is meaningless without directories."""
        raise NotImplementedError
class CallbackMixin:
    """Provides callback support for the filesystem that don't support yet."""

    def put_file(
        self,
        from_file,
        to_info,
        callback=DEFAULT_CALLBACK,
        **kwargs,
    ):
        """Add compatibility support for Callback."""
        # pylint: disable=protected-access
        self.makedirs(self.path.parent(to_info))
        size = os.path.getsize(from_file)
        with open(from_file, "rb") as fobj:
            # Report total size before streaming so progress is meaningful.
            callback.set_size(size)
            # Each read on the wrapper advances the callback.
            wrapped = CallbackIOWrapper(callback.relative_update, fobj)
            self.upload_fobj(wrapped, to_info)
        # Parent listing is stale after the upload.
        self.fs.invalidate_cache(self.path.parent(to_info))

    def get_file(
        self,
        from_info,
        to_info,
        callback=DEFAULT_CALLBACK,
        **kwargs,
    ):
        """Download `from_info` to local `to_info`, reporting progress
        through `callback`.  # NOTE(review): assumes the host class provides
        getsize()/open() -- confirm against concrete filesystems."""
        # pylint: disable=protected-access
        total: int = self.getsize(from_info)
        if total:
            callback.set_size(total)
        with self.open(from_info, "rb") as fobj, open(to_info, "wb") as fdest:
            wrapped = CallbackIOWrapper(callback.relative_update, fobj)
            shutil.copyfileobj(wrapped, fdest, length=fobj.blocksize)
| 30.773279 | 79 | 0.617682 |
8e5c99be0b07750dec2d972c0c723cb3ae1a9db9 | 287 | py | Python | src/Injectors/File/5.py | shownadda/Malware-Dev | a3fb40371bb4c4f41c582747af41ae8800050f5c | [
"Unlicense"
] | 46 | 2022-01-30T14:29:02.000Z | 2022-03-25T03:49:13.000Z | src/Injectors/File/5.py | shownadda/Malware-Dev | a3fb40371bb4c4f41c582747af41ae8800050f5c | [
"Unlicense"
] | null | null | null | src/Injectors/File/5.py | shownadda/Malware-Dev | a3fb40371bb4c4f41c582747af41ae8800050f5c | [
"Unlicense"
] | 1 | 2022-03-05T03:42:55.000Z | 2022-03-05T03:42:55.000Z | ## Extract injected image from Main image
import PIL.Image
import io
with open("pizza.jpg", "rb") as f:
content = f.read()
offset = content.index(bytes.fromhex("FFD9"))
f.seek(offset + 2)
new_img = PIL.Image.open(io.BytesIO(f.read()))
new_img.save("fresh_pizza.png") | 28.7 | 50 | 0.66899 |
fade289f995a5a3f0e5de20f12ee8e817890b9c2 | 10,661 | py | Python | contest/views/admin.py | scintiller/OnlineJudge | 4e66da0e366c8b950a1ccae2b435b81d9fe07e6c | [
"MIT"
] | null | null | null | contest/views/admin.py | scintiller/OnlineJudge | 4e66da0e366c8b950a1ccae2b435b81d9fe07e6c | [
"MIT"
] | 6 | 2020-06-05T21:37:42.000Z | 2022-01-13T01:19:55.000Z | contest/views/admin.py | scintiller/OnlineJudge | 4e66da0e366c8b950a1ccae2b435b81d9fe07e6c | [
"MIT"
] | null | null | null | import copy
import os
import zipfile
from ipaddress import ip_network
import dateutil.parser
from django.http import FileResponse
from account.decorators import check_contest_permission, ensure_created_by
from account.models import User
from submission.models import Submission, JudgeStatus
from utils.api import APIView, validate_serializer
from utils.cache import cache
from utils.constants import CacheKey
from utils.shortcuts import rand_str
from utils.tasks import delete_files
from ..models import Contest, ContestAnnouncement, ACMContestRank
from ..serializers import (ContestAnnouncementSerializer, ContestAdminSerializer,
CreateConetestSeriaizer, CreateContestAnnouncementSerializer,
EditConetestSeriaizer, EditContestAnnouncementSerializer,
ACMContesHelperSerializer, )
class ContestAPI(APIView):
    """Admin-panel CRUD for contests (create, edit, fetch/list)."""

    @validate_serializer(CreateConetestSeriaizer)
    def post(self, request):
        """Create a contest owned by the requesting user."""
        data = request.data
        data["start_time"] = dateutil.parser.parse(data["start_time"])
        data["end_time"] = dateutil.parser.parse(data["end_time"])
        data["created_by"] = request.user
        if data["end_time"] <= data["start_time"]:
            return self.error("Start time must occur earlier than end time")
        # BUG FIX: the old check `data.get("password") and data["password"] == ""`
        # could never be true ("" is falsy), so an empty password was stored
        # verbatim instead of being normalized to None (public contest),
        # unlike the equivalent normalization in put() below.
        if data.get("password") == "":
            data["password"] = None
        for ip_range in data["allowed_ip_ranges"]:
            try:
                ip_network(ip_range, strict=False)
            except ValueError:
                return self.error(f"{ip_range} is not a valid cidr network")
        contest = Contest.objects.create(**data)
        return self.success(ContestAdminSerializer(contest).data)

    @validate_serializer(EditConetestSeriaizer)
    def put(self, request):
        """Update an existing contest owned by the requesting user."""
        data = request.data
        try:
            contest = Contest.objects.get(id=data.pop("id"))
            ensure_created_by(contest, request.user)
        except Contest.DoesNotExist:
            return self.error("Contest does not exist")
        data["start_time"] = dateutil.parser.parse(data["start_time"])
        data["end_time"] = dateutil.parser.parse(data["end_time"])
        if data["end_time"] <= data["start_time"]:
            return self.error("Start time must occur earlier than end time")
        # Empty/falsy password means the contest is public.
        if not data["password"]:
            data["password"] = None
        for ip_range in data["allowed_ip_ranges"]:
            try:
                ip_network(ip_range, strict=False)
            except ValueError:
                return self.error(f"{ip_range} is not a valid cidr network")
        # Turning real-time ranking on invalidates any cached rank data.
        if not contest.real_time_rank and data.get("real_time_rank"):
            cache_key = f"{CacheKey.contest_rank_cache}:{contest.id}"
            cache.delete(cache_key)

        for k, v in data.items():
            setattr(contest, k, v)
        contest.save()
        return self.success(ContestAdminSerializer(contest).data)

    def get(self, request):
        """Fetch one contest by id, or list contests (paginated)."""
        contest_id = request.GET.get("id")
        if contest_id:
            try:
                contest = Contest.objects.get(id=contest_id)
                ensure_created_by(contest, request.user)
                return self.success(ContestAdminSerializer(contest).data)
            except Contest.DoesNotExist:
                return self.error("Contest does not exist")

        # Admin contest list: regular admins only see their own contests.
        contests = Contest.objects.all().order_by("-create_time")
        if request.user.is_admin():
            contests = contests.filter(created_by=request.user)
        keyword = request.GET.get("keyword")
        if keyword:
            contests = contests.filter(title__contains=keyword)
        return self.success(self.paginate_data(request, contests, ContestAdminSerializer))
class ContestAnnouncementAPI(APIView):
    """Admin-panel CRUD for contest announcements."""

    @validate_serializer(CreateContestAnnouncementSerializer)
    def post(self, request):
        """
        Create one contest_announcement.
        """
        data = request.data
        try:
            contest = Contest.objects.get(id=data.pop("contest_id"))
            # Only the contest owner may attach announcements to it.
            ensure_created_by(contest, request.user)
            data["contest"] = contest
            data["created_by"] = request.user
        except Contest.DoesNotExist:
            return self.error("Contest does not exist")
        announcement = ContestAnnouncement.objects.create(**data)
        return self.success(ContestAnnouncementSerializer(announcement).data)

    @validate_serializer(EditContestAnnouncementSerializer)
    def put(self, request):
        """
        update contest_announcement
        """
        data = request.data
        try:
            contest_announcement = ContestAnnouncement.objects.get(id=data.pop("id"))
            ensure_created_by(contest_announcement, request.user)
        except ContestAnnouncement.DoesNotExist:
            return self.error("Contest announcement does not exist")
        for k, v in data.items():
            setattr(contest_announcement, k, v)
        contest_announcement.save()
        return self.success()

    def delete(self, request):
        """
        Delete one contest_announcement.
        """
        contest_announcement_id = request.GET.get("id")
        if contest_announcement_id:
            if request.user.is_admin():
                # Regular admins may only delete announcements of contests
                # they created; the unrestricted branch presumably serves a
                # super-admin role -- NOTE(review): confirm role semantics.
                ContestAnnouncement.objects.filter(id=contest_announcement_id,
                                                   contest__created_by=request.user).delete()
            else:
                ContestAnnouncement.objects.filter(id=contest_announcement_id).delete()
        return self.success()

    def get(self, request):
        """
        Get one contest_announcement or contest_announcement list.
        """
        contest_announcement_id = request.GET.get("id")
        if contest_announcement_id:
            try:
                contest_announcement = ContestAnnouncement.objects.get(id=contest_announcement_id)
                ensure_created_by(contest_announcement, request.user)
                return self.success(ContestAnnouncementSerializer(contest_announcement).data)
            except ContestAnnouncement.DoesNotExist:
                return self.error("Contest announcement does not exist")
        contest_id = request.GET.get("contest_id")
        if not contest_id:
            return self.error("Parameter error")
        contest_announcements = ContestAnnouncement.objects.filter(contest_id=contest_id)
        # Regular admins are limited to announcements they created.
        if request.user.is_admin():
            contest_announcements = contest_announcements.filter(created_by=request.user)
        keyword = request.GET.get("keyword")
        if keyword:
            contest_announcements = contest_announcements.filter(title__contains=keyword)
        return self.success(ContestAnnouncementSerializer(contest_announcements, many=True).data)
class ACMContestHelper(APIView):
    """Helper endpoints for manually verifying AC submissions in ACM-rule
    contests (list AC records, mark one as checked)."""

    @check_contest_permission(check_type="ranks")
    def get(self, request):
        """Return every accepted (problem, user) record of this contest,
        newest AC first, with its manual-check status."""
        ranks = ACMContestRank.objects.filter(contest=self.contest, accepted_number__gt=0) \
            .values("id", "user__username", "user__userprofile__real_name", "submission_info")
        results = []
        for rank in ranks:
            # submission_info maps problem_id -> per-problem AC metadata.
            for problem_id, info in rank["submission_info"].items():
                if info["is_ac"]:
                    results.append({
                        "id": rank["id"],
                        "username": rank["user__username"],
                        "real_name": rank["user__userprofile__real_name"],
                        "problem_id": problem_id,
                        "ac_info": info,
                        "checked": info.get("checked", False)
                    })
        # Newest accepted submissions first.
        results.sort(key=lambda x: -x["ac_info"]["ac_time"])
        return self.success(results)

    @check_contest_permission(check_type="ranks")
    @validate_serializer(ACMContesHelperSerializer)
    def put(self, request):
        """Set the manual 'checked' flag on one (rank, problem) AC record."""
        data = request.data
        try:
            rank = ACMContestRank.objects.get(pk=data["rank_id"])
        except ACMContestRank.DoesNotExist:
            return self.error("Rank id does not exist")
        problem_rank_status = rank.submission_info.get(data["problem_id"])
        if not problem_rank_status:
            return self.error("Problem id does not exist")
        problem_rank_status["checked"] = data["checked"]
        # Persist only the mutated JSON column.
        rank.save(update_fields=("submission_info",))
        return self.success()
class DownloadContestSubmissions(APIView):
    """Zip up each contestant's first accepted submission per problem and
    stream the archive to the contest owner."""

    def _dump_submissions(self, contest, exclude_admin=True):
        """Build the zip file on disk and return its path.

        One entry per (user, problem): the most recent query order is
        newest-first, and only the first unseen AC per problem is written.
        """
        problem_ids = contest.problem_set.all().values_list("id", "_id")
        # Map internal problem pk -> display id used in file names.
        id2display_id = {k[0]: k[1] for k in problem_ids}
        ac_map = {k[0]: False for k in problem_ids}
        submissions = Submission.objects.filter(contest=contest, result=JudgeStatus.ACCEPTED).order_by("-create_time")
        user_ids = submissions.values_list("user_id", flat=True)
        users = User.objects.filter(id__in=user_ids)
        path = f"/tmp/{rand_str()}.zip"
        with zipfile.ZipFile(path, "w") as zip_file:
            for user in users:
                if user.is_admin_role() and exclude_admin:
                    continue
                # Fresh per-user "already dumped this problem" flags.
                user_ac_map = copy.deepcopy(ac_map)
                user_submissions = submissions.filter(user_id=user.id)
                for submission in user_submissions:
                    problem_id = submission.problem_id
                    if user_ac_map[problem_id]:
                        continue
                    file_name = f"{user.username}_{id2display_id[submission.problem_id]}.txt"
                    compression = zipfile.ZIP_DEFLATED
                    zip_file.writestr(zinfo_or_arcname=f"{file_name}",
                                      data=submission.code,
                                      compress_type=compression)
                    user_ac_map[problem_id] = True
        return path

    def get(self, request):
        """Validate ownership, build the archive and return it as an
        attachment; the temp file is deleted asynchronously after 5 min."""
        contest_id = request.GET.get("contest_id")
        if not contest_id:
            return self.error("Parameter error")
        try:
            contest = Contest.objects.get(id=contest_id)
            ensure_created_by(contest, request.user)
        except Contest.DoesNotExist:
            return self.error("Contest does not exist")
        exclude_admin = request.GET.get("exclude_admin") == "1"
        zip_path = self._dump_submissions(contest, exclude_admin)
        # Schedule cleanup of the temp archive (delay is in milliseconds).
        delete_files.send_with_options(args=(zip_path,), delay=300_000)
        resp = FileResponse(open(zip_path, "rb"))
        resp["Content-Type"] = "application/zip"
        resp["Content-Disposition"] = f"attachment;filename={os.path.basename(zip_path)}"
        return resp
| 43.872428 | 118 | 0.632305 |
78190d97d688b4cf86f1b27dd3b735361cc19e02 | 674 | py | Python | pytglib/api/types/check_chat_username_result_public_chats_too_much.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/check_chat_username_result_public_chats_too_much.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/check_chat_username_result_public_chats_too_much.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class CheckChatUsernameResultPublicChatsTooMuch(Object):
    """
    The user has too many chats with a username; one of them should be made private first

    Attributes:
        ID (:obj:`str`): ``CheckChatUsernameResultPublicChatsTooMuch``

    No parameters required.

    Returns:
        CheckChatUsernameResult

    Raises:
        :class:`telegram.Error`
    """
    # TDLib type identifier used for (de)serialization.
    ID = "checkChatUsernameResultPublicChatsTooMuch"

    def __init__(self, **kwargs):
        # This result type carries no payload; kwargs are ignored.
        pass

    @staticmethod
    def read(q: dict, *args) -> "CheckChatUsernameResultPublicChatsTooMuch":
        # The incoming TDLib dict `q` has no fields to parse for this type.
        return CheckChatUsernameResultPublicChatsTooMuch()
479b8c80a01b6aadda26944229a6f16fed38d6f0 | 696 | py | Python | entity/ReminderInfo.py | zwffff2015/stock | 5f6017ab3a9af8920b1e595c5e3595458c95ce30 | [
"MIT"
] | null | null | null | entity/ReminderInfo.py | zwffff2015/stock | 5f6017ab3a9af8920b1e595c5e3595458c95ce30 | [
"MIT"
] | null | null | null | entity/ReminderInfo.py | zwffff2015/stock | 5f6017ab3a9af8920b1e595c5e3595458c95ce30 | [
"MIT"
] | null | null | null | class ReminderInfo:
def __init__(self, maxPrice, minPrice, upPercent, downPercent, receiver, code, name, buyPrice=0,
yesterdayHighPrice=0,
yesterdayChangePercent=0,
totalChangePercentLast30Days=0):
self.maxPrice = maxPrice
self.minPrice = minPrice
self.upPercent = upPercent
self.downPercent = downPercent
self.receiver = receiver
self.code = code
self.name = name
self.buyPrice = buyPrice
self.yesterdayHighPrice = yesterdayHighPrice
self.yesterdayChangePercent = yesterdayChangePercent
self.totalChangePercentLast30Days = totalChangePercentLast30Days
| 40.941176 | 100 | 0.666667 |
6e5c9ff38b08faeba03ac289230a2556fc8047cc | 10,053 | py | Python | src/v5.1/resources/swagger_client/models/tpdm_anonymized_student_education_organization_association.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | 2 | 2021-04-27T17:18:17.000Z | 2021-04-27T19:14:39.000Z | src/v5.1/resources/swagger_client/models/tpdm_anonymized_student_education_organization_association.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | null | null | null | src/v5.1/resources/swagger_client/models/tpdm_anonymized_student_education_organization_association.py | xmarcosx/edfi-notebook | 0564ebdf1d0f45a9d25056e7e61369f0a837534d | [
"Apache-2.0"
] | 1 | 2022-01-06T09:43:11.000Z | 2022-01-06T09:43:11.000Z | # coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
# NOTE(review): swagger-codegen output -- fix issues in the generator
# template rather than hand-editing, or changes will be clobbered on
# regeneration.
class TpdmAnonymizedStudentEducationOrganizationAssociation(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared Swagger type.
    swagger_types = {
        'id': 'str',
        'begin_date': 'date',
        'anonymized_student_reference': 'TpdmAnonymizedStudentReference',
        'education_organization_reference': 'EdFiEducationOrganizationReference',
        'end_date': 'date',
        'etag': 'str'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'id': 'id',
        'begin_date': 'beginDate',
        'anonymized_student_reference': 'anonymizedStudentReference',
        'education_organization_reference': 'educationOrganizationReference',
        'end_date': 'endDate',
        'etag': '_etag'
    }

    def __init__(self, id=None, begin_date=None, anonymized_student_reference=None, education_organization_reference=None, end_date=None, etag=None, _configuration=None):  # noqa: E501
        """TpdmAnonymizedStudentEducationOrganizationAssociation - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._id = None
        self._begin_date = None
        self._anonymized_student_reference = None
        self._education_organization_reference = None
        self._end_date = None
        self._etag = None
        self.discriminator = None

        # Required fields go through the property setters (which validate);
        # optional ones are only set when provided.
        if id is not None:
            self.id = id
        self.begin_date = begin_date
        self.anonymized_student_reference = anonymized_student_reference
        self.education_organization_reference = education_organization_reference
        if end_date is not None:
            self.end_date = end_date
        if etag is not None:
            self.etag = etag

    @property
    def id(self):
        """Gets the id of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501

        # noqa: E501

        :return: The id of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this TpdmAnonymizedStudentEducationOrganizationAssociation.

        # noqa: E501

        :param id: The id of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :type: str
        """

        self._id = id

    @property
    def begin_date(self):
        """Gets the begin_date of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501

        Begin date for the association  # noqa: E501

        :return: The begin_date of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :rtype: date
        """
        return self._begin_date

    @begin_date.setter
    def begin_date(self, begin_date):
        """Sets the begin_date of this TpdmAnonymizedStudentEducationOrganizationAssociation.

        Begin date for the association  # noqa: E501

        :param begin_date: The begin_date of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :type: date
        """
        if self._configuration.client_side_validation and begin_date is None:
            raise ValueError("Invalid value for `begin_date`, must not be `None`")  # noqa: E501

        self._begin_date = begin_date

    @property
    def anonymized_student_reference(self):
        """Gets the anonymized_student_reference of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501


        :return: The anonymized_student_reference of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :rtype: TpdmAnonymizedStudentReference
        """
        return self._anonymized_student_reference

    @anonymized_student_reference.setter
    def anonymized_student_reference(self, anonymized_student_reference):
        """Sets the anonymized_student_reference of this TpdmAnonymizedStudentEducationOrganizationAssociation.


        :param anonymized_student_reference: The anonymized_student_reference of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :type: TpdmAnonymizedStudentReference
        """
        if self._configuration.client_side_validation and anonymized_student_reference is None:
            raise ValueError("Invalid value for `anonymized_student_reference`, must not be `None`")  # noqa: E501

        self._anonymized_student_reference = anonymized_student_reference

    @property
    def education_organization_reference(self):
        """Gets the education_organization_reference of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501


        :return: The education_organization_reference of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :rtype: EdFiEducationOrganizationReference
        """
        return self._education_organization_reference

    @education_organization_reference.setter
    def education_organization_reference(self, education_organization_reference):
        """Sets the education_organization_reference of this TpdmAnonymizedStudentEducationOrganizationAssociation.


        :param education_organization_reference: The education_organization_reference of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :type: EdFiEducationOrganizationReference
        """
        if self._configuration.client_side_validation and education_organization_reference is None:
            raise ValueError("Invalid value for `education_organization_reference`, must not be `None`")  # noqa: E501

        self._education_organization_reference = education_organization_reference

    @property
    def end_date(self):
        """Gets the end_date of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501

        The end date for the association.  # noqa: E501

        :return: The end_date of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :rtype: date
        """
        return self._end_date

    @end_date.setter
    def end_date(self, end_date):
        """Sets the end_date of this TpdmAnonymizedStudentEducationOrganizationAssociation.

        The end date for the association.  # noqa: E501

        :param end_date: The end_date of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :type: date
        """

        self._end_date = end_date

    @property
    def etag(self):
        """Gets the etag of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501

        A unique system-generated value that identifies the version of the resource.  # noqa: E501

        :return: The etag of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :rtype: str
        """
        return self._etag

    @etag.setter
    def etag(self, etag):
        """Sets the etag of this TpdmAnonymizedStudentEducationOrganizationAssociation.

        A unique system-generated value that identifies the version of the resource.  # noqa: E501

        :param etag: The etag of this TpdmAnonymizedStudentEducationOrganizationAssociation.  # noqa: E501
        :type: str
        """

        self._etag = etag

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(TpdmAnonymizedStudentEducationOrganizationAssociation, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TpdmAnonymizedStudentEducationOrganizationAssociation):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, TpdmAnonymizedStudentEducationOrganizationAssociation):
            return True

        return self.to_dict() != other.to_dict()
9828b45924c30d21484d0de726dd951478e26894 | 1,157 | py | Python | tests/queues_test.py | Cormen-Lib-Developers/Cormen-Lib | 7877f1180cb76b56c909b70a35d85af8a83e530a | [
"MIT"
] | 8 | 2022-01-24T00:34:59.000Z | 2022-03-15T01:34:51.000Z | tests/queues_test.py | Cormen-Lib-Developers/Cormen-Lib | 7877f1180cb76b56c909b70a35d85af8a83e530a | [
"MIT"
] | 8 | 2022-02-09T22:07:35.000Z | 2022-03-17T16:23:08.000Z | tests/queues_test.py | Cormen-Lib-Developers/Cormen-Lib | 7877f1180cb76b56c909b70a35d85af8a83e530a | [
"MIT"
] | null | null | null | import unittest
from dalpy.queues import Queue, QueueUnderflowError
class QueueTest(unittest.TestCase):
    """Unit tests for dalpy.queues.Queue."""

    def test_init(self):
        """A freshly constructed queue is empty with size zero."""
        queue = Queue()
        self.assertEqual(queue.size(), 0)
        self.assertTrue(queue.is_empty())

    def test_empty_ops(self):
        """front() and dequeue() on an empty queue raise QueueUnderflowError."""
        queue = Queue()
        self.assertRaises(QueueUnderflowError, queue.front)
        self.assertRaises(QueueUnderflowError, queue.dequeue)

    def test_enqueue(self):
        """Enqueuing a single element exposes it at the front."""
        queue = Queue()
        queue.enqueue(2)
        self.assertFalse(queue.is_empty())
        self.assertEqual(queue.size(), 1)
        self.assertEqual(queue.front(), 2)

    def test_enqueue_dequeue(self):
        """Dequeuing the only element returns it and empties the queue."""
        queue = Queue()
        queue.enqueue(2)
        self.assertEqual(queue.dequeue(), 2)
        self.assertTrue(queue.is_empty())
        self.assertEqual(queue.size(), 0)

    def test_many_enqueue(self):
        """Elements come out in FIFO order and size() shrinks accordingly."""
        queue = Queue()
        for value in range(3):
            queue.enqueue(value)
        self.assertEqual(queue.front(), 0)
        for value in range(3):
            self.assertEqual(queue.front(), value)
            self.assertEqual(queue.dequeue(), value)
            self.assertEqual(queue.size(), 3 - value - 1)
# Allow running this test module directly: `python queues_test.py`.
if __name__ == '__main__':
    unittest.main()
| 26.295455 | 67 | 0.581677 |
9ce829efb3bb462dcebbca0fb7778f3f0d58cb99 | 2,565 | py | Python | toH5.py | anuradhawick/seq2vec | 4ced909f4046a7aa8ec0562ea85e75d5702054b5 | [
"MIT"
] | 6 | 2021-05-10T02:10:20.000Z | 2022-01-16T06:53:14.000Z | toH5.py | anuradhawick/seq2vec | 4ced909f4046a7aa8ec0562ea85e75d5702054b5 | [
"MIT"
] | 1 | 2021-12-29T01:53:58.000Z | 2022-01-07T00:53:42.000Z | toH5.py | anuradhawick/seq2vec | 4ced909f4046a7aa8ec0562ea85e75d5702054b5 | [
"MIT"
] | null | null | null | import h5py
from glob import glob
from tqdm import tqdm
import numpy as np
import argparse
def main(tmp_path, tmp_sort_path, h5_filename):
    """
    Merge the per-shard vector dumps matching the glob *tmp_path* into one
    index-sorted text file (*tmp_sort_path*) and write the vectors into an
    HDF5 dataset named "vectors" inside *h5_filename*.

    Each input line is expected to be "<index> <v1> <v2> ...", with each
    shard individually ordered by index.
    """
    # First pass: count vectors and infer their dimensionality.
    ds_size = 0
    d_shape = 0
    for file in glob(tmp_path):
        for line in open(file):
            if d_shape == 0:
                d_shape = len(line.strip().split()) - 1
            ds_size += 1
    # K-way merge of the individually index-ordered shard files.
    file_heads = [open(file) for file in glob(tmp_path)]
    # BUG FIX: use a context manager so the merged file is closed (and thus
    # flushed) before being re-read below; previously the still-open "w+"
    # handle could leave buffered lines missing from the h5 pass.
    with open(tmp_sort_path, "w+") as sorted_output:
        line_heads = [file.readline() for file in file_heads]
        for x in tqdm(range(ds_size), total=ds_size, desc="Sorting vectors"):
            # Exhausted shards contribute +inf so argmin ignores them.
            line_idx = np.array([int(line.strip().split()[0]) if line else float('inf') for line in line_heads])
            min_idx = np.argmin(line_idx)
            sorted_output.write(line_heads[min_idx])
            line_heads = [line if n != min_idx else file_heads[n].readline() for n, line in enumerate(line_heads)]
    [file.close() for file in file_heads]
    h5 = h5py.File(h5_filename, 'w')
    ds = h5.create_dataset("vectors", (ds_size, d_shape), dtype='f')
    off_set = 0
    vecs = []
    for line in tqdm(open(tmp_sort_path), total=ds_size, desc="Writing to h5"):
        d = list(map(float, line.strip().split()))
        # d[0] is the sort index; only the vector components are stored.
        vecs.append(np.array(d[1:]))
        # Flush the buffer to the dataset once it grows past ~1 GiB of floats.
        if len(vecs) * d_shape > 1073741824:
            vecs = np.array(vecs)
            ds[off_set:off_set + len(vecs)] = vecs
            off_set += len(vecs)
            vecs = []
    # Write any remaining buffered vectors.
    if vecs:
        vecs = np.array(vecs)
        ds[off_set:off_set + len(vecs)] = vecs
    h5.close()
if __name__ == '__main__':
    # CLI: gather per-shard seq2vec outputs and convert them to one HDF5 file.
    parser = argparse.ArgumentParser(description="""This script of Seq2Vec helps you to convert the raw output to H5. \
    Quite helpful in machine learning work.""")
    parser.add_argument('--seq2vec-outdir', '-s2v',
                        help="Output directory of seq2vec containing all the *.txt files.",
                        type=str,
                        required=True)
    parser.add_argument('--destination-file', '-h5',
                        help="Name of the destination h5 file.",
                        type=str,
                        required=True)
    args = parser.parse_args()
    # Derived paths: shard glob, merged/sorted intermediate, and final .h5 name.
    tmp_path = args.seq2vec_outdir + "/*.txt"
    tmp_sort_path = args.seq2vec_outdir + "/gathered-sorted.txt"
    h5_filename = args.destination_file
    # Ensure the output file carries an .h5 extension.
    if ".h5" not in h5_filename:
        h5_filename += ".h5"
    main(tmp_path, tmp_sort_path, h5_filename)
4bdd036e0bdd279896ac3971e7e656ef257e97ad | 4,444 | py | Python | mitmproxy/utils.py | dolfly/mitmproxy | 4604c25c6055a37e5f25a238d2a089759bd5d98a | [
"MIT"
] | null | null | null | mitmproxy/utils.py | dolfly/mitmproxy | 4604c25c6055a37e5f25a238d2a089759bd5d98a | [
"MIT"
] | null | null | null | mitmproxy/utils.py | dolfly/mitmproxy | 4604c25c6055a37e5f25a238d2a089759bd5d98a | [
"MIT"
] | null | null | null | from __future__ import (absolute_import, print_function, division)
import os
import datetime
import re
import time
import json
import importlib
import inspect
def timestamp():
    """
    Return the current wall-clock time as seconds since the epoch (float).

    The value is JSON-serializable and suitable for storage or transport.
    """
    return time.time()
def format_timestamp(s):
s = time.localtime(s)
d = datetime.datetime.fromtimestamp(time.mktime(s))
return d.strftime("%Y-%m-%d %H:%M:%S")
def format_timestamp_with_milli(s):
d = datetime.datetime.fromtimestamp(s)
return d.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
def isBin(s):
    """
    Return True if *s* contains any character outside the printable-ASCII
    range. Tab (9), LF (10), VT (11), FF (12), CR (13) and codes 32-126 are
    considered text; everything else counts as binary.
    """
    return any(
        ord(ch) < 9 or 13 < ord(ch) < 32 or ord(ch) > 126
        for ch in s
    )
def isMostlyBin(s):
    """
    Return True if more than 30% of the first 100 characters of *s* are
    non-printable (as judged by isBin).

    An empty string is not considered binary — previously this raised
    ZeroDivisionError on empty input.
    """
    s = s[:100]
    if not s:
        return False
    return sum(isBin(ch) for ch in s) / len(s) > 0.3
def isXML(s):
    """
    Return True if the first non-whitespace character of *s* is "<".

    Returns False for empty or all-whitespace strings — previously the loop
    fell through and implicitly returned None in that case.
    """
    # Match the original's whitespace set exactly: newline, space, tab.
    stripped = s.lstrip("\n \t")
    return stripped.startswith("<")
def pretty_json(s):
    """
    Re-serialize the JSON document *s* with sorted keys and 4-space
    indentation. Returns None if *s* is not valid JSON.
    """
    try:
        parsed = json.loads(s)
    except ValueError:
        return None
    else:
        return json.dumps(parsed, sort_keys=True, indent=4)
def pretty_duration(secs):
    """
    Format a duration in seconds as a short human-readable string, keeping
    roughly three significant digits (e.g. "120s", "12.3s", "1.23s", "500ms").
    """
    if secs >= 100:
        return "{:.0f}s".format(secs)
    if secs >= 10:
        return "{:2.1f}s".format(secs)
    if secs >= 1:
        return "{:1.2f}s".format(secs)
    # Sub-second durations are rendered in whole milliseconds.
    return "{:.0f}ms".format(secs * 1000)
class Data:
    """Locates package data files relative to a module's source directory."""

    def __init__(self, name):
        """Resolve the absolute directory containing module *name*'s source."""
        module = importlib.import_module(name)
        source_file = inspect.getsourcefile(module)
        self.dirname = os.path.abspath(os.path.dirname(source_file))

    def path(self, path):
        """
        Return the absolute path to the package data housed at *path* under
        this module. *path* may point at a file or a directory.

        Raises ValueError if the path does not exist.
        """
        fullpath = os.path.join(self.dirname, path)
        if not os.path.exists(fullpath):
            raise ValueError("dataPath: %s does not exist." % fullpath)
        return fullpath
# Data locator anchored on this module's own package directory.
pkg_data = Data(__name__)
class LRUCache:
    """
    A simple least-recently-used cache for generated values.

    Entries are keyed on the generator's arguments only; the identity of
    the generator function itself is ignored.
    """

    def __init__(self, size=100):
        self.size = size
        self.cache = {}
        self.cacheList = []

    def get(self, gen, *args):
        """
        gen: A (presumably expensive) generator function. The identity of
        gen is NOT taken into account by the cache.
        *args: Immutable arguments, used to establish identity in the cache
        and passed to gen to generate values.
        """
        if args in self.cache:
            # Cache hit: promote the key to most-recently-used.
            self.cacheList.remove(args)
            self.cacheList.insert(0, args)
            return self.cache[args]
        # Cache miss: generate, store, and evict the LRU entry if over size.
        value = gen(*args)
        self.cacheList.insert(0, args)
        self.cache[args] = value
        if len(self.cacheList) > self.size:
            evicted = self.cacheList.pop()
            self.cache.pop(evicted)
        return value
def clean_hanging_newline(t):
    """
    Strip a single trailing newline from *t*, if present.

    Many editors silently append a newline to the final line of a document
    (I'm looking at you, Vim). Removing it here fixes that common problem,
    at the risk of dropping a hanging newline in the rare case where the
    user actually intends one.
    """
    return t[:-1] if t and t.endswith("\n") else t
def parse_size(s):
    """
    Parse a size specification into a number of bytes. Valid forms:

        123:  bytes
        123k: kilobytes
        123m: megabytes
        123g: gigabytes

    Suffixes are case-insensitive. Returns None for an empty/falsy input;
    raises ValueError on a malformed specification.
    """
    if not s:
        return None
    multipliers = {"k": 1024 ** 1, "m": 1024 ** 2, "g": 1024 ** 3}
    mult = multipliers.get(s[-1].lower())
    if mult is not None:
        s = s[:-1]
    else:
        mult = 1
    try:
        return int(s) * mult
    except ValueError:
        raise ValueError("Invalid size specification: %s" % s)
def safe_subn(pattern, repl, target, *args, **kwargs):
    """
    There are Unicode conversion problems with re.subn. We smooth them over
    by coercing both the pattern and the replacement to str. A better
    solution would be aware of the actual content encoding.
    """
    coerced_pattern, coerced_repl = str(pattern), str(repl)
    return re.subn(coerced_pattern, coerced_repl, target, *args, **kwargs)
| 25.107345 | 78 | 0.566382 |
3345696e4641f7fca9071586964f350bce68ae4a | 8,775 | py | Python | python/setup.py | NVIDIA/cuQuantum | 0f00494d4639d760228ac002e83e6d2d3dd97eca | [
"BSD-3-Clause"
] | 52 | 2021-12-04T20:39:12.000Z | 2022-03-29T11:52:55.000Z | python/setup.py | NVIDIA/cuQuantum | 0f00494d4639d760228ac002e83e6d2d3dd97eca | [
"BSD-3-Clause"
] | 3 | 2022-02-01T22:46:50.000Z | 2022-03-24T01:52:29.000Z | python/setup.py | NVIDIA/cuQuantum | 0f00494d4639d760228ac002e83e6d2d3dd97eca | [
"BSD-3-Clause"
] | 18 | 2021-12-20T17:52:07.000Z | 2022-03-29T02:27:58.000Z | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import site
import subprocess
import sys
from packaging.version import Version
from setuptools import setup, Extension, find_packages
from Cython.Build import cythonize
# Get __version__ variable
source_root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(source_root, 'cuquantum', '_version.py')) as f:
    exec(f.read())  # defines __version__ in this module's namespace

# set up version constraints: note that CalVer like 22.03 is normalized to
# 22.3 by setuptools, so we must follow the same practice in the constraints;
# also, we don't need the Python patch number here
cuqnt_py_ver = Version(__version__)
cuqnt_ver_major_minor = f"{cuqnt_py_ver.major}.{cuqnt_py_ver.minor}"
# search order:
# 1. installed "cuquantum" package
# 2. env var
for path in site.getsitepackages():
path = os.path.join(path, 'cuquantum')
if os.path.isdir(path):
cuquantum_root = path
using_cuquantum_wheel = True
break
else:
cuquantum_root = os.environ.get('CUQUANTUM_ROOT')
using_cuquantum_wheel = False
# We allow setting CUSTATEVEC_ROOT and CUTENSORNET_ROOT separately for the ease
# of development, but users are encouraged to either install cuquantum from PyPI
# or conda, or set CUQUANTUM_ROOT to the existing installation.
try:
custatevec_root = os.environ['CUSTATEVEC_ROOT']
using_cuquantum_wheel = False
except KeyError as e:
if cuquantum_root is None:
raise RuntimeError('cuStateVec is not found, please install "cuquantum" '
'or set $CUQUANTUM_ROOT') from e
else:
custatevec_root = cuquantum_root
try:
cutensornet_root = os.environ['CUTENSORNET_ROOT']
using_cuquantum_wheel = False
except KeyError as e:
if cuquantum_root is None:
raise RuntimeError('cuTensorNet is not found, please install "cuquantum" '
'or set $CUQUANTUM_ROOT') from e
else:
cutensornet_root = cuquantum_root
# search order:
# 1. installed "cutensor" package
# 2. env var
for path in site.getsitepackages():
path = os.path.join(path, 'cutensor')
if os.path.isdir(path):
cutensor_root = path
assert using_cuquantum_wheel # if this raises, the env is corrupted
break
else:
cutensor_root = os.environ.get('CUTENSOR_ROOT')
assert not using_cuquantum_wheel
if cutensor_root is None:
raise RuntimeError('cuTENSOR is not found, please install "cutensor" '
'or set $CUTENSOR_ROOT')
# We can't assume users to have CTK installed via pip, so we really need this...
# TODO(leofang): try /usr/local/cuda?
try:
cuda_path = os.environ['CUDA_PATH']
except KeyError as e:
raise RuntimeError('CUDA is not found, please set $CUDA_PATH') from e
# TODO: use setup.cfg and/or pyproject.toml
setup_requires = [
'Cython>=0.29.22,<3',
'packaging',
]
install_requires = [
'numpy',
# 'cupy', # <-- can't be listed here as on PyPI this is the name for source build, not for wheel
# 'torch', # <-- PyTorch is optional; also, it does not live on PyPI...
'typing_extensions',
]
ignore_cuquantum_dep = bool(os.environ.get('CUQUANTUM_IGNORE_SOLVER', False))
if not ignore_cuquantum_dep:
assert using_cuquantum_wheel # if this raises, the env is corrupted
# cuTENSOR version is constrained in the cuquantum package, so we don't
# need to list it
setup_requires.append(f'cuquantum=={cuqnt_ver_major_minor}.*')
install_requires.append(f'cuquantum=={cuqnt_ver_major_minor}.*')
def check_cuda_version():
    """
    Parse the CUDA "major.minor" version (e.g. "11.2") out of cuda.h under
    the module-level `cuda_path`.

    We cannot dlopen and call cudaRuntimeGetVersion, because that requires a
    GPU. We also do not want to rely on the compiler utility provided in
    distutils (deprecated) or setuptools, as this is a very simple string
    parsing task.

    Raises RuntimeError if CUDA_VERSION cannot be found in the header.
    """
    # The previous bare `except: raise` wrapper was a no-op and obscured the
    # logic; exceptions propagate identically without it.
    cuda_h = os.path.join(cuda_path, 'include', 'cuda.h')
    with open(cuda_h, 'r') as f:
        header_lines = f.read().split('\n')
    for line in header_lines:
        if "#define CUDA_VERSION" in line:
            ver = int(line.split()[-1])
            break
    else:
        raise RuntimeError("cannot parse CUDA_VERSION")
    # e.g. 11020 -> "11.2"
    return str(ver // 1000) + '.' + str((ver % 100) // 10)
cuda_ver = check_cuda_version()
# Map the detected CUDA version onto the matching cuTENSOR library directory.
if cuda_ver in ('10.2', '11.0'):
    cutensor_ver = cuda_ver
elif '11.0' < cuda_ver < '12.0':
    # NOTE(review): lexicographic string comparison — correct for minors 1-9,
    # but would misorder a hypothetical "11.10"; confirm if such a minor ships.
    cutensor_ver = '11'
else:
    raise RuntimeError(f"Unsupported CUDA version: {cuda_ver}")
def prepare_libs_and_rpaths():
    """
    Populate the module-level link-configuration globals.

    Sets cusv_lib_dir / cutn_lib_dir (library search paths), cusv_lib /
    cutn_lib (library names to link against), and extra_linker_flags
    (rpath flags used only for the pip-wheel layout). Reads the *_root
    and using_cuquantum_wheel globals resolved earlier in this script.
    """
    global cusv_lib_dir, cutn_lib_dir
    # we include both lib64 and lib to accommodate all possible sources
    cusv_lib_dir = [os.path.join(custatevec_root, 'lib'),
                    os.path.join(custatevec_root, 'lib64')]
    cutn_lib_dir = [os.path.join(cutensornet_root, 'lib'),
                    os.path.join(cutensornet_root, 'lib64'),
                    os.path.join(cutensor_root, 'lib', cutensor_ver)]
    global cusv_lib, cutn_lib, extra_linker_flags
    if using_cuquantum_wheel:
        # Wheel layout: link by exact soname (":lib...so.1" syntax).
        cusv_lib = [':libcustatevec.so.1']
        cutn_lib = [':libcutensornet.so.1', ':libcutensor.so.1']
        # The rpaths must be adjusted given the following full-wheel installation:
        # cuquantum-python: site-packages/cuquantum/{custatevec, cutensornet}/ [=$ORIGIN]
        # cusv & cutn: site-packages/cuquantum/lib/
        # cutensor: site-packages/cutensor/lib/CUDA_VER/
        ldflag = "-Wl,--disable-new-dtags,"
        ldflag += "-rpath,$ORIGIN/../lib,"
        ldflag += f"-rpath,$ORIGIN/../../cutensor/lib/{cutensor_ver}"
        extra_linker_flags = [ldflag]
    else:
        # System/conda layout: plain -l names resolved via the lib dirs above.
        cusv_lib = ['custatevec']
        cutn_lib = ['cutensornet', 'cutensor']
        extra_linker_flags = []
prepare_libs_and_rpaths()
print("\n****************************************************************")
print("CUDA version:", cuda_ver)
print("CUDA path:", cuda_path)
print("cuStateVec path:", custatevec_root)
print("cuTensorNet path:", cutensornet_root)
print("cuTENSOR path:", cutensor_root)
print("****************************************************************\n")
custatevec = Extension(
"cuquantum.custatevec.custatevec",
sources=["cuquantum/custatevec/custatevec.pyx"],
include_dirs=[os.path.join(cuda_path, 'include'),
os.path.join(custatevec_root, 'include')],
library_dirs=cusv_lib_dir,
libraries=cusv_lib,
extra_link_args=extra_linker_flags,
)
cutensornet = Extension(
"cuquantum.cutensornet.cutensornet",
sources=["cuquantum/cutensornet/cutensornet.pyx"],
include_dirs=[os.path.join(cuda_path, 'include'),
os.path.join(cutensornet_root, 'include')],
library_dirs=cutn_lib_dir,
libraries=cutn_lib,
extra_link_args=extra_linker_flags,
)
setup(
name="cuquantum-python",
version=__version__,
description="Python APIs for cuQuantum",
url="https://github.com/NVIDIA/cuQuantum",
author="NVIDIA Corporation",
author_email="cuquantum-python@nvidia.com",
license="BSD-3-Clause",
license_files = ('LICENSE',),
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
"Topic :: Education",
"Topic :: Scientific/Engineering",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Environment :: GPU :: NVIDIA CUDA",
"Environment :: GPU :: NVIDIA CUDA :: 11.0",
"Environment :: GPU :: NVIDIA CUDA :: 11.1",
"Environment :: GPU :: NVIDIA CUDA :: 11.2",
"Environment :: GPU :: NVIDIA CUDA :: 11.3",
"Environment :: GPU :: NVIDIA CUDA :: 11.4",
"Environment :: GPU :: NVIDIA CUDA :: 11.5",
#"Environment :: GPU :: NVIDIA CUDA :: 11.6", # PyPI has not added it yet
],
ext_modules=cythonize([
custatevec,
cutensornet,
], verbose=True, language_level=3,
compiler_directives={'embedsignature': True}),
packages=find_packages(include=['cuquantum', 'cuquantum.*']),
package_data={"": ["*.pxd", "*.pyx", "*.py"],},
zip_safe=False,
python_requires='>=3.7',
setup_requires=setup_requires,
install_requires=install_requires,
tests_require=install_requires + [
# pytest < 6.2 is slow in collecting tests
'pytest>=6.2',
#'cffi>=1.0.0', # optional
]
)
| 35.526316 | 100 | 0.646154 |
a9526c8701734fce756f26d5982cef2c440da913 | 3,008 | py | Python | tests/unit/test_multipart.py | sivel/requisitor | c6b10d400111998591ea9e21148c9cd7e905a453 | [
"Apache-2.0"
] | 7 | 2020-04-17T04:05:19.000Z | 2020-05-15T14:35:21.000Z | tests/unit/test_multipart.py | sivel/requisitor | c6b10d400111998591ea9e21148c9cd7e905a453 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_multipart.py | sivel/requisitor | c6b10d400111998591ea9e21148c9cd7e905a453 | [
"Apache-2.0"
] | null | null | null | import os
import pathlib
from email.message import Message
import pytest
from requisitor.multipart import Field
from requisitor.multipart import prepare_multipart
def test_prepare_multipart(request):
    """End-to-end check of prepare_multipart against a golden fixture.

    Exercises every supported field form: bare string, dict with content,
    explicit mime_type, Field objects, and files given as path string,
    open handle, or pathlib.Path.
    """
    # Boundary baked into fixtures/multipart.txt; swapped for the real one below.
    fixture_boundary = b'===============9055186793967377464=='
    here = os.path.dirname(__file__)
    multipart = os.path.join(here, 'fixtures/multipart.txt')
    cacert = os.path.join(here, 'fixtures/cacert.pem')
    cakey = os.path.join(here, 'fixtures/cakey.pem')
    cacert_txt = open(os.path.join(here, 'fixtures/cacert.txt'), 'rb')
    request.addfinalizer(cacert_txt.close)
    png = pathlib.Path(here) / 'fixtures/1x1.png'
    fields = {
        'form_field_1': 'form_value_1',
        'form_field_2': {
            'content': 'form_value_2',
        },
        'form_field_3': {
            'content': '<html></html>',
            'mime_type': 'text/html',
        },
        'form_field_4': {
            'content': '{"foo": "bar"}',
            'mime_type': 'application/json',
        },
        'file1': {
            'content': 'file_content_1',
            'file': 'fake_file1.txt',
        },
        'file2': {
            'content': '<html></html>',
            'mime_type': 'text/html',
            'file': 'fake_file2.html',
        },
        'file3': Field(
            file='fake_file3.json',
            content='{"foo": "bar"}',
            main_type='application',
            sub_type='json',
        ),
        'file4': {
            'file': cacert,
            'mime_type': 'text/plain',
        },
        'file5': {
            'file': cakey,
        },
        'file6': {
            'file': cacert_txt,
        },
        'file7': {
            'file': png,
        },
    }
    content_type, b_data = prepare_multipart(fields)
    # Parse the returned Content-Type header to recover the boundary.
    headers = Message()
    headers['content-type'] = content_type
    assert headers.get_content_type() == 'multipart/form-data'
    boundary = headers.get_boundary()
    assert boundary is not None
    with open(multipart, 'rb') as f:
        b_expected = f.read().replace(fixture_boundary, boundary.encode())
    # Depending on Python version, there may or may not be a trailing newline
    assert b_data.rstrip(b'\r\n') == b_expected.rstrip(b'\r\n')
def test_wrong_type():
    """Non-dict input and non-field values are rejected with TypeError."""
    with pytest.raises(TypeError):
        prepare_multipart('foo')
    with pytest.raises(TypeError):
        prepare_multipart({'foo': None})
def test_empty():
    """A field with neither content nor file is rejected with ValueError."""
    with pytest.raises(ValueError):
        prepare_multipart({'foo': {}})
def test_unknown_mime(mocker):
    """Fields whose MIME type cannot be guessed fall back to octet-stream."""
    mocker.patch('mimetypes.guess_type', return_value=(None, None))
    payload = {'foo': {'filename': 'foo.boom', 'content': 'foo'}}
    content_type, body = prepare_multipart(payload)
    assert b'Content-Type: application/octet-stream' in body
def test_bad_mime(mocker):
    """A crashing MIME guesser also falls back to octet-stream."""
    mocker.patch('mimetypes.guess_type', side_effect=TypeError)
    payload = {'foo': {'filename': 'foo.boom', 'content': 'foo'}}
    content_type, body = prepare_multipart(payload)
    assert b'Content-Type: application/octet-stream' in body
| 29.782178 | 77 | 0.589096 |
fdce3a22f4d9ef80c42411304912340c9bf70bda | 29,371 | py | Python | examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py | yhavinga/transformers | 9932ee4b4bca9045d941af6687ef69eedcf68483 | [
"Apache-2.0"
] | 2 | 2022-02-19T07:02:52.000Z | 2022-02-19T07:02:55.000Z | examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py | yhavinga/transformers | 9932ee4b4bca9045d941af6687ef69eedcf68483 | [
"Apache-2.0"
] | 2 | 2022-03-14T10:13:16.000Z | 2022-03-14T11:50:27.000Z | examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py | yhavinga/transformers | 9932ee4b4bca9045d941af6687ef69eedcf68483 | [
"Apache-2.0"
] | 1 | 2022-02-20T11:47:53.000Z | 2022-02-20T11:47:53.000Z | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Pre-Training a 🤗 Wav2Vec2 model on unlabeled audio data """
import argparse
import logging
import math
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Union
import datasets
import torch
from datasets import DatasetDict, concatenate_datasets, load_dataset
from torch.utils.data.dataloader import DataLoader
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from huggingface_hub import Repository
from transformers import (
AdamW,
SchedulerType,
Wav2Vec2Config,
Wav2Vec2FeatureExtractor,
Wav2Vec2ForPreTraining,
get_scheduler,
is_wandb_available,
set_seed,
)
from transformers.file_utils import get_full_repo_name
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices
logger = logging.getLogger(__name__)
def parse_args():
    """Parse and validate the command-line arguments for wav2vec2 pretraining.

    Also creates ``args.output_dir`` if it was supplied. Raises via assert
    when --push_to_hub is passed without --output_dir.
    """
    # FIX: the description previously said "Finetune a transformers model on
    # a text classification task" — a copy-paste from another example.
    parser = argparse.ArgumentParser(description="Pretrain a Wav2Vec2 model on unlabeled audio data")
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help="The name of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--dataset_config_names",
        nargs="+",
        type=str,
        required=True,
        help="The configuration names of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--dataset_split_names",
        nargs="+",
        type=str,
        required=True,
        help="The names of the training data set splits to use (via the datasets library).",
    )
    parser.add_argument(
        "--preprocessing_num_workers",
        type=int,
        default=None,
        help="The number of processes to use for the preprocessing.",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--preprocessing_only",
        action="store_true",
        help="Only run the preprocessing script to be cached for future use",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="Where do you want to store the pretrained models downloaded from huggingface.co",
    )
    parser.add_argument(
        "--validation_split_percentage",
        type=int,
        default=1,
        help="Percentage of training data that should be used for validation if no validation is present in dataset.",
    )
    parser.add_argument(
        "--logging_steps",
        type=int,
        default=500,
        help="Number of steps between each logging",
    )
    parser.add_argument(
        "--saving_steps",
        type=int,
        default=500,
        # FIX: help text previously duplicated the --logging_steps description.
        help="Number of steps between each checkpoint save",
    )
    parser.add_argument(
        "--audio_column_name",
        type=str,
        default="audio",
        help="Column in the dataset that contains speech file path. Defaults to 'audio'",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--train_cache_file_name",
        type=str,
        default=None,
        help="Path to the train cached file name",
    )
    parser.add_argument(
        "--validation_cache_file_name",
        type=str,
        default=None,
        help="Path to the validation cached file name",
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
    parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="If True, use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
    parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.")
    parser.add_argument(
        "--max_gumbel_temperature",
        type=float,
        default=2.0,
        help="Maximum temperature for gumbel softmax.",
    )
    parser.add_argument(
        "--min_gumbel_temperature",
        type=float,
        default=0.5,
        help="Minimum temperature for gumbel softmax.",
    )
    parser.add_argument(
        "--gumbel_temperature_decay", type=float, default=0.999995, help="Decay of gumbel temperature during training."
    )
    parser.add_argument(
        "--max_duration_in_seconds",
        type=float,
        default=5.0,
        help="Filter out audio files that are longer than `max_duration_in_seconds` seconds",
    )
    parser.add_argument(
        "--min_duration_in_seconds",
        type=float,
        default=3.0,
        help="Filter out audio files that are shorter than `min_duration_in_seconds` seconds",
    )
    parser.add_argument(
        "--pad_to_multiple_of",
        type=int,
        default=None,
        help="If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).",
    )
    parser.add_argument(
        "--adam_beta1",
        type=float,
        default=0.9,
        help="Beta1 for AdamW optimizer",
    )
    parser.add_argument(
        "--adam_beta2",
        type=float,
        default=0.999,
        help="Beta2 for AdamW optimizer",
    )
    parser.add_argument(
        "--adam_epsilon",
        type=float,
        default=1e-8,
        help="Epsilon for AdamW optimizer",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument(
        "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
    )
    parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
    args = parser.parse_args()

    # NOTE(review): `assert` is stripped under `python -O`; callers relying on
    # this validation should consider raising explicitly.
    if args.push_to_hub:
        assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."

    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)

    return args
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that will dynamically pad the inputs received and prepare masked indices
    for self-supervised pretraining.
    Args:
        model (:class:`~transformers.Wav2Vec2ForPreTraining`):
            The Wav2Vec2 model used for pretraining. The data collator needs to have access
            to config and ``_get_feat_extract_output_lengths`` function for correct padding.
        feature_extractor (:class:`~transformers.Wav2Vec2FeatureExtractor`):
            The processor used for proccessing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        device = batch["input_values"].device
        batch_size = batch["input_values"].shape[0]

        # Length of the feature-encoder output for the padded raw-audio length.
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        # make sure masked sequence length is a Python scalar
        mask_indices_seq_length = int(mask_indices_seq_length)

        # make sure that no loss is computed on padded inputs
        if batch.get("attention_mask") is not None:
            # compute real output lengths according to convolution formula
            batch["sub_attention_mask"] = self.model._get_feature_vector_attention_mask(
                mask_indices_seq_length, batch["attention_mask"]
            )

        features_shape = (batch_size, mask_indices_seq_length)

        # sample randomly masked indices (the spans the model must reconstruct)
        mask_time_indices = _compute_mask_indices(
            features_shape,
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=batch.get("sub_attention_mask"),
        )

        # sample negative indices (distractors for the contrastive loss)
        sampled_negative_indices = _sample_negative_indices(
            features_shape,
            self.model.config.num_negatives,
            mask_time_indices=mask_time_indices,
        )
        batch["mask_time_indices"] = torch.tensor(mask_time_indices, dtype=torch.long, device=device)
        batch["sampled_negative_indices"] = torch.tensor(sampled_negative_indices, dtype=torch.long, device=device)

        return batch
def multiply_grads(params, c):
    """Scale the gradients of *params* in place by the constant *c*."""
    for param in params:
        grad = param.grad
        if grad is None:
            continue
        # Move a tensor-valued scale onto the gradient's device first.
        scale = c.to(grad.device) if torch.is_tensor(c) else c
        grad.data.mul_(scale)
def get_grad_norm(params, scale=1):
    """Return the global 2-norm of the gradients of *params*, undoing *scale*."""
    squared_sum = 0.0
    for param in params:
        if param.grad is None:
            continue
        param_norm = (param.grad.detach().data / scale).norm(2)
        squared_sum += param_norm.item() ** 2
    return squared_sum**0.5
def main():
    """Pre-train a Wav2Vec2 model with the contrastive/diversity objective.

    Pipeline: parse CLI args, download and concatenate the requested dataset
    splits, carve out a validation split, preprocess/normalize the audio,
    train with ``accelerate`` (logging and checkpointing periodically), then
    validate once per epoch and save the final model.
    """
    # See all possible arguments in src/transformers/args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    args = parse_args()

    # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
    accelerator = Accelerator()
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the screen.
    # accelerator.is_local_main_process is only True for one process per machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()

        # set up weights and biases if available
        if is_wandb_available():
            import wandb

            wandb.init(project=args.output_dir.split("/")[-1])
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Handle the repository creation
    if accelerator.is_main_process:
        if args.push_to_hub and not args.preprocessing_only:
            if args.hub_model_id is None:
                repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
            else:
                repo_name = args.hub_model_id
            repo = Repository(args.output_dir, clone_from=repo_name)
        elif args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    # 1. Download and create train, validation dataset
    # We load all dataset configuration and dataset split pairs passed in
    # ``args.dataset_config_names`` and ``args.dataset_split_names``
    datasets_splits = []
    for dataset_config_name, train_split_name in zip(args.dataset_config_names, args.dataset_split_names):
        # load dataset
        dataset_split = load_dataset(
            args.dataset_name, dataset_config_name, split=train_split_name, cache_dir=args.cache_dir
        )
        datasets_splits.append(dataset_split)

    # Next, we concatenate all configurations and splits into a single training dataset
    raw_datasets = DatasetDict()
    if len(datasets_splits) > 1:
        raw_datasets["train"] = concatenate_datasets(datasets_splits).shuffle(seed=args.seed)
    else:
        raw_datasets["train"] = datasets_splits[0]

    # Take ``args.validation_split_percentage`` from the training dataset for the validation_split_percentage
    num_validation_samples = raw_datasets["train"].num_rows * args.validation_split_percentage // 100

    if num_validation_samples == 0:
        # Fixed: the message previously referenced a non-existent
        # ``args.num_validation_split_percentage`` argument.
        raise ValueError(
            "`args.validation_split_percentage` is less than a single sample "
            f"for {len(raw_datasets['train'])} training samples. Increase "
            "`args.validation_split_percentage`. "
        )

    raw_datasets["validation"] = raw_datasets["train"].select(range(num_validation_samples))
    raw_datasets["train"] = raw_datasets["train"].select(range(num_validation_samples, raw_datasets["train"].num_rows))

    # 2. Now we preprocess the datasets including loading the audio, resampling and normalization
    # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
    # so that we just need to set the correct target sampling rate and normalize the input
    # via the `feature_extractor`
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(args.model_name_or_path)

    # make sure that dataset decodes audio with correct sampling rate
    raw_datasets = raw_datasets.cast_column(
        args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    # only normalized-inputs-training is supported
    if not feature_extractor.do_normalize:
        raise ValueError(
            "Training is only supported for normalized inputs. " "Make sure ``feature_extractor.do_normalize == True``"
        )

    # set max & min audio length in number of samples
    max_length = int(args.max_duration_in_seconds * feature_extractor.sampling_rate)
    min_length = int(args.min_duration_in_seconds * feature_extractor.sampling_rate)

    def prepare_dataset(batch):
        # Load one audio sample, truncate to max_length and record its length.
        sample = batch[args.audio_column_name]

        inputs = feature_extractor(
            sample["array"], sampling_rate=sample["sampling_rate"], max_length=max_length, truncation=True
        )
        batch["input_values"] = inputs.input_values[0]
        batch["input_length"] = len(inputs.input_values[0])

        return batch

    # load via mapped files via path
    cache_file_names = None
    if args.train_cache_file_name is not None:
        cache_file_names = {"train": args.train_cache_file_name, "validation": args.validation_cache_file_name}

    # load audio files into numpy arrays
    with accelerator.main_process_first():
        vectorized_datasets = raw_datasets.map(
            prepare_dataset,
            num_proc=args.preprocessing_num_workers,
            remove_columns=raw_datasets["train"].column_names,
            cache_file_names=cache_file_names,
        )

        if min_length > 0.0:
            vectorized_datasets = vectorized_datasets.filter(
                lambda x: x > min_length,
                num_proc=args.preprocessing_num_workers,
                input_columns=["input_length"],
            )

        vectorized_datasets = vectorized_datasets.remove_columns("input_length")

    # for large datasets it is advised to run the preprocessing on a
    # single machine first with ``args.preprocessing_only`` since there will mostly likely
    # be a timeout when running the script in distributed mode.
    # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
    # cached dataset
    if args.preprocessing_only:
        return

    # 3. Load model
    config = Wav2Vec2Config.from_pretrained(args.model_name_or_path)

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and ``config.feat_extract_norm='layer'"
        )

    # initialize random model
    model = Wav2Vec2ForPreTraining(config)

    # Activate gradient checkpointing if needed
    if args.gradient_checkpointing:
        model.gradient_checkpointing_enable()

    # 4. Define data collator, optimizer and scheduler
    data_collator = DataCollatorForWav2Vec2Pretraining(
        model=model, feature_extractor=feature_extractor, pad_to_multiple_of=args.pad_to_multiple_of
    )
    train_dataloader = DataLoader(
        vectorized_datasets["train"],
        shuffle=True,
        collate_fn=data_collator,
        batch_size=args.per_device_train_batch_size,
    )
    eval_dataloader = DataLoader(
        vectorized_datasets["validation"], collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
    )

    # Optimizer
    optimizer = AdamW(
        list(model.parameters()),
        lr=args.learning_rate,
        betas=[args.adam_beta1, args.adam_beta2],
        eps=args.adam_epsilon,
    )

    # Prepare everything with our `accelerator`.
    model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader
    )

    # Scheduler and math around the number of training steps.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)

    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    else:
        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps,
        num_training_steps=args.max_train_steps,
    )

    # 5. Train
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(vectorized_datasets['train'])}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")

    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    # Fixed: this counter was previously initialized twice; once is enough.
    completed_steps = 0

    for epoch in range(args.num_train_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # compute num of losses
            num_losses = batch["mask_time_indices"].sum()
            sub_attention_mask = batch.pop("sub_attention_mask", None)
            sub_attention_mask = (
                sub_attention_mask if sub_attention_mask is not None else torch.ones_like(batch["mask_time_indices"])
            )
            percent_masked = num_losses / sub_attention_mask.sum()

            # forward
            outputs = model(**batch)

            # divide loss by gradient accumulation steps since gradients
            # are accumulated for multiple backward passes in PyTorch
            loss = outputs.loss / args.gradient_accumulation_steps
            accelerator.backward(loss)

            # make sure that `num_losses` is summed for distributed training
            # and average gradients over losses of all devices
            if accelerator.state.num_processes > 1:
                num_losses = accelerator.gather(num_losses).sum()
                gradient_multiplier = accelerator.state.num_processes / num_losses
                multiply_grads(model.module.parameters(), gradient_multiplier)
            else:
                multiply_grads(model.parameters(), 1 / num_losses)

            # update step
            if (step + 1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:

                # compute grad norm for monitoring
                scale = (
                    accelerator.scaler._scale.item()
                    if hasattr(accelerator, "scaler") and accelerator.scaler is not None
                    else 1
                )
                if accelerator.state.num_processes > 1:
                    grad_norm = get_grad_norm(model.module.parameters(), scale)
                else:
                    grad_norm = get_grad_norm(model.parameters(), scale)

                # update parameters
                optimizer.step()
                optimizer.zero_grad()

                if not accelerator.optimizer_step_was_skipped:
                    lr_scheduler.step()
                elif accelerator.is_local_main_process:
                    progress_bar.write(
                        "Gradients have overflown - skipping update step... " f"Updating gradient scale to {scale}..."
                    )

                # update gumbel temperature
                gumbel_temperature = max(
                    args.max_gumbel_temperature * args.gumbel_temperature_decay**completed_steps,
                    args.min_gumbel_temperature,
                )
                if hasattr(model, "module"):
                    model.module.set_gumbel_temperature(gumbel_temperature)
                else:
                    model.set_gumbel_temperature(gumbel_temperature)

                progress_bar.update(1)
                completed_steps += 1

            # 6. Log all results
            if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0:
                loss.detach()
                outputs.contrastive_loss.detach()
                outputs.diversity_loss.detach()

                if accelerator.state.num_processes > 1:
                    loss = accelerator.gather(loss).sum()
                    outputs.contrastive_loss = accelerator.gather(outputs.contrastive_loss).sum()
                    outputs.diversity_loss = accelerator.gather(outputs.diversity_loss).sum()
                    percent_masked = accelerator.gather(percent_masked).sum()

                train_logs = {
                    "loss": (loss * args.gradient_accumulation_steps) / num_losses,
                    # Fixed metric-key typo: was "constrast_loss".
                    "contrast_loss": outputs.contrastive_loss / num_losses,
                    "div_loss": outputs.diversity_loss / num_losses,
                    "%_mask_idx": percent_masked / accelerator.num_processes,
                    "ppl": outputs.codevector_perplexity,
                    "lr": torch.tensor(optimizer.param_groups[0]["lr"]),
                    "temp": torch.tensor(gumbel_temperature),
                    "grad_norm": torch.tensor(grad_norm),
                }
                log_str = ""
                for k, v in train_logs.items():
                    log_str += "| {}: {:.3e}".format(k, v.item())

                if accelerator.is_local_main_process:
                    progress_bar.write(log_str)

                    if is_wandb_available():
                        wandb.log(train_logs)

            # save model every `args.saving_steps` steps
            if (step + 1) % (args.gradient_accumulation_steps * args.saving_steps) == 0:
                if (args.push_to_hub and epoch < args.num_train_epochs - 1) or args.output_dir is not None:
                    accelerator.wait_for_everyone()
                    unwrapped_model = accelerator.unwrap_model(model)
                    unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)

                if (args.push_to_hub and epoch < args.num_train_epochs - 1) and accelerator.is_main_process:
                    repo.push_to_hub(
                        commit_message=f"Training in progress step {completed_steps}",
                        blocking=False,
                        auto_lfs_prune=True,
                    )

            # if completed steps > `args.max_train_steps` stop
            if completed_steps >= args.max_train_steps:
                break

        # 7. Validate!
        model.eval()

        # init logs
        val_logs = {
            "val_loss": 0,
            "val_contrastive_loss": 0,
            "val_diversity_loss": 0,
            "val_num_losses": 0,
        }
        for step, batch in enumerate(eval_dataloader):
            with torch.no_grad():
                batch.pop("sub_attention_mask", None)
                outputs = model(**batch)

            val_logs["val_loss"] += outputs.loss
            val_logs["val_contrastive_loss"] += outputs.contrastive_loss
            val_logs["val_diversity_loss"] += outputs.diversity_loss
            val_logs["val_num_losses"] += batch["mask_time_indices"].sum()

        # sum over devices in multi-processing
        if accelerator.num_processes > 1:
            val_logs = {k: accelerator.gather(v).sum() for k, v in val_logs.items()}

        val_logs = {k: v / val_logs["val_num_losses"] for k, v in val_logs.items()}

        log_str = ""
        for k, v in val_logs.items():
            log_str += "| {}: {:.3e}".format(k, v.item())

        if accelerator.is_local_main_process:
            progress_bar.write(log_str)

            if is_wandb_available():
                wandb.log(val_logs)

        if args.output_dir is not None:
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(model)
            unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
            if accelerator.is_main_process:
                if args.push_to_hub:
                    repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)


if __name__ == "__main__":
    main()
| 40.34478 | 200 | 0.652038 |
8d80bbcd8903d2bf2811e04b5697b5d07e4c92b6 | 419 | py | Python | app/api/decorators.py | imdreamer2018/blogByFlask | 87f374e40b45c95cb87402c3fe1bf4df226a297d | [
"MIT"
] | 2 | 2020-05-18T00:56:25.000Z | 2020-05-18T00:56:27.000Z | app/api/decorators.py | imdreamer2018/SimpleBlog | 87f374e40b45c95cb87402c3fe1bf4df226a297d | [
"MIT"
] | 4 | 2020-05-16T13:22:44.000Z | 2020-05-16T13:22:47.000Z | app/api/decorators.py | imdreamer2018/SimpleBlog | 87f374e40b45c95cb87402c3fe1bf4df226a297d | [
"MIT"
] | null | null | null | from functools import wraps
from flask import g
from .errors import forbidden
def permission_required(permission):
    """Decorator factory: run the view only when the current user holds
    ``permission``; otherwise respond with a 403 via :func:`forbidden`."""
    def decorator(view_func):
        @wraps(view_func)
        def wrapper(*args, **kwargs):
            if g.current_user.can(permission):
                return view_func(*args, **kwargs)
            return forbidden('Insufficient permissions')
        return wrapper
    return decorator
dd171b6cde35a15bd755691b1cacdad663f9f12e | 9,654 | py | Python | kloudless/client.py | theonlybex/kloudless-python | 3e00b4659e5f790a9129e858b19d0bf283cd8e1c | [
"MIT"
] | null | null | null | kloudless/client.py | theonlybex/kloudless-python | 3e00b4659e5f790a9129e858b19d0bf283cd8e1c | [
"MIT"
] | null | null | null | kloudless/client.py | theonlybex/kloudless-python | 3e00b4659e5f790a9129e858b19d0bf283cd8e1c | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import re
import requests
from . import exceptions
from .auth import APIKeyAuth, BearerTokenAuth
from .re_patterns import download_file_patterns
from .resources import ResourceList, Resource, Response, ResponseJson
from .util import logger, url_join, construct_kloudless_endpoint
from .version import VERSION
try:
import simplejson as json
except ImportError:
import json
def handle_response(response):
if not response.ok:
logger.error("Request to '{}' failed: {} - {}".format(
response.url, response.status_code, response.text))
if response.status_code == 401:
raise exceptions.AuthorizationException(response=response)
elif response.status_code == 403:
raise exceptions.ForbiddenException(response=response)
elif response.status_code == 404:
raise exceptions.NotFoundException(response=response)
elif response.status_code == 429:
# TODO: retry mechanism for 429 response
# Might be able to make use of response.request object
raise exceptions.RateLimitException(response=response)
elif response.status_code >= 500:
raise exceptions.ServerException(response=response)
else:
raise exceptions.APIException(response=response)
logger.debug("Request to '{}' succeeded. Status code: {}".format(
response.url, response.status_code))
return response
class Session(requests.Session):
"""
The Session class helps build Kloudless specific headers.
"""
def __init__(self):
super(Session, self).__init__()
self.headers.update({
'User-Agent': 'kloudless-python/{}'.format(VERSION),
})
@staticmethod
def _update_kloudless_headers(headers, get_raw_data, raw_headers,
impersonate_user_id):
if isinstance(get_raw_data, bool):
headers['X-Kloudless-Raw-Data'] = str(get_raw_data).lower()
if raw_headers and isinstance(raw_headers, dict):
headers['X-Kloudless-Raw-Headers'] = json.dumps(
raw_headers)
if impersonate_user_id:
headers['X-Kloudless-As-User'] = str(impersonate_user_id)
def request(self, method, url, api_version=None, get_raw_data=None,
raw_headers=None, impersonate_user_id=None, **kwargs):
"""
Override :func:`requests.Session.request` with additional parameters.
See `API-wide options <https://developers.kloudless.com/docs/latest/
core#api-wide-options>`_ for more information about `get_raw_data`,
`raw_headers` and `impersonate_user_id` parameters.
:param str method: Http method
:param str url: Request url
:param int api_version: API version
:param bool get_raw_data: Set to ``True`` if the ``raw`` object
from upstream service is present. This is equal to the
``X-Kloudless-Raw-Data`` request header.
:param dict raw_headers: Headers fowarded to upstream service. This is
equal to the ``X-Kloudless-Raw-Headers`` request header
:param str impersonate_user_id: User id to access or modify data for
individual user accounts. This is equal to the
``X-Kloudless-As-User`` request header.
:param kwargs: kwargs passed to :func:`requests.Session.request`
:return: :class:`requests.Response`
:raises: :class:`kloudless.exceptions.APIException` or its subclasses
"""
if api_version is not None:
url = re.sub(
r'(https?://.+?/)v\d', r'\1v{}'.format(api_version), url
)
self._update_kloudless_headers(kwargs.setdefault('headers', dict()),
get_raw_data, raw_headers,
impersonate_user_id)
response = handle_response(
super(Session, self).request(method, url, **kwargs)
)
return response
class Client(Session):
"""
Base Client class to send all http requests in this library.
**Instance attributes**
:ivar str url: Base url that will be used as a prefix for all http method
calls
"""
def __init__(self, api_key=None, token=None):
"""
Either ``api_key`` or ``token`` is needed for instantiation.
:param api_key: API key
:param token: Bearer token
"""
super(Client, self).__init__()
if token:
self.token = token
self.auth = BearerTokenAuth(token)
elif api_key:
self.api_key = api_key
self.auth = APIKeyAuth(api_key)
else:
raise exceptions.InvalidParameter(
"An API Key or Bearer Token must be provided. Please check "
"api_key and token parameters."
)
self.url = construct_kloudless_endpoint()
def _compose_url(self, path):
return url_join(self.url, path)
def _create_response_object(self, response):
url = response.url
if 'application/json' not in response.headers.get('content-type', ''):
return Response(self, url, response)
try:
response_data = response.json()
except ValueError:
logger.error("Request to {} failed to decode json: {} - {}".format(
response.url, response.status_code, response.text))
raise
type_ = response_data.get('type')
if type_ == 'object_list':
return ResourceList(data=response_data, url=url,
client=self, response=response)
elif 'id' in response_data or 'href' in response_data:
return Resource(
data=response_data, url=url, client=self, response=response
)
else:
return ResponseJson(data=response_data, url=url,
client=self, response=response)
def request(self, method, path='', get_raw_response=False, **kwargs):
"""
| Override :func:`kloudless.client.Session.request`.
| Note that the actual request url will have ``self.url`` as a prefix.
:param str method: Http method
:param str path: Request path
:param str get_raw_response: Set to ``True`` if the raw
:class:`requests.Response` instance is in the returned value
:param kwargs: kwargs passed to :func:`kloudless.client.Session.request`
:return:
- :class:`requests.Response` if ``get_raw_response`` is ``True``
- :class:`kloudless.resources.base.Response` or its subclass otherwise
"""
url = self._compose_url(path)
response = super(Client, self).request(method, url, **kwargs)
if get_raw_response:
return response
return self._create_response_object(response)
def get(self, path='', **kwargs):
"""
| Http GET request.
| Note that the actual request url will have ``self.url`` as a
prefix.
:param str path: Request path
:param kwargs: See :func:`kloudless.client.Client.request` for more
options.
:return: :class:`kloudless.resources.base.Response` or its subclass
"""
if download_file_patterns.search(path):
kwargs.setdefault('stream', True)
return super(Client, self).get(path, **kwargs)
def post(self, path='', data=None, json=None, **kwargs):
"""
| Http POST request.
| Note that the actual request url will have ``self.url`` as a
prefix.
:param str path: Request path
:param data: passed to :func:`request.Request.post`
:param json: passed to :func:`request.Request.post`
:param kwargs: See :func:`kloudless.client.Client.request` for more
options.
:return: :class:`kloudless.resources.base.Response` or its subclass
"""
return super(Client, self).post(path, data, json, **kwargs)
def put(self, path='', data=None, **kwargs):
"""
| Http PUT request.
| Note that the actual request url will have ``self.url`` as a
prefix.
:param str path: Request path
:param data: passed to :func:`request.Request.put`
:param kwargs: See :func:`kloudless.client.Client.request` for more
options.
:return: :class:`kloudless.resources.base.Response` or its subclass
"""
return super(Client, self).put(path, data, **kwargs)
def patch(self, path='', data=None, **kwargs):
"""
| Http PATCH request.
| Note that the actual request url will have ``self.url`` as a
prefix.
:param str path: Request path
:param data: passed to :func:`request.Request.patch`
:param kwargs: See :func:`kloudless.client.Client.request` for more
options.
:return: :class:`kloudless.resources.base.Response` or its subclass
"""
return super(Client, self).patch(path, data, **kwargs)
def delete(self, path='', **kwargs):
"""
| Http DELETE request.
| Note that the actual request url will have ``self.url`` as a
prefix.
:param str path: Request path
:param kwargs: See :func:`kloudless.client.Client.request` for more
options.
:return: :class:`kloudless.resources.base.Response` or its subclass
"""
return super(Client, self).delete(path, **kwargs)
| 35.233577 | 82 | 0.611871 |
79727ea6bea04bb19fd21c9f87e3618788bff520 | 2,192 | py | Python | typo/wsgi.py | SmartTeleMax/typod | 1f559929535cdc104221f531b9921ad58a010dc3 | [
"MIT"
] | 3 | 2016-01-24T11:59:34.000Z | 2018-07-04T06:00:18.000Z | typo/wsgi.py | SmartTeleMax/typod | 1f559929535cdc104221f531b9921ad58a010dc3 | [
"MIT"
] | null | null | null | typo/wsgi.py | SmartTeleMax/typod | 1f559929535cdc104221f531b9921ad58a010dc3 | [
"MIT"
] | 1 | 2021-04-12T21:17:51.000Z | 2021-04-12T21:17:51.000Z | # -*- coding: utf-8 -*-
import cgi
import sys
import json
from itertools import product
import click
from correctors import TYPO_CLASSES
def make_app(corrector, format, limit=10):
    """Build a WSGI app serving spelling suggestions from ``corrector``.

    GET requests read the typo from the ``query`` parameter; POST requests
    read it from the request body. ``format`` selects 'json' or plain text
    output; ``limit`` caps the number of text suggestions returned.
    """
    def application(env, start_response):
        method = env['REQUEST_METHOD']
        if method == 'GET':
            # QUERY_STRING may be absent per the WSGI spec; default to empty.
            qs = cgi.parse_qs(env.get('QUERY_STRING', ''))
            typo = qs.get('query', [''])[0]
        elif method == 'POST':
            typo = env['wsgi.input'].read()
        else:
            # Bug fix: start_response requires (status, headers); it was
            # previously called with the status only, raising TypeError.
            start_response('405 Method Not Allowed',
                           [('Content-Type', 'text/plain; charset=UTF-8'),
                            ('Allow', 'GET, POST')])
            return []
        start_response('200 OK',
                       [('Content-Type', 'text/plain; charset=UTF-8')])
        typo = typo.decode('utf-8').strip()
        suggestions, _ = corrector.suggestion(typo)
        if format == 'json':
            result = json.dumps(suggestions, ensure_ascii=False).encode('utf-8')
        else:
            results = []
            for i, p in enumerate(product(*suggestions)):
                if i > limit:
                    break
                results.append(u"".join(word for word, weight in p))
            result = '\n'.join(results).encode('utf-8')
        return [result]
    return application
# Command-line option declarations for the WSGI process. The callback is a
# no-op: click is used purely to parse/validate sys.argv, and the resulting
# context is consumed at module import time below.
@click.command()
@click.option('--index',
              type=click.Path(readable=True, resolve_path=True),
              default="index.data", required=True)  # path to the typo index file
@click.option('--corrector', type=click.Choice(TYPO_CLASSES.keys()),
              default='default')
@click.option('--format', type=click.Choice(['text', 'json']),
              default='text')
@click.option('--lang', type=click.Choice(['ru', 'en', 'other']),
              default='ru')
@click.option('--max-candidates', type=click.INT,
              default=1)
@click.option('--limit', type=click.INT, default=10)
def cli(*a, **kw):
    """No-op command; exists only so click can parse the options."""
    pass
# Parse the command-line options at import time and build the module-level
# WSGI callable expected by servers (e.g. gunicorn/uwsgi).
ctx = cli.make_context('wsgi', sys.argv[1:])
corrector_cls = TYPO_CLASSES.get(ctx.params['corrector'])
corrector_inst = corrector_cls(ctx.params['index'],
                               max_candidates=ctx.params['max_candidates'],
                               lang=ctx.params['lang'])
# ``application`` is the conventional WSGI entry-point name.
application = make_app(corrector_inst, ctx.params['format'], ctx.params['limit'])
__all__ = ['application']
| 33.212121 | 81 | 0.573449 |
f307c54a08f28127b097643c61d22e014f01b2df | 1,083 | py | Python | Algo-1/week6/4-Quadruplets-2/quadruplets_2.py | pepincho/Python101-and-Algo1-Courses | 7cf38d26d5be5ffc1a37477ae6375a99906df9e2 | [
"MIT"
] | 2 | 2016-10-11T14:09:05.000Z | 2017-01-20T19:30:34.000Z | Algo-1/week6/4-Quadruplets-2/quadruplets_2.py | pepincho/HackBulgaria | 7cf38d26d5be5ffc1a37477ae6375a99906df9e2 | [
"MIT"
] | null | null | null | Algo-1/week6/4-Quadruplets-2/quadruplets_2.py | pepincho/HackBulgaria | 7cf38d26d5be5ffc1a37477ae6375a99906df9e2 | [
"MIT"
] | null | null | null | class Quadruplets_2:
# Returns the number of quadruplets that sum to zero.
# a - [int]
# b - [int]
# c - [int]
# d - [int]
@staticmethod
def zero_quadruplets_count(a, b, c, d):
left_sums = {}
right_sums = {}
counter = 0
for elem1 in a:
for elem2 in b:
if elem1 + elem2 not in left_sums:
left_sums[elem1 + elem2] = 1
else:
left_sums[elem1 + elem2] += 1
for elem1 in c:
for elem2 in d:
if elem1 + elem2 not in right_sums:
right_sums[elem1 + elem2] = 1
else:
right_sums[elem1 + elem2] += 1
for key in left_sums:
if -key in right_sums:
counter = counter + (left_sums[key] * right_sums[-key])
return counter
def main():
    """Demo entry point: print the zero-sum quadruplet count for a sample."""
    first = [5, 3, 4]
    second = [-2, -1, 6]
    third = [-1, -2, 4]
    fourth = [-1, -2, 7]
    print(Quadruplets_2.zero_quadruplets_count(first, second, third, fourth))


if __name__ == '__main__':
    main()
| 22.5625 | 71 | 0.467221 |
bb433ef93ad10413cbd9ed4bf96c3ca539e94782 | 564 | py | Python | apps/officehours/models.py | ezl/hnofficehours | 3729eca064998bd2d0a9ba1b4fe7e56ccc57324b | [
"MIT"
] | 2 | 2015-11-05T13:47:44.000Z | 2020-07-20T19:57:45.000Z | apps/officehours/models.py | ezl/hnofficehours | 3729eca064998bd2d0a9ba1b4fe7e56ccc57324b | [
"MIT"
] | null | null | null | apps/officehours/models.py | ezl/hnofficehours | 3729eca064998bd2d0a9ba1b4fe7e56ccc57324b | [
"MIT"
] | null | null | null | from django.db import models
from django.conf import settings
from django.contrib.sites.models import Site
import schedule.models
from schedule.models import Calendar
def create_global_calendar(sender=None, app=None, **kwargs):
    """post_syncdb handler: ensure the current Site has a global calendar.

    Does nothing when invoked without a sender (e.g. called directly).
    """
    if sender:
        slug = getattr(settings, 'GLOBAL_CALENDAR_SLUG', 'cal')
        current_site = Site.objects.get_current()
        Calendar.objects.get_or_create_calendar_for_object(current_site, name=slug)


# Create the calendar once the schedule app's tables have been synced.
models.signals.post_syncdb.connect(create_global_calendar, sender=schedule.models)
| 37.6 | 82 | 0.723404 |
1e7cab34df8bb3fbab415842a9413eb5b70b7177 | 1,245 | py | Python | user/migrations/0003_auto_20200720_1241.py | MohammadReza-Jafari/Gizshop_api | f2dc895a32d5964e7d4d27da5172f132b511b7ee | [
"MIT"
] | null | null | null | user/migrations/0003_auto_20200720_1241.py | MohammadReza-Jafari/Gizshop_api | f2dc895a32d5964e7d4d27da5172f132b511b7ee | [
"MIT"
] | 5 | 2021-04-08T21:57:34.000Z | 2022-02-10T12:43:03.000Z | user/migrations/0003_auto_20200720_1241.py | MohammadReza-Jafari/Gizshop_api | f2dc895a32d5964e7d4d27da5172f132b511b7ee | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-07-20 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: each AlterField below keeps the field
    # definition but adds null=True, making these CustomUser profile fields
    # optional at the database level.
    dependencies = [
        ('user', '0002_auto_20200720_1221'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='address',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='bank_account',
            field=models.CharField(max_length=16, null=True),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='name',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='national_code',
            field=models.CharField(max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='phone_number',
            field=models.CharField(max_length=13, null=True),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='postal_code',
            field=models.CharField(max_length=10, null=True),
        ),
    ]
| 28.295455 | 62 | 0.563052 |
da3d3a7dda608ba3fc672d4d8264efb50c965d4f | 5,330 | py | Python | pulsar/client/job_directory.py | nuwang/pulsar | ccebb1563c8f8eaca0505ed168920d8121bd8a1e | [
"Apache-2.0"
] | null | null | null | pulsar/client/job_directory.py | nuwang/pulsar | ccebb1563c8f8eaca0505ed168920d8121bd8a1e | [
"Apache-2.0"
] | null | null | null | pulsar/client/job_directory.py | nuwang/pulsar | ccebb1563c8f8eaca0505ed168920d8121bd8a1e | [
"Apache-2.0"
] | null | null | null | """
"""
import os.path
import posixpath
from collections import deque
from logging import getLogger
from galaxy.util import in_directory
from .util import PathHelper
log = getLogger(__name__)
# Maps each staging file "type" label to the name of the RemoteJobDirectory
# accessor returning that type's directory. The output_* variants
# intentionally reuse the working/metadata directories of their non-output
# counterparts.
TYPES_TO_METHOD = dict(
    input="inputs_directory",
    unstructured="unstructured_files_directory",
    config="configs_directory",
    tool="tool_files_directory",
    workdir="working_directory",
    metadata="metadata_directory",
    output="outputs_directory",
    output_workdir="working_directory",
    output_metadata="metadata_directory",
)
class RemoteJobDirectory(object):
    """ Representation of a (potentially) remote Pulsar-style staging directory.
    """
    def __init__(self, remote_staging_directory, remote_id, remote_sep):
        """
        :param remote_staging_directory: Base staging directory on the remote.
        :param remote_id: Job id appended under the staging directory; when
            falsy, the staging directory itself is used as the job directory.
        :param remote_sep: Path separator used by the remote system.
        """
        self.path_helper = PathHelper(remote_sep)
        if remote_id:
            self.job_directory = self.path_helper.remote_join(
                remote_staging_directory,
                remote_id
            )
        else:
            self.job_directory = remote_staging_directory
    def metadata_directory(self):
        return self._sub_dir('metadata')
    def working_directory(self):
        return self._sub_dir('working')
    def inputs_directory(self):
        return self._sub_dir('inputs')
    def outputs_directory(self):
        return self._sub_dir('outputs')
    def configs_directory(self):
        return self._sub_dir('configs')
    def tool_files_directory(self):
        return self._sub_dir('tool_files')
    def unstructured_files_directory(self):
        return self._sub_dir('unstructured')
    @property
    def path(self):
        return self.job_directory
    @property
    def separator(self):
        return self.path_helper.separator
    def calculate_path(self, remote_relative_path, input_type):
        """ Only for used by Pulsar client, should override for managers to
        enforce security and make the directory if needed.
        """
        directory, allow_nested_files = self._directory_for_file_type(input_type)
        return self.path_helper.remote_join(directory, remote_relative_path)
    def _directory_for_file_type(self, file_type):
        # work_dir and input_extra are types used by legacy clients...
        # Obviously this client won't be legacy because this is in the
        # client module, but this code is reused on server which may
        # serve legacy clients.
        allow_nested_files = file_type in ['input', 'unstructured', 'output', 'output_workdir', 'metadata', 'output_metadata']
        # Bug fix: the unknown-type path previously executed
        # getattr(self, None, None), which raises TypeError before the
        # intended Exception below could be raised. Resolve the method
        # name first and validate it.
        method_name = TYPES_TO_METHOD.get(file_type)
        if not method_name:
            raise Exception("Unknown file_type specified %s" % file_type)
        directory_function = getattr(self, method_name)
        return directory_function(), allow_nested_files
    def _sub_dir(self, name):
        return self.path_helper.remote_join(self.job_directory, name)
def get_mapped_file(directory, remote_path, allow_nested_files=False, local_path_module=os.path, mkdir=True):
    """
    Map a posix-style ``remote_path`` into ``directory``, optionally
    preserving the nested layout (and creating parent directories).

    >>> import ntpath
    >>> get_mapped_file(r'C:\\pulsar\\staging\\101', 'dataset_1_files/moo/cow', allow_nested_files=True, local_path_module=ntpath, mkdir=False)
    'C:\\\\pulsar\\\\staging\\\\101\\\\dataset_1_files\\\\moo\\\\cow'
    >>> get_mapped_file(r'C:\\pulsar\\staging\\101', 'dataset_1_files/moo/cow', allow_nested_files=False, local_path_module=ntpath)
    'C:\\\\pulsar\\\\staging\\\\101\\\\cow'
    >>> get_mapped_file(r'C:\\pulsar\\staging\\101', '../cow', allow_nested_files=True, local_path_module=ntpath, mkdir=False)
    Traceback (most recent call last):
    Exception: Attempt to read or write file outside an authorized directory.
    """
    if not allow_nested_files:
        # Flat layout: nesting is discarded and only the basename survives.
        return local_path_module.join(directory, local_path_module.basename(remote_path))
    relative_path = __posix_to_local_path(remote_path, local_path_module=local_path_module)
    full_path = local_path_module.join(directory, relative_path)
    verify_is_in_directory(full_path, directory, local_path_module=local_path_module)
    parent_directory = local_path_module.dirname(full_path)
    if mkdir and not local_path_module.exists(parent_directory):
        os.makedirs(parent_directory)
    return full_path
def __posix_to_local_path(path, local_path_module=os.path):
    """
    Converts a posix path (coming from Galaxy), to a local path (be it posix or Windows).

    >>> import ntpath
    >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=ntpath)
    'dataset_1_files\\\\moo\\\\cow'
    >>> import posixpath
    >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=posixpath)
    'dataset_1_files/moo/cow'
    """
    # Peel components off the right end, then reassemble with the local
    # platform's separator.
    segments = []
    remaining = path
    while remaining and remaining != '/':
        remaining, leaf = posixpath.split(remaining)
        segments.append(leaf)
    segments.reverse()
    return local_path_module.join(*segments)
def verify_is_in_directory(path, directory, local_path_module=os.path):
    """Raise an ``Exception`` unless *path* resolves inside *directory*.

    Guards against path traversal when mapping remote-supplied paths onto
    the local filesystem; delegates the containment check to
    ``in_directory``.
    """
    if not in_directory(path, directory, local_path_module):
        msg = "Attempt to read or write file outside an authorized directory."
        # logging's warn() is a deprecated alias of warning(); also pass the
        # arguments lazily instead of %-formatting them eagerly.
        log.warning("%s Attempted path: %s, valid directory: %s", msg, path, directory)
        raise Exception(msg)
| 36.758621 | 143 | 0.700188 |
4a0c35da91f6b94c7700c85ce9c497ac17fa1a2c | 1,602 | py | Python | build/_downloads/2e3e83652169c85c0c0972d072ffa8a2/two_layer_net_module.py | ScorpioDoctor/antares02 | 631b817d2e98f351d1173b620d15c4a5efed11da | [
"BSD-3-Clause"
] | null | null | null | build/_downloads/2e3e83652169c85c0c0972d072ffa8a2/two_layer_net_module.py | ScorpioDoctor/antares02 | 631b817d2e98f351d1173b620d15c4a5efed11da | [
"BSD-3-Clause"
] | null | null | null | build/_downloads/2e3e83652169c85c0c0972d072ffa8a2/two_layer_net_module.py | ScorpioDoctor/antares02 | 631b817d2e98f351d1173b620d15c4a5efed11da | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
PyTorch: 自定义 nn Modules
--------------------------
一个完全连接的ReLU网络,只有一个隐藏层,没有偏置,最小化欧氏误差训练从x预测y。
该实现将模型定义为自定义模块的子类。当你需要一个比已有的简单序列化模块更复杂的模型的时候,
你就需要用这种方式来定义你的模型。
请注意:这里有两个词,一个是 模型(model);另一个是 模块(module)。我们可以用模块的方式来定义模型
"""
import torch
class TwoLayerNet(torch.nn.Module):
    """Fully-connected network: linear -> ReLU -> linear."""

    def __init__(self, D_in, H, D_out):
        """
        In the constructor we instantiate two nn.Linear modules and
        assign them as member variables so their parameters are
        registered with the Module.
        """
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        """
        In the forward function we accept a Tensor of input data and
        return a Tensor of output data. We can use the Modules defined
        in the constructor as well as arbitrary operators on Tensors.
        """
        hidden = self.linear1(x).clamp(min=0)
        return self.linear2(hidden)
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs.
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Construct our model by instantiating the class defined above.
model = TwoLayerNet(D_in, H, D_out)
# Construct our loss function and an optimizer. The call to
# model.parameters() in the SGD constructor will contain the learnable
# parameters of the two nn.Linear modules which are members of the model.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    # Forward pass: compute predicted y by passing x to the model.
    y_pred = model(x)
    # Compute and print loss.
    loss = criterion(y_pred, y)
    print(t, loss.item())
    # Zero gradients, perform a backward pass, and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
| 24.272727 | 56 | 0.653558 |
b5e860aaf87c8f9283e72f530c020920407da243 | 7,720 | py | Python | gateware/spi_flash.py | paddatrapper/HDMI2USB-litex-firmware | 6a0235abe0ce9195b1717742c13c0dc4d45c3f4d | [
"BSD-2-Clause"
] | 4 | 2018-08-19T03:50:15.000Z | 2020-07-24T23:08:48.000Z | gateware/spi_flash.py | bunnie/litex-buildenv | 7a704884a7f139716880ea02fec9309e253878e4 | [
"BSD-2-Clause"
] | null | null | null | gateware/spi_flash.py | bunnie/litex-buildenv | 7a704884a7f139716880ea02fec9309e253878e4 | [
"BSD-2-Clause"
] | null | null | null | from migen import *
from migen.genlib.misc import timeline
from litex.soc.interconnect import wishbone
from litex.soc.interconnect.csr import AutoCSR, CSRStorage, CSRStatus
# SPI flash read-instruction opcodes (used below to build read commands).
_FAST_READ = 0x0b  # single I/O fast read
_DIOFR = 0xbb  # dual I/O fast read
_QIOFR = 0xeb  # quad I/O fast read (e.g. on N25Q128, per _format_cmd's docstring)
def _format_cmd(cmd, spi_width):
    """Spread the 8-bit read instruction *cmd* across a *spi_width*-wide dq bus.

    The command (like the address and data) is transmitted on all dq lines,
    so each command bit is widened to ``spi_width`` bus bits, with the
    don't-care lines (dq1-dq3) held high. For example, for N25Q128 the quad
    i/o fast read 0xeb widened to 4 lines is 0xfffefeff.
    """
    widened = 2**(8*spi_width) - 1
    for bit in range(8):
        if not (cmd >> bit) & 1:
            widened &= ~(1 << (bit * spi_width))
    return widened
class SpiFlashDualQuad(Module, AutoCSR):
    def __init__(self, pads, dummy=15, div=2, with_bitbang=True):
        """
        Simple SPI flash.

        Supports multi-bit pseudo-parallel reads (aka Dual or Quad I/O Fast
        Read). Only supports mode0 (cpol=0, cpha=0).

        pads: platform pads providing clk, cs_n and a dq bus of width 2 or 4.
        dummy: number of dummy cycles between the address and data phases.
        div: system-clock divider used to derive the SPI clock (must be >= 2).
        with_bitbang: also expose CSRs for software bit-banged SPI access.
        """
        self.bus = bus = wishbone.Interface()
        spi_width = len(pads.dq)  # 2 = dual I/O, 4 = quad I/O
        assert spi_width >= 2
        if with_bitbang:
            # CSR bit layout: 0 = mosi, 1 = clk, 2 = cs_n, 3 = dq direction
            # (see the comment in bitbang_logic below).
            self.bitbang = CSRStorage(4)
            self.miso = CSRStatus()
            self.bitbang_en = CSRStorage()
        # # #
        cs_n = Signal(reset=1)  # chip select, active low (idle high)
        clk = Signal()
        dq_oe = Signal()  # drive direction of the bidirectional dq lines
        wbone_width = len(bus.dat_r)
        # Per-bus-width read setup: (command spread over the dq lines, command bit count).
        read_cmd_params = {
            4: (_format_cmd(_QIOFR, 4), 4*8),
            2: (_format_cmd(_DIOFR, 2), 2*8),
        }
        read_cmd, cmd_width = read_cmd_params[spi_width]
        addr_width = 24
        dq = TSTriple(spi_width)
        self.specials.dq = dq.get_tristate(pads.dq)
        # Shift register shared by the command, address and data phases; the
        # Wishbone read data is taken straight from it.
        sr = Signal(max(cmd_width, addr_width, wbone_width))
        self.comb += bus.dat_r.eq(sr)
        # Hardware-driven (memory-mapped read) pin assignments.
        hw_read_logic = [
            pads.clk.eq(clk),
            pads.cs_n.eq(cs_n),
            dq.o.eq(sr[-spi_width:]),
            dq.oe.eq(dq_oe)
        ]
        if with_bitbang:
            bitbang_logic = [
                pads.clk.eq(self.bitbang.storage[1]),
                pads.cs_n.eq(self.bitbang.storage[2]),
                # In Dual/Quad mode, no single data pin is consistently
                # an input or output thanks to dual/quad reads, so we need a bit
                # to swap direction of the pins. Aside from this additional bit,
                # bitbang mode is identical for Single/Dual/Quad; dq[0] is mosi
                # and dq[1] is miso, meaning remaining data pin values don't
                # appear in CSR registers.
                If(self.bitbang.storage[3],
                    dq.oe.eq(0)
                ).Else(
                    dq.oe.eq(1)
                ),
                If(self.bitbang.storage[1], # CPOL=0/CPHA=0 or CPOL=1/CPHA=1 only.
                    self.miso.status.eq(dq.i[1])
                ),
                dq.o.eq(Cat(self.bitbang.storage[0], Replicate(1, spi_width-1)))
            ]
            # Mux between software bit-bang control and the hardware reader.
            self.comb += [
                If(self.bitbang_en.storage,
                    bitbang_logic
                ).Else(
                    hw_read_logic
                )
            ]
        else:
            self.comb += hw_read_logic
        if div < 2:
            raise ValueError("Unsupported value \'{}\' for div parameter for SpiFlash core".format(div))
        else:
            # Clock divider: raise the SPI clock (and sample dq) at mid-period,
            # lower it and shift the sampled bits into sr at period end.
            i = Signal(max=div)
            dqi = Signal(spi_width)
            self.sync += [
                If(i == div//2 - 1,
                    clk.eq(1),
                    dqi.eq(dq.i),
                ),
                If(i == div - 1,
                    i.eq(0),
                    clk.eq(0),
                    sr.eq(Cat(dqi, sr[:-spi_width]))
                ).Else(
                    i.eq(i + 1),
                ),
            ]
        # spi is byte-addressed, prefix by zeros
        z = Replicate(0, log2_int(wbone_width//8))
        # Read-transaction schedule as (duration, actions) pairs:
        # command phase, address phase, dummy + data phase, ack, deselect gap.
        seq = [
            (cmd_width//spi_width*div,
                [dq_oe.eq(1), cs_n.eq(0), sr[-cmd_width:].eq(read_cmd)]),
            (addr_width//spi_width*div,
                [sr[-addr_width:].eq(Cat(z, bus.adr))]),
            ((dummy + wbone_width//spi_width)*div,
                [dq_oe.eq(0)]),
            (1,
                [bus.ack.eq(1), cs_n.eq(1)]),
            (div, # tSHSL!
                [bus.ack.eq(0)]),
            (0,
                []),
        ]
        # accumulate timeline deltas
        t, tseq = 0, []
        for dt, a in seq:
            tseq.append((t, a))
            t += dt
        self.sync += timeline(bus.cyc & bus.stb & (i == div - 1), tseq)
class SpiFlashSingle(Module, AutoCSR):
    def __init__(self, pads, dummy=15, div=2, with_bitbang=True):
        """
        Simple SPI flash.

        Supports 1-bit reads. Only supports mode0 (cpol=0, cpha=0).

        pads: platform pads providing clk, cs_n, mosi and miso
              (wp/hold, if present, are driven high).
        dummy: number of dummy cycles between the address and data phases.
        div: system-clock divider used to derive the SPI clock (must be >= 2).
        with_bitbang: also expose CSRs for software bit-banged SPI access.
        """
        self.bus = bus = wishbone.Interface()
        if with_bitbang:
            # CSR bit layout: 0 = mosi, 1 = clk, 2 = cs_n.
            self.bitbang = CSRStorage(4)
            self.miso = CSRStatus()
            self.bitbang_en = CSRStorage()
        # # #
        # Tie wp/hold high when the package exposes them — presumably these
        # pins are active-low on the targeted parts; confirm for the flash used.
        if hasattr(pads, "wp"):
            self.comb += pads.wp.eq(1)
        if hasattr(pads, "hold"):
            self.comb += pads.hold.eq(1)
        cs_n = Signal(reset=1)  # chip select, active low (idle high)
        clk = Signal()
        wbone_width = len(bus.dat_r)
        read_cmd = _FAST_READ
        cmd_width = 8
        addr_width = 24
        # Shift register shared by the command, address and data phases; the
        # Wishbone read data is taken straight from it.
        sr = Signal(max(cmd_width, addr_width, wbone_width))
        self.comb += bus.dat_r.eq(sr)
        # Hardware-driven (memory-mapped read) pin assignments.
        hw_read_logic = [
            pads.clk.eq(clk),
            pads.cs_n.eq(cs_n),
            pads.mosi.eq(sr[-1:])
        ]
        if with_bitbang:
            bitbang_logic = [
                pads.clk.eq(self.bitbang.storage[1]),
                pads.cs_n.eq(self.bitbang.storage[2]),
                If(self.bitbang.storage[1], # CPOL=0/CPHA=0 or CPOL=1/CPHA=1 only.
                    self.miso.status.eq(pads.miso)
                ),
                pads.mosi.eq(self.bitbang.storage[0])
            ]
            # Mux between software bit-bang control and the hardware reader.
            self.comb += [
                If(self.bitbang_en.storage,
                    bitbang_logic
                ).Else(
                    hw_read_logic
                )
            ]
        else:
            self.comb += hw_read_logic
        if div < 2:
            raise ValueError("Unsupported value \'{}\' for div parameter for SpiFlash core".format(div))
        else:
            # Clock divider: raise the SPI clock (and sample miso) at
            # mid-period, lower it and shift the bit into sr at period end.
            i = Signal(max=div)
            miso = Signal()
            self.sync += [
                If(i == div//2 - 1,
                    clk.eq(1),
                    miso.eq(pads.miso),
                ),
                If(i == div - 1,
                    i.eq(0),
                    clk.eq(0),
                    sr.eq(Cat(miso, sr[:-1]))
                ).Else(
                    i.eq(i + 1),
                ),
            ]
        # spi is byte-addressed, prefix by zeros
        z = Replicate(0, log2_int(wbone_width//8))
        # Read-transaction schedule as (duration, actions) pairs:
        # command phase, address phase, dummy + data phase, ack, deselect gap.
        seq = [
            (cmd_width*div,
                [cs_n.eq(0), sr[-cmd_width:].eq(read_cmd)]),
            (addr_width*div,
                [sr[-addr_width:].eq(Cat(z, bus.adr))]),
            ((dummy + wbone_width)*div,
                []),
            (1,
                [bus.ack.eq(1), cs_n.eq(1)]),
            (div, # tSHSL!
                [bus.ack.eq(0)]),
            (0,
                []),
        ]
        # accumulate timeline deltas
        t, tseq = 0, []
        for dt, a in seq:
            tseq.append((t, a))
            t += dt
        self.sync += timeline(bus.cyc & bus.stb & (i == div - 1), tseq)
def SpiFlash(pads, *args, **kw):
    """Instantiate the SPI flash core matching the pad layout: the single-I/O
    variant when a dedicated mosi pin exists, otherwise the dual/quad (dq bus)
    variant. Extra arguments are forwarded to the selected class."""
    flash_cls = SpiFlashSingle if hasattr(pads, "mosi") else SpiFlashDualQuad
    return flash_cls(pads, *args, **kw)
| 29.465649 | 104 | 0.468005 |
60e917440fdc6ad8ae7907d2051dd98635c085ae | 4,815 | py | Python | notebooks/extract.py | kerteszg/EmbeddingNet | 9bff4711116e6a7f5b95c9ff5a5fc5c350d1fd86 | [
"MIT"
] | 2 | 2020-06-04T10:25:22.000Z | 2021-07-30T14:09:46.000Z | notebooks/extract.py | kerteszg/EmbeddingNet | 9bff4711116e6a7f5b95c9ff5a5fc5c350d1fd86 | [
"MIT"
] | null | null | null | notebooks/extract.py | kerteszg/EmbeddingNet | 9bff4711116e6a7f5b95c9ff5a5fc5c350d1fd86 | [
"MIT"
] | null | null | null | import cv2
import os
class Observation:
def __init__(self, line):
lst = line.split(',')
lst = [int(x) for x in lst]
self.id = lst[0]
self.frame = lst[1]
self.UpperPointShort = [lst[2], lst[3]]
self.UpperPointCorner = [lst[4], lst[5]]
self.UpperPointLong = [lst[6], lst[7]]
self.CrossCorner = [lst[8], lst[9]]
self.ShortSide = [lst[10], lst[11]]
self.Corner = [lst[12], lst[13]]
self.LongSide = [lst[14], lst[15]]
self.LowerCrossCorner = [lst[16], lst[17]]
def topLeftCorner(self):
return min([self.Corner[0], self.CrossCorner[0], self.LongSide[0], self.LowerCrossCorner[0],
self.ShortSide[0], self.UpperPointCorner[0], self.UpperPointShort[0], self.UpperPointLong[0]]),\
min([self.Corner[1], self.CrossCorner[1], self.LongSide[1], self.LowerCrossCorner[1],
self.ShortSide[1], self.UpperPointCorner[1], self.UpperPointShort[1], self.UpperPointLong[1]])
def lowerRightCorner(self):
return max([self.Corner[0], self.CrossCorner[0], self.LongSide[0], self.LowerCrossCorner[0],
self.ShortSide[0], self.UpperPointCorner[0], self.UpperPointShort[0], self.UpperPointLong[0]]),\
max([self.Corner[1], self.CrossCorner[1], self.LongSide[1], self.LowerCrossCorner[1],
self.ShortSide[1], self.UpperPointCorner[1], self.UpperPointShort[1], self.UpperPointLong[1]])
def selectionSize(self):
tlx, tly = self.topLeftCorner()
lrx, lry = self.lowerRightCorner()
return lrx-tlx, lry-tly
def getSquaredCorners(self):
w, h = self.selectionSize()
tlx, tly = self.topLeftCorner()
lrx, lry = self.lowerRightCorner()
if w > h:
half1 = int((w-h) / 2)
half2 = w-h-half1
tly -= half1
lry += half2
else:
half1 = int((h - w) / 2)
half2 = h - w - half1
tlx -= half1
lrx += half2
if lrx-tlx != lry-tly:
print('hupsz')
return (tlx, tly), (lrx, lry)
# TODO !!!
# 1A:1, 1B:3, 2A:1, 2B:3, 3A:1, 3B:3
#kamera = '1A'
#frame_disp = 1
just_show = False  # True: display annotated frames instead of saving crops
# !!!
# For each camera recording, the second tuple element is the frame offset at
# which annotation frame numbering starts for that video.
for (kamera, frame_disp) in [('1A', 1), ('1B', 3), ('2A', 1), ('2B', 3), ('3A', 1), ('3B', 3), ('4A', 1), ('4B', 3), ('5A', 1), ('5B', 3)]:
    if not os.path.exists('frames/{}'.format(kamera)):
        os.makedirs('frames/{}'.format(kamera))
    vidcap = cv2.VideoCapture('e:\\{}.mov'.format(kamera))
    success, image = vidcap.read()
    # Load the annotation rows; the first line is a header.
    lst = [line.rstrip('\n') for line in open('e:\\{}_annotations.txt'.format(kamera))]
    lst = lst[1:] #remove first line
    lst = [Observation(line) for line in lst]
    lst.sort(key=lambda x: x.frame)
    frame_id = frame_disp
    while len(lst) > 0:
        o = lst[0]
        lst = lst[1:]
        # Advance the video until the annotated frame is reached.
        while frame_id < o.frame:
            success, image = vidcap.read()
            frame_id += 1
            #if frame_id % 100 == 0:
            #    print('current frame: ', frame_id)
        if frame_id == o.frame:
            start, end = o.getSquaredCorners()
            crop = image[start[1]:end[1], start[0]:end[0]]
            # A non-square crop here presumably means the padded square was
            # clipped at the image border — trim the longer side to compensate.
            if crop.shape[0] != crop.shape[1]:
                diff = abs(crop.shape[0] - crop.shape[1])
                half1 = int(diff / 2)
                half2 = diff - half1
                if crop.shape[0] > crop.shape[1]:
                    crop = crop[half1:crop.shape[0]-half2, :]
                else:
                    crop = crop[:, half1:crop.shape[1]-half2]
            if just_show:
                cv2.rectangle(image, start, end, (255, 255, 255))
                cv2.imshow("image", image)
                cv2.waitKey(0)
            else:
                # File name encodes object id, frame number and crop size.
                cv2.imwrite("frames/{}/{}_{}_{}.jpg".format(kamera, o.id, o.frame, crop.shape[0]), crop)
                #print(crop.shape, ' saved as ', "frames/{}_{}_{}.jpg".format(o.id, o.frame, crop.shape[0]))
        #if len(lst) % 100 == 0:
        #    print(len(lst), ' observations remaining')
    print(kamera, 'done!')
print('done!')
'''
count = 0
while count < 1797:
success, f = vidcap.read()
count += 1
if count % 100 == 0:
print('current: ', count)
#f = cv2.imread('frames/frame1800.jpg')
#cv2.rectangle(f, (648, 240), (521, 243), (0,0,255))
#cv2.rectangle(f, (405, 183), (500, 181), (0,255,0))
#cv2.rectangle(f, (652, 415), (528, 423), (255,0,0))
#cv2.rectangle(f, (412, 316), (506, 312), (255,0,255))
#UpperPointLong[x y] -> ShortSide[x y]
#cv2.rectangle(f, (405, 183), (652, 415), (255,255,255))
o = Observation('1,1800,999,266,937,271,862,252,922,247,998,339,936,346,862,322,921,316')
tl, lr = o.getSquaredCorners()
cv2.rectangle(f, tl, lr, (255,0,0))
cv2.imshow("f", f)
cv2.waitKey(0)
''' | 37.913386 | 139 | 0.541641 |
a8de20a81ac74b49cdb0b0ef75c4361495ccc13f | 1,338 | py | Python | codes/string/karp_rabin_v1.py | NilLau/NilLau.github.io | e55768be0be4d6549b24c702554c11e64958d4c7 | [
"MIT"
] | null | null | null | codes/string/karp_rabin_v1.py | NilLau/NilLau.github.io | e55768be0be4d6549b24c702554c11e64958d4c7 | [
"MIT"
] | 6 | 2019-06-20T10:05:10.000Z | 2019-07-08T04:53:01.000Z | codes/string/karp_rabin_v1.py | 117ami/117ami.github.io | e55768be0be4d6549b24c702554c11e64958d4c7 | [
"MIT"
] | null | null | null | import string
def rabin_karp(s: string, pat: string):
'''Implementation of Rabin-Karp Algorithm to find all occurrence
of pat in s.
'''
m, n = len(s), len(pat)
h_pat = 0 # Hash value for pat
h_sub = 0 # Hash value for substring (with length n) of s
prime_modulus = 101 # A prime number serves as modulus
d = 256 # The number of characters in the input alphabet
base_offset = 1 # Base offset, which equals pow(d, M-1) % q
# To avoid overflowing integer maximums when the pattern string is longer,
# the pattern length base offset is pre-calculated in a loop, modulating
# the result each iteration
for i in range(n - 1):
base_offset = (base_offset * d) % prime_modulus
# Calculate the hash value of pattern and first window of s
for i in range(n):
h_pat = (d * h_pat + ord(pat[i])) % prime_modulus
h_sub = (d * h_sub + ord(s[i])) % prime_modulus
print(h_pat, h_sub)
occurrences = []
for i in range(m - n + 1):
if h_pat == h_sub:
if pat == s[i:i + n]:
occurrences.append(i)
if i < m - n:
h_sub = ((h_sub - ord(s[i]) * base_offset) * d +
ord(s[i + n]) + prime_modulus) % prime_modulus
return occurrences
# Demo: prints the match positions of 'ana' in 'banana'.
s = 'banana'
t = 'ana'
print(rabin_karp(s, t))
| 31.857143 | 78 | 0.59716 |
af2e5f02bebdb2950b12825dd5eb23e73862ceb9 | 532 | py | Python | tests/fields/test_migrations_encrypted_default/0002_integerencryptedmodel_field_2.py | hishamkaram/django-cryptography | d3b5737dc562c46a475f12bda828e1c724b56633 | [
"BSD-3-Clause"
] | null | null | null | tests/fields/test_migrations_encrypted_default/0002_integerencryptedmodel_field_2.py | hishamkaram/django-cryptography | d3b5737dc562c46a475f12bda828e1c724b56633 | [
"BSD-3-Clause"
] | null | null | null | tests/fields/test_migrations_encrypted_default/0002_integerencryptedmodel_field_2.py | hishamkaram/django-cryptography | d3b5737dc562c46a475f12bda828e1c724b56633 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django_cryptography.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fields', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='integerencrypteddefaultmodel',
name='field_2',
field=django_cryptography.fields.encrypt(models.IntegerField(max_length=50, blank=True)),
preserve_default=False,
),
]
| 24.181818 | 101 | 0.654135 |
0de89882c51248aedeab4445d17ab7655ac10307 | 23,745 | py | Python | main.py | sherif-zeet/yt5 | 89c008c0d5be6eb87f2f472a245d53b38980437a | [
"Apache-2.0"
] | null | null | null | main.py | sherif-zeet/yt5 | 89c008c0d5be6eb87f2f472a245d53b38980437a | [
"Apache-2.0"
] | null | null | null | main.py | sherif-zeet/yt5 | 89c008c0d5be6eb87f2f472a245d53b38980437a | [
"Apache-2.0"
] | 1 | 2021-06-08T13:15:31.000Z | 2021-06-08T13:15:31.000Z | from flask import Flask, request, send_file
from pytube import YouTube
import logging
import sys
"""
Flask YouTube Video Downloader - Python Marketer
https://pythonmarketer.com/2020/10/07/making-a-youtube-video-downloader-with-pythons-flask-and-pytube3-libraries/
"""
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
app = Flask(__name__)
@app.route("/")
def youtube_downloader():
"""Render HTML form to accept YouTube URL."""
html_page = f"""<html>
<head>
<Title>Youtube Downloader - Online Youtube Video Downloader | YT5</Title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<link href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" rel="stylesheet"/>
<link href="view-source:http://codenow.epizy.com/CSS/socialmedia/style.css" rel="stylesheet"/>
<link rel="stylesheet" type="text/css" href="https://yt1s.com/statics/css/style.css?v=1.99">
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-128x128.png" sizes="128x128">
<link rel="alternate" href="https://yt1s.com/en6" hreflang="x-default" /><link rel="alternate" href="https://yt1s.com/en6" hreflang="en" /><link rel="alternate" href="https://yt1s.com/de5" hreflang="de" /><link rel="alternate" href="https://yt1s.com/es6" hreflang="es" /><link rel="alternate" href="https://yt1s.com/fr" hreflang="fr" /><link rel="alternate" href="https://yt1s.com/hi" hreflang="hi" /><link rel="alternate" href="https://yt1s.com/id" hreflang="id" /><link rel="alternate" href="https://yt1s.com/it1" hreflang="it" /><link rel="alternate" href="https://yt1s.com/ja1" hreflang="ja" /><link rel="alternate" href="https://yt1s.com/ko1" hreflang="ko" /><link rel="alternate" href="https://yt1s.com/my1" hreflang="my" /><link rel="alternate" href="https://yt1s.com/ms" hreflang="ms" /><link rel="alternate" href="https://yt1s.com/ph" hreflang="en-PH" /><link rel="alternate" href="https://yt1s.com/pt2" hreflang="pt" /><link rel="alternate" href="https://yt1s.com/ru1" hreflang="ru" /><link rel="alternate" href="https://yt1s.com/th" hreflang="th" /><link rel="alternate" href="https://yt1s.com/tr1" hreflang="tr" /><link rel="alternate" href="https://yt1s.com/vi1" hreflang="vi" /><link rel="alternate" href="https://yt1s.com/zh-cn" hreflang="zh-CN" /><link rel="alternate" href="https://yt1s.com/zh-tw" hreflang="zh-TW" /><link rel="alternate" href="https://yt1s.com/sa1" hreflang="sa" /><link rel="alternate" href="https://yt1s.com/bn" hreflang="bn" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon.png" />
<link rel="apple-touch-icon-precomposed" sizes="57x57" href="https://yt1s.com/statics/image/apple-touch-icon-57x57.png" />
<link rel="apple-touch-icon-precomposed" sizes="114x114" href="https://yt1s.com/statics/image/apple-touch-icon-114x114.png" />
<link rel="apple-touch-icon-precomposed" sizes="72x72" href="https://yt1s.com/statics/image/apple-touch-icon-72x72.png" />
<link rel="apple-touch-icon-precomposed" sizes="144x144" href="https://yt1s.com/statics/image/apple-touch-icon-144x144.png" />
<link rel="apple-touch-icon-precomposed" sizes="60x60" href="https://yt1s.com/statics/image/apple-touch-icon-60x60.png" />
<link rel="apple-touch-icon-precomposed" sizes="120x120" href="https://yt1s.com/statics/image/apple-touch-icon-120x120.png" />
<link rel="apple-touch-icon-precomposed" sizes="76x76" href="https://yt1s.com/statics/image/apple-touch-icon-76x76.png" />
<link rel="apple-touch-icon-precomposed" sizes="152x152" href="https://yt1s.com/statics/image/apple-touch-icon-152x152.png" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-196x196.png" sizes="196x196" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-96x96.png" sizes="96x96" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-32x32.png" sizes="32x32" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-16x16.png" sizes="16x16" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-128x128.png" sizes="128x128" />
</head>
<body style="font-family: Arial, Helvetica, sans-serif;">
<header>
<div class="main_nav">
<a class="logo" href="https://sherifzeet-yt5-4075.zeet.app/">
<svg xmlns="http://www.w3.org/2000/svg" width="37" height="28" viewBox="0 0 37 28" fill="none" alt="YT1S">
<g clip-path="url(#clip0)">
<path d="M0.413567 5.80249C0.646742 2.9375 2.94402 0.705376 5.81232 0.517162C9.497 0.275378 14.5363 0 18.375 0C22.2137 0 27.253 0.275378 30.9377 0.517162C33.806 0.705375 36.1033 2.9375 36.3364 5.80249C36.5469 8.38873 36.75 11.5252 36.75 14C36.75 16.4748 36.5469 19.6113 36.3364 22.1975C36.1033 25.0625 33.806 27.2946 30.9377 27.4828C27.253 27.7246 22.2137 28 18.375 28C14.5363 28 9.497 27.7246 5.81232 27.4828C2.94402 27.2946 0.646742 25.0625 0.413567 22.1975C0.203079 19.6113 0 16.4748 0 14C0 11.5252 0.203079 8.38873 0.413567 5.80249Z" fill="#FF0000"></path>
<path d="M11.1223 8.18535L8 11.1334L18 21L28 11.1334L24.8777 8.18535L20.1879 12.8132V0H15.8121V12.8132L11.1223 8.18535Z" fill="white"></path>
</g>
<defs>
<clipPath id="clip0">
<rect width="36.75" height="28" fill="white"></rect>
</clipPath>
</defs>
</svg> <span> YT5</span> </a>
</div>
<div id="navbar" class="navbar-collapse">
</div>
</header>
<br><br><br>
<center><h1 style="color:#4A474C">Youtube Downloader</h1>
<p style="color:#4A474C">Convert and download Youtube videos in MP4 for free</p> </center>
<div class="form">
<form action="/download_video" method="post">
<br><center>
<input class="pl-3" type="url" name="URL" aria-label="Search" placeholder="Search or paste Youtube link here" style="width: 500px; height: 50px; border-radius: 50px; outline: red; border:1px solid #4A474C ;" required ></input>
<input type="submit" value="Download" style="width: 125px; height: 50px; border-radius: 50px; outline: red; border:1px solid #4A474C ; background: #4A474C; color: white;">
</form></div><br><br><br><br>
<center><h2 class="title">Youtube Video Downloader</h2></center>
<div class="mw70">Download Youtube videos with YT5 YouTube Downloader. By using our downloader you can easily convert YouTube videos to MP4 file... and download them for free - this service works for computers, tablets and mobile devices. The videos are always converted in the highest available quality.</div>
<div class="ftco-section center">
<h2 class="title">How to download Youtube video? </h2>
<ul class="listicon liststep">
<li>
<span class="number">1</span>
<span>Paste YouTube url or enter keywords into the search box.</span>
</li>
<li>
<span class="number">2</span>
<span>Choose output MP4 format you want to convert and click "Download" button.</span>
</li>
<li>
<span class="number">3</span>
<span>Wait until the conversion is completed and download the file. Very easy and fast.</span>
</li>
</ul>
</div>
<center><a class="btn-red mag0" type="button" href="#">Convert now</a></center>
<cneter>
<div class="ftco-section section-left">
<section itemscope="" itemtype="https://schema.org/FAQPage">
<div class="sf_container">
<div class="wrapper">
<div class="sf_faq">
<h3 class="title center">Questions and Answers</h3>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">What is the fastest way to download Youtube videos?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
<ol>
<li>Access the Youtube URL you need to download.</li>
<li>Add "<b>pp</b>" after the word "youtube" then click "Enter". For example: youtube.com/watch?v=1PJIqpLInrw => youtubePP.com/watch?v=1PJIqpLInrw</li>
<li>Select the file format you wish then click to "Download" button.</li>
</ol> </div>
</div>
</div>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">Is there any limit on the amount of downloaded files applied for users?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
No. Our website allows users to convert and download unlimited amount of file and for free. </div>
</div>
</div>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">What are the supported video/audio formats?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
We offer a ton of conversion options and allow you to download MP4 format. You can watch video right after that on your device without installing any other software. </div>
</div>
</div>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">What are the compatible devices for the conversion?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
We offer the service that is compatible with all PC devices, smart phones and tablets. </div>
</div>
</div>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">How to download Youtube video to Android mobile phone?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
<ol>
<li>Access Youtube from your browser or open Youtube app on your Android device; after that, coppy the video URL you wish to download. </li>
<li>Paste the URL into the search box. You also can enter keyword to look for the video you wish. </li>
<li>Select the format you wish to download then tap "Download". After a few seconds, you can download the file.</li>
</ol> </div>
</div>
</div>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">Where do the downloaded files get saved?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
Files you've downloaded are automatically saved in the Downloads folder or the "download history" section on your device. </div>
</div>
</div>
</div>
</div>
</div>
</section>
</div>
</center>
<center>
<a href="https://www.facebook.com/profile.php?id=100068549361726" title="facebook" class="fa fa-facebook"></a>
<a href="#" title="twitter" class="fa fa-twitter"></a>
<a href="#" title="linkedin" class="fa fa-linkedin"></a>
<br><br> <hr>
</center>
<footer>
<div class="copyright">
<cneter>yt5.com © Sherif Abdullah Mahmoud 2021</center>
</div>
</footer>
<script>
console.log("Created by : Sherif Abdullah Mahmoud")
</script>
</body></html>"""
return html_page
@app.route("/download_video", methods=["GET","POST"])
def download_video():
    """
    Handle a download request.

    pytube first saves the video locally on the server (e.g.
    /home/your_username/video_name.mp4), then Flask's send_file() streams
    that file to the user's browser as an attachment.

    On any failure (missing form field, pytube or network error) the
    landing page is re-rendered instead of surfacing a 500 to the user.
    """
    try:
        youtube_url = request.form["URL"]
        local_download_path = YouTube(youtube_url).streams[0].download()
        # Pass the path pytube returned straight to send_file(). The old
        # `local_download_path.split("//")[-1]` was a no-op on normal paths
        # (they contain "/", not "//") and would have produced a wrong,
        # cwd-relative name in the rare case "//" did appear.
        return send_file(local_download_path, as_attachment=True)
    except Exception:
        # `except Exception` instead of a bare `except:` so that
        # SystemExit/KeyboardInterrupt are not swallowed here.
        logging.exception("Failed download")
        return """<html>
<head>
<Title>Youtube Downloader - Online Youtube Video Downloader | YT5</Title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<link href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" rel="stylesheet"/>
<link href="view-source:http://codenow.epizy.com/CSS/socialmedia/style.css" rel="stylesheet"/>
<link rel="stylesheet" type="text/css" href="https://yt1s.com/statics/css/style.css?v=1.99">
<link rel="alternate" href="https://yt1s.com/en6" hreflang="x-default" /><link rel="alternate" href="https://yt1s.com/en6" hreflang="en" /><link rel="alternate" href="https://yt1s.com/de5" hreflang="de" /><link rel="alternate" href="https://yt1s.com/es6" hreflang="es" /><link rel="alternate" href="https://yt1s.com/fr" hreflang="fr" /><link rel="alternate" href="https://yt1s.com/hi" hreflang="hi" /><link rel="alternate" href="https://yt1s.com/id" hreflang="id" /><link rel="alternate" href="https://yt1s.com/it1" hreflang="it" /><link rel="alternate" href="https://yt1s.com/ja1" hreflang="ja" /><link rel="alternate" href="https://yt1s.com/ko1" hreflang="ko" /><link rel="alternate" href="https://yt1s.com/my1" hreflang="my" /><link rel="alternate" href="https://yt1s.com/ms" hreflang="ms" /><link rel="alternate" href="https://yt1s.com/ph" hreflang="en-PH" /><link rel="alternate" href="https://yt1s.com/pt2" hreflang="pt" /><link rel="alternate" href="https://yt1s.com/ru1" hreflang="ru" /><link rel="alternate" href="https://yt1s.com/th" hreflang="th" /><link rel="alternate" href="https://yt1s.com/tr1" hreflang="tr" /><link rel="alternate" href="https://yt1s.com/vi1" hreflang="vi" /><link rel="alternate" href="https://yt1s.com/zh-cn" hreflang="zh-CN" /><link rel="alternate" href="https://yt1s.com/zh-tw" hreflang="zh-TW" /><link rel="alternate" href="https://yt1s.com/sa1" hreflang="sa" /><link rel="alternate" href="https://yt1s.com/bn" hreflang="bn" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon.png" />
<link rel="apple-touch-icon-precomposed" sizes="57x57" href="https://yt1s.com/statics/image/apple-touch-icon-57x57.png" />
<link rel="apple-touch-icon-precomposed" sizes="114x114" href="https://yt1s.com/statics/image/apple-touch-icon-114x114.png" />
<link rel="apple-touch-icon-precomposed" sizes="72x72" href="https://yt1s.com/statics/image/apple-touch-icon-72x72.png" />
<link rel="apple-touch-icon-precomposed" sizes="144x144" href="https://yt1s.com/statics/image/apple-touch-icon-144x144.png" />
<link rel="apple-touch-icon-precomposed" sizes="60x60" href="https://yt1s.com/statics/image/apple-touch-icon-60x60.png" />
<link rel="apple-touch-icon-precomposed" sizes="120x120" href="https://yt1s.com/statics/image/apple-touch-icon-120x120.png" />
<link rel="apple-touch-icon-precomposed" sizes="76x76" href="https://yt1s.com/statics/image/apple-touch-icon-76x76.png" />
<link rel="apple-touch-icon-precomposed" sizes="152x152" href="https://yt1s.com/statics/image/apple-touch-icon-152x152.png" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-196x196.png" sizes="196x196" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-96x96.png" sizes="96x96" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-32x32.png" sizes="32x32" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-16x16.png" sizes="16x16" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-128x128.png" sizes="128x128" />
<link rel="icon" type="image/png" href="https://yt1s.com/statics/image/favicon-128x128.png" sizes="128x128">
</head>
<body style="font-family: Arial, Helvetica, sans-serif;">
<header>
<div class="main_nav">
<a class="logo" href="https://sherifzeet-yt5-4075.zeet.app/">
<svg xmlns="http://www.w3.org/2000/svg" width="37" height="28" viewBox="0 0 37 28" fill="none" alt="YT1S">
<g clip-path="url(#clip0)">
<path d="M0.413567 5.80249C0.646742 2.9375 2.94402 0.705376 5.81232 0.517162C9.497 0.275378 14.5363 0 18.375 0C22.2137 0 27.253 0.275378 30.9377 0.517162C33.806 0.705375 36.1033 2.9375 36.3364 5.80249C36.5469 8.38873 36.75 11.5252 36.75 14C36.75 16.4748 36.5469 19.6113 36.3364 22.1975C36.1033 25.0625 33.806 27.2946 30.9377 27.4828C27.253 27.7246 22.2137 28 18.375 28C14.5363 28 9.497 27.7246 5.81232 27.4828C2.94402 27.2946 0.646742 25.0625 0.413567 22.1975C0.203079 19.6113 0 16.4748 0 14C0 11.5252 0.203079 8.38873 0.413567 5.80249Z" fill="#FF0000"></path>
<path d="M11.1223 8.18535L8 11.1334L18 21L28 11.1334L24.8777 8.18535L20.1879 12.8132V0H15.8121V12.8132L11.1223 8.18535Z" fill="white"></path>
</g>
<defs>
<clipPath id="clip0">
<rect width="36.75" height="28" fill="white"></rect>
</clipPath>
</defs>
</svg> <span> YT5</span> </a>
</div>
<div id="navbar" class="navbar-collapse">
</div>
</header>
<br><br><br>
<center><h1 style="color:#4A474C">Youtube Downloader</h1>
<p style="color:#4A474C">Convert and download Youtube videos in MP4 for free</p> </center>
<div class="form">
<form action="/download_video" method="post">
<br><center>
<input class="pl-3" type="url" name="URL" aria-label="Search" placeholder="Search or paste Youtube link here" style="width: 500px; height: 50px; border-radius: 50px; outline: red; border:1px solid #4A474C ;" required ></input>
<input type="submit" value="Download" style="width: 125px; height: 50px; border-radius: 50px; outline: red; border:1px solid #4A474C ; background: #4A474C; color: white;">
</form></div><br><br><br><br>
<center><h2 class="title">Youtube Video Downloader</h2></center>
<div class="mw70">Download Youtube videos with YT1s YouTube Downloader. By using our downloader you can easily convert YouTube videos to MP3 file... and download them for free - this service works for computers, tablets and mobile devices. The videos are always converted in the highest available quality.</div>
<div class="ftco-section center">
<h2 class="title">How to download Youtube video? </h2>
<ul class="listicon liststep">
<li>
<span class="number">1</span>
<span>Paste YouTube url or enter keywords into the search box.</span>
</li>
<li>
<span class="number">2</span>
<span>Choose output MP4 format you want to convert and click "Download" button.</span>
</li>
<li>
<span class="number">3</span>
<span>Wait until the conversion is completed and download the file. Very easy and fast.</span>
</li>
</ul>
</div>
<center><a class="btn-red mag0" type="button" href="#">Convert now</a></center>
<cneter>
<div class="ftco-section section-left">
<section itemscope="" itemtype="https://schema.org/FAQPage">
<div class="sf_container">
<div class="wrapper">
<div class="sf_faq">
<h3 class="title center">Questions and Answers</h3>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">What is the fastest way to download Youtube videos?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
<ol>
<li>Access the Youtube URL you need to download.</li>
<li>Add "<b>pp</b>" after the word "youtube" then click "Enter". For example: youtube.com/watch?v=1PJIqpLInrw => youtubePP.com/watch?v=1PJIqpLInrw</li>
<li>Select the file format you wish then click to "Download" button.</li>
</ol> </div>
</div>
</div>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">Is there any limit on the amount of downloaded files applied for users?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
No. Our website allows users to convert and download unlimited amount of file and for free. </div>
</div>
</div>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">What are the supported video/audio formats?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
We offer a ton of conversion options and allow you to download MP4 format. You can watch video right after that on your device without installing any other software. </div>
</div>
</div>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">What are the compatible devices for the conversion?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
We offer the service that is compatible with all PC devices, smart phones and tablets. </div>
</div>
</div>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">How to download Youtube video to Android mobile phone?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
<ol>
<li>Access Youtube from your browser or open Youtube app on your Android device; after that, coppy the video URL you wish to download. </li>
<li>Paste the URL into the search box. You also can enter keyword to look for the video you wish. </li>
<li>Select the format you wish to download then tap "Download". After a few seconds, you can download the file.</li>
</ol> </div>
</div>
</div>
<div class="faq_item" itemprop="mainEntity" itemscope="" itemtype="https://schema.org/Question">
<div class="faq_item_title">
<h4 itemprop="name">Where do the downloaded files get saved?</h4>
</div>
<div class="faq_item_content" id="divId" itemprop="acceptedAnswer" itemscope="" itemtype="https://schema.org/Answer">
<div itemprop="text">
Files you've downloaded are automatically saved in the Downloads folder or the "download history" section on your device. </div>
</div>
</div>
</div>
</div>
</div>
</section>
</div>
</center>
<center>
<a href="https://www.facebook.com/profile.php?id=100068549361726" title="facebook" class="fa fa-facebook"></a>
<a href="#" title="twitter" class="fa fa-twitter"></a>
<a href="#" title="linkedin" class="fa fa-linkedin"></a>
<br><br> <hr>
</center>
<footer>
<div class="copyright">
<cneter>yt5.com © Sherif Abdullah Mahmoud 2021</center>
</div>
</footer>
<script>
console.log("Created by : Sherif Abdullah Mahmoud")
</script>
</body></html>"""
if __name__ == '__main__':
    # Run the Flask development server, reachable from any interface on port 81.
    app.run(host = '0.0.0.0', port=81)
| 58.485222 | 1,486 | 0.680017 |
4818bdb7fa22ed847a65aa9c1e214311fff4ea58 | 5,960 | py | Python | DominantSparseEigenAD/CG.py | buwantaiji/DominantSparseEigenAD | 36d534b6713ba256309b07116ebc542bee01cd51 | [
"Apache-2.0"
] | 23 | 2019-10-29T03:35:18.000Z | 2022-02-11T16:38:24.000Z | DominantSparseEigenAD/CG.py | buwantaiji/DominantSparseEigenAD | 36d534b6713ba256309b07116ebc542bee01cd51 | [
"Apache-2.0"
] | null | null | null | DominantSparseEigenAD/CG.py | buwantaiji/DominantSparseEigenAD | 36d534b6713ba256309b07116ebc542bee01cd51 | [
"Apache-2.0"
] | 6 | 2019-11-06T09:09:45.000Z | 2022-02-09T06:24:15.000Z | import torch
def CG_torch(A, b, initialx, sparse=False):
    """
    Solve the linear system Ax = b using the Conjugate Gradient (CG) method,
    implemented with PyTorch.

    A is assumed to be real symmetric and positive definite, so the solution
    is unique.

    Input:
        `A`: The matrix A. When `sparse` is False, a square torch.Tensor;
            when `sparse` is True, a bare linear map, i.e. a function taking
            a vector v and returning A @ v. In either case the dimension of
            A is inferred from the size of the vector b.
        `b`: The vector on the right hand side of the linear system.
        `initialx`: The initial vector of the CG iteration. For certain
            cases the initial vector should be properly chosen in order to
            get the expected result, as is the case in backprop of
            diagonalization of real symmetric matrices by the adjoint method.
        `sparse`: Whether A is given as a function instead of a matrix.

    Returns:
        The solution x of Ax = b, up to the residual tolerance `eps`.
    """
    Amap = A if sparse else (lambda v: torch.matmul(A, v))
    n = b.shape[0]
    eps = 1e-7
    x = initialx
    r = b - Amap(x)
    if torch.norm(r).item() < eps:
        return x
    d = r
    # Cache A @ d once per search direction: the original code evaluated
    # Amap(d) twice per iteration (for the residual update and for alpha).
    Ad = Amap(d)
    alpha = torch.matmul(r, r) / torch.matmul(Ad, d)
    # In exact arithmetic CG converges in at most n steps.
    for _ in range(n):
        x = x + alpha * d
        r_next = r - alpha * Ad
        if torch.norm(r_next).item() < eps:
            break
        beta = torch.matmul(r_next, r_next) / torch.matmul(r, r)
        r = r_next
        d = r + beta * d
        Ad = Amap(d)
        alpha = torch.matmul(r, r) / torch.matmul(Ad, d)
    return x
class CGSubspace(torch.autograd.Function):
    """
    Low-rank CG linear-system solver, with the matrix represented in normal
    form as a torch.Tensor.

    input: A, b, alpha, where A is an N-dimensional real symmetric matrix of
    rank N - 1 and alpha is the unique eigenvector of A with eigenvalue zero
    (all other eigenvalues of A are strictly greater than zero).
    output: the unique solution x of the low-rank linear system Ax = b
    subject to the additional condition alpha^T x = 0.
    For details, c.f. https://buwantaiji.github.io/2019/10/CG-backward/
    """
    @staticmethod
    def forward(ctx, A, b, alpha):
        # Start CG from a random vector projected into the subspace
        # orthogonal to alpha, so the iteration stays inside that subspace.
        x0 = torch.randn(b.shape[0], device=b.device, dtype=b.dtype)
        x0 = x0 - alpha * torch.matmul(alpha, x0)
        solution = CG_torch(A, b, x0)
        ctx.save_for_backward(A, alpha, solution)
        return solution

    @staticmethod
    def backward(ctx, grad_x):
        A, alpha, x = ctx.saved_tensors
        solve = CGSubspace.apply
        # Project the incoming gradient orthogonal to alpha, then solve the
        # same low-rank system to obtain the adjoint of b.
        rhs = grad_x - alpha * torch.matmul(alpha, grad_x)
        grad_b = solve(A, rhs, alpha)
        # Adjoint of A is the rank-1 outer product -grad_b x^T.
        grad_A = -grad_b[:, None] * x
        grad_alpha = -torch.matmul(alpha, grad_x) * x
        return grad_A, grad_b, grad_alpha
def setCGSubspaceSparse(A, Aadjoint_to_gadjoint):
    """
    Function primitive of low-rank CG linear system solver, where the matrix
    is "sparse" and represented as a function.

    As a workaround of the fact that Pytorch doesn't support taking gradient
    of objects of type other than torch.Tensor, the computation graph of this
    primitive is wrapped compared to CGSubspace, which is the version in
    which the matrix A is normally represented as a torch.Tensor.

    In particular, this wrapped version is mainly used to make the
    back-propagation of the dominant sparse eigensolver primitive -- i.e.,
    DominantSparseSymeig -- work properly. The computation graph is
    schematically shown below.

                ----------------------
            g --|--> A               |
                |     \\              |
                |    A-E_0I --       |
                |     /       \\      |
          E_0 --|-->--        |||----|--> x
                |             / /    |
            b --|------->------/     |
        alpha --|------->-------     |
                ----------------------

    input: g -- The parameter(s) of interest of the matrix A, whose gradients
        are requested. In current version, g must be a torch.Tensor of
        arbitrary shape. E0, alpha are the smallest eigvalue and
        corresponding (non-degenerate) eigenvector, respectively.
    output: x.

    The computation process involves using CG algorithm to solve a low-rank
    linear system of the form (A - E_0I)x = b, alpha^T x = 0. For more
    details of this part, c.f. https://buwantaiji.github.io/2019/10/CG-backward/

    USER NOTE: The mechanism of wrapping relies on user's providing two
    quantities:
        A -- The "sparse" representation of the matrix A as a function.
        Aadjoint_to_gadjoint -- A function that receives the adjoint of the
            matrix A as input, and returns the adjoint of the parameters (g)
            as output. The input is of the form of two vectors represented
            as torch.Tensor, say, v1 and v2, and the adjoint of
            A = v1 * v2^T (outer product). User may do whatever he wants to
            get the adjoint of g using these two vectors.
    """
    global CGSubspaceSparse

    @staticmethod
    def forward(ctx, g, E0, b, alpha):
        # Shifted operator A' = A - E0 * I; within the subspace orthogonal
        # to alpha, A' is positive definite, so CG applies.
        Aprime = lambda v: A(v) - E0 * v
        # Random initial guess projected orthogonal to alpha, so the CG
        # iteration stays inside the solvable subspace.
        initialx = torch.randn(b.shape[0], device=b.device, dtype=b.dtype)
        initialx = initialx - torch.matmul(alpha, initialx) * alpha
        x = CG_torch(Aprime, b, initialx, sparse=True)
        # g is stashed on ctx directly (not via save_for_backward) because
        # it is only forwarded to the backward pass, not differentiated here.
        ctx.g = g
        ctx.save_for_backward(E0, alpha, x)
        return x

    @staticmethod
    def backward(ctx, grad_x):
        g = ctx.g
        E0, alpha, x = ctx.saved_tensors
        CG = CGSubspaceSparse.apply
        # Project the incoming gradient orthogonal to alpha, then solve the
        # same low-rank system for the adjoint of b.
        b = grad_x - torch.matmul(alpha, grad_x) * alpha
        grad_b = CG(g, E0, b, alpha)
        # Adjoint of A is the rank-1 outer product v1 * v2^T.
        v1, v2 = - grad_b, x
        grad_alpha = - x * torch.matmul(alpha, grad_x)
        grad_E0 = - torch.matmul(v1, v2)
        # Delegate the conversion A-adjoint -> g-adjoint to the user callback.
        grad_g = Aadjoint_to_gadjoint(v1, v2)
        return grad_g, grad_E0, grad_b, grad_alpha

    # Build the autograd.Function subclass dynamically so that forward and
    # backward close over the user-supplied A and Aadjoint_to_gadjoint.
    CGSubspaceSparse = type("CGSubspaceSparse", (torch.autograd.Function, ),
                            {"forward": forward, "backward": backward})
| 42.269504 | 92 | 0.602852 |
8345008d0529dad735c9093b79a4258545bf7adf | 4,147 | py | Python | bot/views.py | ujlbu4/vas3k.club | 1ec907cf7e5ae3a74059cde8729ca0b3e2d55a3e | [
"MIT"
] | 2 | 2020-12-31T06:21:54.000Z | 2021-11-15T15:35:45.000Z | bot/views.py | ujlbu4/vas3k.club | 1ec907cf7e5ae3a74059cde8729ca0b3e2d55a3e | [
"MIT"
] | null | null | null | bot/views.py | ujlbu4/vas3k.club | 1ec907cf7e5ae3a74059cde8729ca0b3e2d55a3e | [
"MIT"
] | null | null | null | import hashlib
import hmac
import json
import logging
from django.conf import settings
from django.core.cache import cache
from django.http import HttpResponse, Http404
from django.shortcuts import render
from telegram import Update
from auth.helpers import auth_required
from bot.bot import bot
from bot.handlers.moderator import process_moderator_actions
from bot.handlers.personal import process_personal_chat_updates, process_auth
from bot.handlers.replies import process_comment_reply
from club.exceptions import AccessDenied
from common.request import ajax_request
from users.models.user import User
log = logging.getLogger(__name__)
def webhook_telegram(request, token):
    """Telegram webhook endpoint: dispatch incoming bot updates to handlers.

    The ``token`` path segment acts as a shared secret — requests that do
    not carry the configured bot token are rejected with a 400.
    """
    if not bot:
        return HttpResponse("Not configured", status=500)

    if token != settings.TELEGRAM_TOKEN:
        return HttpResponse("Go away", status=400)

    # try to get the json body or poll the latest updates
    if request.body:
        updates = [Update.de_json(json.loads(request.body), bot)]
    else:
        # useful in development
        updates = bot.get_updates()

    for update in updates:
        # log.info(f"Update: {update.to_dict()}")
        if update.effective_chat:
            # admin chat
            if str(update.effective_chat.id) == settings.TELEGRAM_ADMIN_CHAT_ID:
                if update.callback_query:
                    process_moderator_actions(update)

            # reply to a comment (in any chat excluding admin)
            elif update.message and update.message.reply_to_message \
                    and update.message.reply_to_message.text \
                    and update.message.reply_to_message.text.startswith("💬"):
                if is_club_user(update.effective_user.id):
                    process_comment_reply(update)

            # personal chats with users
            elif update.effective_user and update.effective_chat.id == update.effective_user.id:
                if is_club_user(update.effective_user.id):
                    process_personal_chat_updates(update)
                else:
                    process_auth(update)  # new user?

    return HttpResponse("OK")
def is_club_user(telegram_user_id):
    """Return True if this Telegram user id belongs to an approved member.

    Approved members' Telegram ids are cached for five minutes so that each
    incoming update does not trigger a database query.
    """
    cached_ids = cache.get("bot:telegram_user_ids")
    if not cached_ids:
        approved = User.objects.filter(
            telegram_id__isnull=False,
            moderation_status=User.MODERATION_STATUS_APPROVED,
        ).values_list("telegram_id", flat=True)
        cached_ids = list(approved)
        cache.set("bot:telegram_user_ids", cached_ids, 5 * 60)
    return str(telegram_user_id) in set(cached_ids)
@auth_required
@ajax_request
def link_telegram(request):
    """Link the current user's club account to their Telegram account.

    Expects the JSON payload produced by the Telegram Login Widget; the
    payload's HMAC signature is verified before the account is linked.
    """
    if not request.body:
        raise Http404()

    if request.method == "POST":
        data = json.loads(request.body)
        if not data.get("id") or not data.get("hash"):
            return render(request, "error.html", {
                "title": "Что-то пошло не так",
                "message": "Попробуйте авторизоваться снова.",
            })

        # Reject payloads whose signature does not match: anyone can POST
        # here, but only Telegram knows the bot token used for signing.
        if not is_valid_telegram_data(data, settings.TELEGRAM_TOKEN):
            raise AccessDenied(title="Подпись сообщения не совпадает")

        request.me.telegram_id = data["id"]
        request.me.telegram_data = data
        request.me.save()

        # Invalidate the cached member id list so the bot sees the new link.
        cache.delete("bot:telegram_user_ids")

        full_name = str(request.me.telegram_data.get("first_name") or "") \
            + str(request.me.telegram_data.get("last_name") or "")

        return {
            "status": "success",
            "telegram": {
                "id": request.me.telegram_id,
                "username": request.me.telegram_data.get("username") or full_name,
                "full_name": full_name,
            }
        }

    return {"status": "nope"}
def is_valid_telegram_data(data, bot_token):
    """Verify the signature of a Telegram Login Widget payload.

    Telegram signs the auth payload with HMAC-SHA256, keyed by the SHA-256
    digest of the bot token, over the sorted "key=value" lines of every
    field except "hash" (see https://core.telegram.org/widgets/login).

    :param data: mapping of auth fields received from Telegram, incl. "hash".
    :param bot_token: the bot's API token (the shared secret with Telegram).
    :return: True if the computed HMAC matches the supplied "hash".
    """
    data = dict(data)  # work on a copy: pop() must not mutate the caller's dict
    check_hash = data.pop('hash')
    check_list = ['{}={}'.format(k, v) for k, v in data.items()]
    check_string = '\n'.join(sorted(check_list))
    secret_key = hashlib.sha256(bot_token.encode()).digest()
    hmac_hash = hmac.new(
        secret_key,
        check_string.encode(),
        hashlib.sha256,
    ).hexdigest()
    # Constant-time comparison: a plain `==` leaks timing information about
    # how many leading characters of the attacker's signature are correct.
    # Compare as bytes so a non-ASCII "hash" value is rejected, not a crash.
    return hmac.compare_digest(hmac_hash.encode(), str(check_hash).encode())
| 32.912698 | 98 | 0.648421 |
c16483b5705e000692b77e94b67bb4bcb6012113 | 998 | py | Python | src/izi/apps/promotions/app.py | izi-core/izi-core | 21176be2d41f0cf54ca954f294209c585f643dba | [
"BSD-3-Clause"
] | null | null | null | src/izi/apps/promotions/app.py | izi-core/izi-core | 21176be2d41f0cf54ca954f294209c585f643dba | [
"BSD-3-Clause"
] | null | null | null | src/izi/apps/promotions/app.py | izi-core/izi-core | 21176be2d41f0cf54ca954f294209c585f643dba | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url
from izi.core.application import Application
from izi.core.loading import get_class, get_model
KeywordPromotion = get_model('promotions', 'KeywordPromotion')
PagePromotion = get_model('promotions', 'PagePromotion')
class PromotionsApplication(Application):
    """Application config for promotions: wires the click-tracking redirect
    and home URLs."""
    name = 'promotions'

    # Views are resolved lazily through the loader so a fork of the app can
    # override them.
    home_view = get_class('promotions.views', 'HomeView')
    record_click_view = get_class('promotions.views', 'RecordClickView')

    def get_urls(self):
        """Return the URL patterns handled by this application."""
        urls = [
            url(r'page-redirect/(?P<page_promotion_id>\d+)/$',
                self.record_click_view.as_view(model=PagePromotion),
                name='page-click'),
            url(r'keyword-redirect/(?P<keyword_promotion_id>\d+)/$',
                self.record_click_view.as_view(model=KeywordPromotion),
                name='keyword-click'),
            url(r'^$', self.home_view.as_view(), name='home'),
        ]
        return self.post_process_urls(urls)


# Module-level singleton imported by the URL configuration.
application = PromotionsApplication()
| 32.193548 | 72 | 0.667335 |
4e37880211a73d771c180ab2b1b7332b962585ac | 184 | py | Python | src/spaceone/secret/connector/__init__.py | ku524/secret | c5dad49f40ab1cbbaa0b8f01222de10ae73d1fb1 | [
"Apache-2.0"
] | null | null | null | src/spaceone/secret/connector/__init__.py | ku524/secret | c5dad49f40ab1cbbaa0b8f01222de10ae73d1fb1 | [
"Apache-2.0"
] | null | null | null | src/spaceone/secret/connector/__init__.py | ku524/secret | c5dad49f40ab1cbbaa0b8f01222de10ae73d1fb1 | [
"Apache-2.0"
] | null | null | null | from spaceone.secret.connector.aws_secret_manager_connector import *
from spaceone.secret.connector.vault_connector import *
from spaceone.secret.connector.identity_connector import *
| 46 | 68 | 0.869565 |
e716854030dbc0f82d6a31e5382aa06affcbe378 | 188 | py | Python | ecliptic/studies/forms.py | ryanroser/ecliptic | 1734624af129809416b95331cd9d5d7d892e3d1c | [
"MIT"
] | 1 | 2016-07-24T08:20:48.000Z | 2016-07-24T08:20:48.000Z | ecliptic/studies/forms.py | ryanroser/ecliptic | 1734624af129809416b95331cd9d5d7d892e3d1c | [
"MIT"
] | null | null | null | ecliptic/studies/forms.py | ryanroser/ecliptic | 1734624af129809416b95331cd9d5d7d892e3d1c | [
"MIT"
] | null | null | null | from django import forms
from studies.models import Study
class StudyForm(forms.ModelForm):
class Meta:
model = Study
fields = ['name', 'hypothesis', 'conclusion',]
| 23.5 | 54 | 0.670213 |
749bef0e9146d365e9f7f7114ce92a631ffed7bb | 27,782 | py | Python | cinder/tests/unit/api/v3/test_group_types.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 3 | 2015-04-02T21:44:36.000Z | 2016-04-29T21:19:04.000Z | cinder/tests/unit/api/v3/test_group_types.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 3 | 2016-04-29T21:45:26.000Z | 2016-05-04T19:41:23.000Z | cinder/tests/unit/api/v3/test_group_types.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 4 | 2016-01-27T00:25:52.000Z | 2021-03-25T19:54:08.000Z | # Copyright 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import uuid
import ddt
from oslo_utils import strutils
from oslo_utils import timeutils
import six
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import group_specs as v3_group_specs
from cinder.api.v3 import group_types as v3_group_types
from cinder.api.v3.views import group_types as views_types
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.volume import group_types
IN_USE_GROUP_TYPE = fake.GROUP_TYPE3_ID
def stub_group_type(id):
    """Build a fake group-type dict (id, name, description, specs) for tests.

    Note: the original wrapped ``id`` in ``six.text_type``; that was
    redundant — ``%s`` already stringifies its argument, and this module is
    Python 3 only (it uses ``unittest.mock``).
    """
    specs = {
        "key1": "value1",
        "key2": "value2",
        "key3": "value3",
        "key4": "value4",
        "key5": "value5"
    }
    return dict(
        id=id,
        name='group_type_%s' % id,
        description='group_type_desc_%s' % id,
        group_specs=specs,
    )
def return_group_types_get_all_types(context, filters=None, marker=None,
                                     limit=None, sort_keys=None,
                                     sort_dirs=None, offset=None,
                                     list_result=False):
    """Stub for group_types.get_all_group_types returning three fake types.

    Returns a name -> type-dict mapping, or just the type dicts as a list
    when ``list_result`` is set. All paging/sorting arguments are ignored.
    """
    result = {
        'group_type_%d' % idx: stub_group_type(idx) for idx in (1, 2, 3)
    }
    return list(result.values()) if list_result else result
def return_empty_group_types_get_all_types(context, filters=None, marker=None,
                                           limit=None, sort_keys=None,
                                           sort_dirs=None, offset=None,
                                           list_result=False):
    """Stub returning no group types: [] as a list, {} as a mapping."""
    return [] if list_result else {}
def return_group_types_get_group_type(context, id):
    """Stub: return a fake group type, raising for the not-found sentinel id."""
    if id != fake.WILL_NOT_BE_FOUND_ID:
        return stub_group_type(id)
    raise exception.GroupTypeNotFound(group_type_id=id)
def return_group_types_get_default():
    # Stub for the default-group-type lookup: fake type #1 is the default.
    return stub_group_type(1)
def return_group_types_get_default_not_found():
    # Stub for the "no default group type configured" case.
    return {}
def return_group_types_with_groups_destroy(context, id):
    # Stub for group_types.destroy: deleting a type still used by a group fails.
    if id == IN_USE_GROUP_TYPE:
        raise exception.GroupTypeInUse(group_type_id=id)
@ddt.ddt
class GroupTypesApiTest(test.TestCase):
def _create_group_type(self, group_type_name, group_specs=None,
is_public=True, projects=None):
return group_types.create(self.ctxt, group_type_name, group_specs,
is_public, projects).get('id')
def setUp(self):
super(GroupTypesApiTest, self).setUp()
self.controller = v3_group_types.GroupTypesController()
self.specs_controller = v3_group_specs.GroupTypeSpecsController()
self.ctxt = context.RequestContext(user_id=fake.USER_ID,
project_id=fake.PROJECT_ID,
is_admin=True)
self.user_ctxt = context.RequestContext(user_id=fake.USER2_ID,
project_id=fake.PROJECT2_ID,
is_admin=False)
self.type_id1 = self._create_group_type('group_type1',
{'key1': 'value1'})
self.type_id2 = self._create_group_type('group_type2',
{'key2': 'value2'})
self.type_id3 = self._create_group_type('group_type3',
{'key3': 'value3'}, False,
[fake.PROJECT_ID])
self.type_id0 = group_types.get_default_cgsnapshot_type()['id']
@ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on',
'y', 'yes')
@mock.patch.object(group_types, "get_group_type_by_name")
@mock.patch.object(group_types, "create")
@mock.patch("cinder.api.openstack.wsgi.Request.cache_resource")
@mock.patch("cinder.api.views.types.ViewBuilder.show")
def test_create_group_type_with_valid_is_public_in_string(
self, is_public, mock_show, mock_cache_resource,
mock_create, mock_get):
boolean_is_public = strutils.bool_from_string(is_public)
req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
body = {"group_type": {"is_public": is_public, "name": "group_type1",
"description": None}}
self.controller.create(req, body=body)
mock_create.assert_called_once_with(
self.ctxt, 'group_type1', {},
boolean_is_public, description=None)
@mock.patch.object(group_types, "get_group_type_by_name")
@mock.patch.object(group_types, "create")
@mock.patch("cinder.api.openstack.wsgi.Request.cache_resource")
@mock.patch("cinder.api.views.types.ViewBuilder.show")
def test_create_group_type_with_group_specs_null(
self, mock_show, mock_cache_resource,
mock_create, mock_get):
req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
body = {"group_type": {"name": "group_type1",
"group_specs": None}}
self.controller.create(req, body=body)
mock_create.assert_called_once_with(
self.ctxt, 'group_type1', None, True, description=None)
@ddt.data(fake.GROUP_TYPE_ID, IN_USE_GROUP_TYPE)
def test_group_type_destroy(self, grp_type_id):
grp_type = {'id': grp_type_id, 'name': 'grp' + grp_type_id}
self.mock_object(group_types, 'get_group_type',
return_value=grp_type)
self.mock_object(group_types, 'destroy',
return_group_types_with_groups_destroy)
mock_notify_info = self.mock_object(
v3_group_types.GroupTypesController,
'_notify_group_type_info')
mock_notify_error = self.mock_object(
v3_group_types.GroupTypesController,
'_notify_group_type_error')
req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' % (
fake.PROJECT_ID, grp_type_id),
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
if grp_type_id == IN_USE_GROUP_TYPE:
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete,
req, grp_type_id)
mock_notify_error.assert_called_once_with(
self.ctxt, 'group_type.delete', mock.ANY,
group_type=grp_type)
else:
self.controller.delete(req, grp_type_id)
mock_notify_info.assert_called_once_with(
self.ctxt, 'group_type.delete', grp_type)
def test_group_types_index(self):
self.mock_object(group_types, 'get_all_group_types',
return_group_types_get_all_types)
req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID,
use_admin_context=True,
version=mv.GROUP_TYPE)
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict['group_types']))
expected_names = ['group_type_1', 'group_type_2', 'group_type_3']
actual_names = map(lambda e: e['name'], res_dict['group_types'])
self.assertEqual(set(expected_names), set(actual_names))
for entry in res_dict['group_types']:
self.assertEqual('value1', entry['group_specs']['key1'])
def test_group_types_index_no_data(self):
self.mock_object(group_types, 'get_all_group_types',
return_empty_group_types_get_all_types)
req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
res_dict = self.controller.index(req)
self.assertEqual(0, len(res_dict['group_types']))
def test_group_types_index_with_limit(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1' %
fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(1, len(res['group_types']))
self.assertEqual(self.type_id3, res['group_types'][0]['id'])
expect_next_link = ('http://localhost/v3/%s/group_types?limit=1'
'&marker=%s' %
(fake.PROJECT_ID, res['group_types'][0]['id']))
self.assertEqual(expect_next_link, res['group_type_links'][0]['href'])
def test_group_types_index_with_offset(self):
req = fakes.HTTPRequest.blank(
'/v3/%s/group_types?offset=1' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(3, len(res['group_types']))
def test_group_types_index_with_offset_out_of_range(self):
url = '/v3/%s/group_types?offset=424366766556787' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, version=mv.GROUP_TYPE)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_group_types_index_with_limit_and_offset(self):
req = fakes.HTTPRequest.blank(
'/v3/%s/group_types?limit=2&offset=1' % fake.PROJECT_ID,
version=mv.GROUP_TYPE)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(2, len(res['group_types']))
self.assertEqual(self.type_id2, res['group_types'][0]['id'])
self.assertEqual(self.type_id1, res['group_types'][1]['id'])
    def test_group_types_index_with_limit_and_marker(self):
        """Paging with a marker resumes the listing after the marker id."""
        req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1'
                                      '&marker=%s' %
                                      (fake.PROJECT_ID,
                                       self.type_id2),
                                      version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        self.assertEqual(1, len(res['group_types']))
        self.assertEqual(self.type_id1, res['group_types'][0]['id'])
    def test_group_types_index_with_valid_filter(self):
        """A supported filter (is_public) is accepted by the API."""
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types?is_public=True' % fake.PROJECT_ID,
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        self.assertEqual(4, len(res['group_types']))
        self.assertEqual(self.type_id3, res['group_types'][0]['id'])
        self.assertEqual(self.type_id2, res['group_types'][1]['id'])
        self.assertEqual(self.type_id1, res['group_types'][2]['id'])
        self.assertEqual(self.type_id0, res['group_types'][3]['id'])
    def test_group_types_index_with_invalid_filter(self):
        """An unsupported filter (id) does not reduce the result set."""
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types?id=%s' % (fake.PROJECT_ID, self.type_id1),
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        self.assertEqual(4, len(res['group_types']))
    def test_group_types_index_with_sort_keys(self):
        """sort=id (no direction) lists ids in descending order."""
        req = fakes.HTTPRequest.blank('/v3/%s/group_types?sort=id' %
                                      fake.PROJECT_ID,
                                      version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        expect_result = [self.type_id0, self.type_id1, self.type_id2,
                         self.type_id3]
        expect_result.sort(reverse=True)
        self.assertEqual(4, len(res['group_types']))
        self.assertEqual(expect_result[0], res['group_types'][0]['id'])
        self.assertEqual(expect_result[1], res['group_types'][1]['id'])
        self.assertEqual(expect_result[2], res['group_types'][2]['id'])
        self.assertEqual(expect_result[3], res['group_types'][3]['id'])
    def test_group_types_index_with_sort_and_limit(self):
        """Sorting and limiting combine: first two ids, descending."""
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types?sort=id&limit=2' % fake.PROJECT_ID,
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        expect_result = [self.type_id0, self.type_id1, self.type_id2,
                         self.type_id3]
        expect_result.sort(reverse=True)
        self.assertEqual(2, len(res['group_types']))
        self.assertEqual(expect_result[0], res['group_types'][0]['id'])
        self.assertEqual(expect_result[1], res['group_types'][1]['id'])
    def test_group_types_index_with_sort_keys_and_sort_dirs(self):
        """An explicit sort direction (id:asc) is honored."""
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types?sort=id:asc' % fake.PROJECT_ID,
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        expect_result = [self.type_id0, self.type_id1, self.type_id2,
                         self.type_id3]
        expect_result.sort()
        self.assertEqual(4, len(res['group_types']))
        self.assertEqual(expect_result[0], res['group_types'][0]['id'])
        self.assertEqual(expect_result[1], res['group_types'][1]['id'])
        self.assertEqual(expect_result[2], res['group_types'][2]['id'])
        self.assertEqual(expect_result[3], res['group_types'][3]['id'])
    @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on',
              'y', 'yes')
    @mock.patch.object(group_types, "get_group_type")
    @mock.patch.object(group_types, "update")
    @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource")
    @mock.patch("cinder.api.views.types.ViewBuilder.show")
    def test_update_group_type_with_valid_is_public_in_string(
            self, is_public, mock_show, mock_cache_resource,
            mock_update, mock_get):
        """Every boolean-like string is coerced to a real bool before
        group_types.update is called.
        """
        type_id = six.text_type(uuid.uuid4())
        req = fakes.HTTPRequest.blank(
            '/v3/%s/types/%s' % (fake.PROJECT_ID, type_id),
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        # The API layer should apply the same conversion as strutils.
        boolean_is_public = strutils.bool_from_string(is_public)
        body = {"group_type": {"is_public": is_public, "name": "group_type1"}}
        self.controller.update(req, type_id, body=body)
        mock_update.assert_called_once_with(
            self.ctxt, type_id, 'group_type1', None,
            is_public=boolean_is_public)
    def test_update_group_type_with_name_null(self):
        """A null name in the update body is rejected with HTTP 400."""
        req = fakes.HTTPRequest.blank(
            '/v3/%s/types/%s' % (fake.PROJECT_ID, fake.GROUP_TYPE_ID),
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        body = {"group_type": {"name": None}}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, fake.GROUP_TYPE_ID, body=body)
@ddt.data({"group_type": {"name": None,
"description": "description"}},
{"group_type": {"name": "test",
"is_public": True}},
{"group_type": {"description": None,
"is_public": True}})
def test_update_group_type(self, body):
req = fakes.HTTPRequest.blank(
'/v3/%s/types/%s' % (fake.PROJECT_ID, fake.GROUP_TYPE_ID),
version=mv.GROUP_TYPE)
group_type_1 = group_types.create(self.ctxt, 'group_type')
req.environ['cinder.context'] = self.ctxt
res = self.controller.update(req, group_type_1.get('id'), body=body)
expected_name = body['group_type'].get('name')
if expected_name is not None:
self.assertEqual(expected_name, res['group_type']['name'])
expected_is_public = body['group_type'].get('is_public')
if expected_is_public is not None:
self.assertEqual(expected_is_public,
res['group_type']['is_public'])
self.assertEqual(body['group_type'].get('description'),
res['group_type']['description'])
    def test_group_types_show(self):
        """show returns a single group type keyed by the requested id."""
        self.mock_object(group_types, 'get_group_type',
                         return_group_types_get_group_type)
        type_id = six.text_type(uuid.uuid4())
        req = fakes.HTTPRequest.blank('/v3/%s/group_types/' % fake.PROJECT_ID
                                      + type_id,
                                      version=mv.GROUP_TYPE)
        res_dict = self.controller.show(req, type_id)
        self.assertEqual(1, len(res_dict))
        self.assertEqual(type_id, res_dict['group_type']['id'])
        # The stub names types 'group_type_<id>'.
        type_name = 'group_type_' + type_id
        self.assertEqual(type_name, res_dict['group_type']['name'])
    def test_group_types_show_pre_microversion(self):
        """show is unavailable for requests older than the GROUP_TYPE
        microversion.
        """
        self.mock_object(group_types, 'get_group_type',
                         return_group_types_get_group_type)
        type_id = uuid.uuid4()
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types/%s' % (fake.PROJECT_ID, type_id),
            version=mv.get_prior_version(mv.GROUP_TYPE))
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.show, req, type_id)
    def test_group_types_show_not_found(self):
        """show raises 404 for an id that does not exist."""
        self.mock_object(group_types, 'get_group_type',
                         return_group_types_get_group_type)
        req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' %
                                      (fake.PROJECT_ID,
                                       fake.WILL_NOT_BE_FOUND_ID),
                                      version=mv.GROUP_TYPE)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, fake.WILL_NOT_BE_FOUND_ID)
    def test_get_default(self):
        """GET .../group_types/default returns the default group type."""
        self.mock_object(group_types, 'get_default_group_type',
                         return_group_types_get_default)
        req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' %
                                      fake.PROJECT_ID,
                                      version=mv.GROUP_TYPE)
        req.method = 'GET'
        res_dict = self.controller.show(req, 'default')
        self.assertEqual(1, len(res_dict))
        self.assertEqual('group_type_1', res_dict['group_type']['name'])
        self.assertEqual('group_type_desc_1',
                         res_dict['group_type']['description'])
    def test_get_default_not_found(self):
        """404 when no default group type is configured."""
        self.mock_object(group_types, 'get_default_group_type',
                         return_group_types_get_default_not_found)
        req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' %
                                      fake.PROJECT_ID,
                                      version=mv.GROUP_TYPE)
        req.method = 'GET'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, 'default')
    def test_view_builder_show(self):
        """A non-admin show view strips internal fields (timestamps,
        deleted flags, group_specs).
        """
        view_builder = views_types.ViewBuilder()
        now = timeutils.utcnow().isoformat()
        raw_group_type = dict(
            name='new_type',
            description='new_type_desc',
            is_public=True,
            deleted=False,
            created_at=now,
            updated_at=now,
            group_specs={},
            deleted_at=None,
            id=42,
        )
        request = fakes.HTTPRequest.blank("/v3",
                                          version=mv.GROUP_TYPE)
        output = view_builder.show(request, raw_group_type)
        self.assertIn('group_type', output)
        expected_group_type = dict(
            name='new_type',
            description='new_type_desc',
            is_public=True,
            id=42,
        )
        self.assertDictEqual(expected_group_type, output['group_type'])
    def test_view_builder_show_admin(self):
        """An admin context additionally exposes group_specs in the view."""
        view_builder = views_types.ViewBuilder()
        now = timeutils.utcnow().isoformat()
        raw_group_type = dict(
            name='new_type',
            description='new_type_desc',
            is_public=True,
            deleted=False,
            created_at=now,
            updated_at=now,
            group_specs={},
            deleted_at=None,
            id=42,
        )
        request = fakes.HTTPRequest.blank("/v3", use_admin_context=True,
                                          version=mv.GROUP_TYPE)
        output = view_builder.show(request, raw_group_type)
        self.assertIn('group_type', output)
        expected_group_type = dict(
            name='new_type',
            description='new_type_desc',
            is_public=True,
            group_specs={},
            id=42,
        )
        self.assertDictEqual(expected_group_type, output['group_type'])
    def __test_view_builder_show_qos_specs_id_policy(self):
        """Disabled test: the leading double underscore keeps test runners
        from collecting this method (and triggers name mangling on the
        class). If re-enabled, it checks that failing the first policy
        authorization while passing the second still yields the trimmed
        view without group_specs.
        """
        with mock.patch.object(context.RequestContext,
                               'authorize',
                               side_effect=[False, True]):
            view_builder = views_types.ViewBuilder()
            now = timeutils.utcnow().isoformat()
            raw_group_type = dict(
                name='new_type',
                description='new_type_desc',
                is_public=True,
                deleted=False,
                created_at=now,
                updated_at=now,
                deleted_at=None,
                id=42,
            )
            request = fakes.HTTPRequest.blank("/v3",
                                              version=mv.GROUP_TYPE)
            output = view_builder.show(request, raw_group_type)
            self.assertIn('group_type', output)
            expected_group_type = dict(
                name='new_type',
                description='new_type_desc',
                is_public=True,
                id=42,
            )
            self.assertDictEqual(expected_group_type, output['group_type'])
    def test_view_builder_show_group_specs_policy(self):
        """Renders the show view with the two policy checks returning
        True then False; group_specs is still present for this fixture.
        """
        with mock.patch.object(context.RequestContext,
                               'authorize',
                               side_effect=[True, False]):
            view_builder = views_types.ViewBuilder()
            now = timeutils.utcnow().isoformat()
            raw_group_type = dict(
                name='new_type',
                description='new_type_desc',
                is_public=True,
                deleted=False,
                created_at=now,
                updated_at=now,
                group_specs={},
                deleted_at=None,
                id=42,
            )
            request = fakes.HTTPRequest.blank("/v3",
                                              version=mv.GROUP_TYPE)
            output = view_builder.show(request, raw_group_type)
            self.assertIn('group_type', output)
            expected_group_type = dict(
                name='new_type',
                description='new_type_desc',
                group_specs={},
                is_public=True,
                id=42,
            )
            self.assertDictEqual(expected_group_type, output['group_type'])
    def test_view_builder_show_pass_all_policy(self):
        """Intended to cover the all-policy-checks-pass case.

        NOTE(review): side_effect here is [True, False], byte-identical to
        test_view_builder_show_group_specs_policy above; for a 'pass all'
        case this presumably should be [True, True] -- confirm against the
        policy checks in ViewBuilder.show.
        """
        with mock.patch.object(context.RequestContext,
                               'authorize',
                               side_effect=[True, False]):
            view_builder = views_types.ViewBuilder()
            now = timeutils.utcnow().isoformat()
            raw_group_type = dict(
                name='new_type',
                description='new_type_desc',
                is_public=True,
                deleted=False,
                created_at=now,
                updated_at=now,
                group_specs={},
                deleted_at=None,
                id=42,
            )
            request = fakes.HTTPRequest.blank("/v3",
                                              version=mv.GROUP_TYPE)
            output = view_builder.show(request, raw_group_type)
            self.assertIn('group_type', output)
            expected_group_type = dict(
                name='new_type',
                description='new_type_desc',
                group_specs={},
                is_public=True,
                id=42,
            )
            self.assertDictEqual(expected_group_type, output['group_type'])
def test_view_builder_list(self):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_group_types = []
for i in range(0, 10):
raw_group_types.append(
dict(
name='new_type',
description='new_type_desc',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
group_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v3",
version=mv.GROUP_TYPE)
output = view_builder.index(request, raw_group_types)
self.assertIn('group_types', output)
for i in range(0, 10):
expected_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
id=42 + i
)
self.assertDictEqual(expected_group_type,
output['group_types'][i])
def test_view_builder_list_admin(self):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_group_types = []
for i in range(0, 10):
raw_group_types.append(
dict(
name='new_type',
description='new_type_desc',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
group_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v3", use_admin_context=True,
version=mv.GROUP_TYPE)
output = view_builder.index(request, raw_group_types)
self.assertIn('group_types', output)
for i in range(0, 10):
expected_group_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
group_specs={},
id=42 + i
)
self.assertDictEqual(expected_group_type,
output['group_types'][i])
| 40.73607 | 78 | 0.570153 |
9313d99fe6aba852c87659d12267f433f217ea21 | 12,398 | py | Python | tools/perf/metrics/timeline.py | anirudhSK/chromium | a8f23c87e656ab9ba49de9ccccbc53f614cdcb41 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/perf/metrics/timeline.py | anirudhSK/chromium | a8f23c87e656ab9ba49de9ccccbc53f614cdcb41 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/perf/metrics/timeline.py | anirudhSK/chromium | a8f23c87e656ab9ba49de9ccccbc53f614cdcb41 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2015-04-17T13:19:09.000Z | 2021-10-21T12:55:15.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import itertools
from metrics import Metric
from telemetry.core.timeline.model import TimelineModel
from telemetry.core.timeline import bounds
from telemetry.page import page_measurement
TRACING_MODE = 'tracing-mode'
TIMELINE_MODE = 'timeline-mode'
# All tracing categories not disabled-by-default
DEFAULT_TRACE_CATEGORIES = None
# Categories for absolute minimum overhead tracing. This contains no
# sub-traces of thread tasks, so it's only useful for capturing the
# cpu-time spent on threads (as well as needed benchmark traces)
MINIMAL_TRACE_CATEGORIES = ("toplevel,"
"benchmark,"
"webkit.console,"
"trace_event_overhead")
class MissingFramesError(page_measurement.MeasurementFailure):
  """Raised when a recorded trace contains no frames to normalize against."""
  def __init__(self):
    message = 'No frames found in trace. Unable to normalize results.'
    super(MissingFramesError, self).__init__(message)
class TimelineMetric(Metric):
  """Base class for metrics computed from a recorded timeline.

  Depending on the mode, data is collected either via the tracing backend
  (TRACING_MODE) or the DevTools timeline (TIMELINE_MODE), and is turned
  into a TimelineModel when recording stops.
  """
  def __init__(self, mode):
    """Initializes a TimelineMetric object.

    mode: TRACING_MODE or TIMELINE_MODE.
    """
    super(TimelineMetric, self).__init__()
    assert mode in (TRACING_MODE, TIMELINE_MODE)
    self.trace_categories = DEFAULT_TRACE_CATEGORIES
    self._mode = mode
    self._model = None              # TimelineModel, built in Stop().
    self._renderer_process = None   # Renderer process picked in Stop().
    self._actions = []              # Actions whose ranges scope the metric.
    self._action_ranges = []        # Timeline bounds of those actions.
  def Start(self, page, tab):
    """Starts gathering timeline data for |tab|."""
    self._model = None
    if self._mode == TRACING_MODE:
      if not tab.browser.supports_tracing:
        raise Exception('Not supported')
      tab.browser.StartTracing(self.trace_categories)
    else:
      assert self._mode == TIMELINE_MODE
      tab.StartTimelineRecording()
  def Stop(self, page, tab):
    """Stops recording and builds the model and renderer-process handles."""
    if self._mode == TRACING_MODE:
      timeline_data = tab.browser.StopTracing()
      self._model = TimelineModel(timeline_data)
      self._renderer_process = self._model.GetRendererProcessFromTab(tab)
      self._action_ranges = [ action.GetActiveRangeOnTimeline(self._model)
                              for action in self._actions ]
      # Make sure no action ranges overlap
      for combo in itertools.combinations(self._action_ranges, 2):
        assert not combo[0].Intersects(combo[1])
    else:
      tab.StopTimelineRecording()
      self._model = tab.timeline_model
      self._renderer_process = self._model.GetAllProcesses()[0]
  def AddActionToIncludeInMetric(self, action):
    """Restricts the metric to events within |action|'s timeline range."""
    self._actions.append(action)
  @property
  def model(self):
    return self._model
  @model.setter
  def model(self, model):
    self._model = model
  @property
  def renderer_process(self):
    return self._renderer_process
  @renderer_process.setter
  def renderer_process(self, p):
    self._renderer_process = p
  def AddResults(self, tab, results):
    # Subclasses override this to report results derived from the model.
    return
class LoadTimesTimelineMetric(TimelineMetric):
  """Reports total/max/avg self-time per event name plus counter totals."""
  def __init__(self, mode):
    super(LoadTimesTimelineMetric, self).__init__(mode)
    # When True, only slices from the renderer main thread are aggregated.
    self.report_main_thread_only = True
  def AddResults(self, _, results):
    """Aggregates slice self-times and counters from the renderer process."""
    assert self._model
    if self.report_main_thread_only:
      if self._mode == TRACING_MODE:
        thread_filter = 'CrRendererMain'
      else:
        thread_filter = 'thread 0'
    else:
      thread_filter = None
    events_by_name = collections.defaultdict(list)
    for thread in self.renderer_process.threads.itervalues():
      if thread_filter and not thread.name in thread_filter:
        continue
      thread_name = thread.name.replace('/','_')
      events = thread.all_slices
      for e in events:
        events_by_name[e.name].append(e)
    for event_name, event_group in events_by_name.iteritems():
      times = [event.self_time for event in event_group]
      total = sum(times)
      biggest_jank = max(times)
      # Results objects cannot contain the '.' character, so remove that here.
      sanitized_event_name = event_name.replace('.', '_')
      # NOTE(review): thread_name is whatever the *last* matching thread in
      # the loop above was; correct when the filter selects a single thread,
      # but with multiple matching threads events are merged under one
      # thread's name -- confirm intended.
      full_name = thread_name + '|' + sanitized_event_name
      results.Add(full_name, 'ms', total)
      results.Add(full_name + '_max', 'ms', biggest_jank)
      results.Add(full_name + '_avg', 'ms', total / len(times))
    for counter_name, counter in self.renderer_process.counters.iteritems():
      total = sum(counter.totals)
      # Results objects cannot contain the '.' character, so remove that here.
      sanitized_counter_name = counter_name.replace('.', '_')
      results.Add(sanitized_counter_name, 'count', total)
      results.Add(sanitized_counter_name + '_avg', 'count',
                  total / float(len(counter.totals)))
# We want to generate a consistant picture of our thread usage, despite
# having several process configurations (in-proc-gpu/single-proc).
# Since we can't isolate renderer threads in single-process mode, we
# always sum renderer-process threads' times. We also sum all io-threads
# for simplicity.
TimelineThreadCategories = {
  "Chrome_InProcGpuThread": "GPU",
  "CrGpuMain" : "GPU",
  "AsyncTransferThread" : "GPU_transfer",
  "CrBrowserMain" : "browser",
  "Browser Compositor" : "browser",
  "CrRendererMain" : "renderer_main",
  "Compositor" : "renderer_compositor",
  "IOThread" : "IO",
  "CompositorRasterWorker": "raster",
  "DummyThreadName1" : "other",
  "DummyThreadName2" : "total_fast_path",
  "DummyThreadName3" : "total_all"
}
# Names listed here are matched as substrings of the raw thread name
# rather than requiring an exact key match.
_MatchBySubString = ["IOThread", "CompositorRasterWorker"]
AllThreads = TimelineThreadCategories.values()
NoThreads = []
FastPathThreads = ["GPU", "renderer_compositor", "browser", "IO"]
# Preset selections of which categories to report results/details for.
ReportMainThreadOnly = ["renderer_main"]
ReportFastPathResults = AllThreads
ReportFastPathDetails = NoThreads
ReportSilkResults = ["renderer_main", "total_all"]
ReportSilkDetails = ["renderer_main"]
# TODO(epenner): Thread names above are likely fairly stable but trace names
# could change. We should formalize these traces to keep this robust.
OverheadTraceCategory = "trace_event_overhead"
OverheadTraceName = "overhead"
FrameTraceName = "::SwapBuffers"
FrameTraceThreadName = "renderer_compositor"
def ClockOverheadForEvent(event):
  """Returns the wall-clock time attributable to tracing overhead for
  |event|; events outside the overhead category contribute zero.
  """
  is_overhead_marker = (event.category == OverheadTraceCategory and
                        event.name == OverheadTraceName)
  return event.duration if is_overhead_marker else 0
def CpuOverheadForEvent(event):
  """Returns the CPU time attributable to tracing overhead for |event|;
  events outside the overhead category, or without thread timing,
  contribute zero.
  """
  if event.category != OverheadTraceCategory:
    return 0
  return event.thread_duration or 0
def ThreadCategoryName(thread_name):
  """Maps a raw thread name onto one of the TimelineThreadCategories buckets.

  An exact-name entry wins; otherwise entries listed in _MatchBySubString
  may match as substrings. Unmatched threads land in "other".
  """
  if thread_name in TimelineThreadCategories:
    return TimelineThreadCategories[thread_name]
  result = "other"
  for key, bucket in TimelineThreadCategories.iteritems():
    if key in _MatchBySubString and key in thread_name:
      result = bucket
  return result
def ThreadTimeResultName(thread_category):
  """Result name for per-frame wall-clock time of |thread_category|."""
  return "thread_%s_clock_time_per_frame" % thread_category
def ThreadCpuTimeResultName(thread_category):
  """Result name for per-frame CPU time of |thread_category|."""
  return "thread_%s_cpu_time_per_frame" % thread_category
def ThreadDetailResultName(thread_category, detail):
  """Result name for a per-category detail; '.' is not allowed in result
  names, so it is replaced with '_'.
  """
  return "thread_%s|%s" % (thread_category, detail.replace('.', '_'))
class ResultsForThread(object):
  """Accumulates the slices for one thread category and derives times.

  Only slices that fall entirely within one of |action_ranges| are kept,
  so every derived time is scoped to the measured actions.
  """
  def __init__(self, model, action_ranges, name):
    self.model = model
    self.toplevel_slices = []
    self.all_slices = []
    self.name = name
    self.action_ranges = action_ranges
  @property
  def clock_time(self):
    """Wall-clock time of top-level slices, minus tracing overhead."""
    clock_duration = sum([x.duration for x in self.toplevel_slices])
    clock_overhead = sum([ClockOverheadForEvent(x) for x in self.all_slices])
    return clock_duration - clock_overhead
  @property
  def cpu_time(self):
    """CPU time of top-level slices, minus tracing overhead.

    Returns 0 if any slice with a non-zero duration lacks thread timing,
    since a partial sum would be misleading.
    """
    cpu_duration = 0
    cpu_overhead = sum([CpuOverheadForEvent(x) for x in self.all_slices])
    for x in self.toplevel_slices:
      # Only report thread-duration if we have it for all events.
      #
      # A thread_duration of 0 is valid, so this only returns 0 if it is None.
      if x.thread_duration == None:
        if not x.duration:
          continue
        else:
          return 0
      else:
        cpu_duration += x.thread_duration
    return cpu_duration - cpu_overhead
  def SlicesInActions(self, slices):
    """Filters |slices| to those contained in one of the action ranges."""
    slices_in_actions = []
    for event in slices:
      for action_range in self.action_ranges:
        if action_range.Contains(bounds.Bounds.CreateFromEvent(event)):
          slices_in_actions.append(event)
          break
    return slices_in_actions
  def AppendThreadSlices(self, thread):
    """Adds |thread|'s in-action slices to this category's accumulators."""
    self.all_slices.extend(self.SlicesInActions(thread.all_slices))
    self.toplevel_slices.extend(self.SlicesInActions(thread.toplevel_slices))
  def AddResults(self, num_frames, results):
    """Reports per-frame CPU time for this category (0 if no frames)."""
    cpu_per_frame = (float(self.cpu_time) / num_frames) if num_frames else 0
    results.Add(ThreadCpuTimeResultName(self.name), 'ms', cpu_per_frame)
  def AddDetailedResults(self, num_frames, results):
    """Reports per-frame self-time per trace category, plus derived idle."""
    slices_by_category = collections.defaultdict(list)
    for s in self.all_slices:
      slices_by_category[s.category].append(s)
    all_self_times = []
    for category, slices_in_category in slices_by_category.iteritems():
      self_time = sum([x.self_time for x in slices_in_category])
      all_self_times.append(self_time)
      self_time_result = (float(self_time) / num_frames) if num_frames else 0
      results.Add(ThreadDetailResultName(self.name, category),
                  'ms', self_time_result)
    # Idle is whatever part of the action windows was not accounted for by
    # any measured slice (clamped at zero).
    all_measured_time = sum(all_self_times)
    all_action_time = sum([action.bounds for action in self.action_ranges])
    idle_time = max(0, all_action_time - all_measured_time)
    idle_time_result = (float(idle_time) / num_frames) if num_frames else 0
    results.Add(ThreadDetailResultName(self.name, "idle"),
                'ms', idle_time_result)
class ThreadTimesTimelineMetric(TimelineMetric):
  """Reports per-frame clock/CPU time spent in each thread category."""
  def __init__(self):
    super(ThreadTimesTimelineMetric, self).__init__(TRACING_MODE)
    # Minimal traces, for minimum noise in CPU-time measurements.
    self.trace_categories = MINIMAL_TRACE_CATEGORIES
    self.results_to_report = AllThreads
    self.details_to_report = NoThreads
  def Start(self, page, tab):
    # We need the other traces in order to have any details to report.
    if not self.details_to_report == NoThreads:
      self.trace_categories = DEFAULT_TRACE_CATEGORIES
    super(ThreadTimesTimelineMetric, self).Start(page, tab)
  def CountSlices(self, slices, substring):
    """Counts the slices whose name contains |substring|."""
    count = 0
    for event in slices:
      if substring in event.name:
        count += 1
    return count
  def AddResults(self, tab, results):
    """Buckets all slices by thread category and reports per-frame times."""
    # We need at least one action or we won't count any slices.
    assert len(self._action_ranges) > 0
    # Set up each thread category for consistant results.
    thread_category_results = {}
    for name in TimelineThreadCategories.values():
      thread_category_results[name] = ResultsForThread(self._model,
                                                       self._action_ranges,
                                                       name)
    # Group the slices by their thread category.
    for thread in self._model.GetAllThreads():
      thread_category = ThreadCategoryName(thread.name)
      thread_category_results[thread_category].AppendThreadSlices(thread)
    # Group all threads.
    for thread in self._model.GetAllThreads():
      thread_category_results['total_all'].AppendThreadSlices(thread)
    # Also group fast-path threads.
    for thread in self._model.GetAllThreads():
      if ThreadCategoryName(thread.name) in FastPathThreads:
        thread_category_results['total_fast_path'].AppendThreadSlices(thread)
    # Calculate the number of frames.
    frame_slices = thread_category_results[FrameTraceThreadName].all_slices
    num_frames = self.CountSlices(frame_slices, FrameTraceName)
    # Report the desired results and details.
    for thread_results in thread_category_results.values():
      if thread_results.name in self.results_to_report:
        thread_results.AddResults(num_frames, results)
      # TOOD(nduca): When generic results objects are done, this special case
      # can be replaced with a generic UI feature.
      if thread_results.name in self.details_to_report:
        thread_results.AddDetailedResults(num_frames, results)
f0f5bd7c3e2b2b00aa2555eecd0cbe55dad993ee | 170,499 | py | Python | pandas/core/series.py | mntss/pandas | fc6b441ba527ca32b460ae4f4f5a6802335497f9 | [
"BSD-3-Clause"
] | 2 | 2021-12-28T09:06:32.000Z | 2022-01-28T06:47:12.000Z | pandas/core/series.py | soumyas567/pandas | ccb36cc8f1eeed53dea321ee7381602a6957de54 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/series.py | soumyas567/pandas | ccb36cc8f1eeed53dea321ee7381602a6957de54 | [
"BSD-3-Clause"
] | 1 | 2021-02-23T20:52:58.000Z | 2021-02-23T20:52:58.000Z | """
Data structure for 1-dimensional cross-sectional and time series data
"""
from __future__ import annotations
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Hashable,
Iterable,
Literal,
Sequence,
Union,
cast,
overload,
)
import warnings
import weakref
import numpy as np
from pandas._config import get_option
from pandas._libs import (
lib,
properties,
reshape,
tslibs,
)
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
ArrayLike,
Axis,
Dtype,
DtypeObj,
FillnaOptions,
IndexKeyFunc,
SingleManager,
StorageOptions,
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import InvalidIndexError
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_nonkeyword_arguments,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import (
validate_ascending,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
convert_dtypes,
maybe_box_native,
maybe_cast_pointwise_result,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
is_dict_like,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
validate_all_hashable,
)
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import (
isna,
na_value_for_dtype,
notna,
remove_na_arraylike,
)
from pandas.core import (
algorithms,
base,
generic,
missing,
nanops,
ops,
)
from pandas.core.accessor import CachedAccessor
from pandas.core.apply import SeriesApply
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.categorical import CategoricalAccessor
from pandas.core.arrays.sparse import SparseAccessor
import pandas.core.common as com
from pandas.core.construction import (
create_series_with_explicit_dtype,
extract_array,
is_empty_data,
sanitize_array,
)
from pandas.core.generic import NDFrame
from pandas.core.indexers import (
deprecate_ndim_indexing,
unpack_1tuple,
)
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.api import (
CategoricalIndex,
DatetimeIndex,
Float64Index,
Index,
MultiIndex,
PeriodIndex,
TimedeltaIndex,
default_index,
ensure_index,
)
import pandas.core.indexes.base as ibase
from pandas.core.indexing import check_bool_indexer
from pandas.core.internals import (
SingleArrayManager,
SingleBlockManager,
)
from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import (
ensure_key_mapped,
nargsort,
)
from pandas.core.strings import StringMethods
from pandas.core.tools.datetimes import to_datetime
import pandas.io.formats.format as fmt
import pandas.plotting
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas.core.frame import DataFrame
from pandas.core.groupby.generic import SeriesGroupBy
from pandas.core.resample import Resampler
__all__ = ["Series"]
_shared_doc_kwargs = {
"axes": "index",
"klass": "Series",
"axes_single_arg": "{0 or 'index'}",
"axis": """axis : {0 or 'index'}
Parameter needed for compatibility with DataFrame.""",
"inplace": """inplace : bool, default False
If True, performs operation inplace and returns None.""",
"unique": "np.ndarray",
"duplicated": "Series",
"optional_by": "",
"optional_mapper": "",
"optional_labels": "",
"optional_axis": "",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
def _coerce_method(converter):
    """
    Build a scalar-coercion dunder (e.g. ``__int__``) backed by *converter*.

    The returned method converts a length-1 Series to a scalar and raises
    ``TypeError`` for any other length.
    """

    def wrapper(self):
        if len(self) != 1:
            raise TypeError(f"cannot convert the series to {converter}")
        return converter(self.iloc[0])

    wrapper.__name__ = f"__{converter.__name__}__"
    return wrapper
# ----------------------------------------------------------------------
# Series class
class Series(base.IndexOpsMixin, generic.NDFrame):
"""
One-dimensional ndarray with axis labels (including time series).
Labels need not be unique but must be a hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
missing data (currently represented as NaN).
Operations between Series (+, -, /, \\*, \\*\\*) align values based on their
associated index values-- they need not be the same length. The result
index will be the sorted union of the two indexes.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series. If data is a dict, argument order is
maintained.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like
and index is None, then the keys in the data are used as the index. If the
index is not None, the resulting Series is reindexed with the index values.
dtype : str, numpy.dtype, or ExtensionDtype, optional
Data type for the output Series. If not specified, this will be
inferred from `data`.
See the :ref:`user guide <basics.dtypes>` for more usages.
name : str, optional
The name to give to the Series.
copy : bool, default False
Copy input data. Only affects Series or 1d ndarray input. See examples.
Examples
--------
Constructing Series from a dictionary with an Index specified
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> ser = pd.Series(data=d, index=['a', 'b', 'c'])
>>> ser
a 1
b 2
c 3
dtype: int64
The keys of the dictionary match with the Index values, hence the Index
values have no effect.
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> ser = pd.Series(data=d, index=['x', 'y', 'z'])
>>> ser
x NaN
y NaN
z NaN
dtype: float64
Note that the Index is first build with the keys from the dictionary.
After this the Series is reindexed with the given Index values, hence we
get all NaN as a result.
Constructing Series from a list with `copy=False`.
>>> r = [1, 2]
>>> ser = pd.Series(r, copy=False)
>>> ser.iloc[0] = 999
>>> r
[1, 2]
>>> ser
0 999
1 2
dtype: int64
Due to input data type the Series has a `copy` of
the original data even though `copy=False`, so
the data is unchanged.
Constructing Series from a 1d ndarray with `copy=False`.
>>> r = np.array([1, 2])
>>> ser = pd.Series(r, copy=False)
>>> ser.iloc[0] = 999
>>> r
array([999, 2])
>>> ser
0 999
1 2
dtype: int64
Due to input data type the Series has a `view` on
the original data, so
the data is changed as well.
"""
_typ = "series"
_HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)
_name: Hashable
_metadata: list[str] = ["name"]
_internal_names_set = {"index"} | generic.NDFrame._internal_names_set
_accessors = {"dt", "cat", "str", "sparse"}
_hidden_attrs = (
base.IndexOpsMixin._hidden_attrs
| generic.NDFrame._hidden_attrs
| frozenset(["compress", "ptp"])
)
# Override cache_readonly bc Series is mutable
# error: Incompatible types in assignment (expression has type "property",
# base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]")
hasnans = property( # type: ignore[assignment]
# error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget"
base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined]
doc=base.IndexOpsMixin.hasnans.__doc__,
)
_mgr: SingleManager
div: Callable[[Series, Any], Series]
rdiv: Callable[[Series, Any], Series]
# ----------------------------------------------------------------------
# Constructors
    def __init__(
        self,
        data=None,
        index=None,
        dtype: Dtype | None = None,
        name=None,
        copy: bool = False,
        fastpath: bool = False,
    ):
        """
        Construct a Series, dispatching on the type of ``data``.

        ``data`` may be an internal manager (fastpath), Index, ndarray,
        Series, dict-like, ExtensionArray, scalar, or a generic iterable;
        each branch below normalizes it into a Single(Block|Array)Manager
        before handing off to ``NDFrame.__init__``.
        """
        # Fastest path: a pre-built manager with no other arguments is
        # adopted wholesale.
        if (
            isinstance(data, (SingleBlockManager, SingleArrayManager))
            and index is None
            and dtype is None
            and copy is False
        ):
            # GH#33357 called with just the SingleBlockManager
            NDFrame.__init__(self, data)
            self.name = name
            return
        # we are called internally, so short-circuit
        if fastpath:
            # data is an ndarray, index is defined
            if not isinstance(data, (SingleBlockManager, SingleArrayManager)):
                manager = get_option("mode.data_manager")
                if manager == "block":
                    data = SingleBlockManager.from_array(data, index)
                elif manager == "array":
                    data = SingleArrayManager.from_array(data, index)
            if copy:
                data = data.copy()
            if index is None:
                index = data.index
        else:
            # Public (non-fastpath) construction: validate and normalize.
            name = ibase.maybe_extract_name(name, data, type(self))
            if is_empty_data(data) and dtype is None:
                # gh-17261
                warnings.warn(
                    "The default dtype for empty Series will be 'object' instead "
                    "of 'float64' in a future version. Specify a dtype explicitly "
                    "to silence this warning.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
                # uncomment the line below when removing the FutureWarning
                # dtype = np.dtype(object)
            if index is not None:
                index = ensure_index(index)
            if data is None:
                data = {}
            if dtype is not None:
                dtype = self._validate_dtype(dtype)
            # Type-based dispatch on ``data`` begins here; branch order matters.
            if isinstance(data, MultiIndex):
                raise NotImplementedError(
                    "initializing a Series from a MultiIndex is not supported"
                )
            elif isinstance(data, Index):
                if dtype is not None:
                    # astype copies
                    data = data.astype(dtype)
                else:
                    # GH#24096 we need to ensure the index remains immutable
                    data = data._values.copy()
                copy = False
            elif isinstance(data, np.ndarray):
                if len(data.dtype):
                    # GH#13296 we are dealing with a compound dtype, which
                    # should be treated as 2D
                    raise ValueError(
                        "Cannot construct a Series from an ndarray with "
                        "compound dtype. Use DataFrame instead."
                    )
            elif isinstance(data, Series):
                if index is None:
                    index = data.index
                else:
                    data = data.reindex(index, copy=copy)
                    copy = False
                data = data._mgr
            elif is_dict_like(data):
                data, index = self._init_dict(data, index, dtype)
                dtype = None
                copy = False
            elif isinstance(data, (SingleBlockManager, SingleArrayManager)):
                if index is None:
                    index = data.index
                elif not data.index.equals(index) or copy:
                    # GH#19275 SingleBlockManager input should only be called
                    # internally
                    raise AssertionError(
                        "Cannot pass both SingleBlockManager "
                        "`data` argument and a different "
                        "`index` argument. `copy` must be False."
                    )
            elif isinstance(data, ExtensionArray):
                pass
            else:
                data = com.maybe_iterable_to_list(data)
            if index is None:
                if not is_list_like(data):
                    data = [data]
                index = default_index(len(data))
            elif is_list_like(data):
                com.require_length_match(data, index)
            # create/copy the manager
            if isinstance(data, (SingleBlockManager, SingleArrayManager)):
                if dtype is not None:
                    data = data.astype(dtype=dtype, errors="ignore", copy=copy)
                elif copy:
                    data = data.copy()
            else:
                data = sanitize_array(data, index, dtype, copy)
                manager = get_option("mode.data_manager")
                if manager == "block":
                    data = SingleBlockManager.from_array(data, index)
                elif manager == "array":
                    data = SingleArrayManager.from_array(data, index)
        generic.NDFrame.__init__(self, data)
        self.name = name
        self._set_axis(0, index, fastpath=True)
    def _init_dict(
        self, data, index: Index | None = None, dtype: DtypeObj | None = None
    ):
        """
        Derive the "_mgr" and "index" attributes of a new Series from a
        dictionary input.
        Parameters
        ----------
        data : dict or dict-like
            Data used to populate the new Series.
        index : Index or None, default None
            Index for the new Series: if None, use dict keys.
        dtype : np.dtype, ExtensionDtype, or None, default None
            The dtype for the new Series: if None, infer from data.
        Returns
        -------
        _data : BlockManager for the new Series
        index : index for the new Series
        """
        keys: Index | tuple
        # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
        # raises KeyError), so we iterate the entire dict, and align
        if data:
            # GH#34717: extracting keys/values via zip of generators was
            # slow; materialize them directly from the dict views instead.
            keys = tuple(data.keys())
            values = list(data.values()) # Generating list of values- faster way
        elif index is not None:
            # fastpath for Series(data=None). Just use broadcasting a scalar
            # instead of reindexing.
            values = na_value_for_dtype(pandas_dtype(dtype), compat=False)
            keys = index
        else:
            # Empty dict and no index: build an empty Series.
            keys, values = (), []
        # Input is now list-like, so rely on "standard" construction:
        # TODO: passing np.float64 to not break anything yet. See GH-17261
        s = create_series_with_explicit_dtype(
            # error: Argument "index" to "create_series_with_explicit_dtype" has
            # incompatible type "Tuple[Any, ...]"; expected "Union[ExtensionArray,
            # ndarray, Index, None]"
            values,
            index=keys, # type: ignore[arg-type]
            dtype=dtype,
            dtype_if_empty=np.float64,
        )
        # Now we just make sure the order is respected, if any
        if data and index is not None:
            s = s.reindex(index, copy=False)
        return s._mgr, s.index
# ----------------------------------------------------------------------
    @property
    def _constructor(self) -> type[Series]:
        # Class used to build same-dimension results of operations.
        return Series
    @property
    def _constructor_expanddim(self) -> type[DataFrame]:
        """
        Used when a manipulation result has one higher dimension as the
        original, such as Series.to_frame()
        """
        # Local import; presumably avoids a circular dependency at module
        # load time -- confirm before moving to the top of the file.
        from pandas.core.frame import DataFrame
        return DataFrame
# types
    @property
    def _can_hold_na(self) -> bool:
        """Whether the underlying manager's dtype can hold NA values."""
        return self._mgr._can_hold_na
    def _set_axis(self, axis: int, labels, fastpath: bool = False) -> None:
        """
        Override generic, we want to set the _typ here.
        This is called from the cython code when we set the `index` attribute
        directly, e.g. `series.index = [1, 2, 3]`.
        """
        if not fastpath:
            labels = ensure_index(labels)
        if labels._is_all_dates:
            # All-date labels are upgraded to a DatetimeIndex when possible.
            deep_labels = labels
            if isinstance(labels, CategoricalIndex):
                # Inspect the categories, not the categorical wrapper.
                deep_labels = labels.categories
            if not isinstance(
                deep_labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)
            ):
                try:
                    labels = DatetimeIndex(labels)
                    # need to set here because we changed the index
                    if fastpath:
                        self._mgr.set_axis(axis, labels)
                except (tslibs.OutOfBoundsDatetime, ValueError):
                    # labels may exceed datetime bounds,
                    # or not be convertible to a DatetimeIndex
                    pass
        if not fastpath:
            # The ensure_index call above ensures we have an Index object
            self._mgr.set_axis(axis, labels)
# ndarray compatibility
    @property
    def dtype(self) -> DtypeObj:
        """
        Return the dtype object of the underlying data.
        """
        # Delegated to the single-column manager.
        return self._mgr.dtype
    @property
    def dtypes(self) -> DtypeObj:
        """
        Return the dtype object of the underlying data.
        """
        # DataFrame compatibility: alias of ``dtype`` so code written against
        # DataFrame's plural property also works on a Series.
        return self.dtype
    @property
    def name(self) -> Hashable:
        """
        Return the name of the Series.
        The name of a Series becomes its index or column name if it is used
        to form a DataFrame. It is also used whenever displaying the Series
        using the interpreter.
        Returns
        -------
        label (hashable object)
            The name of the Series, also the column name if part of a DataFrame.
        See Also
        --------
        Series.rename : Sets the Series name when given a scalar input.
        Index.name : Corresponding Index property.
        Examples
        --------
        The Series name can be set initially when calling the constructor.
        >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers')
        >>> s
        0    1
        1    2
        2    3
        Name: Numbers, dtype: int64
        >>> s.name = "Integers"
        >>> s
        0    1
        1    2
        2    3
        Name: Integers, dtype: int64
        The name of a Series within a DataFrame is its column name.
        >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],
        ...                   columns=["Odd Numbers", "Even Numbers"])
        >>> df
           Odd Numbers  Even Numbers
        0            1             2
        1            3             4
        2            5             6
        >>> df["Even Numbers"].name
        'Even Numbers'
        """
        # ``_name`` is written by the setter below, which validates
        # hashability before assignment.
        return self._name
    @name.setter
    def name(self, value: Hashable) -> None:
        # Reject unhashable names up front, then bypass NDFrame's
        # __setattr__ machinery when storing the backing attribute.
        validate_all_hashable(value, error_name=f"{type(self).__name__}.name")
        object.__setattr__(self, "_name", value)
    @property
    def values(self):
        """
        Return Series as ndarray or ndarray-like depending on the dtype.
        .. warning::
           We recommend using :attr:`Series.array` or
           :meth:`Series.to_numpy`, depending on whether you need
           a reference to the underlying data or a NumPy array.
        Returns
        -------
        numpy.ndarray or ndarray-like
        See Also
        --------
        Series.array : Reference to the underlying data.
        Series.to_numpy : A NumPy array representing the underlying data.
        Examples
        --------
        >>> pd.Series([1, 2, 3]).values
        array([1, 2, 3])
        >>> pd.Series(list('aabc')).values
        array(['a', 'a', 'b', 'c'], dtype=object)
        >>> pd.Series(list('aabc')).astype('category').values
        ['a', 'a', 'b', 'c']
        Categories (3, object): ['a', 'b', 'c']
        Timezone aware datetime data is converted to UTC:
        >>> pd.Series(pd.date_range('20130101', periods=3,
        ...                         tz='US/Eastern')).values
        array(['2013-01-01T05:00:00.000000000',
               '2013-01-02T05:00:00.000000000',
               '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
        """
        # Delegated to the manager's user-facing representation.
        return self._mgr.external_values()
    @property
    def _values(self):
        """
        Return the internal repr of this data (defined by Block.interval_values).
        This are the values as stored in the Block (ndarray or ExtensionArray
        depending on the Block class), with datetime64[ns] and timedelta64[ns]
        wrapped in ExtensionArrays to match Index._values behavior.
        Differs from the public ``.values`` for certain data types, because of
        historical backwards compatibility of the public attribute (e.g. period
        returns object ndarray and datetimetz a datetime64[ns] ndarray for
        ``.values`` while it returns an ExtensionArray for ``._values`` in those
        cases).
        Differs from ``.array`` in that this still returns the numpy array if
        the Block is backed by a numpy array (except for datetime64 and
        timedelta64 dtypes), while ``.array`` ensures to always return an
        ExtensionArray.
        Overview:
        dtype       | values        | _values       | array         |
        ----------- | ------------- | ------------- | ------------- |
        Numeric     | ndarray       | ndarray       | PandasArray   |
        Category    | Categorical   | Categorical   | Categorical   |
        dt64[ns]    | ndarray[M8ns] | DatetimeArray | DatetimeArray |
        dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray |
        td64[ns]    | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] |
        Period      | ndarray[obj]  | PeriodArray   | PeriodArray   |
        Nullable    | EA            | EA            | EA            |
        """
        # Delegated to the manager's internal representation.
        return self._mgr.internal_values()
    # error: Decorated property not supported
    @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc]
    @property
    def array(self) -> ExtensionArray:
        # Always an ExtensionArray view of the data (see _values docstring).
        return self._mgr.array_values()
# ops
    def ravel(self, order="C"):
        """
        Return the flattened underlying data as an ndarray.
        Parameters
        ----------
        order : {'C', 'F', 'A', 'K'}, default 'C'
            Passed through to :meth:`numpy.ndarray.ravel`.
        Returns
        -------
        numpy.ndarray or ndarray-like
            Flattened data of the Series.
        See Also
        --------
        numpy.ndarray.ravel : Return a flattened array.
        """
        return self._values.ravel(order=order)
    def __len__(self) -> int:
        """
        Return the length of the Series.
        """
        # Length of the underlying single-column manager.
        return len(self._mgr)
    def view(self, dtype: Dtype | None = None) -> Series:
        """
        Create a new view of the Series.
        This function will return a new Series with a view of the same
        underlying values in memory, optionally reinterpreted with a new data
        type. The new data type must preserve the same size in bytes as to not
        cause index misalignment.
        Parameters
        ----------
        dtype : data type
            Data type object or one of their string representations.
        Returns
        -------
        Series
            A new Series object as a view of the same data in memory.
        See Also
        --------
        numpy.ndarray.view : Equivalent numpy function to create a new view of
            the same data in memory.
        Notes
        -----
        Series are instantiated with ``dtype=float64`` by default. While
        ``numpy.ndarray.view()`` will return a view with the same data type as
        the original array, ``Series.view()`` (without specified dtype)
        will try using ``float64`` and may fail if the original data type size
        in bytes is not the same.
        Examples
        --------
        >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')
        >>> s
        0   -2
        1   -1
        2    0
        3    1
        4    2
        dtype: int8
        The 8 bit signed integer representation of `-1` is `0b11111111`, but
        the same bytes represent 255 if read as an 8 bit unsigned integer:
        >>> us = s.view('uint8')
        >>> us
        0    254
        1    255
        2      0
        3      1
        4      2
        dtype: uint8
        The views share the same underlying values:
        >>> us[0] = 128
        >>> s
        0   -128
        1     -1
        2      0
        3      1
        4      2
        dtype: int8
        """
        # Reinterpret the underlying values and wrap in a new Series sharing
        # the same index; __finalize__ propagates metadata such as the name.
        return self._constructor(
            self._values.view(dtype), index=self.index
        ).__finalize__(self, method="view")
# ----------------------------------------------------------------------
# NDArray Compat
_HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)
def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
"""
Return the values as a NumPy array.
Users should not call this directly. Rather, it is invoked by
:func:`numpy.array` and :func:`numpy.asarray`.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to use for the resulting NumPy array. By default,
the dtype is inferred from the data.
Returns
-------
numpy.ndarray
The values in the series converted to a :class:`numpy.ndarray`
with the specified `dtype`.
See Also
--------
array : Create a new array from data.
Series.array : Zero-copy view to the array backing the Series.
Series.to_numpy : Series method for similar behavior.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> np.asarray(ser)
array([1, 2, 3])
For timezone-aware data, the timezones may be retained with
``dtype='object'``
>>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> np.asarray(tzser, dtype="object")
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
dtype=object)
Or the values may be localized to UTC and the tzinfo discarded with
``dtype='datetime64[ns]'``
>>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', ...],
dtype='datetime64[ns]')
"""
return np.asarray(self._values, dtype)
# ----------------------------------------------------------------------
# Unary Methods
# coercion
__float__ = _coerce_method(float)
__long__ = _coerce_method(int)
__int__ = _coerce_method(int)
# ----------------------------------------------------------------------
# indexers
    @property
    def axes(self) -> list[Index]:
        """
        Return a list of the row axis labels.
        """
        # A Series has exactly one axis: its index.
        return [self.index]
# ----------------------------------------------------------------------
# Indexing Methods
@Appender(generic.NDFrame.take.__doc__)
def take(self, indices, axis=0, is_copy=None, **kwargs) -> Series:
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=find_stack_level(),
)
nv.validate_take((), kwargs)
indices = ensure_platform_int(indices)
new_index = self.index.take(indices)
new_values = self._values.take(indices)
result = self._constructor(new_values, index=new_index, fastpath=True)
return result.__finalize__(self, method="take")
    def _take_with_is_copy(self, indices, axis=0) -> Series:
        """
        Internal version of the `take` method that sets the `_is_copy`
        attribute to keep track of the parent dataframe (used in indexing
        for the SettingWithCopyWarning). For Series this does the same
        as the public take (it never sets `_is_copy`).
        See the docstring of `take` for full explanation of the parameters.
        """
        # Pure delegation; exists for interface symmetry with DataFrame.
        return self.take(indices=indices, axis=axis)
    def _ixs(self, i: int, axis: int = 0):
        """
        Return the i-th value or values in the Series by location.
        Parameters
        ----------
        i : int
        axis : int, default 0
            Unused for Series; present for NDFrame interface compatibility.
        Returns
        -------
        scalar (int) or Series (slice, sequence)
        """
        return self._values[i]
    def _slice(self, slobj: slice, axis: int = 0) -> Series:
        """Return a positional slice of the Series."""
        # axis kwarg is retained for compat with NDFrame method
        # _slice is *always* positional
        return self._get_values(slobj)
    def __getitem__(self, key):
        # Dispatch order matters: callable -> Ellipsis -> scalar/label ->
        # iterator/boolean mask -> list-like fallthrough (_get_with).
        key = com.apply_if_callable(key, self)
        if key is Ellipsis:
            return self
        key_is_scalar = is_scalar(key)
        if isinstance(key, (list, tuple)):
            key = unpack_1tuple(key)
        if is_integer(key) and self.index._should_fallback_to_positional:
            # Integer key against a non-integer index: positional lookup.
            return self._values[key]
        elif key_is_scalar:
            return self._get_value(key)
        if is_hashable(key):
            # Otherwise index.get_value will raise InvalidIndexError
            try:
                # For labels that don't resolve as scalars like tuples and frozensets
                result = self._get_value(key)
                return result
            except (KeyError, TypeError):
                if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
                    # We still have the corner case where a tuple is a key
                    # in the first level of our MultiIndex
                    return self._get_values_tuple(key)
        if is_iterator(key):
            # Materialize so the key can be inspected more than once.
            key = list(key)
        if com.is_bool_indexer(key):
            key = check_bool_indexer(self.index, key)
            key = np.asarray(key, dtype=bool)
            return self._get_values(key)
        return self._get_with(key)
    def _get_with(self, key):
        """Handle list-like / slice keys not resolved by ``__getitem__``."""
        # other: fancy integer or otherwise
        if isinstance(key, slice):
            # _convert_slice_indexer to determine if this slice is positional
            # or label based, and if the latter, convert to positional
            slobj = self.index._convert_slice_indexer(key, kind="getitem")
            return self._slice(slobj)
        elif isinstance(key, ABCDataFrame):
            raise TypeError(
                "Indexing a Series with DataFrame is not "
                "supported, use the appropriate DataFrame column"
            )
        elif isinstance(key, tuple):
            return self._get_values_tuple(key)
        elif not is_list_like(key):
            # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684
            return self.loc[key]
        if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)):
            key = list(key)
        if isinstance(key, Index):
            key_type = key.inferred_type
        else:
            key_type = lib.infer_dtype(key, skipna=False)
        # Note: The key_type == "boolean" case should be caught by the
        #  com.is_bool_indexer check in __getitem__
        if key_type == "integer":
            # We need to decide whether to treat this as a positional indexer
            #  (i.e. self.iloc) or label-based (i.e. self.loc)
            if not self.index._should_fallback_to_positional:
                return self.loc[key]
            else:
                return self.iloc[key]
        # handle the dup indexing case GH#4246
        return self.loc[key]
    def _get_values_tuple(self, key):
        """Resolve a tuple key, normally against a MultiIndex."""
        # mpl hackaround
        if com.any_none(*key):
            # e.g. ser[:, None] from matplotlib; deprecated multi-dim indexing.
            result = self._get_values(key)
            deprecate_ndim_indexing(result, stacklevel=find_stack_level())
            return result
        if not isinstance(self.index, MultiIndex):
            raise KeyError("key of type tuple not found and not a MultiIndex")
        # If key is contained, would have returned by now
        indexer, new_index = self.index.get_loc_level(key)
        return self._constructor(self._values[indexer], index=new_index).__finalize__(
            self
        )
    def _get_values(self, indexer):
        """Select values by positional indexer, returning a Series."""
        try:
            new_mgr = self._mgr.getitem_mgr(indexer)
            return self._constructor(new_mgr).__finalize__(self)
        except ValueError:
            # mpl compat if we look up e.g. ser[:, np.newaxis];
            #  see tests.series.timeseries.test_mpl_compat_hack
            # the asarray is needed to avoid returning a 2D DatetimeArray
            return np.asarray(self._values[indexer])
    def _get_value(self, label, takeable: bool = False):
        """
        Quickly retrieve single value at passed index label.
        Parameters
        ----------
        label : object
        takeable : interpret the index as indexers, default False
        Returns
        -------
        scalar value
        """
        if takeable:
            # ``label`` is already a position.
            return self._values[label]
        # Similar to Index.get_value, but we do not fall back to positional
        loc = self.index.get_loc(label)
        return self.index._get_values_for_loc(self, loc, label)
    def __setitem__(self, key, value) -> None:
        # Strategy: try the engine-based label setter first, then fall back
        # per exception type (KeyError/ValueError vs InvalidIndexError).
        key = com.apply_if_callable(key, self)
        cacher_needs_updating = self._check_is_chained_assignment_possible()
        if key is Ellipsis:
            key = slice(None)
        if isinstance(key, slice):
            indexer = self.index._convert_slice_indexer(key, kind="getitem")
            return self._set_values(indexer, value)
        try:
            self._set_with_engine(key, value)
        except (KeyError, ValueError):
            if is_integer(key) and self.index.inferred_type != "integer":
                # positional setter
                if not self.index._should_fallback_to_positional:
                    # GH#33469
                    warnings.warn(
                        "Treating integers as positional in Series.__setitem__ "
                        "with a Float64Index is deprecated. In a future version, "
                        "`series[an_int] = val` will insert a new key into the "
                        "Series. Use `series.iloc[an_int] = val` to treat the "
                        "key as positional.",
                        FutureWarning,
                        stacklevel=find_stack_level(),
                    )
                # this is equivalent to self._values[key] = value
                self._mgr.setitem_inplace(key, value)
            else:
                # GH#12862 adding a new key to the Series
                self.loc[key] = value
        except (InvalidIndexError, TypeError) as err:
            if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
                # cases with MultiIndex don't get here bc they raise KeyError
                raise KeyError(
                    "key of type tuple not found and not a MultiIndex"
                ) from err
            if com.is_bool_indexer(key):
                key = check_bool_indexer(self.index, key)
                key = np.asarray(key, dtype=bool)
                if (
                    is_list_like(value)
                    and len(value) != len(self)
                    and not isinstance(value, Series)
                    and not is_object_dtype(self.dtype)
                ):
                    # Series will be reindexed to have matching length inside
                    #  _where call below
                    # GH#44265
                    indexer = key.nonzero()[0]
                    self._set_values(indexer, value)
                    return
                # otherwise with listlike other we interpret series[mask] = other
                #  as series[mask] = other[mask]
                try:
                    self._where(~key, value, inplace=True)
                except InvalidIndexError:
                    # test_where_dups
                    self.iloc[key] = value
                return
            else:
                self._set_with(key, value)
        if cacher_needs_updating:
            # Propagate the change to any parent DataFrame caching this column.
            self._maybe_update_cacher()
    def _set_with_engine(self, key, value) -> None:
        """Set a single value by label, using the index engine for lookup."""
        loc = self.index.get_loc(key)
        # error: Argument 1 to "validate_numeric_casting" has incompatible type
        # "Union[dtype, ExtensionDtype]"; expected "dtype"
        validate_numeric_casting(self.dtype, value) # type: ignore[arg-type]
        # this is equivalent to self._values[key] = value
        self._mgr.setitem_inplace(loc, value)
    def _set_with(self, key, value):
        """Set values for a non-boolean list-like or scalar key."""
        # other: fancy integer or otherwise
        assert not isinstance(key, tuple)
        if is_scalar(key):
            key = [key]
        elif is_iterator(key):
            # Without this, the call to infer_dtype will consume the generator
            key = list(key)
        key_type = lib.infer_dtype(key, skipna=False)
        # Note: key_type == "boolean" should not occur because that
        #  should be caught by the is_bool_indexer check in __setitem__
        if key_type == "integer":
            # Decide label-based vs positional, mirroring _get_with.
            if not self.index._should_fallback_to_positional:
                self._set_labels(key, value)
            else:
                self._set_values(key, value)
        else:
            self.loc[key] = value
def _set_labels(self, key, value) -> None:
key = com.asarray_tuplesafe(key)
indexer: np.ndarray = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise KeyError(f"{key[mask]} not in index")
self._set_values(indexer, value)
    def _set_values(self, key, value) -> None:
        """Set values by positional indexer, replacing the manager."""
        if isinstance(key, (Index, Series)):
            # Unwrap to the raw positions/booleans.
            key = key._values
        self._mgr = self._mgr.setitem(indexer=key, value=value)
        self._maybe_update_cacher()
    def _set_value(self, label, value, takeable: bool = False):
        """
        Quickly set single value at passed label.
        If label is not contained, a new object is created with the label
        placed at the end of the result index.
        Parameters
        ----------
        label : object
            Partial indexing with MultiIndex not allowed.
        value : object
            Scalar value.
        takeable : interpret the index as indexers, default False
        """
        if not takeable:
            try:
                loc = self.index.get_loc(label)
            except KeyError:
                # set using a non-recursive method
                self.loc[label] = value
                return
        else:
            # ``label`` is already a position.
            loc = label
        self._set_values(loc, value)
# ----------------------------------------------------------------------
# Lookup Caching
@property
def _is_cached(self) -> bool:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _reset_cacher(self) -> None:
"""
Reset the cacher.
"""
if hasattr(self, "_cacher"):
# should only get here with self.ndim == 1
del self._cacher
    def _set_as_cached(self, item, cacher) -> None:
        """
        Set the _cacher attribute on the calling object with a weakref to
        cacher.
        """
        # Weakref avoids keeping the parent alive through this child Series.
        self._cacher = (item, weakref.ref(cacher))
    def _clear_item_cache(self) -> None:
        """No-op for Series; only DataFrame maintains an item cache."""
        # no-op for Series
        pass
    def _check_is_chained_assignment_possible(self) -> bool:
        """
        See NDFrame._check_is_chained_assignment_possible.__doc__
        """
        if self._is_view and self._is_cached:
            ref = self._get_cacher()
            if ref is not None and ref._is_mixed_type:
                # View on a mixed-type parent: warn about chained assignment.
                self._check_setitem_copy(t="referent", force=True)
            return True
        return super()._check_is_chained_assignment_possible()
    def _maybe_update_cacher(
        self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False
    ) -> None:
        """
        See NDFrame._maybe_update_cacher.__doc__
        """
        cacher = getattr(self, "_cacher", None)
        if cacher is not None:
            assert self.ndim == 1
            ref: DataFrame = cacher[1]()
            # we are trying to reference a dead referent, hence
            # a copy
            if ref is None:
                del self._cacher
            elif len(self) == len(ref) and self.name in ref.columns:
                # GH#42530 self.name must be in ref.columns
                #  to ensure column still in dataframe
                #  otherwise, either self or ref has swapped in new arrays
                ref._maybe_cache_changed(cacher[0], self, inplace=inplace)
            else:
                # GH#33675 we have swapped in a new array, so parent
                #  reference to self is now invalid
                ref._item_cache.pop(cacher[0], None)
        # Let the shared NDFrame implementation handle copy verification.
        super()._maybe_update_cacher(
            clear=clear, verify_is_copy=verify_is_copy, inplace=inplace
        )
# ----------------------------------------------------------------------
# Unsorted
    @property
    def _is_mixed_type(self):
        # A Series holds a single dtype (see the ``dtype`` property), so it
        # is never mixed-type.
        return False
    def repeat(self, repeats, axis=None) -> Series:
        """
        Repeat elements of a Series.
        Returns a new Series where each element of the current Series
        is repeated consecutively a given number of times.
        Parameters
        ----------
        repeats : int or array of ints
            The number of repetitions for each element. This should be a
            non-negative integer. Repeating 0 times will return an empty
            Series.
        axis : None
            Must be ``None``. Has no effect but is accepted for compatibility
            with numpy.
        Returns
        -------
        Series
            Newly created Series with repeated elements.
        See Also
        --------
        Index.repeat : Equivalent function for Index.
        numpy.repeat : Similar method for :class:`numpy.ndarray`.
        Examples
        --------
        >>> s = pd.Series(['a', 'b', 'c'])
        >>> s
        0    a
        1    b
        2    c
        dtype: object
        >>> s.repeat(2)
        0    a
        0    a
        1    b
        1    b
        2    c
        2    c
        dtype: object
        >>> s.repeat([1, 2, 3])
        0    a
        1    b
        1    b
        2    c
        2    c
        2    c
        dtype: object
        """
        # Reject any axis other than the numpy-compat default.
        nv.validate_repeat((), {"axis": axis})
        # Repeat labels and values in lockstep so alignment is preserved.
        new_index = self.index.repeat(repeats)
        new_values = self._values.repeat(repeats)
        return self._constructor(new_values, index=new_index).__finalize__(
            self, method="repeat"
        )
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
def reset_index(self, level=None, drop=False, name=lib.no_default, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column, or
when the index is meaningless and needs to be reset to the default
before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels
from the index. Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in
the new DataFrame.
name : object, optional
The name to use for the column containing the original Series
values. Uses ``self.name`` by default. This argument is ignored
when `drop` is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame or None
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
See Also
--------
DataFrame.reset_index: Analogous function for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
The `level` parameter is interesting for Series with a multi-level
index.
>>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
... np.array(['one', 'two', 'one', 'two'])]
>>> s2 = pd.Series(
... range(4), name='foo',
... index=pd.MultiIndex.from_arrays(arrays,
... names=['a', 'b']))
To remove a specific level from the Index, use `level`.
>>> s2.reset_index(level='a')
a foo
b
one bar 0
two bar 1
one baz 2
two baz 3
If `level` is not set, all levels are removed from the Index.
>>> s2.reset_index()
a b foo
0 bar one 0
1 bar two 1
2 baz one 2
3 baz two 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if drop:
if name is lib.no_default:
name = self.name
new_index = default_index(len(self))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if inplace:
self.index = new_index
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
else:
return self._constructor(
self._values.copy(), index=new_index
).__finalize__(self, method="reset_index")
elif inplace:
raise TypeError(
"Cannot reset_index inplace on a Series to create a DataFrame"
)
else:
if name is lib.no_default:
# For backwards compatibility, keep columns as [0] instead of
# [None] when self.name is None
if self.name is None:
name = 0
else:
name = self.name
df = self.to_frame(name)
return df.reset_index(level=level, drop=drop)
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
"""
Return a string representation for a particular Series.
"""
repr_params = fmt.get_series_repr_params()
return self.to_string(**repr_params)
    def to_string(
        self,
        buf=None,
        na_rep="NaN",
        float_format=None,
        header=True,
        index=True,
        length=False,
        dtype=False,
        name=False,
        max_rows=None,
        min_rows=None,
    ):
        """
        Render a string representation of the Series.
        Parameters
        ----------
        buf : StringIO-like, optional
            Buffer to write to.
        na_rep : str, optional
            String representation of NaN to use, default 'NaN'.
        float_format : one-parameter function, optional
            Formatter function to apply to columns' elements if they are
            floats, default None.
        header : bool, default True
            Add the Series header (index name).
        index : bool, optional
            Add index (row) labels, default True.
        length : bool, default False
            Add the Series length.
        dtype : bool, default False
            Add the Series dtype.
        name : bool, default False
            Add the Series name if not None.
        max_rows : int, optional
            Maximum number of rows to show before truncating. If None, show
            all.
        min_rows : int, optional
            The number of rows to display in a truncated repr (when number
            of rows is above `max_rows`).
        Returns
        -------
        str or None
            String representation of Series if ``buf=None``, otherwise None.
        """
        formatter = fmt.SeriesFormatter(
            self,
            name=name,
            length=length,
            header=header,
            index=index,
            dtype=dtype,
            na_rep=na_rep,
            float_format=float_format,
            min_rows=min_rows,
            max_rows=max_rows,
        )
        result = formatter.to_string()
        # catch contract violations
        if not isinstance(result, str):
            raise AssertionError(
                "result must be of type str, type "
                f"of result is {repr(type(result).__name__)}"
            )
        if buf is None:
            return result
        else:
            try:
                buf.write(result)
            except AttributeError:
                # ``buf`` has no ``write``: treat it as a file path.
                # NOTE(review): opens with the platform default encoding --
                # confirm whether an explicit encoding should be passed here.
                with open(buf, "w") as f:
                    f.write(result)
@doc(
klass=_shared_doc_kwargs["klass"],
storage_options=generic._shared_docs["storage_options"],
examples=dedent(
"""Examples
--------
>>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(s.to_markdown())
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
Output markdown with a tabulate option.
>>> print(s.to_markdown(tablefmt="grid"))
+----+----------+
| | animal |
+====+==========+
| 0 | elk |
+----+----------+
| 1 | pig |
+----+----------+
| 2 | dog |
+----+----------+
| 3 | quetzal |
+----+----------+"""
),
)
    def to_markdown(
        self,
        buf: IO[str] | None = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> str | None:
        """
        Print {klass} in Markdown-friendly format.
        .. versionadded:: 1.0.0
        Parameters
        ----------
        buf : str, Path or StringIO-like, optional, default None
            Buffer to write to. If None, the output is returned as a string.
        mode : str, optional
            Mode in which file is opened, "wt" by default.
        index : bool, optional, default True
            Add index (row) labels.
            .. versionadded:: 1.1.0
        {storage_options}
            .. versionadded:: 1.2.0
        **kwargs
            These parameters will be passed to `tabulate \
            <https://pypi.org/project/tabulate>`_.
        Returns
        -------
        str
            {klass} in Markdown-friendly format.
        Notes
        -----
        Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
        {examples}
        """
        # Delegate to the DataFrame implementation via a one-column frame;
        # the rendered markdown table is the same for a Series.
        return self.to_frame().to_markdown(
            buf, mode, index, storage_options=storage_options, **kwargs
        )
# ----------------------------------------------------------------------
def items(self) -> Iterable[tuple[Hashable, Any]]:
"""
Lazily iterate over (index, value) tuples.
This method returns an iterable tuple (index, value). This is
convenient if you want to create a lazy iterator.
Returns
-------
iterable
Iterable of tuples containing the (index, value) pairs from a
Series.
See Also
--------
DataFrame.items : Iterate over (column name, Series) pairs.
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.
Examples
--------
>>> s = pd.Series(['A', 'B', 'C'])
>>> for index, value in s.items():
... print(f"Index : {index}, Value : {value}")
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
"""
return zip(iter(self.index), iter(self))
@Appender(items.__doc__)
    def iteritems(self) -> Iterable[tuple[Hashable, Any]]:
        # Backward-compatible alias of ``items``; its docstring is attached
        # by the ``@Appender(items.__doc__)`` decorator above.
        return self.items()
# ----------------------------------------------------------------------
# Misc public methods
def keys(self) -> Index:
"""
Return alias for index.
Returns
-------
Index
Index of the Series.
"""
return self.index
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_dict()
{0: 1, 1: 2, 2: 3, 3: 4}
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd)
defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
"""
# GH16122
into_c = com.standardize_mapping(into)
return into_c((k, maybe_box_native(v)) for k, v in self.items())
def to_frame(self, name: Hashable = lib.no_default) -> DataFrame:
"""
Convert Series to DataFrame.
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
Examples
--------
>>> s = pd.Series(["a", "b", "c"],
... name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
"""
columns: Index
if name is lib.no_default:
name = self.name
if name is None:
# default to [0], same as we would get with DataFrame(self)
columns = default_index(1)
else:
columns = Index([name])
else:
columns = Index([name])
mgr = self._mgr.to_2d_mgr(columns)
return self._constructor_expanddim(mgr)
def _set_name(self, name, inplace=False) -> Series:
"""
Set the Series name.
Parameters
----------
name : str
inplace : bool
Whether to modify `self` directly or return a copy.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
ser = self if inplace else self.copy()
ser.name = name
return ser
@Appender(
"""
Examples
--------
>>> ser = pd.Series([390., 350., 30., 20.],
... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed")
>>> ser
Falcon 390.0
Falcon 350.0
Parrot 30.0
Parrot 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(["a", "b", "a", "b"]).mean()
a 210.0
b 185.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(ser > 100).mean()
Max Speed
False 25.0
True 370.0
Name: Max Speed, dtype: float64
**Grouping by Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed")
>>> ser
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Animal
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level="Type").mean()
Type
Captive 210.0
Wild 185.0
Name: Max Speed, dtype: float64
We can also choose to include `NA` in group keys or not by defining
`dropna` parameter, the default setting is `True`.
>>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan])
>>> ser.groupby(level=0).sum()
a 3
b 3
dtype: int64
>>> ser.groupby(level=0, dropna=False).sum()
a 3
b 3
NaN 3
dtype: int64
>>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot']
>>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed")
>>> ser.groupby(["a", "b", "a", np.nan]).mean()
a 210.0
b 350.0
Name: Max Speed, dtype: float64
>>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()
a 210.0
b 350.0
NaN 20.0
Name: Max Speed, dtype: float64
"""
)
@Appender(generic._shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool | lib.NoDefault = no_default,
observed: bool = False,
dropna: bool = True,
) -> SeriesGroupBy:
from pandas.core.groupby.generic import SeriesGroupBy
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=find_stack_level(),
)
else:
squeeze = False
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
# error: Argument "squeeze" to "SeriesGroupBy" has incompatible type
# "Union[bool, NoDefault]"; expected "bool"
return SeriesGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze, # type: ignore[arg-type]
observed=observed,
dropna=dropna,
)
# ----------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
    def count(self, level=None):
        """
        Return number of non-NA/null observations in the Series.
        Parameters
        ----------
        level : int or level name, default None
            If the axis is a MultiIndex (hierarchical), count along a
            particular level, collapsing into a smaller Series.
            .. deprecated:: use ``groupby(level=...).count()`` instead.
        Returns
        -------
        int or Series (if level specified)
            Number of non-null values in the Series.
        See Also
        --------
        DataFrame.count : Count non-NA cells for each column or row.
        Examples
        --------
        >>> s = pd.Series([0.0, 1.0, np.nan])
        >>> s.count()
        2
        """
        if level is None:
            # Fast path: count of non-missing values as a numpy int64.
            return notna(self._values).sum().astype("int64")
        else:
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. ser.count(level=1) should use ser.groupby(level=1).count().",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        if not isinstance(self.index, MultiIndex):
            raise ValueError("Series.count level is only valid with a MultiIndex")
        index = self.index
        assert isinstance(index, MultiIndex)  # for mypy
        if isinstance(level, str):
            # Translate a level name into its positional number.
            level = index._get_level_number(level)
        lev = index.levels[level]
        # Work on a private copy of the codes since we may rewrite NA codes.
        level_codes = np.array(index.codes[level], subok=False, copy=True)
        mask = level_codes == -1
        if mask.any():
            # Give NA index entries their own bucket: assign them a fresh
            # code at the end and extend the level with its NA value.
            level_codes[mask] = cnt = len(lev)
            lev = lev.insert(cnt, lev._na_value)
        # Keep only codes for positions whose value is non-missing, then
        # count occurrences per level code.
        obs = level_codes[notna(self._values)]
        out = np.bincount(obs, minlength=len(lev) or None)
        return self._constructor(out, index=lev, dtype="int64").__finalize__(
            self, method="count"
        )
def mode(self, dropna=True) -> Series:
"""
Return the mode(s) of the Series.
The mode is the value that appears most often. There can be multiple modes.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
Series
Modes of the Series in sorted order.
"""
# TODO: Add option for bins like value_counts()
return algorithms.mode(self, dropna=dropna)
    def unique(self) -> ArrayLike:
        """
        Return unique values of Series object.
        Uniques are returned in order of appearance. Hash table-based unique,
        therefore does NOT sort.
        Returns
        -------
        ndarray or ExtensionArray
            The unique values returned as a NumPy array. See Notes.
        See Also
        --------
        unique : Top-level unique method for any 1-d array-like object.
        Index.unique : Return Index with unique values from an Index object.
        Notes
        -----
        Returns the unique values as a NumPy array. In case of an
        extension-array backed Series, a new
        :class:`~api.extensions.ExtensionArray` of that type with just
        the unique values is returned. This includes
        * Categorical
        * Period
        * Datetime with Timezone
        * Interval
        * Sparse
        * IntegerNA
        See Examples section.
        Examples
        --------
        >>> pd.Series([2, 1, 3, 3], name='A').unique()
        array([2, 1, 3])
        >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
        array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
        >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')
        ...            for _ in range(3)]).unique()
        <DatetimeArray>
        ['2016-01-01 00:00:00-05:00']
        Length: 1, dtype: datetime64[ns, US/Eastern]
        An Categorical will return categories in the order of
        appearance and with the same dtype.
        >>> pd.Series(pd.Categorical(list('baabc'))).unique()
        ['b', 'a', 'c']
        Categories (3, object): ['a', 'b', 'c']
        >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),
        ...                          ordered=True)).unique()
        ['b', 'a', 'c']
        Categories (3, object): ['a' < 'b' < 'c']
        """
        # Delegates to the parent-class implementation; only the docstring
        # is specialized here.
        return super().unique()
    # The @overload stubs below encode the inplace -> return-type contract
    # for type checkers: inplace=True returns None, inplace=False (or the
    # default) returns a Series; an unknown bool yields Series | None.
    @overload
    def drop_duplicates(self, keep=..., inplace: Literal[False] = ...) -> Series:
        ...
    @overload
    def drop_duplicates(self, keep, inplace: Literal[True]) -> None:
        ...
    @overload
    def drop_duplicates(self, *, inplace: Literal[True]) -> None:
        ...
    @overload
    def drop_duplicates(self, keep=..., inplace: bool = ...) -> Series | None:
        ...
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def drop_duplicates(self, keep="first", inplace=False) -> Series | None:
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
Returns
-------
Series or None
Series with duplicates dropped or None if ``inplace=True``.
See Also
--------
Index.drop_duplicates : Equivalent method on Index.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Series.duplicated : Related method on Series, indicating duplicate
Series values.
Examples
--------
Generate a Series with duplicated entries.
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
... name='animal')
>>> s
0 lama
1 cow
2 lama
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates()
0 lama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last')
1 cow
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries. Setting the value of 'inplace' to ``True`` performs
the operation inplace and returns ``None``.
>>> s.drop_duplicates(keep=False, inplace=True)
>>> s
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
result = super().drop_duplicates(keep=keep)
if inplace:
self._update_inplace(result)
return None
else:
return result
def duplicated(self, keep="first") -> Series:
"""
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Method to handle dropping duplicates:
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series[bool]
Series indicating whether each value has occurred in the
preceding values.
See Also
--------
Index.duplicated : Equivalent method on pandas.Index.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Series.drop_duplicates : Remove duplicate values from Series.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set on False and all others on True:
>>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep='first')
0 False
1 False
2 True
3 False
4 True
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
>>> animals.duplicated(keep='last')
0 True
1 False
2 True
3 False
4 False
dtype: bool
By setting keep on ``False``, all duplicates are True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
"""
res = self._duplicated(keep=keep)
result = self._constructor(res, index=self.index)
return result.__finalize__(self, method="duplicated")
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
axis : int, default 0
For compatibility with DataFrame.idxmin. Redundant for application
on Series.
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
"""
i = self.argmin(axis, skipna, *args, **kwargs)
if i == -1:
return np.nan
return self.index[i]
def idxmax(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
axis : int, default 0
For compatibility with DataFrame.idxmax. Redundant for application
on Series.
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Index
Label of the maximum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmax : Return indices of the maximum values
along the given axis.
DataFrame.idxmax : Return index of first occurrence of maximum
over requested axis.
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Notes
-----
This method is the Series version of ``ndarray.argmax``. This method
returns the label of the maximum, while ``ndarray.argmax`` returns
the position. To get the position, use ``series.values.argmax()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 3, 4],
... index=['A', 'B', 'C', 'D', 'E'])
>>> s
A 1.0
B NaN
C 4.0
D 3.0
E 4.0
dtype: float64
>>> s.idxmax()
'C'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmax(skipna=False)
nan
"""
i = self.argmax(axis, skipna, *args, **kwargs)
if i == -1:
return np.nan
return self.index[i]
def round(self, decimals=0, *args, **kwargs) -> Series:
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int, default 0
Number of decimal places to round to. If decimals is negative,
it specifies the number of positions to the left of the decimal point.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Series
Rounded values of the Series.
See Also
--------
numpy.around : Round values of an np.array.
DataFrame.round : Round values of a DataFrame.
Examples
--------
>>> s = pd.Series([0.1, 1.3, 2.7])
>>> s.round()
0 0.0
1 1.0
2 3.0
dtype: float64
"""
nv.validate_round(args, kwargs)
result = self._values.round(decimals)
result = self._constructor(result, index=self.index).__finalize__(
self, method="round"
)
return result
def quantile(self, q=0.5, interpolation="linear"):
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
The quantile(s) to compute, which can lie in range: 0 <= q <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile : Calculate the rolling quantile.
numpy.percentile : Returns the q-th percentile(s) of the array elements.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
validate_percentile(q)
# We dispatch to DataFrame so that core.internals only has to worry
# about 2D cases.
df = self.to_frame()
result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)
if result.ndim == 2:
result = result.iloc[:, 0]
if is_list_like(q):
result.name = self.name
return self._constructor(result, index=Float64Index(q), name=self.name)
else:
# scalar
return result.iloc[0]
def corr(self, other, method="pearson", min_periods=None) -> float:
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the correlation.
method : {'pearson', 'kendall', 'spearman'} or callable
Method used to compute correlation:
- pearson : Standard correlation coefficient
- kendall : Kendall Tau correlation coefficient
- spearman : Spearman rank correlation
- callable: Callable with input two 1d ndarrays and returning a float.
.. warning::
Note that the returned matrix from corr will have 1 along the
diagonals and will be symmetric regardless of the callable's
behavior.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Correlation with other.
See Also
--------
DataFrame.corr : Compute pairwise correlation between columns.
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> s1 = pd.Series([.2, .0, .6, .2])
>>> s2 = pd.Series([.3, .6, .0, .1])
>>> s1.corr(s2, method=histogram_intersection)
0.3
"""
this, other = self.align(other, join="inner", copy=False)
if len(this) == 0:
return np.nan
if method in ["pearson", "spearman", "kendall"] or callable(method):
return nanops.nancorr(
this.values, other.values, method=method, min_periods=min_periods
)
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
def cov(
self,
other: Series,
min_periods: int | None = None,
ddof: int | None = 1,
) -> float:
"""
Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
See Also
--------
DataFrame.cov : Compute pairwise covariance of columns.
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
"""
this, other = self.align(other, join="inner", copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(
this.values, other.values, min_periods=min_periods, ddof=ddof
)
@doc(
klass="Series",
extra_params="",
other_klass="DataFrame",
examples=dedent(
"""
Difference with previous row
>>> s = pd.Series([1, 1, 2, 3, 5, 8])
>>> s.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
dtype: float64
Difference with 3rd previous row
>>> s.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 2.0
4 4.0
5 6.0
dtype: float64
Difference with following row
>>> s.diff(periods=-1)
0 0.0
1 -1.0
2 -1.0
3 -2.0
4 -3.0
5 NaN
dtype: float64
Overflow in input dtype
>>> s = pd.Series([1, 0], dtype=np.uint8)
>>> s.diff()
0 NaN
1 255.0
dtype: float64"""
),
)
def diff(self, periods: int = 1) -> Series:
"""
First discrete difference of element.
Calculates the difference of a {klass} element compared with another
element in the {klass} (default is element in previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
{extra_params}
Returns
-------
{klass}
First differences of the Series.
See Also
--------
{klass}.pct_change: Percent change over given number of periods.
{klass}.shift: Shift index by desired number of periods with an
optional time freq.
{other_klass}.diff: First discrete difference of object.
Notes
-----
For boolean dtypes, this uses :meth:`operator.xor` rather than
:meth:`operator.sub`.
The result is calculated according to current dtype in {klass},
however dtype of the result is always float64.
Examples
--------
{examples}
"""
result = algorithms.diff(self._values, periods)
return self._constructor(result, index=self.index).__finalize__(
self, method="diff"
)
def autocorr(self, lag=1) -> float:
"""
Compute the lag-N autocorrelation.
This method computes the Pearson correlation between
the Series and its shifted self.
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
float
The Pearson correlation between self and self.shift(lag).
See Also
--------
Series.corr : Compute the correlation between two Series.
Series.shift : Shift index by desired number of periods.
DataFrame.corr : Compute pairwise correlation of columns.
DataFrame.corrwith : Compute pairwise correlation between rows or
columns of two DataFrame objects.
Notes
-----
If the Pearson correlation is not well defined return 'NaN'.
Examples
--------
>>> s = pd.Series([0.25, 0.5, 0.2, -0.05])
>>> s.autocorr() # doctest: +ELLIPSIS
0.10355...
>>> s.autocorr(lag=2) # doctest: +ELLIPSIS
-0.99999...
If the Pearson correlation is not well defined, then 'NaN' is returned.
>>> s = pd.Series([1, 0, 0, 0])
>>> s.autocorr()
nan
"""
return self.corr(self.shift(lag))
def dot(self, other):
"""
Compute the dot product between the Series and the columns of other.
This method computes the dot product between the Series and another
one, or the Series and each columns of a DataFrame, or the Series and
each columns of an array.
It can also be called using `self @ other` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series or numpy.ndarray
Return the dot product of the Series and other if other is a
Series, the Series of the dot product of Series and each rows of
other if other is a DataFrame or a numpy.ndarray between the Series
and each columns of the numpy array.
See Also
--------
DataFrame.dot: Compute the matrix product with the DataFrame.
Series.mul: Multiplication of series and other, element-wise.
Notes
-----
The Series and other has to share the same index if other is a Series
or a DataFrame.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> other = pd.Series([-1, 2, -3, 4])
>>> s.dot(other)
8
>>> s @ other
8
>>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(df)
0 24
1 14
dtype: int64
>>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(arr)
array([24, 14])
"""
if isinstance(other, (Series, ABCDataFrame)):
common = self.index.union(other.index)
if len(common) > len(self.index) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(index=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[0] != rvals.shape[0]:
raise Exception(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, ABCDataFrame):
return self._constructor(
np.dot(lvals, rvals), index=other.columns
).__finalize__(self, method="dot")
elif isinstance(other, Series):
return np.dot(lvals, rvals)
elif isinstance(rvals, np.ndarray):
return np.dot(lvals, rvals)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(np.transpose(other))
@doc(base.IndexOpsMixin.searchsorted, klass="Series")
# Signature of "searchsorted" incompatible with supertype "IndexOpsMixin"
    def searchsorted(  # type: ignore[override]
        self,
        value: NumpyValueArrayLike | ExtensionArray,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        # Thin wrapper delegating to the shared IndexOpsMixin implementation;
        # the docstring is attached by the @doc decorator above.
        return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter)
# -------------------------------------------------------------------
# Combination
def append(
self, to_append, ignore_index: bool = False, verify_integrity: bool = False
):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
Series to append with self.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise Exception on creating index with duplicates.
Returns
-------
Series
Concatenated Series.
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2]
"""
from pandas.core.reshape.concat import concat
if isinstance(to_append, (list, tuple)):
to_concat = [self]
to_concat.extend(to_append)
else:
to_concat = [self, to_append]
if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]):
msg = "to_append should be a Series or list/tuple of Series, got DataFrame"
raise TypeError(msg)
return concat(
to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity
)
def _binop(self, other: Series, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value.
Parameters
----------
other : Series
func : binary operator
fill_value : float or object
Value to substitute for NA/null values. If both Series are NA in a
location, the result will be NA regardless of the passed fill value.
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
Returns
-------
Series
"""
if not isinstance(other, Series):
raise AssertionError("Other operand must be Series")
this = self
if not self.index.equals(other.index):
this, other = self.align(other, level=level, join="outer", copy=False)
this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value)
with np.errstate(all="ignore"):
result = func(this_vals, other_vals)
name = ops.get_op_result_name(self, other)
return this._construct_result(result, name)
def _construct_result(
self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable
) -> Series | tuple[Series, Series]:
"""
Construct an appropriately-labelled Series from the result of an op.
Parameters
----------
result : ndarray or ExtensionArray
name : Label
Returns
-------
Series
In the case of __divmod__ or __rdivmod__, a 2-tuple of Series.
"""
if isinstance(result, tuple):
# produced by divmod or rdivmod
res1 = self._construct_result(result[0], name=name)
res2 = self._construct_result(result[1], name=name)
# GH#33427 assertions to keep mypy happy
assert isinstance(res1, Series)
assert isinstance(res2, Series)
return (res1, res2)
# We do not pass dtype to ensure that the Series constructor
# does inference in the case where `result` has object-dtype.
out = self._constructor(result, index=self.index)
out = out.__finalize__(self)
# Set the result's name after __finalize__ is called because __finalize__
# would set it back to self.name
out.name = name
return out
@doc(
generic._shared_docs["compare"],
"""
Returns
-------
Series or DataFrame
If axis is 0 or 'index' the result will be a Series.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
If axis is 1 or 'columns' the result will be a DataFrame.
It will have two columns namely 'self' and 'other'.
See Also
--------
DataFrame.compare : Compare with another DataFrame and show differences.
Notes
-----
Matching NaNs will not appear as a difference.
Examples
--------
>>> s1 = pd.Series(["a", "b", "c", "d", "e"])
>>> s2 = pd.Series(["a", "a", "c", "b", "e"])
Align the differences on columns
>>> s1.compare(s2)
self other
1 b a
3 d b
Stack the differences on indices
>>> s1.compare(s2, align_axis=0)
1 self b
other a
3 self d
other b
dtype: object
Keep all original rows
>>> s1.compare(s2, keep_shape=True)
self other
0 NaN NaN
1 b a
2 NaN NaN
3 d b
4 NaN NaN
Keep all original rows and also all original values
>>> s1.compare(s2, keep_shape=True, keep_equal=True)
self other
0 a a
1 b a
2 c c
3 d b
4 e e
""",
klass=_shared_doc_kwargs["klass"],
)
    def compare(
        self,
        other: Series,
        align_axis: Axis = 1,
        keep_shape: bool = False,
        keep_equal: bool = False,
    ) -> DataFrame | Series:
        # Pure delegation: NDFrame.compare implements the alignment and
        # difference-masking logic; the docstring is injected by the @doc
        # decorator above.
        return super().compare(
            other=other,
            align_axis=align_axis,
            keep_shape=keep_shape,
            keep_equal=keep_equal,
        )
def combine(self, other, func, fill_value=None) -> Series:
"""
Combine the Series with a Series or scalar according to `func`.
Combine the Series and `other` using `func` to perform elementwise
selection for combined Series.
`fill_value` is assumed when value is missing at some index
from one of the two objects being combined.
Parameters
----------
other : Series or scalar
The value(s) to be combined with the `Series`.
func : function
Function that takes two scalars as inputs and returns an element.
fill_value : scalar, optional
The value to assume when an index is missing from
one Series or the other. The default specifies to use the
appropriate NaN value for the underlying dtype of the Series.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series' values first.
Examples
--------
Consider 2 Datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
>>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
>>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
>>> s2
falcon 345.0
eagle 200.0
duck 30.0
dtype: float64
Now, to combine the two datasets and view the highest speeds
of the birds across the two datasets
>>> s1.combine(s2, max)
duck NaN
eagle 200.0
falcon 345.0
dtype: float64
In the previous example, the resulting value for duck is missing,
because the maximum of a NaN and a float is a NaN.
So, in the example, we set ``fill_value=0``,
so the maximum value returned will be the value from some dataset.
>>> s1.combine(s2, max, fill_value=0)
duck 30.0
eagle 200.0
falcon 345.0
dtype: float64
"""
if fill_value is None:
fill_value = na_value_for_dtype(self.dtype, compat=False)
if isinstance(other, Series):
# If other is a Series, result is based on union of Series,
# so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
new_values = np.empty(len(new_index), dtype=object)
for i, idx in enumerate(new_index):
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all="ignore"):
new_values[i] = func(lv, rv)
else:
# Assume that other is a scalar, so apply the function for
# each element in the Series
new_index = self.index
new_values = np.empty(len(new_index), dtype=object)
with np.errstate(all="ignore"):
new_values[:] = [func(lv, other) for lv in self._values]
new_name = self.name
# try_float=False is to match agg_series
npvalues = lib.maybe_convert_objects(new_values, try_float=False)
res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False)
return self._constructor(res_values, index=new_index, name=new_name)
def combine_first(self, other) -> Series:
"""
Update null elements with value in the same location in 'other'.
Combine two Series objects by filling null values in one Series with
non-null values from the other Series. Result index will be the union
of the two indexes.
Parameters
----------
other : Series
The value(s) to be used for filling null values.
Returns
-------
Series
The result of combining the provided Series with the other object.
See Also
--------
Series.combine : Perform element-wise operation on two Series
using a given function.
Examples
--------
>>> s1 = pd.Series([1, np.nan])
>>> s2 = pd.Series([3, 4, 5])
>>> s1.combine_first(s2)
0 1.0
1 4.0
2 5.0
dtype: float64
Null values still persist if the location of that null value
does not exist in `other`
>>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0})
>>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0})
>>> s1.combine_first(s2)
duck 30.0
eagle 160.0
falcon NaN
dtype: float64
"""
new_index = self.index.union(other.index)
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
if this.dtype.kind == "M" and other.dtype.kind != "M":
other = to_datetime(other)
return this.where(notna(this), other)
def update(self, other) -> None:
"""
Modify Series in place using values from passed Series.
Uses non-NA values from passed Series to make updates. Aligns
on index.
Parameters
----------
other : Series, or object coercible into Series
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = pd.Series(['a', 'b', 'c'])
>>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, np.nan, 6]))
>>> s
0 4
1 2
2 6
dtype: int64
``other`` can also be a non-Series object type
that is coercible into a Series
>>> s = pd.Series([1, 2, 3])
>>> s.update([4, np.nan, 6])
>>> s
0 4
1 2
2 6
dtype: int64
>>> s = pd.Series([1, 2, 3])
>>> s.update({1: 9})
>>> s
0 1
1 9
2 3
dtype: int64
"""
if not isinstance(other, Series):
other = Series(other)
other = other.reindex_like(self)
mask = notna(other)
self._mgr = self._mgr.putmask(mask=mask, new=other)
self._maybe_update_cacher()
# ----------------------------------------------------------------------
# Reindexing, sorting
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def sort_values(
self,
axis=0,
ascending: bool | int | Sequence[bool | int] = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool = False,
key: ValueKeyFunc = None,
):
"""
Sort by the values.
Sort a Series in ascending or descending order by some
criterion.
Parameters
----------
axis : {0 or 'index'}, default 0
Axis to direct sorting. The value 'index' is accepted for
compatibility with DataFrame.sort_values.
ascending : bool or list of bools, default True
If True, sort values in ascending order, otherwise descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable algorithms.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the series values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect a
``Series`` and return an array-like.
.. versionadded:: 1.1.0
Returns
-------
Series or None
Series ordered by values or None if ``inplace=True``.
See Also
--------
Series.sort_index : Sort by the Series indices.
DataFrame.sort_values : Sort DataFrame by the values along either axis.
DataFrame.sort_index : Sort DataFrame by indices.
Examples
--------
>>> s = pd.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
Sort using a key function. Your `key` function will be
given the ``Series`` of values and should return an array-like.
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e'])
>>> s.sort_values()
1 B
3 D
0 a
2 c
4 e
dtype: object
>>> s.sort_values(key=lambda x: x.str.lower())
0 a
1 B
2 c
3 D
4 e
dtype: object
NumPy ufuncs work well here. For example, we can
sort by the ``sin`` of the value
>>> s = pd.Series([-4, -2, 0, 2, 4])
>>> s.sort_values(key=np.sin)
1 -2
4 4
2 0
0 -4
3 2
dtype: int64
More complicated user-defined functions can be used,
as long as they expect a Series and return an array-like
>>> s.sort_values(key=lambda x: (np.tan(x.cumsum())))
0 -4
3 2
4 4
1 -2
2 0
dtype: int64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# Validate the axis parameter
self._get_axis_number(axis)
# GH 5856/5853
if inplace and self._is_cached:
raise ValueError(
"This Series is a view of some other array, to "
"sort in-place you must create a copy"
)
if is_list_like(ascending):
ascending = cast(Sequence[Union[bool, int]], ascending)
if len(ascending) != 1:
raise ValueError(
f"Length of ascending ({len(ascending)}) must be 1 for Series"
)
ascending = ascending[0]
ascending = validate_ascending(ascending)
if na_position not in ["first", "last"]:
raise ValueError(f"invalid na_position: {na_position}")
# GH 35922. Make sorting stable by leveraging nargsort
values_to_sort = ensure_key_mapped(self, key)._values if key else self._values
sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position)
result = self._constructor(
self._values[sorted_index], index=self.index[sorted_index]
)
if ignore_index:
result.index = default_index(len(sorted_index))
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def sort_index(
self,
axis=0,
level=None,
ascending: bool | int | Sequence[bool | int] = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
):
"""
Sort Series by index labels.
Returns a new Series sorted by label if `inplace` argument is
``False``, otherwise updates the original series and returns None.
Parameters
----------
axis : int, default 0
Axis to direct sorting. This can only be 0 for Series.
level : int, optional
If not None, sort on values in specified index level(s).
ascending : bool or list-like of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable algorithms. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape.
.. versionadded:: 1.1.0
Returns
-------
Series or None
The original Series sorted by the labels or None if ``inplace=True``.
See Also
--------
DataFrame.sort_index: Sort DataFrame by the index.
DataFrame.sort_values: Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])
>>> s.sort_index()
1 c
2 b
3 a
4 d
dtype: object
Sort Descending
>>> s.sort_index(ascending=False)
4 d
3 a
2 b
1 c
dtype: object
Sort Inplace
>>> s.sort_index(inplace=True)
>>> s
1 c
2 b
3 a
4 d
dtype: object
By default NaNs are put at the end, but use `na_position` to place
them at the beginning
>>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan])
>>> s.sort_index(na_position='first')
NaN d
1.0 c
2.0 b
3.0 a
dtype: object
Specify index level to sort
>>> arrays = [np.array(['qux', 'qux', 'foo', 'foo',
... 'baz', 'baz', 'bar', 'bar']),
... np.array(['two', 'one', 'two', 'one',
... 'two', 'one', 'two', 'one'])]
>>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays)
>>> s.sort_index(level=1)
bar one 8
baz one 6
foo one 4
qux one 2
bar two 7
baz two 5
foo two 3
qux two 1
dtype: int64
Does not sort by remaining levels when sorting by levels
>>> s.sort_index(level=1, sort_remaining=False)
qux one 2
foo one 4
baz one 6
bar one 8
qux two 1
foo two 3
baz two 5
bar two 7
dtype: int64
Apply a key function before sorting
>>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd'])
>>> s.sort_index(key=lambda x : x.str.lower())
A 1
b 2
C 3
d 4
dtype: int64
"""
return super().sort_index(
axis,
level,
ascending,
inplace,
kind,
na_position,
sort_remaining,
ignore_index,
key,
)
def argsort(self, axis=0, kind="quicksort", order=None) -> Series:
"""
Return the integer indices that would sort the Series values.
Override ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values.
Parameters
----------
axis : {0 or "index"}
Has no effect but is accepted for compatibility with numpy.
kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable algorithms.
order : None
Has no effect but is accepted for compatibility with numpy.
Returns
-------
Series[np.intp]
Positions of values within the sort order with -1 indicating
nan values.
See Also
--------
numpy.ndarray.argsort : Returns the indices that would sort this array.
"""
values = self._values
mask = isna(values)
if mask.any():
result = np.full(len(self), -1, dtype=np.intp)
notmask = ~mask
result[notmask] = np.argsort(values[notmask], kind=kind)
else:
result = np.argsort(values, kind=kind)
res = self._constructor(result, index=self.index, name=self.name, dtype=np.intp)
return res.__finalize__(self, method="argsort")
def nlargest(self, n=5, keep="first") -> Series:
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
def nsmallest(self, n: int = 5, keep: str = "first") -> Series:
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Brunei": 434000, "Malta": 434000,
... "Maldives": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Brunei 434000
Malta 434000
Maldives 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
Iceland 337000
dtype: int64
The `n` smallest elements where ``n=3``. Default `keep` value is
'first' so Nauru and Tuvalu will be kept.
>>> s.nsmallest(3)
Montserrat 5200
Nauru 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` and keeping the last
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
>>> s.nsmallest(3, keep='last')
Montserrat 5200
Anguilla 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has four elements due to the three duplicates.
>>> s.nsmallest(3, keep='all')
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
@doc(
klass=_shared_doc_kwargs["klass"],
extra_params=dedent(
"""copy : bool, default True
Whether to copy underlying data."""
),
examples=dedent(
"""Examples
--------
>>> s = pd.Series(
... ["A", "B", "A", "C"],
... index=[
... ["Final exam", "Final exam", "Coursework", "Coursework"],
... ["History", "Geography", "History", "Geography"],
... ["January", "February", "March", "April"],
... ],
... )
>>> s
Final exam History January A
Geography February B
Coursework History March A
Geography April C
dtype: object
In the following example, we will swap the levels of the indices.
Here, we will swap the levels column-wise, but levels can be swapped row-wise
in a similar manner. Note that column-wise is the default behaviour.
By not supplying any arguments for i and j, we swap the last and second to
last indices.
>>> s.swaplevel()
Final exam January History A
February Geography B
Coursework March History A
April Geography C
dtype: object
By supplying one argument, we can choose which index to swap the last
index with. We can for example swap the first index with the last one as
follows.
>>> s.swaplevel(0)
January History Final exam A
February Geography Final exam B
March History Coursework A
April Geography Coursework C
dtype: object
We can also define explicitly which indices we want to swap by supplying values
for both i and j. Here, we for example swap the first and second indices.
>>> s.swaplevel(0, 1)
History Final exam January A
Geography Final exam February B
History Coursework March A
Geography Coursework April C
dtype: object"""
),
)
    def swaplevel(self, i=-2, j=-1, copy=True) -> Series:
        """
        Swap levels i and j in a :class:`MultiIndex`.
        Default is to swap the two innermost levels of the index.
        Parameters
        ----------
        i, j : int or str
            Levels of the indices to be swapped. Can pass level name as string.
        {extra_params}
        Returns
        -------
        {klass}
            {klass} with levels swapped in MultiIndex.
        {examples}
        """
        # Only meaningful for a MultiIndex; plain indexes have no levels to
        # swap (assertion also narrows the type for mypy).
        assert isinstance(self.index, MultiIndex)
        new_index = self.index.swaplevel(i, j)
        return self._constructor(self._values, index=new_index, copy=copy).__finalize__(
            self, method="swaplevel"
        )
def reorder_levels(self, order) -> Series:
"""
Rearrange index levels using input order.
May not drop or duplicate levels.
Parameters
----------
order : list of int representing new level order
Reference level by number or key.
Returns
-------
type of caller (new object)
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
raise Exception("Can only reorder levels on a hierarchical axis.")
result = self.copy()
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
return result
def explode(self, ignore_index: bool = False) -> Series:
"""
Transform each element of a list-like to a row.
.. versionadded:: 0.25.0
Parameters
----------
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.1.0
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of elements in
the output will be non-deterministic when exploding sets.
Examples
--------
>>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])
>>> s
0 [1, 2, 3]
1 foo
2 []
3 [3, 4]
dtype: object
>>> s.explode()
0 1
0 2
0 3
1 foo
2 NaN
3 3
3 4
dtype: object
"""
if not len(self) or not is_object_dtype(self):
result = self.copy()
return result.reset_index(drop=True) if ignore_index else result
values, counts = reshape.explode(np.asarray(self._values))
if ignore_index:
index = default_index(len(values))
else:
index = self.index.repeat(counts)
return self._constructor(values, index=index, name=self.name)
def unstack(self, level=-1, fill_value=None) -> DataFrame:
"""
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
Parameters
----------
level : int, str, or list of these, default last level
Level(s) to unstack, can pass level name.
fill_value : scalar value, default None
Value to use when replacing NaN values.
Returns
-------
DataFrame
Unstacked Series.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4],
... index=pd.MultiIndex.from_product([['one', 'two'],
... ['a', 'b']]))
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
# ----------------------------------------------------------------------
# function application
def map(self, arg, na_action=None) -> Series:
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict`` or
a :class:`Series`.
Parameters
----------
arg : function, collections.abc.Mapping subclass or Series
Mapping correspondence.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to the
mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.apply : Apply a function row-/column-wise.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``NaN``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``NaN``.
Examples
--------
>>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
>>> s
0 cat
1 dog
2 NaN
3 rabbit
dtype: object
``map`` accepts a ``dict`` or a ``Series``. Values that are not found
in the ``dict`` are converted to ``NaN``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 NaN
3 NaN
dtype: object
It also accepts a function:
>>> s.map('I am a {}'.format)
0 I am a cat
1 I am a dog
2 I am a nan
3 I am a rabbit
dtype: object
To avoid applying the function to missing values (and keep them as
``NaN``) ``na_action='ignore'`` can be used:
>>> s.map('I am a {}'.format, na_action='ignore')
0 I am a cat
1 I am a dog
2 NaN
3 I am a rabbit
dtype: object
"""
new_values = self._map_values(arg, na_action=na_action)
return self._constructor(new_values, index=self.index).__finalize__(
self, method="map"
)
    def _gotitem(self, key, ndim, subset=None) -> Series:
        """
        Sub-classes to define. Return a sliced object.

        Parameters
        ----------
        key : string / list of selections
        ndim : {1, 2}
            Requested ndim of result.
        subset : object, default None
            Subset to act on.
        """
        # A Series has no columns to select from, so the aggregation
        # machinery always operates on the Series itself.
        return self
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.apply : Invoke function on a Series.
Series.transform : Transform function producing a Series with like indexes.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.agg('min')
1
>>> s.agg(['min', 'max'])
min 1
max 4
dtype: int64
"""
)
@doc(
generic._shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis=0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)
# if func is None, will switch to user-provided "named aggregation" kwargs
if func is None:
func = dict(kwargs.items())
op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs)
result = op.agg()
return result
agg = aggregate
@doc(
_shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
def transform(
self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
) -> DataFrame | Series:
# Validate axis argument
self._get_axis_number(axis)
result = SeriesApply(
self, func=func, convert_dtype=True, args=args, kwargs=kwargs
).transform()
return result
def apply(
self,
func: AggFuncType,
convert_dtype: bool = True,
args: tuple[Any, ...] = (),
**kwargs,
) -> DataFrame | Series:
"""
Invoke function on values of Series.
Can be ufunc (a NumPy function that applies to the entire Series)
or a Python function that only works on single values.
Parameters
----------
func : function
Python function or NumPy ufunc to apply.
convert_dtype : bool, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object. Note that the dtype is always
preserved for some extension array dtypes, such as Categorical.
args : tuple
Positional arguments passed to func after the series value.
**kwargs
Additional keyword arguments passed to func.
Returns
-------
Series or DataFrame
If func returns a Series object the result will be a DataFrame.
See Also
--------
Series.map: For element-wise operations.
Series.agg: Only perform aggregating type operations.
Series.transform: Only perform transforming type operations.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
Create a series with typical summer temperatures for each city.
>>> s = pd.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> s.apply(lambda x: x ** 2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> s.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
return SeriesApply(self, func, convert_dtype, args, kwargs).apply()
def _reduce(
self,
op,
name: str,
*,
axis=0,
skipna=True,
numeric_only=None,
filter_type=None,
**kwds,
):
"""
Perform a reduction operation.
If we have an ndarray as a value, then simply perform the operation,
otherwise delegate to the object.
"""
delegate = self._values
if axis is not None:
self._get_axis_number(axis)
if isinstance(delegate, ExtensionArray):
# dispatch to ExtensionArray interface
return delegate._reduce(name, skipna=skipna, **kwds)
else:
# dispatch to numpy arrays
if numeric_only:
kwd_name = "numeric_only"
if name in ["any", "all"]:
kwd_name = "bool_only"
raise NotImplementedError(
f"Series.{name} does not implement {kwd_name}."
)
with np.errstate(all="ignore"):
return op(delegate, skipna=skipna, **kwds)
def _reindex_indexer(
    self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool
) -> Series:
    """
    Reindex using a precomputed positional ``indexer``.

    Positions of -1 in ``indexer`` are filled with the default fill
    value (NaN).  When ``indexer`` is None the index is unchanged and
    the result is either ``self`` or a copy of it.
    """
    # Note: new_index is None iff indexer is None
    # if not None, indexer is np.intp
    if indexer is None:
        if copy:
            return self.copy()
        return self

    new_values = algorithms.take_nd(
        self._values, indexer, allow_fill=True, fill_value=None
    )
    return self._constructor(new_values, index=new_index)
def _needs_reindex_multi(self, axes, method, level) -> bool:
"""
Check if we do need a multi reindex; this is for compat with
higher dims.
"""
return False
# error: Cannot determine type of 'align'
@doc(
    NDFrame.align,  # type: ignore[has-type]
    klass=_shared_doc_kwargs["klass"],
    axes_single_arg=_shared_doc_kwargs["axes_single_arg"],
)
def align(
    self,
    other,
    join="outer",
    axis=None,
    level=None,
    copy=True,
    fill_value=None,
    method=None,
    limit=None,
    fill_axis=0,
    broadcast_axis=None,
):
    # Thin delegator: the shared implementation lives on NDFrame; the
    # docstring is injected by the @doc decorator above.
    return super().align(
        other,
        join=join,
        axis=axis,
        level=level,
        copy=copy,
        fill_value=fill_value,
        method=method,
        limit=limit,
        fill_axis=fill_axis,
        broadcast_axis=broadcast_axis,
    )
def rename(
    self,
    index=None,
    *,
    axis=None,
    copy=True,
    inplace=False,
    level=None,
    errors="ignore",
):
    """
    Alter Series index labels or name.

    Function / dict values must be unique (1-to-1). Labels not contained in
    a dict / Series will be left as-is. Extra labels listed don't throw an
    error.

    Alternatively, change ``Series.name`` with a scalar value.

    See the :ref:`user guide <basics.rename>` for more.

    Parameters
    ----------
    index : scalar, hashable sequence, dict-like or function, optional
        Functions or dict-like are transformations to apply to
        the index.
        Scalar or hashable sequence-like will alter the ``Series.name``
        attribute.
    axis : {0 or "index"}
        Unused. Accepted for compatibility with DataFrame method only.
    copy : bool, default True
        Whether to copy underlying data.
    inplace : bool, default False
        Whether to return a new Series. If True the value of copy is ignored.
    level : int or level name, default None
        In case of MultiIndex, only rename labels in the specified level.
    errors : {'ignore', 'raise'}, default 'ignore'
        If 'raise', raise `KeyError` when a `dict-like mapper` or `index`
        contains labels that are not present in the index being transformed.
        If 'ignore', existing keys will be renamed and extra keys will be
        ignored.

    Returns
    -------
    Series or None
        Series with index labels or name altered or None if ``inplace=True``.

    See Also
    --------
    DataFrame.rename : Corresponding DataFrame method.
    Series.rename_axis : Set the name of the axis.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3])
    >>> s
    0    1
    1    2
    2    3
    dtype: int64
    >>> s.rename("my_name")  # scalar, changes Series.name
    0    1
    1    2
    2    3
    Name: my_name, dtype: int64
    >>> s.rename(lambda x: x ** 2)  # function, changes labels
    0    1
    1    2
    4    3
    dtype: int64
    >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
    0    1
    3    2
    5    3
    dtype: int64
    """
    if axis is not None:
        # Make sure we raise if an invalid 'axis' is passed.
        axis = self._get_axis_number(axis)

    if callable(index) or is_dict_like(index):
        # A mapper transforms the index labels.
        return super().rename(
            index, copy=copy, inplace=inplace, level=level, errors=errors
        )
    else:
        # A scalar / sequence sets the Series name instead.
        return self._set_name(index, inplace=inplace)
# set_axis overloads: narrow the return type based on the literal value
# of ``inplace`` (None when inplace=True, Series otherwise).
@overload
def set_axis(
    self, labels, axis: Axis = ..., inplace: Literal[False] = ...
) -> Series:
    ...

@overload
def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None:
    ...

@overload
def set_axis(self, labels, *, inplace: Literal[True]) -> None:
    ...

@overload
def set_axis(self, labels, axis: Axis = ..., inplace: bool = ...) -> Series | None:
    ...

@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
@Appender(
    """
    Examples
    --------
    >>> s = pd.Series([1, 2, 3])
    >>> s
    0    1
    1    2
    2    3
    dtype: int64

    >>> s.set_axis(['a', 'b', 'c'], axis=0)
    a    1
    b    2
    c    3
    dtype: int64
    """
)
@Substitution(
    **_shared_doc_kwargs,
    extended_summary_sub="",
    axis_description_sub="",
    see_also_sub="",
)
@Appender(generic.NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
    # Delegates to the shared NDFrame implementation; the docstring is
    # assembled from the decorators above.
    return super().set_axis(labels, axis=axis, inplace=inplace)
# error: Cannot determine type of 'reindex'
@doc(
    NDFrame.reindex,  # type: ignore[has-type]
    klass=_shared_doc_kwargs["klass"],
    axes=_shared_doc_kwargs["axes"],
    optional_labels=_shared_doc_kwargs["optional_labels"],
    optional_axis=_shared_doc_kwargs["optional_axis"],
)
def reindex(self, index=None, **kwargs):
    # Series accepts ``index`` positionally; everything else is forwarded
    # to the shared NDFrame.reindex implementation.
    return super().reindex(index=index, **kwargs)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
def drop(
    self,
    labels=None,
    axis=0,
    index=None,
    columns=None,
    level=None,
    inplace=False,
    errors="raise",
) -> Series:
    """
    Return Series with specified index labels removed.

    Remove elements of a Series based on specifying the index labels.
    When using a multi-index, labels on different levels can be removed
    by specifying the level.

    Parameters
    ----------
    labels : single label or list-like
        Index labels to drop.
    axis : 0, default 0
        Redundant for application on Series.
    index : single label or list-like
        Redundant for application on Series, but 'index' can be used instead
        of 'labels'.
    columns : single label or list-like
        No change is made to the Series; use 'index' or 'labels' instead.
    level : int or level name, optional
        For MultiIndex, level for which the labels will be removed.
    inplace : bool, default False
        If True, do operation inplace and return None.
    errors : {'ignore', 'raise'}, default 'raise'
        If 'ignore', suppress error and only existing labels are dropped.

    Returns
    -------
    Series or None
        Series with specified index labels removed or None if ``inplace=True``.

    Raises
    ------
    KeyError
        If none of the labels are found in the index.

    See Also
    --------
    Series.reindex : Return only specified index labels of Series.
    Series.dropna : Return series without null values.
    Series.drop_duplicates : Return Series with duplicate values removed.
    DataFrame.drop : Drop specified labels from rows or columns.

    Examples
    --------
    >>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])
    >>> s
    A  0
    B  1
    C  2
    dtype: int64

    Drop labels B and C

    >>> s.drop(labels=['B', 'C'])
    A  0
    dtype: int64

    Drop 2nd level label in MultiIndex Series

    >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
    ...                              ['speed', 'weight', 'length']],
    ...                      codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                             [0, 1, 2, 0, 1, 2, 0, 1, 2]])
    >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
    ...               index=midx)
    >>> s
    lama    speed      45.0
            weight    200.0
            length      1.2
    cow     speed      30.0
            weight    250.0
            length      1.5
    falcon  speed     320.0
            weight      1.0
            length      0.3
    dtype: float64

    >>> s.drop(labels='weight', level=1)
    lama    speed      45.0
            length      1.2
    cow     speed      30.0
            length      1.5
    falcon  speed     320.0
            length      0.3
    dtype: float64
    """
    # All of the real work (including validation that 'labels' and
    # 'index' are not both given) happens in NDFrame.drop.
    return super().drop(
        labels=labels,
        axis=axis,
        index=index,
        columns=columns,
        level=level,
        inplace=inplace,
        errors=errors,
    )
# fillna overloads: enumerate the keyword/positional combinations so that
# type checkers can narrow the return type on the literal value of
# ``inplace`` (None when inplace=True, Series otherwise).
@overload
def fillna(
    self,
    value=...,
    method: FillnaOptions | None = ...,
    axis: Axis | None = ...,
    inplace: Literal[False] = ...,
    limit=...,
    downcast=...,
) -> Series:
    ...

@overload
def fillna(
    self,
    value,
    method: FillnaOptions | None,
    axis: Axis | None,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    *,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    value,
    *,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    *,
    method: FillnaOptions | None,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    *,
    axis: Axis | None,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    *,
    method: FillnaOptions | None,
    axis: Axis | None,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    value,
    *,
    axis: Axis | None,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    value,
    method: FillnaOptions | None,
    *,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    value=...,
    method: FillnaOptions | None = ...,
    axis: Axis | None = ...,
    inplace: bool = ...,
    limit=...,
    downcast=...,
) -> Series | None:
    ...

# error: Cannot determine type of 'fillna'
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "value"])
@doc(NDFrame.fillna, **_shared_doc_kwargs)  # type: ignore[has-type]
def fillna(
    self,
    value: object | ArrayLike | None = None,
    method: FillnaOptions | None = None,
    axis=None,
    inplace=False,
    limit=None,
    downcast=None,
) -> Series | None:
    # Runtime implementation: delegate to the shared NDFrame.fillna.
    return super().fillna(
        value=value,
        method=method,
        axis=axis,
        inplace=inplace,
        limit=limit,
        downcast=downcast,
    )
def pop(self, item: Hashable) -> Any:
    """
    Return item and drops from series. Raise KeyError if not found.

    Parameters
    ----------
    item : label
        Index of the element that needs to be removed.

    Returns
    -------
    Value that is popped from series.

    Examples
    --------
    >>> ser = pd.Series([1,2,3])

    >>> ser.pop(0)
    1

    >>> ser
    1    2
    2    3
    dtype: int64
    """
    # Mutates self in place (drops the label) and returns the value.
    return super().pop(item=item)
# error: Cannot determine type of 'replace'
@doc(
    NDFrame.replace,  # type: ignore[has-type]
    klass=_shared_doc_kwargs["klass"],
    inplace=_shared_doc_kwargs["inplace"],
    replace_iloc=_shared_doc_kwargs["replace_iloc"],
)
def replace(
    self,
    to_replace=None,
    value=None,
    inplace=False,
    limit=None,
    regex=False,
    method="pad",
):
    # Thin delegator; documentation is injected by the @doc decorator.
    return super().replace(
        to_replace=to_replace,
        value=value,
        inplace=inplace,
        limit=limit,
        regex=regex,
        method=method,
    )
def _replace_single(self, to_replace, method: str, inplace: bool, limit):
    """
    Replaces values in a Series using the fill method specified when no
    replacement value is given in the replace method
    """
    result = self if inplace else self.copy()

    # Boolean mask of positions whose value matches ``to_replace``.
    values = result._values
    mask = missing.mask_missing(values, to_replace)

    if isinstance(values, ExtensionArray):
        # dispatch to the EA's _pad_mask_inplace method
        values._fill_mask_inplace(method, limit, mask)
    else:
        fill_f = missing.get_fill_func(method)
        values, _ = fill_f(values, limit=limit, mask=mask)

    if inplace:
        # In-place convention: mutators return None.
        return
    return result
# error: Cannot determine type of 'shift'
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])  # type: ignore[has-type]
def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> Series:
    # Thin delegator; documentation is injected by the @doc decorator.
    return super().shift(
        periods=periods, freq=freq, axis=axis, fill_value=fill_value
    )
def memory_usage(self, index: bool = True, deep: bool = False) -> int:
    """
    Return the memory usage of the Series.

    The memory usage can optionally include the contribution of
    the index and of elements of `object` dtype.

    Parameters
    ----------
    index : bool, default True
        Specifies whether to include the memory usage of the Series index.
    deep : bool, default False
        If True, introspect the data deeply by interrogating
        `object` dtypes for system-level memory consumption, and include
        it in the returned value.

    Returns
    -------
    int
        Bytes of memory consumed.

    See Also
    --------
    numpy.ndarray.nbytes : Total bytes consumed by the elements of the
        array.
    DataFrame.memory_usage : Bytes consumed by a DataFrame.

    Examples
    --------
    >>> s = pd.Series(range(3))
    >>> s.memory_usage()
    152

    Not including the index gives the size of the rest of the data, which
    is necessarily smaller:

    >>> s.memory_usage(index=False)
    24

    The memory footprint of `object` values is ignored by default:

    >>> s = pd.Series(["a", "b"])
    >>> s.values
    array(['a', 'b'], dtype=object)
    >>> s.memory_usage()
    144
    >>> s.memory_usage(deep=True)
    244
    """
    # Values first, then (optionally) the index's own footprint.
    v = self._memory_usage(deep=deep)
    if index:
        v += self.index.memory_usage(deep=deep)
    return v
def isin(self, values) -> Series:
    """
    Whether elements in Series are contained in `values`.

    Return a boolean Series showing whether each element in the Series
    matches an element in the passed sequence of `values` exactly.

    Parameters
    ----------
    values : set or list-like
        The sequence of values to test. Passing in a single string will
        raise a ``TypeError``. Instead, turn a single string into a
        list of one element.

    Returns
    -------
    Series
        Series of booleans indicating if each element is in values.

    Raises
    ------
    TypeError
      * If `values` is a string

    See Also
    --------
    DataFrame.isin : Equivalent method on DataFrame.

    Examples
    --------
    >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
    ...                'hippo'], name='animal')
    >>> s.isin(['cow', 'lama'])
    0     True
    1     True
    2     True
    3    False
    4     True
    5    False
    Name: animal, dtype: bool

    To invert the boolean values, use the ``~`` operator:

    >>> ~s.isin(['cow', 'lama'])
    0    False
    1    False
    2    False
    3     True
    4    False
    5     True
    Name: animal, dtype: bool

    Passing a single string as ``s.isin('lama')`` will raise an error. Use
    a list of one element instead:

    >>> s.isin(['lama'])
    0     True
    1    False
    2     True
    3    False
    4     True
    5    False
    Name: animal, dtype: bool

    Strings and integers are distinct and are therefore not comparable:

    >>> pd.Series([1]).isin(['1'])
    0    False
    dtype: bool
    >>> pd.Series([1.1]).isin(['1.1'])
    0    False
    dtype: bool
    """
    # The membership test (including the string TypeError) is implemented
    # in algorithms.isin; we only re-wrap the boolean ndarray.
    result = algorithms.isin(self._values, values)
    return self._constructor(result, index=self.index).__finalize__(
        self, method="isin"
    )
def between(self, left, right, inclusive="both") -> Series:
    """
    Return boolean Series equivalent to left <= series <= right.

    This function returns a boolean vector containing `True` wherever the
    corresponding Series element is between the boundary values `left` and
    `right`. NA values are treated as `False`.

    Parameters
    ----------
    left : scalar or list-like
        Left boundary.
    right : scalar or list-like
        Right boundary.
    inclusive : {"both", "neither", "left", "right"}
        Include boundaries. Whether to set each bound as closed or open.

        .. versionchanged:: 1.3.0

    Returns
    -------
    Series
        Series representing whether each element is between left and
        right (inclusive).

    Raises
    ------
    ValueError
        If ``inclusive`` is not one of "both", "neither", "left", "right".

    See Also
    --------
    Series.gt : Greater than of series and other.
    Series.lt : Less than of series and other.

    Notes
    -----
    This function is equivalent to ``(left <= ser) & (ser <= right)``

    Examples
    --------
    >>> s = pd.Series([2, 0, 4, 8, np.nan])

    Boundary values are included by default:

    >>> s.between(1, 4)
    0     True
    1    False
    2     True
    3    False
    4    False
    dtype: bool

    With `inclusive` set to ``"neither"`` boundary values are excluded:

    >>> s.between(1, 4, inclusive="neither")
    0     True
    1    False
    2    False
    3    False
    4    False
    dtype: bool

    `left` and `right` can be any scalar value:

    >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
    >>> s.between('Anna', 'Daniel')
    0    False
    1     True
    2     True
    3    False
    dtype: bool
    """
    if inclusive is True or inclusive is False:
        # Legacy boolean API: True meant closed on both sides, False open.
        warnings.warn(
            "Boolean inputs to the `inclusive` argument are deprecated in "
            "favour of `both` or `neither`.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        inclusive = "both" if inclusive else "neither"

    if inclusive == "both":
        lmask = self >= left
        rmask = self <= right
    elif inclusive == "left":
        lmask = self >= left
        rmask = self < right
    elif inclusive == "right":
        lmask = self > left
        rmask = self <= right
    elif inclusive == "neither":
        lmask = self > left
        rmask = self < right
    else:
        # BUG FIX: the original message concatenated "'both'," directly
        # onto "'left'" with no separating space, producing
        # "... string of 'both','left', ...".
        raise ValueError(
            "Inclusive has to be either string of 'both', "
            "'left', 'right', or 'neither'."
        )

    return lmask & rmask
# ----------------------------------------------------------------------
# Convert to types that support pd.NA

def _convert_dtypes(
    self,
    infer_objects: bool = True,
    convert_string: bool = True,
    convert_integer: bool = True,
    convert_boolean: bool = True,
    convert_floating: bool = True,
) -> Series:
    """
    Backend for ``convert_dtypes``: cast to the best dtype that supports
    ``pd.NA`` according to the ``convert_*`` switches.
    """
    input_series = self
    if infer_objects:
        input_series = input_series.infer_objects()
        if is_object_dtype(input_series):
            # infer_objects may return self; copy so we never mutate it.
            input_series = input_series.copy()

    if convert_string or convert_integer or convert_boolean or convert_floating:
        inferred_dtype = convert_dtypes(
            input_series._values,
            convert_string,
            convert_integer,
            convert_boolean,
            convert_floating,
        )
        result = input_series.astype(inferred_dtype)
    else:
        # Nothing to convert; still return a copy, never self.
        result = input_series.copy()
    return result
# NA-detection methods: isna/notna are the canonical names; isnull/notnull
# are exact aliases kept for backwards compatibility.  All docstrings are
# injected by the @doc decorators.
# error: Cannot determine type of 'isna'
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])  # type: ignore[has-type]
def isna(self) -> Series:
    return generic.NDFrame.isna(self)

# error: Cannot determine type of 'isna'
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])  # type: ignore[has-type]
def isnull(self) -> Series:
    return super().isnull()

# error: Cannot determine type of 'notna'
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])  # type: ignore[has-type]
def notna(self) -> Series:
    return super().notna()

# error: Cannot determine type of 'notna'
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])  # type: ignore[has-type]
def notnull(self) -> Series:
    return super().notnull()
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def dropna(self, axis=0, inplace=False, how=None):
    """
    Return a new Series with missing values removed.

    See the :ref:`User Guide <missing_data>` for more on which values are
    considered missing, and how to work with missing data.

    Parameters
    ----------
    axis : {0 or 'index'}, default 0
        There is only one axis to drop values from.
    inplace : bool, default False
        If True, do operation inplace and return None.
    how : str, optional
        Not in use. Kept for compatibility.

    Returns
    -------
    Series or None
        Series with NA entries dropped from it or None if ``inplace=True``.

    See Also
    --------
    Series.isna: Indicate missing values.
    Series.notna : Indicate existing (non-missing) values.
    Series.fillna : Replace missing values.
    DataFrame.dropna : Drop rows or columns which contain NA values.
    Index.dropna : Drop missing indices.

    Examples
    --------
    >>> ser = pd.Series([1., 2., np.nan])
    >>> ser
    0    1.0
    1    2.0
    2    NaN
    dtype: float64

    Drop NA values from a Series.

    >>> ser.dropna()
    0    1.0
    1    2.0
    dtype: float64

    Keep the Series with valid entries in the same variable.

    >>> ser.dropna(inplace=True)
    >>> ser
    0    1.0
    1    2.0
    dtype: float64

    Empty strings are not considered NA values. ``None`` is considered an
    NA value.

    >>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay'])
    >>> ser
    0       NaN
    1         2
    2       NaT
    3
    4      None
    5    I stay
    dtype: object
    >>> ser.dropna()
    1         2
    3
    5    I stay
    dtype: object
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    # Validate the axis parameter
    self._get_axis_number(axis or 0)

    if self._can_hold_na:
        result = remove_na_arraylike(self)
        if inplace:
            self._update_inplace(result)
        else:
            return result
    else:
        # Dtype cannot hold NA (e.g. plain int64): nothing can be dropped.
        if inplace:
            # do nothing
            pass
        else:
            return self.copy()
# ----------------------------------------------------------------------
# Time series-oriented methods

# error: Cannot determine type of 'asfreq'
@doc(NDFrame.asfreq, **_shared_doc_kwargs)  # type: ignore[has-type]
def asfreq(
    self,
    freq,
    method=None,
    how: str | None = None,
    normalize: bool = False,
    fill_value=None,
) -> Series:
    # Thin delegator; documentation is injected by the @doc decorator.
    return super().asfreq(
        freq=freq,
        method=method,
        how=how,
        normalize=normalize,
        fill_value=fill_value,
    )
# error: Cannot determine type of 'resample'
@doc(NDFrame.resample, **_shared_doc_kwargs)  # type: ignore[has-type]
def resample(
    self,
    rule,
    axis=0,
    closed: str | None = None,
    label: str | None = None,
    convention: str = "start",
    kind: str | None = None,
    loffset=None,
    base: int | None = None,
    on=None,
    level=None,
    origin: str | TimestampConvertibleTypes = "start_day",
    offset: TimedeltaConvertibleTypes | None = None,
) -> Resampler:
    # Thin delegator returning a Resampler group-by-like object; the
    # documentation is injected by the @doc decorator.
    return super().resample(
        rule=rule,
        axis=axis,
        closed=closed,
        label=label,
        convention=convention,
        kind=kind,
        loffset=loffset,
        base=base,
        on=on,
        level=level,
        origin=origin,
        offset=offset,
    )
def to_timestamp(self, freq=None, how="start", copy=True) -> Series:
    """
    Cast to DatetimeIndex of Timestamps, at *beginning* of period.

    Parameters
    ----------
    freq : str, default frequency of PeriodIndex
        Desired frequency.
    how : {'s', 'e', 'start', 'end'}
        Convention for converting period to timestamp; start of period
        vs. end.
    copy : bool, default True
        Whether or not to return a copy.

    Returns
    -------
    Series with DatetimeIndex

    Raises
    ------
    TypeError
        If the Series index is not a :class:`PeriodIndex`.
    """
    # Validate the index type *before* copying the data, so an invalid
    # call fails fast instead of paying for a copy that is thrown away.
    if not isinstance(self.index, PeriodIndex):
        raise TypeError(f"unsupported Type {type(self.index).__name__}")

    new_values = self._values
    if copy:
        new_values = new_values.copy()

    new_index = self.index.to_timestamp(freq=freq, how=how)
    return self._constructor(new_values, index=new_index).__finalize__(
        self, method="to_timestamp"
    )
def to_period(self, freq=None, copy=True) -> Series:
    """
    Convert Series from DatetimeIndex to PeriodIndex.

    Parameters
    ----------
    freq : str, default None
        Frequency associated with the PeriodIndex.
    copy : bool, default True
        Whether or not to return a copy.

    Returns
    -------
    Series
        Series with index converted to PeriodIndex.

    Raises
    ------
    TypeError
        If the Series index is not a :class:`DatetimeIndex`.
    """
    # Validate the index type *before* copying the data, so an invalid
    # call fails fast instead of paying for a copy that is thrown away.
    if not isinstance(self.index, DatetimeIndex):
        raise TypeError(f"unsupported Type {type(self.index).__name__}")

    new_values = self._values
    if copy:
        new_values = new_values.copy()

    new_index = self.index.to_period(freq=freq)
    return self._constructor(new_values, index=new_index).__finalize__(
        self, method="to_period"
    )
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def ffill(
    self: Series,
    axis: None | Axis = None,
    inplace: bool = False,
    limit: None | int = None,
    downcast=None,
) -> Series | None:
    # Forward-fill: delegate to the shared NDFrame implementation.
    return super().ffill(axis, inplace, limit, downcast)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def bfill(
    self: Series,
    axis: None | Axis = None,
    inplace: bool = False,
    limit: None | int = None,
    downcast=None,
) -> Series | None:
    # Backward-fill: delegate to the shared NDFrame implementation.
    return super().bfill(axis, inplace, limit, downcast)
@deprecate_nonkeyword_arguments(
    version=None, allowed_args=["self", "lower", "upper"]
)
def clip(
    self: Series,
    lower=None,
    upper=None,
    axis: Axis | None = None,
    inplace: bool = False,
    *args,
    **kwargs,
) -> Series | None:
    # Delegate to NDFrame.clip; *args/**kwargs exist for numpy
    # compatibility and are validated there.
    return super().clip(lower, upper, axis, inplace, *args, **kwargs)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"])
def interpolate(
    self: Series,
    method: str = "linear",
    axis: Axis = 0,
    limit: int | None = None,
    inplace: bool = False,
    limit_direction: str | None = None,
    limit_area: str | None = None,
    downcast: str | None = None,
    **kwargs,
) -> Series | None:
    # Delegate to the shared NDFrame.interpolate implementation.
    return super().interpolate(
        method,
        axis,
        limit,
        inplace,
        limit_direction,
        limit_area,
        downcast,
        **kwargs,
    )
@deprecate_nonkeyword_arguments(
    version=None, allowed_args=["self", "cond", "other"]
)
def where(
    self,
    cond,
    other=np.nan,
    inplace=False,
    axis=None,
    level=None,
    errors=lib.no_default,
    try_cast=lib.no_default,
):
    # Keep entries where ``cond`` is True, replace the rest with ``other``.
    # Shared implementation (and docs) live on NDFrame.
    return super().where(cond, other, inplace, axis, level, errors, try_cast)
@deprecate_nonkeyword_arguments(
    version=None, allowed_args=["self", "cond", "other"]
)
def mask(
    self,
    cond,
    other=np.nan,
    inplace=False,
    axis=None,
    level=None,
    errors=lib.no_default,
    try_cast=lib.no_default,
):
    # Inverse of ``where``: replace entries where ``cond`` is True.
    # Shared implementation (and docs) live on NDFrame.
    return super().mask(cond, other, inplace, axis, level, errors, try_cast)
# ----------------------------------------------------------------------
# Add index
_AXIS_ORDERS = ["index"]
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 0
_info_axis_name = "index"

# Descriptor exposing axis 0 as the ``.index`` attribute.
index: Index = properties.AxisProperty(
    axis=0, doc="The index (axis labels) of the Series."
)

# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
# Namespace accessors (``s.str``, ``s.dt``, ...); each is instantiated
# lazily on first attribute access and cached on the instance.
str = CachedAccessor("str", StringMethods)
dt = CachedAccessor("dt", CombinedDatetimelikeProperties)
cat = CachedAccessor("cat", CategoricalAccessor)
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
sparse = CachedAccessor("sparse", SparseAccessor)

# ----------------------------------------------------------------------
# Add plotting methods to Series
hist = pandas.plotting.hist_series
# ----------------------------------------------------------------------
# Template-Based Arithmetic/Comparison Methods

def _cmp_method(self, other, op):
    """Dispatch a comparison operator (==, <, ...) element-wise."""
    res_name = ops.get_op_result_name(self, other)

    if isinstance(other, Series) and not self._indexed_same(other):
        # Comparisons do not align; mismatched indexes are an error.
        raise ValueError("Can only compare identically-labeled Series objects")

    lvalues = self._values
    rvalues = extract_array(other, extract_numpy=True, extract_range=True)

    with np.errstate(all="ignore"):
        res_values = ops.comparison_op(lvalues, rvalues, op)

    return self._construct_result(res_values, name=res_name)
def _logical_method(self, other, op):
    """Dispatch a logical operator (&, |, ^) element-wise."""
    res_name = ops.get_op_result_name(self, other)
    # Unlike comparisons, logical ops align the two operands first.
    self, other = ops.align_method_SERIES(self, other, align_asobject=True)

    lvalues = self._values
    rvalues = extract_array(other, extract_numpy=True, extract_range=True)

    res_values = ops.logical_op(lvalues, rvalues, op)
    return self._construct_result(res_values, name=res_name)
def _arith_method(self, other, op):
    """Dispatch an arithmetic operator (+, -, ...): align, then defer."""
    self, other = ops.align_method_SERIES(self, other)
    return base.IndexOpsMixin._arith_method(self, other, op)
# Install the numeric reductions (sum, mean, min, ...) onto Series.
Series._add_numeric_operations()

# Add arithmetic!
# Installs the flex methods (add, sub, mul, ... with fill_value support).
ops.add_flex_arithmetic_methods(Series)
| 30.665288 | 88 | 0.53242 |
d0a01b86f1f05022aea614bb4f8412f3ff55628e | 739 | py | Python | Python/creational_patterns/abstract_factory/list_factory/list_factory.py | ploukareas/Design-Patterns | 8effde38d73ae9058c3028c97ef395644a90d55b | [
"BSD-3-Clause",
"MIT"
] | 28 | 2018-09-28T07:45:35.000Z | 2022-02-12T12:25:05.000Z | Python/creational_patterns/abstract_factory/list_factory/list_factory.py | ploukareas/Design-Patterns | 8effde38d73ae9058c3028c97ef395644a90d55b | [
"BSD-3-Clause",
"MIT"
] | null | null | null | Python/creational_patterns/abstract_factory/list_factory/list_factory.py | ploukareas/Design-Patterns | 8effde38d73ae9058c3028c97ef395644a90d55b | [
"BSD-3-Clause",
"MIT"
] | 5 | 2021-05-10T23:19:55.000Z | 2022-03-04T20:26:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ˅
from creational_patterns.abstract_factory.factory.factory import Factory
from creational_patterns.abstract_factory.list_factory.list_data import ListData
from creational_patterns.abstract_factory.list_factory.list_link import ListLink
from creational_patterns.abstract_factory.list_factory.list_page import ListPage
# ˄
class ListFactory(Factory):
    """Concrete factory that builds the list-flavoured product family."""

    def create_page(self, title, author):
        """Return a :class:`ListPage` for the given title and author."""
        return ListPage(title, author)

    def create_link(self, name, url):
        """Return a :class:`ListLink` labelled *name* pointing at *url*."""
        return ListLink(name, url)

    def create_data(self, name):
        """Return a :class:`ListData` holding *name*."""
        return ListData(name)
# ˅
# ˄
# ˅
# ˄
| 18.475 | 80 | 0.644114 |
067fe7cc7eb3536d42b3641880bdaeaa3f6d04d1 | 5,438 | py | Python | src/config/utils/provision_forwarding_mode.py | jnpr-pranav/contrail-controller | 428eee37c28c31830fd764315794e1a6e52720c1 | [
"Apache-2.0"
] | 37 | 2020-09-21T10:42:26.000Z | 2022-01-09T10:16:40.000Z | src/config/utils/provision_forwarding_mode.py | jnpr-pranav/contrail-controller | 428eee37c28c31830fd764315794e1a6e52720c1 | [
"Apache-2.0"
] | null | null | null | src/config/utils/provision_forwarding_mode.py | jnpr-pranav/contrail-controller | 428eee37c28c31830fd764315794e1a6e52720c1 | [
"Apache-2.0"
] | 21 | 2020-08-25T12:48:42.000Z | 2022-03-22T04:32:18.000Z | #!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import object
import sys
import argparse
import configparser
from vnc_api.vnc_api import *
from vnc_admin_api import VncApiAdmin
class ForwardingModeSetup(object):
def __init__(self, args_str=None):
    """Parse CLI args, connect to the VNC API and update the forwarding
    mode (and optionally the VxLAN id) of the named virtual network."""
    self._args = None
    if not args_str:
        args_str = ' '.join(sys.argv[1:])
    self._parse_args(args_str)

    self._vnc_lib = VncApiAdmin(
        self._args.use_admin_api,
        self._args.admin_user, self._args.admin_password,
        self._args.admin_tenant_name,
        self._args.api_server_ip,
        self._args.api_server_port, '/',
        api_server_use_ssl=self._args.api_server_use_ssl)

    vxlan_id = self._args.vxlan_id
    vn_name = self._args.vn_name
    forwarding_mode = self._args.forwarding_mode
    project_fq_name_str = self._args.project_fq_name
    project_fq_name = project_fq_name_str.split(':')

    # Figure out VN: list the project's virtual networks and match the
    # requested name against the full fq_name (domain:project:vn).
    vni_list = self._vnc_lib.virtual_networks_list(
        parent_fq_name = project_fq_name)['virtual-networks']
    found = False
    for vni_record in vni_list:
        if (vni_record['fq_name'][0] == project_fq_name[0] and
            vni_record['fq_name'][1] == project_fq_name[1] and
            vni_record['fq_name'][2] == vn_name):
            vni_obj = self._vnc_lib.virtual_network_read(
                id = vni_record['uuid'])
            # Preserve existing properties; only overwrite what was given.
            vni_obj_properties = vni_obj.get_virtual_network_properties() or VirtualNetworkType()
            if (vxlan_id is not None):
                vni_obj_properties.set_vxlan_network_identifier(int(vxlan_id))
            if (forwarding_mode is not None):
                vni_obj_properties.set_forwarding_mode(forwarding_mode)
            vni_obj.set_virtual_network_properties(vni_obj_properties)
            self._vnc_lib.virtual_network_update(vni_obj)
            found = True

    if not found:
        print("No Virtual Network %s" %(vn_name))
        sys.exit(1)
# end __init__
def _parse_args(self, args_str):
    '''
    Parse the command-line arguments into ``self._args``.

    Eg. python provision_forwarding_mode.py
                                    --project_fq_name 'default-domain:admin'
                                    --vn_name vn1
                                    --vxlan_id 100
                                    --forwarding_mode l2
    '''

    # Source any specified config/ini file
    # Turn off help, so we print all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)

    conf_parser.add_argument("-c", "--conf_file",
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    # Fallback values used when neither config file nor CLI supplies them.
    defaults = {
        'api_server_ip': '127.0.0.1',
        'api_server_port': '8082',
        'api_server_use_ssl': False,
        'oper': 'add',
        'control_names': [],
        'route_table_name': 'CustomRouteTable',
        'project_fq_name' : 'default-domain:admin',
    }
    ksopts = {
        'admin_user': 'user1',
        'admin_password': 'password1',
        'admin_tenant_name': 'default-domain'
    }

    # Override with CLI options
    # Don't surpress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(ksopts)
    parser.set_defaults(**defaults)

    parser.add_argument(
        "--vn_name", help="VN Name", required=True)
    parser.add_argument(
        "--project_fq_name", help="Fully qualified name of the Project", required=True)
    parser.add_argument(
        "--vxlan_id", help="VxLan ID")
    parser.add_argument("--api_server_port", help="Port of api server")
    parser.add_argument(
        "--forwarding_mode", help="l2_l3 or l2 only", required=True)
    parser.add_argument(
        "--admin_tenant_name", help="Tenant to create the forwarding mode")
    parser.add_argument(
        "--admin_user", help="Name of keystone admin user")
    parser.add_argument(
        "--admin_password", help="Password of keystone admin user")
    # Either talk to a remote api-server by IP or to the local admin port,
    # never both.
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "--api_server_ip", help="IP address of api server")
    group.add_argument("--use_admin_api",
                        default=False,
                        help = "Connect to local api-server on admin port",
                        action="store_true")

    self._args = parser.parse_args(remaining_argv)
# end _parse_args
# end class ForwardingModeSetup
def main(args_str=None):
    """Script entry point; ``args_str`` defaults to ``sys.argv[1:]``."""
    ForwardingModeSetup(args_str)
# end main

if __name__ == "__main__":
    main()
| 37.246575 | 101 | 0.589187 |
b81701b1193d716f953e7edfe8d856bd136d4c7d | 7,523 | py | Python | 5-1-10/InputText.py | QueenChristina/Calculus-Zombies | 81611015ba505bff6679d029280854e3934fad15 | [
"MIT"
] | 3 | 2019-08-30T02:53:34.000Z | 2022-01-03T07:05:39.000Z | 5-1-10/InputText.py | QueenChristina/Calculus-Zombies | 81611015ba505bff6679d029280854e3934fad15 | [
"MIT"
] | null | null | null | 5-1-10/InputText.py | QueenChristina/Calculus-Zombies | 81611015ba505bff6679d029280854e3934fad15 | [
"MIT"
] | null | null | null | """
Copyright 2017, Silas Gyger, silasgyger@gmail.com, All rights reserved.
Borrowed from https://github.com/Nearoo/pygame-text-input under the MIT license.
"""
import os.path
import pygame, sys
import pygame.locals as pl
from Files import Var
pygame.font.init()
class TextInput:
"""
This class lets the user input a piece of text, e.g. a name or a message.
This class lets the user input a short, one-line piece of text at a blinking cursor
that can be moved using the arrow keys. Delete, home and end work as well.
"""
def __init__(
        self,
        initial_string="",
        font_family="",
        font_size=35,
        antialias=True,
        text_color=(0, 0, 0),
        cursor_color=(0, 0, 1),
        repeat_keys_initial_ms=400,
        repeat_keys_interval_ms=35):
    """
    :param initial_string: Initial text to be displayed
    :param font_family: name or list of names for font (see pygame.font.match_font for precise format)
    :param font_size: Size of font in pixels
    :param antialias: Determines if antialias is applied to font (uses more processing power)
    :param text_color: Color of text (duh)
    :param cursor_color: Color of cursor
    :param repeat_keys_initial_ms: Time in ms before keys are repeated when held
    :param repeat_keys_interval_ms: Interval between key press repetition when held
    """

    # Text related vars:
    self.antialias = antialias
    self.text_color = text_color
    self.font_size = font_size
    self.input_string = initial_string  # Inputted text

    if not os.path.isfile(font_family):
        # Not a font file path: resolve the name via the system fonts.
        font_family = pygame.font.match_font(font_family)
    self.font_object = pygame.font.Font(font_family, font_size)

    # Text-surface will be created during the first update call:
    self.surface = pygame.Surface((1, 1))
    self.surface.set_alpha(0)

    # Vars to make keydowns repeat after user pressed a key for some time:
    self.keyrepeat_counters = {}  # {event.key: (counter_int, event.unicode)} (look for "***")
    # NOTE: attribute name misspells "initial"; kept as-is because
    # update() references it by this exact name.
    self.keyrepeat_intial_interval_ms = repeat_keys_initial_ms
    self.keyrepeat_interval_ms = repeat_keys_interval_ms

    # Things cursor:
    # Cursor width scales with font size (minimum 1 px).
    self.cursor_surface = pygame.Surface((int(self.font_size/20+1), self.font_size))
    self.cursor_surface.fill(cursor_color)
    self.cursor_position = len(initial_string)  # Inside text
    self.cursor_visible = True  # Switches every self.cursor_switch_ms ms
    self.cursor_switch_ms = 500  # /|\
    self.cursor_ms_counter = 0

    self.clock = pygame.time.Clock()
def update(self, events):
for event in events:
if event.type == pygame.QUIT:
RUNNING = False
pygame.quit()
sys.exit()
Var.keyENTER = False
if event.type == pygame.KEYDOWN:
self.cursor_visible = True # So the user sees where he writes
# If none exist, create counter for that key:
if event.key not in self.keyrepeat_counters:
self.keyrepeat_counters[event.key] = [0, event.unicode]
if event.key == pl.K_BACKSPACE:
self.input_string = (
self.input_string[:max(self.cursor_position - 1, 0)]
+ self.input_string[self.cursor_position:]
)
# Subtract one from cursor_pos, but do not go below zero:
self.cursor_position = max(self.cursor_position - 1, 0)
elif event.key == pl.K_DELETE:
self.input_string = (
self.input_string[:self.cursor_position]
+ self.input_string[self.cursor_position + 1:]
)
elif event.key == pl.K_RETURN:
Var.keyENTER = True
# return True
elif event.key == pl.K_RIGHT:
# Add one to cursor_pos, but do not exceed len(input_string)
self.cursor_position = min(self.cursor_position + 1, len(self.input_string))
elif event.key == pl.K_LEFT:
# Subtract one from cursor_pos, but do not go below zero:
self.cursor_position = max(self.cursor_position - 1, 0)
elif event.key == pl.K_END:
self.cursor_position = len(self.input_string)
elif event.key == pl.K_HOME:
self.cursor_position = 0
else:
# If no special key is pressed, add unicode of key to input_string
self.input_string = (
self.input_string[:self.cursor_position]
+ event.unicode
+ self.input_string[self.cursor_position:]
)
self.cursor_position += len(event.unicode) # Some are empty, e.g. K_UP
elif event.type == pl.KEYUP:
# *** Because KEYUP doesn't include event.unicode, this dict is stored in such a weird way
if event.key in self.keyrepeat_counters:
del self.keyrepeat_counters[event.key]
# Update key counters:
for key in self.keyrepeat_counters:
self.keyrepeat_counters[key][0] += self.clock.get_time() # Update clock
# Generate new key events if enough time has passed:
if self.keyrepeat_counters[key][0] >= self.keyrepeat_intial_interval_ms:
self.keyrepeat_counters[key][0] = (
self.keyrepeat_intial_interval_ms
- self.keyrepeat_interval_ms
)
event_key, event_unicode = key, self.keyrepeat_counters[key][1]
pygame.event.post(pygame.event.Event(pl.KEYDOWN, key=event_key, unicode=event_unicode))
# Re-render text surface:
self.surface = self.font_object.render(self.input_string, self.antialias, self.text_color)
# Update self.cursor_visible
self.cursor_ms_counter += self.clock.get_time()
if self.cursor_ms_counter >= self.cursor_switch_ms:
self.cursor_ms_counter %= self.cursor_switch_ms
self.cursor_visible = not self.cursor_visible
if self.cursor_visible:
cursor_y_pos = self.font_object.size(self.input_string[:self.cursor_position])[0]
# Without this, the cursor is invisible when self.cursor_position > 0:
if self.cursor_position > 0:
cursor_y_pos -= self.cursor_surface.get_width()
self.surface.blit(self.cursor_surface, (cursor_y_pos, 0))
self.clock.tick()
return False
    def get_surface(self):
        """Return the rendered text surface (cursor included when visible)."""
        return self.surface
    def get_text(self):
        """Return the current contents of the input buffer."""
        return self.input_string
    def get_cursor_position(self):
        """Return the cursor index within the input string."""
        return self.cursor_position
    def set_text_color(self, color):
        """Set the color used for future text rendering."""
        self.text_color = color
    def set_cursor_color(self, color):
        """Set the color of the blinking cursor."""
        self.cursor_surface.fill(color)
    def clear_text(self):
        """Empty the input buffer and reset the cursor to the start."""
        self.input_string = ""
        self.cursor_position = 0
| 40.229947 | 107 | 0.580752 |
ea497a584fb9d46a73e6e039c1e8b561461f39ba | 2,233 | py | Python | sendowl_python/sendowl.py | aaron235/sendowl-python | a519431d88359f2147dc5288cf78ab579fc4a5d7 | [
"MIT"
] | null | null | null | sendowl_python/sendowl.py | aaron235/sendowl-python | a519431d88359f2147dc5288cf78ab579fc4a5d7 | [
"MIT"
] | null | null | null | sendowl_python/sendowl.py | aaron235/sendowl-python | a519431d88359f2147dc5288cf78ab579fc4a5d7 | [
"MIT"
] | null | null | null | import requests
import json
from typing import List, Dict, Union
class SendOwl:
    """Thin client for the SendOwl REST API (v1), product endpoints.

    Credentials are embedded in the base URL as HTTP basic auth. Responses
    are decoded from JSON; an empty response body yields ``None``.
    """

    def __init__(self, key: Optional[str] = None, secret: Optional[str] = None):
        # Fixed implicit-Optional annotations (PEP 484): `key: str = None`
        # is invalid typing; the runtime behavior is unchanged.
        self._key = key
        self._secret = secret
        self._baseUrl = f"https://{self._key}:{self._secret}@www.sendowl.com/api/v1/"

    def _apiRequest(self, method: str, path: str,
                    params: Optional[Dict[str, str]] = None) -> Union[List, Dict, None]:
        """Issue an HTTP request against the API and decode the JSON reply.

        :param method: HTTP verb, one of get/post/put/delete (any case)
        :param path: endpoint path relative to the API base URL
        :param params: query parameters (GET) or form payload (POST/PUT)
        :raises ValueError: if *method* is not a supported HTTP verb
        :raises RuntimeError: if the response body is not valid JSON
        """
        headers = {'Accept': 'application/json'}
        method = method.lower()
        url = f'{self._baseUrl}{path}'
        if method == 'get':
            # requests treats params=None the same as omitting it.
            req = requests.get(url, params=params, headers=headers)
        elif method == 'post':
            req = requests.post(url, data=params, headers=headers)
        elif method == 'put':
            req = requests.put(url, data=params, headers=headers)
        elif method == 'delete':
            req = requests.delete(url, headers=headers)
        else:
            raise ValueError(f'Invalid request method "{method}"')
        if not req.text:
            # e.g. 204 No Content responses carry no body.
            return None
        try:
            return req.json()
        except json.JSONDecodeError:
            # Surface non-JSON bodies (e.g. HTML error pages) verbatim.
            raise RuntimeError(f'Response returned from SendOwl: {req.text}')

    def get_products(self) -> Union[List, Dict]:
        """List all products in the account."""
        return self._apiRequest('get', 'products')

    def search_products(self, term: str) -> Union[List, Dict]:
        """Search products by free-text *term*."""
        return self._apiRequest('get', 'products/search', {'term': term})

    def shopify_lookup_product(self, variant_id: str) -> Union[List, Dict]:
        """Look up a product by its Shopify variant id."""
        return self._apiRequest('get', 'products/shopify_lookup', {'variant_id': variant_id})

    def get_product(self, product_id: Union[int, str]) -> Union[List, Dict]:
        """Fetch a single product by id."""
        return self._apiRequest('get', f'products/{product_id}')

    def new_product(self, product: Dict) -> Union[List, Dict]:
        """Create a product from a dict of attributes."""
        return self._apiRequest('post', 'products', product)

    def update_product(self, product_id: Union[int, str], product_updates: Dict) -> Union[List, Dict]:
        """Apply partial updates to an existing product."""
        return self._apiRequest('put', f'products/{product_id}', product_updates)

    def delete_product(self, product_id: Union[int, str]) -> Union[List, Dict]:
        """Delete a product by id."""
        return self._apiRequest('delete', f'products/{product_id}')

    def issue_product_order(self, product_id: Union[int, str], issue_details: Dict) -> Union[List, Dict]:
        """POST an issue request for the given product."""
        return self._apiRequest('post', f'products/{product_id}/issue', issue_details)
| 35.444444 | 105 | 0.699955 |
9f03c13f8b87cfd822a8600282299a86e0a41926 | 26,755 | py | Python | nova/tests/test_quantum.py | bopopescu/openstack-12 | 2c7e0d1e63cae7aaa38095439843c9a2abb0382b | [
"Apache-2.0"
] | null | null | null | nova/tests/test_quantum.py | bopopescu/openstack-12 | 2c7e0d1e63cae7aaa38095439843c9a2abb0382b | [
"Apache-2.0"
] | null | null | null | nova/tests/test_quantum.py | bopopescu/openstack-12 | 2c7e0d1e63cae7aaa38095439843c9a2abb0382b | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011,2012 Nicira, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import session as sql_session
from nova import exception
from nova import flags
from nova import log as logging
from nova.network.quantum import client as quantum_client
from nova.network.quantum import fake_client
from nova.network.quantum import manager as quantum_manager
from nova.network.quantum import quantum_connection
from nova.network.quantum import melange_connection
from nova.network.quantum import melange_ipam_lib
from nova import test
from nova import utils
# Module-level logger and flag registry for these tests.
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS

# Static network fixtures used throughout these tests. Each dict mirrors the
# keyword arguments accepted by QuantumManager.create_networks: two
# project-scoped networks, one global ("public") network with project_id None,
# and a second network for fake_project2 to exercise multi-network allocation.
# Each network's v4/v6 CIDR starts with a unique prefix (see
# _validate_nw_info, which relies on that uniqueness).
networks = [{'label': 'project1-net1',
             'injected': False,
             'multi_host': False,
             'cidr': '100.168.0.0/24',
             'cidr_v6': '100:1db8::/64',
             'gateway_v6': '100:1db8::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '100.168.0.1',
             'broadcast': '100.168.0.255',
             'dns1': '8.8.8.8',
             'vlan': None,
             'host': None,
             'vpn_public_address': None,
             'project_id': 'fake_project1',
             'priority': 1},
            {'label': 'project2-net1',
             'injected': False,
             'multi_host': False,
             'cidr': '101.168.1.0/24',
             'cidr_v6': '101:1db9::/64',
             'gateway_v6': '101:1db9::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '101.168.1.1',
             'broadcast': '101.168.1.255',
             'dns1': '8.8.8.8',
             'vlan': None,
             'host': None,
             'project_id': 'fake_project2',
             'priority': 1},
            {'label': "public",
             'injected': False,
             'multi_host': False,
             'cidr': '102.0.0.0/24',
             'cidr_v6': '102:1dba::/64',
             'gateway_v6': '102:1dba::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '102.0.0.1',
             'broadcast': '102.0.0.255',
             'dns1': '8.8.8.8',
             'vlan': None,
             'host': None,
             'project_id': None,
             'priority': 0},
            {'label': "project2-net2",
             'injected': False,
             'multi_host': False,
             'cidr': '103.0.0.0/24',
             'cidr_v6': '103:1dbb::/64',
             'gateway_v6': '103:1dbb::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '103.0.0.1',
             'broadcast': '103.0.0.255',
             'dns1': '8.8.8.8',
             'vlan': None,
             'host': None,
             'project_id': "fake_project2",
             'priority': 2}]
class QuantumConnectionTestCase(test.TestCase):
    """Exercises QuantumClientConnection against the fake Quantum client."""

    def test_connection(self):
        """End-to-end CRUD of a network and its ports via the connection."""
        fc = fake_client.FakeClient(LOG)
        qc = quantum_connection.QuantumClientConnection(client=fc)
        t = "tenant1"
        net1_name = "net1"
        net1_uuid = qc.create_network(t, net1_name)
        self.assertEquals(net1_name, qc.get_network_name(t, net1_uuid))
        self.assertTrue(qc.network_exists(t, net1_uuid))
        # Existence checks are scoped to both network uuid and tenant.
        self.assertFalse(qc.network_exists(t, "fake-uuid"))
        self.assertFalse(qc.network_exists("fake-tenant", net1_uuid))
        nets = qc.get_networks(t)['networks']
        self.assertEquals(len(nets), 1)
        self.assertEquals(nets[0]['id'], net1_uuid)
        # Attach several ports, then look each up by attachment and delete it.
        num_ports = 10
        for i in range(0, num_ports):
            qc.create_and_attach_port(t, net1_uuid,
                                      'iface' + str(i), state='ACTIVE')
        self.assertEquals(len(qc.get_attached_ports(t, net1_uuid)), num_ports)
        for i in range(0, num_ports):
            port_uuid = qc.get_port_by_attachment(t, net1_uuid,
                                                  'iface' + str(i))
            self.assertTrue(port_uuid)
            qc.detach_and_delete_port(t, net1_uuid, port_uuid)
        self.assertEquals(len(qc.get_attached_ports(t, net1_uuid)), 0)
        # test port not found: deleting an already-deleted port must raise.
        qc.create_and_attach_port(t, net1_uuid, 'foo', state='ACTIVE')
        port_uuid = qc.get_port_by_attachment(t, net1_uuid, 'foo')
        qc.detach_and_delete_port(t, net1_uuid, port_uuid)
        self.assertRaises(quantum_client.QuantumNotFoundException,
                          qc.detach_and_delete_port, t,
                          net1_uuid, port_uuid)
        # Deleting the network removes it entirely; name lookup then raises.
        qc.delete_network(t, net1_uuid)
        self.assertFalse(qc.network_exists(t, net1_uuid))
        self.assertEquals(len(qc.get_networks(t)['networks']), 0)
        self.assertRaises(quantum_client.QuantumNotFoundException,
                          qc.get_network_name, t, net1_uuid)
# this is a base class to be used by other QuantumManager Test classes
class QuantumNovaTestCase(test.TestCase):
    """Base fixture: builds a QuantumManager wired to the fake client."""

    def setUp(self):
        super(QuantumNovaTestCase, self).setUp()
        self.flags(quantum_use_dhcp=True)
        self.flags(l3_lib="nova.network.l3.LinuxNetL3")
        linuxdrv = "nova.network.linux_net.LinuxOVSInterfaceDriver"
        self.flags(linuxnet_interface_driver=linuxdrv)
        fc = fake_client.FakeClient(LOG)
        qc = quantum_connection.QuantumClientConnection(client=fc)
        self.net_man = quantum_manager.QuantumManager(
            ipam_lib="nova.network.quantum.nova_ipam_lib",
            q_conn=qc)

        # No-op stand-ins for dnsmasq control so tests never touch the host.
        def func(arg1, arg2):
            pass

        def func2(arg1, arg2, arg3):
            pass

        def func1(arg1):
            pass

        self.net_man.driver.update_dhcp_hostfile_with_text = func
        self.net_man.driver.restart_dhcp = func2
        self.net_man.driver.kill_dhcp = func1

        # Tests seem to create some networks by default, which
        # we don't want. So we delete them.
        ctx = context.RequestContext('user1', 'fake_project1').elevated()
        for n in db.network_get_all(ctx):
            db.network_delete_safe(ctx, n['id'])

        # Other unit tests (e.g., test_compute.py) have a nasty
        # habit of creating fixed IPs and not cleaning up, which
        # can confuse these tests, so we remove all existing fixed
        # ips before starting.
        session = sql_session.get_session()
        result = session.query(models.FixedIp).all()
        with session.begin():
            for fip_ref in result:
                session.delete(fip_ref)

        self.net_man.init_host()

    def _create_network(self, n):
        """Create one network from a fixture dict and record its uuid in it."""
        ctx = context.RequestContext('user1', n['project_id'])
        nwks = self.net_man.create_networks(
            ctx,
            label=n['label'], cidr=n['cidr'],
            multi_host=n['multi_host'],
            num_networks=1, network_size=256,
            cidr_v6=n['cidr_v6'],
            gateway=n['gateway'],
            gateway_v6=n['gateway_v6'], bridge=None,
            bridge_interface=None, dns1=n['dns1'],
            project_id=n['project_id'],
            priority=n['priority'])
        # Stash the created uuid back on the fixture for later lookups.
        n['uuid'] = nwks[0]['uuid']
class QuantumAllocationTestCase(QuantumNovaTestCase):
    """Tests QuantumManager.get_network's DB-backed and DB-less paths."""

    def test_get_network_in_db(self):
        """When the uuid is in the nova DB, the DB record is returned."""
        context = self.mox.CreateMockAnything()
        context.elevated().AndReturn('elevated')
        self.mox.StubOutWithMock(db, 'network_get_by_uuid')
        self.net_man.context = context
        db.network_get_by_uuid('elevated', 'quantum_net_id').AndReturn(
            {'uuid': 1})
        self.mox.ReplayAll()

        network = self.net_man.get_network(context, ('quantum_net_id',
                                                     'net_tenant_id'))
        # The quantum id is attached to whatever record came back.
        self.assertEquals(network['quantum_net_id'], 'quantum_net_id')
        self.assertEquals(network['uuid'], 1)

    def test_get_network_not_in_db(self):
        """When the uuid is unknown to nova, a stub record is synthesized."""
        context = self.mox.CreateMockAnything()
        context.elevated().AndReturn('elevated')
        self.mox.StubOutWithMock(db, 'network_get_by_uuid')
        self.net_man.context = context
        db.network_get_by_uuid('elevated', 'quantum_net_id').AndReturn(None)
        self.mox.ReplayAll()

        network = self.net_man.get_network(context, ('quantum_net_id',
                                                     'net_tenant_id'))
        self.assertEquals(network['quantum_net_id'], 'quantum_net_id')
        self.assertEquals(network['uuid'], 'quantum_net_id')
class QuantumDeallocationTestCase(QuantumNovaTestCase):
    """Tests QuantumManager deallocation of ports and IP addresses.

    BUG FIX: ``test_deallocate_ip_address`` was defined twice; the second
    definition silently shadowed the first, so the success-path test never
    ran. The second is now ``test_deallocate_ip_address_logs_error``,
    matching the naming of ``test_deallocate_port_logs_error``.
    """

    def test_deallocate_port(self):
        """Port lookup by attachment followed by detach/delete succeeds."""
        quantum = self.mox.CreateMock(
            quantum_connection.QuantumClientConnection)
        quantum.get_port_by_attachment('q_tenant_id', 'net_id',
                                       'interface_id').AndReturn('port_id')
        quantum.detach_and_delete_port('q_tenant_id', 'net_id', 'port_id')
        self.net_man.q_conn = quantum

        self.mox.ReplayAll()

        self.net_man.deallocate_port('interface_id', 'net_id', 'q_tenant_id',
                                     'instance_id')

    def test_deallocate_port_logs_error(self):
        """A failing port lookup is logged, not propagated."""
        quantum = self.mox.CreateMock(
            quantum_connection.QuantumClientConnection)
        quantum.get_port_by_attachment('q_tenant_id', 'net_id',
                                       'interface_id').AndRaise(Exception)
        self.net_man.q_conn = quantum

        self.mox.StubOutWithMock(quantum_manager.LOG, 'exception')
        quantum_manager.LOG.exception(mox.Regex(r'port deallocation failed'))

        self.mox.ReplayAll()

        self.net_man.deallocate_port('interface_id', 'net_id', 'q_tenant_id',
                                     'instance_id')

    def test_deallocate_ip_address(self):
        """IPAM tenant lookup succeeds and deallocation completes."""
        ipam = self.mox.CreateMock(melange_ipam_lib.QuantumMelangeIPAMLib)
        ipam.get_tenant_id_by_net_id('context', 'net_id', {'uuid': 1},
                                     'project_id').AndReturn('ipam_tenant_id')
        self.net_man.ipam = ipam
        self.mox.ReplayAll()
        self.net_man.deallocate_ip_address('context', 'net_id', 'project_id',
                                           {'uuid': 1}, 'instance_id')

    def test_deallocate_ip_address_logs_error(self):
        """A failing IPAM lookup is logged, not propagated."""
        ipam = self.mox.CreateMock(melange_ipam_lib.QuantumMelangeIPAMLib)
        ipam.get_tenant_id_by_net_id('context', 'net_id', {'uuid': 1},
                                     'project_id').AndRaise(Exception())
        self.net_man.ipam = ipam

        self.mox.StubOutWithMock(quantum_manager.LOG, 'exception')
        quantum_manager.LOG.exception(mox.Regex(r'ipam deallocation failed'))

        self.mox.ReplayAll()
        self.net_man.deallocate_ip_address('context', 'net_id', 'project_id',
                                           {'uuid': 1}, 'instance_id')
class QuantumManagerTestCase(QuantumNovaTestCase):
    """Integration-style tests of network/instance lifecycle in the manager."""

    def test_create_and_delete_nets(self):
        self._create_nets()
        self._delete_nets()

    def _create_nets(self):
        """Create every fixture network from the module-level list."""
        for n in networks:
            self._create_network(n)

    def _delete_nets(self):
        """Delete every fixture network and verify none remain."""
        for n in networks:
            ctx = context.RequestContext('user1', n['project_id'])
            self.net_man.delete_network(ctx, None, n['uuid'])
        self.assertRaises(exception.NoNetworksFound,
                          db.network_get_all, ctx.elevated())

    def _validate_nw_info(self, nw_info, expected_net_labels):
        """Check nw_info VIFs line up, in order, with the expected networks."""
        self.assertEquals(len(nw_info), len(expected_net_labels))

        ctx = context.RequestContext('user1', 'foo').elevated()
        all_net_map = {}
        for n in db.network_get_all(ctx):
            all_net_map[n['label']] = n

        for i in range(0, len(nw_info)):
            vif = nw_info[i]
            net = all_net_map[expected_net_labels[i]]

            # simple test assumes that each starting prefix is unique
            expected_v4_cidr_start = net['cidr'].split(".")[0].lower()
            expected_v6_cidr_start = net['cidr_v6'].split(":")[0].lower()

            for subnet in vif['network']['subnets']:
                addr = subnet['ips'][0]['address']
                if subnet['version'] == 4:
                    address_start = addr.split(".")[0].lower()
                    # BUG FIX: was assertTrue(expected, actual), which passed
                    # the actual value as the *msg* argument and always
                    # succeeded for a non-empty expected prefix.
                    self.assertEquals(expected_v4_cidr_start, address_start)
                else:
                    address_start = addr.split(":")[0].lower()
                    # BUG FIX: same vacuous assertTrue as above.
                    self.assertEquals(expected_v6_cidr_start, address_start)

        # confirm that there is a DHCP device on corresponding net
        for l in expected_net_labels:
            n = all_net_map[l]
            tenant_id = (n['project_id'] or
                         FLAGS.quantum_default_tenant_id)
            ports = self.net_man.q_conn.get_attached_ports(
                tenant_id, n['uuid'])
            self.assertEquals(len(ports), 2)  # gw + instance VIF

            # make sure we aren't allowed to delete network with
            # active port
            self.assertRaises(exception.NetworkBusy,
                              self.net_man.delete_network,
                              ctx, None, n['uuid'])

    def _check_vifs(self, expect_num_vifs):
        """Assert the total number of VIF rows currently in the nova DB."""
        ctx = context.RequestContext('user1', "").elevated()
        self.assertEqual(len(db.virtual_interface_get_all(ctx)),
                         expect_num_vifs)

    def _allocate_and_deallocate_instance(self, project_id, requested_networks,
                                          expected_labels):
        """Allocate networking for one instance, validate it, then tear down."""
        ctx = context.RequestContext('user1', project_id)

        self._check_vifs(0)

        instance_ref = db.instance_create(ctx,
                                          {"project_id": project_id})
        nw_info = self.net_man.allocate_for_instance(ctx.elevated(),
                        instance_id=instance_ref['id'], host="",
                        rxtx_factor=3,
                        project_id=project_id,
                        requested_networks=requested_networks)
        self._check_vifs(len(nw_info))

        self._validate_nw_info(nw_info, expected_labels)

        # Fetching nw_info again must yield the same, still-valid view.
        nw_info = self.net_man.get_instance_nw_info(ctx, instance_ref['id'],
                                                    instance_ref['uuid'],
                                                    instance_ref['instance_type_id'], "",
                                                    project_id=project_id)
        self._check_vifs(len(nw_info))
        self._validate_nw_info(nw_info, expected_labels)

        # Record (port, network) pairs; ports may live under the project's
        # tenant or under the default tenant for global networks.
        port_net_pairs = []
        for vif in nw_info:
            nid = vif['network']['id']
            pid = self.net_man.q_conn.get_port_by_attachment(
                project_id, nid, vif['id'])
            if pid is None:
                pid = self.net_man.q_conn.get_port_by_attachment(
                    FLAGS.quantum_default_tenant_id,
                    nid, vif['id'])
            self.assertTrue(pid is not None)
            port_net_pairs.append((pid, nid))

        self.net_man.deallocate_for_instance(ctx,
                                             instance_id=instance_ref['id'],
                                             project_id=project_id)

        # After deallocation every recorded port must be gone in both tenants.
        for pid, nid in port_net_pairs:
            self.assertRaises(quantum_client.QuantumNotFoundException,
                              self.net_man.q_conn.detach_and_delete_port,
                              project_id, nid, pid)
            self.assertRaises(quantum_client.QuantumNotFoundException,
                              self.net_man.q_conn.detach_and_delete_port,
                              FLAGS.quantum_default_tenant_id, nid, pid)

        self._check_vifs(0)

    def test_allocate_and_deallocate_instance_static(self):
        self._create_nets()
        self._allocate_and_deallocate_instance("fake_project1", None,
                                               ['public', 'project1-net1'])
        self._delete_nets()

    def test_allocate_and_deallocate_instance_dynamic(self):
        self._create_nets()
        project_id = "fake_project2"
        ctx = context.RequestContext('user1', project_id)
        all_valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
            ctx, project_id)
        requested_networks = [(n[0], None) for n in all_valid_networks]

        self.net_man.validate_networks(ctx, requested_networks)

        label_map = {}
        for n in db.network_get_all(ctx.elevated()):
            label_map[n['uuid']] = n['label']
        expected_labels = [label_map[uid] for uid, _i in requested_networks]

        self._allocate_and_deallocate_instance(project_id, requested_networks,
                                               expected_labels)
        self._delete_nets()

    def test_validate_bad_network(self):
        ctx = context.RequestContext('user1', 'fake_project1')
        self.assertRaises(exception.NetworkNotFound,
                          self.net_man.validate_networks, ctx, [("", None)])

    def test_create_net_external_uuid(self):
        """Tests use case where network can be created directly via
           Quantum API, then the UUID is passed in via nova-manage"""
        project_id = "foo_project"
        ctx = context.RequestContext('user1', project_id)
        net_id = self.net_man.q_conn.create_network(project_id, 'net1')
        self.net_man.create_networks(
            ctx,
            label='achtungbaby',
            cidr="9.9.9.0/24",
            multi_host=False,
            num_networks=1,
            network_size=256,
            cidr_v6=None,
            gateway="9.9.9.1",
            gateway_v6=None,
            bridge=None,
            bridge_interface=None,
            dns1="8.8.8.8",
            project_id=project_id,
            priority=9,
            uuid=net_id)
        net = db.network_get_by_uuid(ctx.elevated(), net_id)
        self.assertTrue(net is not None)
        self.assertEquals(net['uuid'], net_id)

    def test_create_net_external_uuid_and_host_is_set(self):
        """Make sure network['host'] is set when creating a network via the
           network manager"""
        project_id = "foo_project"
        ctx = context.RequestContext('user1', project_id)
        net_id = self.net_man.q_conn.create_network(project_id, 'net2')
        self.net_man.create_networks(
            ctx, label='achtungbaby2', cidr="9.9.8.0/24", multi_host=False,
            num_networks=1, network_size=256, cidr_v6=None,
            gateway="9.9.8.1", gateway_v6=None, bridge=None,
            bridge_interface=None, dns1="8.8.8.8", project_id=project_id,
            priority=8, uuid=net_id)
        net = db.network_get_by_uuid(ctx.elevated(), net_id)
        self.assertTrue(net is not None)
        self.assertEquals(net['uuid'], net_id)
        # Idiom fix: identity comparison with None, not !=.
        self.assertTrue(net['host'] is not None)
class QuantumNovaMACGenerationTestCase(QuantumNovaTestCase):
    """Verifies MAC assignment via both local and Melange generators."""

    def test_local_mac_address_creation(self):
        """With Melange disabled, utils.generate_mac_address supplies MACs."""
        self.flags(use_melange_mac_generation=False)
        fake_mac = "ab:cd:ef:ab:cd:ef"
        self.stubs.Set(utils, "generate_mac_address",
                       lambda: fake_mac)
        project_id = "fake_project1"
        ctx = context.RequestContext('user1', project_id)
        self._create_network(networks[0])

        all_valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
            ctx, project_id)
        requested_networks = [(n[0], None) for n in all_valid_networks]

        instance_ref = db.api.instance_create(ctx,
                                              {"project_id": project_id})
        nw_info = self.net_man.allocate_for_instance(ctx,
                        instance_id=instance_ref['id'], host="",
                        rxtx_factor=3,
                        project_id=project_id,
                        requested_networks=requested_networks)
        self.assertEqual(nw_info[0]['address'], fake_mac)

    def test_melange_mac_address_creation(self):
        """With Melange enabled, MelangeConnection.create_vif supplies MACs."""
        self.flags(use_melange_mac_generation=True)
        fake_mac = "ab:cd:ef:ab:cd:ef"
        self.stubs.Set(melange_connection.MelangeConnection, "create_vif",
                       lambda w, x, y, z: fake_mac)
        project_id = "fake_project1"
        ctx = context.RequestContext('user1', project_id)
        self._create_network(networks[0])

        all_valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
            ctx, project_id)
        requested_networks = [(n[0], None) for n in all_valid_networks]

        instance_ref = db.api.instance_create(ctx,
                                              {"project_id": project_id})
        nw_info = self.net_man.allocate_for_instance(ctx,
                        instance_id=instance_ref['id'], host="",
                        rxtx_factor=3,
                        project_id=project_id,
                        requested_networks=requested_networks)
        self.assertEqual(nw_info[0]['address'], fake_mac)
class QuantumNovaPortSecurityTestCase(QuantumNovaTestCase):
    """Verifies allowed_address_pairs handling for the port-security flag.

    BUG FIX: test method names corrected from the "securty" typo so the
    intent is clear and the names match the feature flag being exercised.
    """

    def test_port_security(self):
        """With port security on, the VIF MAC appears in allowed_address_pairs."""
        self.flags(use_melange_mac_generation=True)
        self.flags(quantum_use_port_security=True)
        fake_mac = "ab:cd:ef:ab:cd:ef"
        self.stubs.Set(melange_connection.MelangeConnection, "create_vif",
                       lambda w, x, y, z: fake_mac)
        project_id = "fake_project1"
        ctx = context.RequestContext('user1', project_id)
        self._create_network(networks[0])

        all_valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
            ctx, project_id)
        requested_networks = [(n[0], None) for n in all_valid_networks]

        instance_ref = db.api.instance_create(ctx,
                                              {"project_id": project_id})
        oldfunc = self.net_man.q_conn.create_and_attach_port

        # Make sure we get the appropriate mac set in allowed_address_pairs
        # if port security is enabled.
        def _instrumented_create_and_attach_port(tenant_id, net_id,
                                                 interface_id, **kwargs):
            self.assertTrue('allowed_address_pairs' in kwargs.keys())
            pairs = kwargs['allowed_address_pairs']
            self.assertTrue(pairs[0]['mac_address'] == fake_mac)
            # Restore the real implementation before delegating to it.
            self.net_man.q_conn.create_and_attach_port = oldfunc
            return oldfunc(tenant_id, net_id, interface_id, **kwargs)
        _port_attach = _instrumented_create_and_attach_port
        self.net_man.q_conn.create_and_attach_port = _port_attach
        nw_info = self.net_man.allocate_for_instance(ctx,
                        instance_id=instance_ref['id'], host="",
                        rxtx_factor=3,
                        project_id=project_id,
                        requested_networks=requested_networks)
        self.assertEqual(nw_info[0]['address'], fake_mac)

    def test_port_security_negative(self):
        """With port security off, allowed_address_pairs is passed but empty."""
        self.flags(use_melange_mac_generation=True)
        self.flags(quantum_use_port_security=False)
        fake_mac = "ab:cd:ef:ab:cd:ef"
        self.stubs.Set(melange_connection.MelangeConnection, "create_vif",
                       lambda w, x, y, z: fake_mac)
        project_id = "fake_project1"
        ctx = context.RequestContext('user1', project_id)
        self._create_network(networks[0])

        all_valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
            ctx, project_id)
        requested_networks = [(n[0], None) for n in all_valid_networks]

        instance_ref = db.api.instance_create(ctx,
                                              {"project_id": project_id})
        oldfunc = self.net_man.q_conn.create_and_attach_port

        # Make sure no pairs are passed in if port security is turned off
        def _instrumented_create_and_attach_port(tenant_id, net_id,
                                                 interface_id, **kwargs):
            self.assertTrue('allowed_address_pairs' in kwargs.keys())
            pairs = kwargs['allowed_address_pairs']
            self.assertTrue(len(pairs) == 0)
            self.net_man.q_conn.create_and_attach_port = oldfunc
            return oldfunc(tenant_id, net_id, interface_id, **kwargs)
        _port_attach = _instrumented_create_and_attach_port
        self.net_man.q_conn.create_and_attach_port = _port_attach
        nw_info = self.net_man.allocate_for_instance(ctx,
                        instance_id=instance_ref['id'], host="",
                        rxtx_factor=3,
                        project_id=project_id,
                        requested_networks=requested_networks)
        self.assertEqual(nw_info[0]['address'], fake_mac)
class QuantumMelangeTestCase(test.TestCase):
    """Tests QuantumManager behaviors that route through the IPAM layer."""

    def setUp(self):
        super(QuantumMelangeTestCase, self).setUp()

        fc = fake_client.FakeClient(LOG)
        qc = quantum_connection.QuantumClientConnection(client=fc)
        self.net_man = quantum_manager.QuantumManager(
            ipam_lib="nova.network.quantum.nova_ipam_lib",
            q_conn=qc)

    def test_get_instance_uuids_by_ip_filter(self):
        """An 'ip' filter resolves, via IPAM, to matching instance uuids."""
        fake_context = context.RequestContext('user', 'project')
        address = '1.2.3.4'
        filters = {'ip': address}
        # IPAM maps the address to instance ids; the DB maps ids to uuids.
        self.net_man.ipam = self.mox.CreateMockAnything()
        self.net_man.ipam.get_instance_ids_by_ip_address(fake_context,
            address).AndReturn(['instance_id'])
        instance = self.mox.CreateMockAnything()
        instance.uuid = 'instance_uuid'
        self.mox.StubOutWithMock(db, 'instance_get')
        db.instance_get(fake_context, 'instance_id').AndReturn(instance)
        self.mox.ReplayAll()
        uuids = self.net_man.get_instance_uuids_by_ip_filter(fake_context,
                                                             filters)
        self.assertEquals(uuids, [{'instance_uuid': 'instance_uuid'}])
| 41.28858 | 79 | 0.590768 |
4609f9fa98117f60d60b540f91bc187a3d28419b | 28,739 | py | Python | gridpath/project/operations/fuel_burn.py | souissim/gridpath | 4eeca2be24b485edc56026e38cfda83f4a6b27ea | [
"Apache-2.0"
] | null | null | null | gridpath/project/operations/fuel_burn.py | souissim/gridpath | 4eeca2be24b485edc56026e38cfda83f4a6b27ea | [
"Apache-2.0"
] | null | null | null | gridpath/project/operations/fuel_burn.py | souissim/gridpath | 4eeca2be24b485edc56026e38cfda83f4a6b27ea | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module keeps track of fuel burn for each project. Fuel burn consists of
both operational fuel burn for power production, and startup fuel burn (if
applicable).
"""
import csv
import os.path
from pyomo.environ import (
Set,
Param,
Var,
Expression,
Constraint,
NonNegativeReals,
PercentFraction,
value,
)
from db.common_functions import spin_on_database_lock
from gridpath.auxiliary.db_interface import setup_results_import
from gridpath.auxiliary.auxiliary import get_required_subtype_modules_from_projects_file
from gridpath.project.operations.common_functions import load_operational_type_modules
import gridpath.project.operations.operational_types as op_type_init
def add_model_components(m, d, scenario_directory, subproblem, stage):
"""
The following Pyomo model components are defined in this module:
+-------------------------------------------------------------------------+
| Sets |
+=========================================================================+
| | :code:`FUEL_PRJ_OPR_TMPS` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| The two-dimensional set of projects for which a fuel is specified and |
| their operational timepoints. |
+-------------------------------------------------------------------------+
| | :code:`HR_CURVE_PRJS_OPR_TMPS_SGMS` |
| |
| The three-dimensional set of projects for which a heat rate curve is |
| specified along with the heat rate curve segments and the project |
| operational timepoints. |
+-------------------------------------------------------------------------+
| | :code:`HR_CURVE_PRJS_OPR_TMPS` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| The two-dimensional set of projects for which a heat rate curve is |
| specified along with their operational timepoints. |
+-------------------------------------------------------------------------+
| | :code:`STARTUP_FUEL_PRJ_OPR_TMPS` |
| | *Within*: :code:`FUEL_PRJ_OPR_TMPS` |
| |
| The two-dimensional set of projects for which startup fuel burn is |
| specified and their operational timepoints. |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Variables |
+=========================================================================+
| | :code:`HR_Curve_Prj_Fuel_Burn` |
| | *Defined over*: :code:`HR_CURVE_PRJS_OPR_TMPS` |
| | *Within*: :code:`NonNegativeReals` |
| |
| Fuel burn in each operational timepoint of projects with a heat rate |
| curve. |
+-------------------------------------------------------------------------+
| | :code:`Project_Opr_Fuel_Burn_by_Fuel` |
| | *Defined over*: :code:`FUEL_PRJS_FUEL_OPR_TMPS` |
| | *Within*: :code:`NonNegativeReals` |
| |
| Fuel burn by fuel in each operational timepoint of each fuel project. |
+-------------------------------------------------------------------------+
| | :code:`Project_Startup_Fuel_Burn_by_Fuel` |
| | *Defined over*: :code:`FUEL_PRJS_FUEL_OPR_TMPS` |
| | *Within*: :code:`NonNegativeReals` |
| |
| Startup fuel burn by fuel in each operational timepoint of each startup |
| fuel project. |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Expressions |
+=========================================================================+
| | :code:`Operations_Fuel_Burn_MMBtu` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| This expression describes each project's operational fuel consumption |
| (in MMBtu) in all operational timepoints. We obtain it by calling the |
| *fuel_burn_rule* method in the relevant *operational_type*. This does |
| not include fuel burn for startups, which has a separate expression. |
+-------------------------------------------------------------------------+
| | :code:`Startup_Fuel_Burn_MMBtu` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| This expression describes each project's startup fuel consumption |
| (in MMBtu) in all operational timepoints. We obtain it by calling the |
| *startup_fuel_burn_rule* method in the relevant *operational_type*. |
| Only operational types with commitment variables can have startup fuel |
| burn (for others it will always return zero). |
+-------------------------------------------------------------------------+
| | :code:`Total_Fuel_Burn_by_Fuel_MMBtu` |
| | *Within*: :code:`PRJ_OPR_TMPS` |
| |
| Total fuel burn is the sum of operational fuel burn for power |
| production and startup fuel burn (by fuel). |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Constraints |
+=========================================================================+
| | :code:`HR_Curve_Prj_Fuel_Burn_Constraint` |
| | *Defined over*: :code:`HR_CURVE_PRJS_OPR_TMPS_SGMS` |
| |
| Determines fuel burn from the project in each timepoint based on its |
| heat rate curve. |
+-------------------------------------------------------------------------+
| | :code:`Fuel_Blending_Opr_Fuel_Burn_Constraint` |
| | *Defined over*: :code:`FUEL_PRJ_OPR_TMPS` |
| |
| The sum of operations fuel burn across all fuels should equal the total |
| operations fuel burn. |
+-------------------------------------------------------------------------+
| | :code:`Fuel_Blending_Startup_Fuel_Burn_Constraint` |
| | *Defined over*: :code:`STARTUP_FUEL_PRJ_OPR_TMPS` |
| |
| The sum of startup fuel burn across all fuels should equal the total |
| operations fuel burn. |
+-------------------------------------------------------------------------+
"""
# Dynamic Inputs
###########################################################################
required_operational_modules = get_required_subtype_modules_from_projects_file(
scenario_directory=scenario_directory,
subproblem=subproblem,
stage=stage,
which_type="operational_type",
)
imported_operational_modules = load_operational_type_modules(
required_operational_modules
)
# Sets
###########################################################################
m.FUEL_PRJ_OPR_TMPS = Set(
dimen=2,
within=m.PRJ_OPR_TMPS,
initialize=lambda mod: [
(p, tmp) for (p, tmp) in mod.PRJ_OPR_TMPS if p in mod.FUEL_PRJS
],
)
m.FUEL_PRJS_FUEL_OPR_TMPS = Set(
dimen=3,
initialize=lambda mod: set(
(g, f, tmp)
for (g, tmp) in mod.FUEL_PRJ_OPR_TMPS
for _g, f in mod.FUEL_PRJ_FUELS
if g == _g
),
)
m.FUEL_PRJS_FUEL_GROUP_OPR_TMPS = Set(
dimen=3,
initialize=lambda mod: set(
(g, fg, tmp)
for (g, tmp) in mod.FUEL_PRJ_OPR_TMPS
for _g, fg, f in mod.FUEL_PRJ_FUELS_FUEL_GROUP
if g == _g
),
)
m.HR_CURVE_PRJS_OPR_TMPS_SGMS = Set(
dimen=3,
initialize=lambda mod: set(
(g, tmp, s)
for (g, tmp) in mod.PRJ_OPR_TMPS
for _g, p, s in mod.HR_CURVE_PRJS_PRDS_SGMS
if g == _g and mod.period[tmp] == p
),
)
m.HR_CURVE_PRJS_OPR_TMPS = Set(
dimen=2,
within=m.FUEL_PRJ_OPR_TMPS,
initialize=lambda mod: set(
(g, tmp) for (g, tmp, s) in mod.HR_CURVE_PRJS_OPR_TMPS_SGMS
),
)
m.STARTUP_FUEL_PRJ_OPR_TMPS = Set(
dimen=2,
within=m.FUEL_PRJ_OPR_TMPS,
initialize=lambda mod: [
(p, tmp) for (p, tmp) in mod.FUEL_PRJ_OPR_TMPS if p in mod.STARTUP_FUEL_PRJS
],
)
m.STARTUP_FUEL_PRJS_FUEL_OPR_TMPS = Set(
dimen=3,
initialize=lambda mod: set(
(g, f, tmp)
for (g, tmp) in mod.STARTUP_FUEL_PRJ_OPR_TMPS
for _g, f in mod.FUEL_PRJ_FUELS
if g == _g
),
)
# Params
m.min_fraction_in_fuel_blend = Param(
m.FUEL_PRJ_FUELS, within=PercentFraction, default=0
)
m.max_fraction_in_fuel_blend = Param(
m.FUEL_PRJ_FUELS, within=PercentFraction, default=1
)
# Variables
###########################################################################
m.HR_Curve_Prj_Fuel_Burn = Var(m.HR_CURVE_PRJS_OPR_TMPS, within=NonNegativeReals)
m.Project_Opr_Fuel_Burn_by_Fuel = Var(
m.FUEL_PRJS_FUEL_OPR_TMPS, within=NonNegativeReals
)
m.Project_Startup_Fuel_Burn_by_Fuel = Var(
m.STARTUP_FUEL_PRJS_FUEL_OPR_TMPS, within=NonNegativeReals
)
m.Project_Fuel_Contribution_by_Fuel = Var(
m.FUEL_PRJS_FUEL_OPR_TMPS, within=NonNegativeReals
)
# Expressions
###########################################################################
    def fuel_burn_rule(mod, prj, tmp):
        """
        Operational fuel burn (for power production) of each fuel project in
        each of its operational timepoints, based on the project's
        operational type.

        The operational-type module's own ``fuel_burn_rule`` is used if it
        defines one; otherwise we fall back to the default in
        ``op_type_init``. For projects with a heat rate curve, fuel burn is
        captured in the ``HR_Curve_Prj_Fuel_Burn`` variable (set by a
        separate constraint), so that variable is added on top here.
        """
        op_type = mod.operational_type[prj]
        if hasattr(imported_operational_modules[op_type], "fuel_burn_rule"):
            fuel_burn_simple = imported_operational_modules[op_type].fuel_burn_rule(
                mod, prj, tmp
            )
        else:
            fuel_burn_simple = op_type_init.fuel_burn_rule(mod, prj, tmp)
        return fuel_burn_simple + (
            mod.HR_Curve_Prj_Fuel_Burn[prj, tmp] if prj in mod.HR_CURVE_PRJS else 0
        )

    m.Operations_Fuel_Burn_MMBtu = Expression(m.FUEL_PRJ_OPR_TMPS, rule=fuel_burn_rule)
    def startup_fuel_burn_rule(mod, prj, tmp):
        """
        Startup fuel burn is defined for some operational types while
        they are zero for others. Get the appropriate expression for each
        generator based on its operational type.

        Same dispatch pattern as ``fuel_burn_rule``: prefer the operational
        type's own ``startup_fuel_burn_rule``, fall back to the default in
        ``op_type_init`` (which returns zero for types without commitment
        variables).
        """
        op_type = mod.operational_type[prj]
        if hasattr(imported_operational_modules[op_type], "startup_fuel_burn_rule"):
            return imported_operational_modules[op_type].startup_fuel_burn_rule(
                mod, prj, tmp
            )
        else:
            return op_type_init.startup_fuel_burn_rule(mod, prj, tmp)

    m.Startup_Fuel_Burn_MMBtu = Expression(
        m.STARTUP_FUEL_PRJ_OPR_TMPS, rule=startup_fuel_burn_rule
    )
    def total_fuel_burn_by_fuel_rule(mod, g, f, tmp):
        """
        *Expression Name*: :code:`Total_Fuel_Burn_by_Fuel_MMBtu`
        *Defined Over*: :code:`FUEL_PRJS_FUEL_OPR_TMPS`

        Total fuel burn is the sum of operational fuel burn (power production)
        and startup fuel burn, per fuel. Startup fuel burn is only defined for
        startup fuel projects and contributes zero otherwise.
        """
        return mod.Project_Opr_Fuel_Burn_by_Fuel[g, f, tmp] + (
            mod.Project_Startup_Fuel_Burn_by_Fuel[g, f, tmp]
            if g in mod.STARTUP_FUEL_PRJS
            else 0
        )

    m.Total_Fuel_Burn_by_Fuel_MMBtu = Expression(
        m.FUEL_PRJS_FUEL_OPR_TMPS, rule=total_fuel_burn_by_fuel_rule
    )
    def fuel_contribution_rule(mod, prj, tmp):
        """
        Fuel contribution from each fuel project based on operational type.

        Same dispatch pattern as ``fuel_burn_rule``: prefer the operational
        type's own ``fuel_contribution_rule``, fall back to the default in
        ``op_type_init``.
        """
        op_type = mod.operational_type[prj]
        if hasattr(imported_operational_modules[op_type], "fuel_contribution_rule"):
            fuel_contribution = imported_operational_modules[
                op_type
            ].fuel_contribution_rule(mod, prj, tmp)
        else:
            fuel_contribution = op_type_init.fuel_contribution_rule(mod, prj, tmp)
        return fuel_contribution

    m.Fuel_Contribution_FuelUnit = Expression(
        m.FUEL_PRJ_OPR_TMPS, rule=fuel_contribution_rule
    )
    # Fuel groups by project; we put limits on total fuel burn for grouped fuels
    def opr_fuel_burn_by_fuel_group_rule(mod, g, fg, tmp):
        """
        *Expression Name*: :code:`Opr_Fuel_Burn_by_Fuel_Group_MMBtu`
        *Defined Over*: :code:`FUEL_PRJS_FUEL_GROUP_OPR_TMPS`

        Operating fuel burn per fuel group is the sum of the project's
        per-fuel operating fuel burn across all fuels that belong to the
        group.
        """
        return sum(
            mod.Project_Opr_Fuel_Burn_by_Fuel[g, f, tmp]
            for (_g, _fg, f) in mod.FUEL_PRJ_FUELS_FUEL_GROUP
            if f in mod.FUELS_BY_FUEL_GROUP[fg] and fg == _fg and g == _g
        )

    m.Opr_Fuel_Burn_by_Fuel_Group_MMBtu = Expression(
        m.FUEL_PRJS_FUEL_GROUP_OPR_TMPS, rule=opr_fuel_burn_by_fuel_group_rule
    )
    # Constraints
    ###########################################################################
    def fuel_burn_by_ll_constraint_rule(mod, prj, tmp, s):
        """
        **Constraint Name**: HR_Curve_Prj_Fuel_Burn_Constraint
        **Enforced Over**: HR_CURVE_PRJS_OPR_TMPS_SGMS

        Fuel burn is set by piecewise linear representation of input/output
        curve: one >= constraint per heat rate curve segment ``s``, so the
        binding segment determines the fuel burn at the current loading level.

        Note: we assume that when projects are de-rated for availability, the
        input/output curve is de-rated by the same amount. The implicit
        assumption is that when a generator is de-rated, some of its units
        are out rather than it being forced to run below minimum stable level
        at very inefficient operating points.
        """
        gen_op_type = mod.operational_type[prj]
        if hasattr(imported_operational_modules[gen_op_type], "fuel_burn_by_ll_rule"):
            fuel_burn_by_ll = imported_operational_modules[
                gen_op_type
            ].fuel_burn_by_ll_rule(mod, prj, tmp, s)
        else:
            fuel_burn_by_ll = op_type_init.fuel_burn_by_ll_rule(mod, prj, tmp, s)
        return mod.HR_Curve_Prj_Fuel_Burn[prj, tmp] >= fuel_burn_by_ll

    m.HR_Curve_Prj_Fuel_Burn_Constraint = Constraint(
        m.HR_CURVE_PRJS_OPR_TMPS_SGMS, rule=fuel_burn_by_ll_constraint_rule
    )
def blend_fuel_operations_rule(mod, prj, tmp):
"""
The sum of operations fuel burn across all fuels should equal the total
operations fuel burn.
"""
return (
sum(
mod.Project_Opr_Fuel_Burn_by_Fuel[prj, f, tmp]
for f in mod.FUELS_BY_PRJ[prj]
)
== mod.Operations_Fuel_Burn_MMBtu[prj, tmp]
)
m.Fuel_Blending_Opr_Fuel_Burn_Constraint = Constraint(
m.FUEL_PRJ_OPR_TMPS, rule=blend_fuel_operations_rule
)
def min_fraction_of_fuel_blend_opr_rule(mod, prj, f, tmp):
"""
In each timepoint, enforce a minimum on the proportion in the blend of each
fuel.
"""
return (
mod.Project_Opr_Fuel_Burn_by_Fuel[prj, f, tmp]
>= mod.min_fraction_in_fuel_blend[prj, f]
* mod.Operations_Fuel_Burn_MMBtu[prj, tmp]
)
m.Min_Fuel_Fraction_of_Blend_Opr_Constraint = Constraint(
m.FUEL_PRJS_FUEL_OPR_TMPS, rule=min_fraction_of_fuel_blend_opr_rule
)
def max_fraction_of_fuel_blend_opr_rule(mod, prj, f, tmp):
"""
In each timepoint, enforce a maximum on the proportion in the blend of each
fuel.
"""
return (
mod.Project_Opr_Fuel_Burn_by_Fuel[prj, f, tmp]
<= mod.max_fraction_in_fuel_blend[prj, f]
* mod.Operations_Fuel_Burn_MMBtu[prj, tmp]
)
m.Max_Fuel_Fraction_of_Blend_Opr_Constraint = Constraint(
m.FUEL_PRJS_FUEL_OPR_TMPS, rule=max_fraction_of_fuel_blend_opr_rule
)
def blend_fuel_startup_rule(mod, prj, tmp):
"""
The sum of startup fuel burn across all fuels should equal the total
operations fuel burn.
"""
return (
sum(
mod.Project_Startup_Fuel_Burn_by_Fuel[prj, f, tmp]
for f in mod.FUELS_BY_PRJ[prj]
)
== mod.Startup_Fuel_Burn_MMBtu[prj, tmp]
)
m.Fuel_Blending_Startup_Fuel_Burn_Constraint = Constraint(
m.STARTUP_FUEL_PRJ_OPR_TMPS, rule=blend_fuel_startup_rule
)
def min_fraction_of_fuel_blend_startup_rule(mod, prj, f, tmp):
"""
In each timepoint, enforce a minimum on the proportion in the blend of each
fuel.
"""
return (
mod.Project_Startup_Fuel_Burn_by_Fuel[prj, f, tmp]
>= mod.min_fraction_in_fuel_blend[prj, f]
* mod.Startup_Fuel_Burn_MMBtu[prj, tmp]
)
m.Min_Fuel_Fraction_of_Blend_Startup_Constraint = Constraint(
m.STARTUP_FUEL_PRJS_FUEL_OPR_TMPS, rule=min_fraction_of_fuel_blend_startup_rule
)
def max_fraction_of_fuel_blend_startup_rule(mod, prj, f, tmp):
"""
In each timepoint, enforce a maximum on the proportion in the blend of each
fuel.
"""
return (
mod.Project_Startup_Fuel_Burn_by_Fuel[prj, f, tmp]
<= mod.max_fraction_in_fuel_blend[prj, f]
* mod.Startup_Fuel_Burn_MMBtu[prj, tmp]
)
m.Max_Fuel_Fraction_of_Blend_Startup_Constraint = Constraint(
m.STARTUP_FUEL_PRJS_FUEL_OPR_TMPS, rule=max_fraction_of_fuel_blend_startup_rule
)
# Constrain blending for fuel contributions
def blend_fuel_contributions_rule(mod, prj, tmp):
"""
The sum of operations fuel contributions across all fuels should equal the total
operations fuel contribution.
"""
return (
sum(
mod.Project_Fuel_Contribution_by_Fuel[prj, f, tmp]
for f in mod.FUELS_BY_PRJ[prj]
)
== mod.Fuel_Contribution_FuelUnit[prj, tmp]
)
m.Fuel_Blending_Opr_Fuel_Contribution_Constraint = Constraint(
m.FUEL_PRJ_OPR_TMPS, rule=blend_fuel_contributions_rule
)
def min_fraction_of_fuel_blend_contribution_rule(mod, prj, f, tmp):
"""
In each timepoint, enforce a minimum on the proportion in the blend of each
fuel.
"""
return (
mod.Project_Fuel_Contribution_by_Fuel[prj, f, tmp]
>= mod.min_fraction_in_fuel_blend[prj, f]
* mod.Fuel_Contribution_FuelUnit[prj, tmp]
)
m.Min_Fuel_Fraction_of_Blend_Contribution_Constraint = Constraint(
m.FUEL_PRJS_FUEL_OPR_TMPS, rule=min_fraction_of_fuel_blend_contribution_rule
)
def max_fraction_of_fuel_blend_contribution_rule(mod, prj, f, tmp):
"""
In each timepoint, enforce a maximum on the proportion in the blend of each
fuel.
"""
return (
mod.Project_Fuel_Contribution_by_Fuel[prj, f, tmp]
<= mod.max_fraction_in_fuel_blend[prj, f]
* mod.Fuel_Contribution_FuelUnit[prj, tmp]
)
m.Max_Fuel_Fraction_of_Blend_Contribution_Constraint = Constraint(
m.FUEL_PRJS_FUEL_OPR_TMPS, rule=max_fraction_of_fuel_blend_contribution_rule
)
# Input-Output
###############################################################################
def load_model_data(m, d, data_portal, scenario_directory, subproblem, stage):
    """
    Load the optional ``project_fuels.tab`` input file, which provides the
    min/max fraction of each fuel in a project's fuel blend. If the file is
    absent, the parameter defaults (0 and 1) apply.

    :param m: the Pyomo model
    :param d: dynamic components
    :param data_portal: the Pyomo DataPortal to load into
    :param scenario_directory: base directory of the scenario
    :param subproblem: subproblem ID (used as a subdirectory name)
    :param stage: stage ID (used as a subdirectory name)
    :return: nothing
    """
    fuels_tab = os.path.join(
        scenario_directory, str(subproblem), str(stage), "inputs", "project_fuels.tab"
    )
    # The file is optional; skip loading entirely if it isn't there
    if not os.path.exists(fuels_tab):
        return
    data_portal.load(
        filename=fuels_tab,
        index=m.FUEL_PRJ_FUELS,
        param=(m.min_fraction_in_fuel_blend, m.max_fraction_in_fuel_blend),
    )
def export_results(scenario_directory, subproblem, stage, m, d):
    """
    Export fuel burn results to ``results/fuel_burn.csv``.

    One row is written per (project, fuel, operational timepoint), with
    operations, startup, and total fuel burn in MMBtu, the fuel contribution
    in fuel units, and the resulting net fuel burn.

    :param scenario_directory: base directory of the scenario
    :param subproblem: subproblem ID (used as a subdirectory name)
    :param stage: stage ID (used as a subdirectory name)
    :param m:
    The Pyomo abstract model
    :param d:
    Dynamic components
    :return:
    Nothing
    """
    with open(
        os.path.join(
            scenario_directory, str(subproblem), str(stage), "results", "fuel_burn.csv"
        ),
        "w",
        newline="",
    ) as results_f:
        writer = csv.writer(results_f)
        # Header row; import_results_into_database() relies on this exact
        # column order when reading the file back in.
        writer.writerow(
            [
                "project",
                "period",
                "horizon",
                "timepoint",
                "timepoint_weight",
                "number_of_hours_in_timepoint",
                "load_zone",
                "technology",
                "fuel",
                "fuel_burn_operations_mmbtu",
                "fuel_burn_startup_mmbtu",
                "total_fuel_burn_mmbtu",
                "fuel_contribution_fuelunit",
                "net_fuel_burn_fuelunit",
            ]
        )
        for (p, f, tmp) in m.FUEL_PRJS_FUEL_OPR_TMPS:
            writer.writerow(
                [
                    p,
                    m.period[tmp],
                    m.horizon[tmp, m.balancing_type_project[p]],
                    tmp,
                    m.tmp_weight[tmp],
                    m.hrs_in_tmp[tmp],
                    m.load_zone[p],
                    m.technology[p],
                    f,
                    value(m.Project_Opr_Fuel_Burn_by_Fuel[p, f, tmp]),
                    # Startup fuel burn is only defined for startup fuel
                    # projects; write an empty cell otherwise
                    value(m.Project_Startup_Fuel_Burn_by_Fuel[p, f, tmp])
                    if p in m.STARTUP_FUEL_PRJS
                    else None,
                    value(m.Total_Fuel_Burn_by_Fuel_MMBtu[p, f, tmp]),
                    value(m.Project_Fuel_Contribution_by_Fuel[p, f, tmp]),
                    # Net fuel burn = total burn minus fuel contribution
                    (
                        value(m.Total_Fuel_Burn_by_Fuel_MMBtu[p, f, tmp])
                        - value(m.Project_Fuel_Contribution_by_Fuel[p, f, tmp])
                    ),
                ]
            )
# Database
###############################################################################
def import_results_into_database(
    scenario_id, subproblem, stage, c, db, results_directory, quiet
):
    """
    Import the ``fuel_burn.csv`` results file into the
    ``results_project_fuel_burn`` table: prior results for this
    scenario/subproblem/stage are deleted, rows are staged in a temporary
    table, and then inserted into the permanent table in sorted order.

    Fixes relative to the previous version: rows are unpacked by name in the
    CSV's column order instead of fragile positional ``row[0]``..``row[13]``
    indexing, and the locals are named for their actual units (MMBtu /
    fuel units), not "tons".

    :param scenario_id: scenario being imported
    :param subproblem: subproblem ID
    :param stage: stage ID
    :param c: database cursor
    :param db: database connection
    :param results_directory: directory containing fuel_burn.csv
    :param quiet: suppress progress printing if True
    :return: nothing
    """
    # Fuel burned by project and timepoint
    if not quiet:
        print("project fuel burn")
    # Delete prior results and create temporary import table for ordering
    setup_results_import(
        conn=db,
        cursor=c,
        table="results_project_fuel_burn",
        scenario_id=scenario_id,
        subproblem=subproblem,
        stage=stage,
    )
    # Load results into the temporary table
    results = []
    with open(os.path.join(results_directory, "fuel_burn.csv"), "r") as fuel_burn_file:
        reader = csv.reader(fuel_burn_file)
        next(reader)  # skip header
        for row in reader:
            # Unpack the 14 columns written by export_results(), in order
            (
                project,
                period,
                horizon,
                timepoint,
                timepoint_weight,
                number_of_hours_in_timepoint,
                load_zone,
                technology,
                fuel,
                operations_fuel_burn_mmbtu,
                startup_fuel_burn_mmbtu,
                total_fuel_burn_mmbtu,
                fuel_contribution_fuelunit,
                net_fuel_burn_fuelunit,
            ) = row[:14]
            # Column order must match the INSERT statements below
            results.append(
                (
                    scenario_id,
                    project,
                    period,
                    subproblem,
                    stage,
                    horizon,
                    timepoint,
                    timepoint_weight,
                    number_of_hours_in_timepoint,
                    load_zone,
                    technology,
                    fuel,
                    operations_fuel_burn_mmbtu,
                    startup_fuel_burn_mmbtu,
                    total_fuel_burn_mmbtu,
                    fuel_contribution_fuelunit,
                    net_fuel_burn_fuelunit,
                )
            )
    insert_temp_sql = """
        INSERT INTO
        temp_results_project_fuel_burn{}
        (scenario_id, project, period, subproblem_id, stage_id,
        horizon, timepoint, timepoint_weight,
        number_of_hours_in_timepoint,
        load_zone, technology, fuel, operations_fuel_burn_mmbtu,
        startup_fuel_burn_mmbtu, total_fuel_burn_mmbtu, fuel_contribution_fuelunit,
        net_fuel_burn_fuelunit)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
        """.format(
        scenario_id
    )
    spin_on_database_lock(conn=db, cursor=c, sql=insert_temp_sql, data=results)
    # Insert sorted results into permanent results table
    insert_sql = """
        INSERT INTO results_project_fuel_burn
        (scenario_id, project, period, subproblem_id, stage_id,
        horizon, timepoint, timepoint_weight, number_of_hours_in_timepoint,
        load_zone, technology, fuel, operations_fuel_burn_mmbtu,
        startup_fuel_burn_mmbtu, total_fuel_burn_mmbtu, fuel_contribution_fuelunit,
        net_fuel_burn_fuelunit)
        SELECT
        scenario_id, project, period, subproblem_id, stage_id,
        horizon, timepoint, timepoint_weight, number_of_hours_in_timepoint,
        load_zone, technology, fuel, operations_fuel_burn_mmbtu,
        startup_fuel_burn_mmbtu, total_fuel_burn_mmbtu, fuel_contribution_fuelunit,
        net_fuel_burn_fuelunit
        FROM temp_results_project_fuel_burn{}
        ORDER BY scenario_id, project, subproblem_id, stage_id, timepoint;
        """.format(
        scenario_id
    )
    spin_on_database_lock(conn=db, cursor=c, sql=insert_sql, data=(), many=False)
| 39.314637 | 91 | 0.520408 |
5bfaac9e8ceda51407a658066fddd665d23d943d | 2,689 | py | Python | quaternion/challenge/challenge.py | assert0/hackasat-qualifier-2021 | ffa17fc3c3f167c2a81fd3c12e43af9aacb2e95c | [
"MIT"
] | 39 | 2021-07-09T09:50:15.000Z | 2022-02-25T19:55:50.000Z | quaternion/challenge/challenge.py | assert0/hackasat-qualifier-2021 | ffa17fc3c3f167c2a81fd3c12e43af9aacb2e95c | [
"MIT"
] | null | null | null | quaternion/challenge/challenge.py | assert0/hackasat-qualifier-2021 | ffa17fc3c3f167c2a81fd3c12e43af9aacb2e95c | [
"MIT"
] | 6 | 2021-07-21T07:58:02.000Z | 2022-03-29T02:57:10.000Z | # Quaternion Challenge
import os, sys
from numpy import dot, cross, array
from numpy.linalg import norm
from time import sleep
from timeout import timeout, TimeoutError
time = int(os.getenv("TIMEOUT",90))
def render_intro(vec):
    """Print the challenge banner/ASCII art and the problem statement.

    :param vec: one-element list holding the spacecraft's J2000 pointing
        vector (a unit numpy array); only vec[0] is displayed.
    :return: None

    Fix: the banner misspelled "CHALLENGE" as "CHALLANGE" (same width, so
    the art alignment is unchanged). NOTE(review): the art spacing below is
    transcribed as found in the source, which appears to have had its
    whitespace collapsed — confirm alignment against the original asset.
    """
    art = [
        " QUATERNION ",
        " CHALLENGE ",
        " ",
        " z ",
        " | ",
        " __|____ ",
        " / | /| ",
        " ______ /______/ | ______ ",
        " | |==| | ====| |---y ",
        " |______| | / | / |______| ",
        " |_/____|/ ",
        " / ",
        " / /x ",
        " x z_ _ / ",
        " Satellite \ ",
        " Body \y ",
        " Frame J2000 ",
        " "
    ]
    # Challenge Intro: reveal the banner line by line for effect
    for row in art:
        print(row)
        sleep(0.05)
        sys.stdout.flush()
    print("A spacecraft is considered \"pointing\" in the direction of its z-axis or [0,0,1] vector in the \"satellite body frame.\"")
    print("In the J2000 frame, the same spacecraft is pointing at ",vec[0],".",sep='')
    print("Determine the spacecraft attitude quaternion.")
    return
def quaternion(u, v):
    """Return the unit quaternion [x, y, z, w] rotating unit vector u onto v.

    Uses the half-angle construction: the vector part is u x v and the
    scalar part is 1 + u.v, then the quaternion is normalized.

    Fix: the antiparallel case (u = -v) is degenerate for this construction
    (cross product and 1 + dot are both zero, so normalizing divided by
    zero and returned NaNs). It is now handled explicitly as a 180-degree
    rotation about an arbitrary axis orthogonal to u.
    """
    u = array(u, dtype=float)
    v = array(v, dtype=float)
    u = u / norm(u)
    v = v / norm(v)
    xyz = cross(u, v)
    w = 1.0 + dot(u, v)
    q = array([xyz[0], xyz[1], xyz[2], w])
    n = norm(q)
    if n < 1e-9:
        # u and v are (numerically) antiparallel: rotate pi about any axis
        # perpendicular to u; pick one via a cross product with a basis
        # vector that is not parallel to u.
        axis = cross(u, array([1.0, 0.0, 0.0]))
        if norm(axis) < 1e-9:
            axis = cross(u, array([0.0, 1.0, 0.0]))
        axis = axis / norm(axis)
        return array([axis[0], axis[1], axis[2], 0.0])
    return q / n
@timeout(time)  # presumably raises TimeoutError after TIMEOUT seconds -- see local timeout module
def challenge():
    """Run one round of the quaternion challenge over stdin/stdout.

    Computes the reference attitude quaternion for a fixed pointing vector,
    prompts the user for the four quaternion components, and compares them
    elementwise against the reference with a 0.001 absolute tolerance.

    :return: 1 on success, 0 on failure
    """
    # Spacecraft "pointing" direction in the body frame (its z-axis)
    satBodyRef = [0,0,1]
    # Target pointing direction in J2000, normalized before display
    satDirVec = [1,-7,0.3]
    satDirVec = satDirVec/norm(satDirVec)
    render_intro([satDirVec])
    # Reference answer: quaternion rotating the body z-axis onto the target
    qA = quaternion(satBodyRef, satDirVec)
    sys.stdout.flush()
    # Prompt for the four components of the user's quaternion
    print("Qx = ",end='')
    x = input()
    print("Qy = ",end='')
    y = input()
    print("Qz = ",end='')
    z = input()
    print("Qw = ",end='')
    w = input()
    print()
    qB = [float(x), float(y), float(z), float(w)]
    # Elementwise error; any component off by more than 0.001 fails
    qE = array(qB) - array(qA)
    for e in qE:
        if abs(e) > 0.001:
            return 0
    return 1
if __name__ == "__main__":
    # Challenge entry point: run one round; a timeout exits with status 1,
    # success prints the flag from the FLAG environment variable.
    try:
        success = challenge()
    except TimeoutError:
        sys.stdout.write("\nTimeout, Bye\n")
        sys.exit(1)
    if success:
        print("You got it! Here's your flag:")
        flag = os.getenv('FLAG')
        print(flag)
    else:
        print("That didn't work, try again!")
| 24.669725 | 134 | 0.401636 |
3155aa4c4fc7652c0cee1f3373c05b0d2e202cf9 | 9,756 | py | Python | tests/test_execute.py | adriangb/di | f277bb7189c8e8bde41170afb3181e6600b06be8 | [
"MIT"
] | 57 | 2021-09-28T00:48:08.000Z | 2022-03-16T16:50:39.000Z | tests/test_execute.py | adriangb/di | f277bb7189c8e8bde41170afb3181e6600b06be8 | [
"MIT"
] | 59 | 2021-09-25T00:06:22.000Z | 2022-03-31T15:49:36.000Z | tests/test_execute.py | adriangb/di | f277bb7189c8e8bde41170afb3181e6600b06be8 | [
"MIT"
] | 3 | 2021-12-31T10:03:03.000Z | 2021-12-31T16:07:54.000Z | import contextvars
import functools
import sys
import threading
import time
from contextlib import contextmanager
from typing import Any, AsyncGenerator, Generator, List
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
import anyio
import pytest
from di.container import Container, ContainerState, bind_by_type
from di.dependant import Dependant, Marker
from di.exceptions import IncompatibleDependencyError, UnknownScopeError
from di.executors import AsyncExecutor, ConcurrentAsyncExecutor, SyncExecutor
from di.typing import Annotated
# Dependency-graph fixture: callables v0..v5 whose Marker annotations form a
# small DAG (v5 -> {v0, v3, v4}, v4 -> v2 -> v1, v3 -> {v0, v1}). Each
# instance returns itself when called and stashes its resolved dependencies
# as attributes, so tests can assert on the wiring (e.g. shared sub-deps).
class vZero:
    def __call__(self) -> "vZero":
        return self


v0 = vZero()


class vOne:
    def __call__(self) -> "vOne":
        return self


v1 = vOne()


class vTwo:
    def __call__(self, one: Annotated[vOne, Marker(v1)]) -> "vTwo":
        self.one = one
        return self


v2 = vTwo()


class vThree:
    def __call__(
        self, zero: Annotated[vZero, Marker(v0)], one: Annotated[vOne, Marker(v1)]
    ) -> "vThree":
        self.zero = zero
        self.one = one
        return self


v3 = vThree()


class vFour:
    def __call__(self, two: Annotated[vTwo, Marker(v2)]) -> "vFour":
        self.two = two
        return self


v4 = vFour()


class vFive:
    def __call__(
        self,
        zero: Annotated[vZero, Marker(v0)],
        three: Annotated[vThree, Marker(v3)],
        four: Annotated[vFour, Marker(v4)],
    ) -> "vFive":
        self.zero = zero
        self.three = three
        self.four = four
        return self


v5 = vFive()
def test_execute():
    """Solve and execute the v5 dependency DAG synchronously and check that
    the shared vZero sub-dependency is cached (same instance in both places).
    """
    container = Container()
    with container.enter_scope(None) as state:
        res = container.execute_sync(
            container.solve(Dependant(v5), scopes=[None]),
            executor=SyncExecutor(),
            state=state,
        )
    # v0 is a dependency of both v5 directly and v5.three; caching should
    # have produced a single shared instance
    assert res.three.zero is res.zero
# Fixture matrix: the same trivial dependency (returns/yields 1) expressed as
# each supported callable flavor -- sync/async plain functions, sync/async
# generators, and classes with sync/async __call__.
def sync_callable_func() -> int:
    return 1


async def async_callable_func() -> int:
    return 1


def sync_gen_func() -> Generator[int, None, None]:
    yield 1


async def async_gen_func() -> AsyncGenerator[int, None]:
    yield 1


class SyncCallableCls:
    def __call__(self) -> int:
        return 1


class AsyncCallableCls:
    async def __call__(self) -> int:
        return 1
class Counter:
    """Thread-safe counter used as a rendezvous point between concurrently
    executing dependencies: ``acquire`` bumps the count while the lock is
    held."""

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._counter = 0

    @property
    def counter(self) -> int:
        """Current count (plain read; increments happen under the lock)."""
        return self._counter

    @contextmanager
    def acquire(self) -> Generator[None, None, None]:
        """Increment the counter under the lock, then yield with it held."""
        self._lock.acquire()
        try:
            self._counter += 1
            yield
        finally:
            self._lock.release()
# "Slow" fixture matrix: each flavor of dependency increments the shared
# Counter and then busy-waits until the counter reaches 2, i.e. until a
# *second* dependency has also started. If the two dependencies are not
# actually executed concurrently, the 0.5 s deadline trips and TimeoutError
# fails the test.
def sync_callable_func_slow(counter: Counter) -> None:
    start = time.time()
    with counter.acquire():
        while counter.counter < 2:
            if time.time() - start > 0.5:
                raise TimeoutError(
                    "Tasks did not execute concurrently"
                )  # pragma: no cover
            time.sleep(0.005)
    return


async def async_callable_func_slow(counter: Counter) -> None:
    start = time.time()
    with counter.acquire():
        while counter.counter < 2:
            if time.time() - start > 0.5:
                raise TimeoutError(
                    "Tasks did not execute concurrently"
                )  # pragma: no cover
            # yield to the event loop so the other branch can run
            await anyio.sleep(0.005)
    return


# Generator and class-based variants delegate to the two functions above
def sync_gen_func_slow(counter: Counter) -> Generator[None, None, None]:
    sync_callable_func_slow(counter)
    yield None


async def async_gen_func_slow(counter: Counter) -> AsyncGenerator[None, None]:
    await async_callable_func_slow(counter)
    yield None


class SyncCallableClsSlow:
    def __call__(self, counter: Counter) -> None:
        sync_callable_func_slow(counter)


class AsyncCallableClsSlow:
    async def __call__(self, counter: Counter) -> None:
        await async_callable_func_slow(counter)
# Cartesian product of all six "slow" dependency flavors for each of the two
# dependencies; sync flavors are marked sync_to_thread so they can overlap.
@pytest.mark.parametrize(
    "dep1,sync1",
    [
        (sync_callable_func_slow, True),
        (async_callable_func_slow, False),
        (sync_gen_func_slow, True),
        (async_gen_func_slow, False),
        (SyncCallableClsSlow(), True),
        (AsyncCallableClsSlow(), False),
    ],
    ids=[
        "sync_callable_func",
        "async_callable_func",
        "sync_gen_func",
        "async_gen_func",
        "SyncCallableCls",
        "AsyncCallableCls",
    ],
)
@pytest.mark.parametrize(
    "dep2,sync2",
    [
        (sync_callable_func_slow, True),
        (async_callable_func_slow, False),
        (sync_gen_func_slow, True),
        (async_gen_func_slow, False),
        (SyncCallableClsSlow(), True),
        (AsyncCallableClsSlow(), False),
    ],
    ids=[
        "sync_callable_func",
        "async_callable_func",
        "sync_gen_func",
        "async_gen_func",
        "SyncCallableCls",
        "AsyncCallableCls",
    ],
)
@pytest.mark.anyio
async def test_concurrency_async(dep1: Any, sync1: bool, dep2: Any, sync2: bool):
    """Sibling dependencies must run concurrently: each slow dep blocks until
    the shared Counter reaches 2, so the solve/execute only completes if both
    were started before either finished (otherwise they raise TimeoutError).
    """
    container = Container()
    counter = Counter()
    # All Counter-typed parameters resolve to this single counter instance
    container.bind(bind_by_type(Dependant(lambda: counter), Counter))
    # use_cache=False so both parameters are executed, not deduplicated
    async def collector(
        a: Annotated[None, Marker(dep1, use_cache=False, sync_to_thread=sync1)],
        b: Annotated[None, Marker(dep2, use_cache=False, sync_to_thread=sync2)],
    ):
        ...
    async with container.enter_scope(None) as state:
        await container.execute_async(
            container.solve(Dependant(collector), scopes=[None]),
            executor=ConcurrentAsyncExecutor(),
            state=state,
        )
@pytest.mark.anyio
async def test_concurrent_executions_do_not_use_cache_results():
    """If the same solved dependant is executed twice concurrently we should not
    overwrite the result of any sub-dependencies.

    Each concurrent execution runs in its own contextvars context with a
    distinct id; differing delays force the two branches to interleave, and
    dep2 then checks that neither the cache nor the internal results state
    leaked the other branch's values.
    """
    # Branch 1 runs immediately, branch 2 is delayed, forcing interleaving
    delays = {1: 0, 2: 0.01}
    ctx: contextvars.ContextVar[int] = contextvars.ContextVar("id")
    def get_id() -> int:
        return ctx.get()
    async def dep1(id: Annotated[int, Marker(get_id)]) -> int:
        await anyio.sleep(delays[id])
        return id
    async def dep2(
        id: Annotated[int, Marker(get_id)], one: Annotated[int, Marker(dep1)]
    ) -> None:
        # let the other branch run
        await anyio.sleep(max(delays.values()))
        # check if the other branch replaced our value
        # ctx.get() serves as the source of truth
        expected = ctx.get()
        # and we check if the result was use_cached via caching or a bug in the
        # internal state of tasks (see https://github.com/adriangb/di/issues/18)
        assert id == expected  # replaced via caching
        assert one == expected  # replaced in results state
    container = Container()
    solved = container.solve(Dependant(dep2), scopes=[None])
    async def execute_in_ctx(id: int) -> None:
        # contextvars set here is isolated per task started by the task group
        ctx.set(id)
        async with container.enter_scope(None) as state:
            await container.execute_async(
                solved, executor=ConcurrentAsyncExecutor(), state=state
            )
    async with anyio.create_task_group() as tg:
        async with container.enter_scope("app"):
            tg.start_soon(functools.partial(execute_in_ctx, 1))
            tg.start_soon(functools.partial(execute_in_ctx, 2))
@pytest.mark.anyio
@pytest.mark.parametrize("scope,use_cache", [(None, False), ("app", True)])
async def test_concurrent_executions_use_cache(
    scope: Literal[None, "app"], use_cache: bool
):
    """Check that global / local scopes are respected during concurrent execution.

    Two executions share the outer "app" scope state; get_obj should produce
    one shared object only when it is cached in the shared "app" scope, and
    distinct objects when uncached / scoped to the per-execution None scope.
    """
    objects: List[object] = []
    def get_obj() -> object:
        return object()
    async def collect1(
        obj: Annotated[object, Marker(get_obj, scope=scope, use_cache=use_cache)]
    ) -> None:
        objects.append(obj)
        # keep this execution alive so the second one overlaps with it
        await anyio.sleep(0.01)
    async def collect2(
        obj: Annotated[object, Marker(get_obj, scope=scope, use_cache=use_cache)]
    ) -> None:
        objects.append(obj)
    container = Container()
    solved1 = container.solve(Dependant(collect1), scopes=["app", None])
    solved2 = container.solve(Dependant(collect2), scopes=["app", None])
    async def execute_1(state: ContainerState):
        # each execution opens its own inner None scope off the shared state
        async with container.enter_scope(None, state=state) as state:
            return await container.execute_async(
                solved1, executor=ConcurrentAsyncExecutor(), state=state
            )
    async def execute_2(state: ContainerState):
        async with container.enter_scope(None, state=state) as state:
            return await container.execute_async(
                solved2, executor=ConcurrentAsyncExecutor(), state=state
            )
    async with container.enter_scope("app") as state:
        async with anyio.create_task_group() as tg:
            tg.start_soon(execute_1, state)
            # stagger the starts so collect1 is mid-flight when collect2 runs
            await anyio.sleep(0.005)
            tg.start_soon(execute_2, state)
    # identical objects iff cached in the shared "app" scope
    assert (objects[0] is objects[1]) is (use_cache and scope == "app")
@pytest.mark.anyio
async def test_async_cm_de_in_sync_scope():
    """Cannot execute an async contextmanager-like dependency from within a sync scope."""
    async def dep() -> AsyncGenerator[None, None]:
        yield
    container = Container()
    # enter_scope() used synchronously yields a *sync* scope state
    with container.enter_scope(None) as state:
        # NOTE(review): the match string (including the "canot" spelling)
        # must mirror the library's actual error message verbatim
        with pytest.raises(
            IncompatibleDependencyError, match="canot be used in the sync scope"
        ):
            await container.execute_async(
                container.solve(Dependant(dep, scope=None), scopes=[None]),
                executor=AsyncExecutor(),
                state=state,
            )
def test_unknown_scope() -> None:
    """Solving a dependant whose sub-dependency declares a scope ("foo") not
    present in the scopes list must raise UnknownScopeError."""
    def bad_dep(v: Annotated[int, Marker(lambda: 1, scope="foo")]) -> int:
        return v
    container = Container()
    with pytest.raises(UnknownScopeError):
        container.solve(Dependant(bad_dep, scope="app"), scopes=["app"])
56ba83affa527e62cf9c295a7d3ff5af9f3ef93c | 9,235 | py | Python | freeclimb/models/queue_list.py | FreeClimbAPI/python-sdk | 1ec89eddc0069a39989579552b979a9d21418117 | [
"MIT"
] | null | null | null | freeclimb/models/queue_list.py | FreeClimbAPI/python-sdk | 1ec89eddc0069a39989579552b979a9d21418117 | [
"MIT"
] | 6 | 2020-03-03T20:14:26.000Z | 2021-12-06T22:11:15.000Z | freeclimb/models/queue_list.py | FreeClimbAPI/python-sdk | 1ec89eddc0069a39989579552b979a9d21418117 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
FreeClimb API
FreeClimb is a cloud-based application programming interface (API) that puts the power of the Vail platform in your hands. FreeClimb simplifies the process of creating applications that can use a full range of telephony features without requiring specialized or on-site telephony equipment. Using the FreeClimb REST API to write applications is easy! You have the option to use the language of your choice or hit the API directly. Your application can execute a command by issuing a RESTful request to the FreeClimb API. The base URL to send HTTP requests to the FreeClimb REST API is: /apiserver. FreeClimb authenticates and processes your request. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@freeclimb.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from freeclimb.configuration import Configuration
class QueueList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type (used by to_dict below).
    openapi_types = {
        'total': 'int',
        'start': 'int',
        'end': 'int',
        'page': 'int',
        'num_pages': 'int',
        'page_size': 'int',
        'next_page_uri': 'str',
        'queues': 'list[QueueResult]'
    }
    # Python attribute name -> JSON key in the wire format (camelCase).
    attribute_map = {
        'total': 'total',
        'start': 'start',
        'end': 'end',
        'page': 'page',
        'num_pages': 'numPages',
        'page_size': 'pageSize',
        'next_page_uri': 'nextPageUri',
        'queues': 'queues'
    }
    def __init__(self, total=None, start=None, end=None, page=None, num_pages=None, page_size=None, next_page_uri=None, queues=None, local_vars_configuration=None):  # noqa: E501
        """QueueList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the properties defined below.
        self._total = None
        self._start = None
        self._end = None
        self._page = None
        self._num_pages = None
        self._page_size = None
        self._next_page_uri = None
        self._queues = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided; unset ones
        # stay None and are omitted by to_dict().
        if total is not None:
            self.total = total
        if start is not None:
            self.start = start
        if end is not None:
            self.end = end
        if page is not None:
            self.page = page
        if num_pages is not None:
            self.num_pages = num_pages
        if page_size is not None:
            self.page_size = page_size
        if next_page_uri is not None:
            self.next_page_uri = next_page_uri
        if queues is not None:
            self.queues = queues
    @property
    def total(self):
        """Gets the total of this QueueList.  # noqa: E501

        Total amount of requested resource.  # noqa: E501

        :return: The total of this QueueList.  # noqa: E501
        :rtype: int
        """
        return self._total
    @total.setter
    def total(self, total):
        """Sets the total of this QueueList.

        Total amount of requested resource.  # noqa: E501

        :param total: The total of this QueueList.  # noqa: E501
        :type: int
        """
        self._total = total
    @property
    def start(self):
        """Gets the start of this QueueList.  # noqa: E501

        Resource index at start of current page  # noqa: E501

        :return: The start of this QueueList.  # noqa: E501
        :rtype: int
        """
        return self._start
    @start.setter
    def start(self, start):
        """Sets the start of this QueueList.

        Resource index at start of current page  # noqa: E501

        :param start: The start of this QueueList.  # noqa: E501
        :type: int
        """
        self._start = start
    @property
    def end(self):
        """Gets the end of this QueueList.  # noqa: E501

        Resource index at end of current page  # noqa: E501

        :return: The end of this QueueList.  # noqa: E501
        :rtype: int
        """
        return self._end
    @end.setter
    def end(self, end):
        """Sets the end of this QueueList.

        Resource index at end of current page  # noqa: E501

        :param end: The end of this QueueList.  # noqa: E501
        :type: int
        """
        self._end = end
    @property
    def page(self):
        """Gets the page of this QueueList.  # noqa: E501

        Current page  # noqa: E501

        :return: The page of this QueueList.  # noqa: E501
        :rtype: int
        """
        return self._page
    @page.setter
    def page(self, page):
        """Sets the page of this QueueList.

        Current page  # noqa: E501

        :param page: The page of this QueueList.  # noqa: E501
        :type: int
        """
        self._page = page
    @property
    def num_pages(self):
        """Gets the num_pages of this QueueList.  # noqa: E501

        Total number of pages  # noqa: E501

        :return: The num_pages of this QueueList.  # noqa: E501
        :rtype: int
        """
        return self._num_pages
    @num_pages.setter
    def num_pages(self, num_pages):
        """Sets the num_pages of this QueueList.

        Total number of pages  # noqa: E501

        :param num_pages: The num_pages of this QueueList.  # noqa: E501
        :type: int
        """
        self._num_pages = num_pages
    @property
    def page_size(self):
        """Gets the page_size of this QueueList.  # noqa: E501

        Number of items per page  # noqa: E501

        :return: The page_size of this QueueList.  # noqa: E501
        :rtype: int
        """
        return self._page_size
    @page_size.setter
    def page_size(self, page_size):
        """Sets the page_size of this QueueList.

        Number of items per page  # noqa: E501

        :param page_size: The page_size of this QueueList.  # noqa: E501
        :type: int
        """
        self._page_size = page_size
    @property
    def next_page_uri(self):
        """Gets the next_page_uri of this QueueList.  # noqa: E501

        Uri to retrieve the next page of items  # noqa: E501

        :return: The next_page_uri of this QueueList.  # noqa: E501
        :rtype: str
        """
        return self._next_page_uri
    @next_page_uri.setter
    def next_page_uri(self, next_page_uri):
        """Sets the next_page_uri of this QueueList.

        Uri to retrieve the next page of items  # noqa: E501

        :param next_page_uri: The next_page_uri of this QueueList.  # noqa: E501
        :type: str
        """
        self._next_page_uri = next_page_uri
    @property
    def queues(self):
        """Gets the queues of this QueueList.  # noqa: E501

        :return: The queues of this QueueList.  # noqa: E501
        :rtype: list[QueueResult]
        """
        return self._queues
    @queues.setter
    def queues(self, queues):
        """Sets the queues of this QueueList.

        :param queues: The queues of this QueueList.  # noqa: E501
        :type: list[QueueResult]
        """
        self._queues = queues
    def to_dict(self):
        """Returns the model properties as a dict.

        Keys are camelCased via to_camel_case(); attributes whose value is
        None are omitted; nested models (anything exposing to_dict) are
        serialised recursively, including inside lists and dict values.
        """
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # Convert the snake_case attribute name to the camelCase JSON key.
            attr = self.to_camel_case(attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            elif value is None:
                # Skip unset attributes rather than emitting explicit nulls.
                continue
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, QueueList):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, QueueList):
            return True
        return self.to_dict() != other.to_dict()
    def to_camel_case(self, snake_str):
        """Convert a snake_case name to camelCase (e.g. num_pages -> numPages)."""
        components = snake_str.split('_')
        return components[0] + ''.join(x.title() for x in components[1:])
| 28.415385 | 667 | 0.580834 |
d5e99239a69e9869c26640e141443017c147489e | 303 | py | Python | data/multilingual/Latn.KEA/Sans_12/pdf_to_json_test_Latn.KEA_Sans_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.KEA/Sans_12/pdf_to_json_test_Latn.KEA_Sans_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.KEA/Sans_12/pdf_to_json_test_Latn.KEA_Sans_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json

# Input document for this generated test: UDHR sample for Latn.KEA, Sans 12pt.
url = "file:data/multilingual/Latn.KEA/Sans_12/udhr_Latn.KEA_Sans_12.pdf"

lConverter = p2j.pdf_to_json.pdf_to_json_converter()
# Store only hashes of embedded images instead of their raw bytes.
lConverter.mImageHashOnly = True

lDict = lConverter.convert(url)
# Emit the conversion result as pretty-printed, deterministic JSON.
print(json.dumps(lDict, ensure_ascii=False, sort_keys=True, indent=4))
| 30.3 | 73 | 0.811881 |
cf07f5987247f1c15224452d25f2033768decd84 | 1,165 | py | Python | idaes/power_generation/unit_models/steamheater.py | OOAmusat/idaes-pse | ae7d3bb8e372bc32822dcdcb75e9fd96b78da539 | [
"RSA-MD"
] | null | null | null | idaes/power_generation/unit_models/steamheater.py | OOAmusat/idaes-pse | ae7d3bb8e372bc32822dcdcb75e9fd96b78da539 | [
"RSA-MD"
] | null | null | null | idaes/power_generation/unit_models/steamheater.py | OOAmusat/idaes-pse | ae7d3bb8e372bc32822dcdcb75e9fd96b78da539 | [
"RSA-MD"
] | 1 | 2022-03-17T11:08:43.000Z | 2022-03-17T11:08:43.000Z | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Deprecation path for renamed model.
"""
from pyomo.common.deprecation import deprecation_warning
# Warn at import time: this module is only a compatibility alias for the
# model's new location under idaes.models_extra.
deprecation_warning("The steamheater module has been moved to "
                    "idaes.models_extra.power_generation.unit_models."
                    "steamheater",
                    version="2.0.0.alpha0")
# Re-export everything from the new location so legacy imports keep working.
from idaes.models_extra.power_generation.unit_models.steamheater import *
| 48.541667 | 81 | 0.64721 |
1a726d227e55a1fe5903ccaf57af778322750713 | 29,686 | py | Python | benchmarks/ltl_timed_automata/train/f3/train_gate_0024.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/ltl_timed_automata/train/f3/train_gate_0024.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/ltl_timed_automata/train/f3/train_gate_0024.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from collections import Iterable
from itertools import chain
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type, msat_get_integer_type, \
msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
# Number of Train components composed in parallel with the gate/controller.
num_procs = 24
# Name of the shared real-valued symbol modelling the time elapsed per step.
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type):
    """Declare a state variable and its primed (next-state) copy.

    Returns the ``(curr, next)`` pair of constant terms of type ``c_type``.
    Names starting with an underscore are reserved and rejected.
    """
    assert not name.startswith("_"), name

    def _const(n):
        # A MathSAT "constant" is a declared zero-arity function.
        return msat_make_constant(menv, msat_declare_function(menv, n, c_type))

    return _const(name), _const(name_next(name))
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 - arg1`` as ``arg0 + (-1 * arg1)``."""
    neg_arg1 = msat_make_times(menv, arg1, msat_make_number(menv, "-1"))
    return msat_make_plus(menv, arg0, neg_arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode strict ``arg0 < arg1`` as ``not (arg0 >= arg1)``."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 >= arg1`` by swapping the arguments of ``<=``."""
    flipped = msat_make_leq(menv, arg1, arg0)
    return flipped
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode strict ``arg0 > arg1`` as ``not (arg0 <= arg1)``."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode the implication ``arg0 -> arg1`` as ``(not arg0) or arg1``."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
    """Return the symbols required to diverge: the real time-elapse `delta`."""
    decl = msat_declare_function(menv, delta_name,
                                 msat_get_rational_type(menv))
    return frozenset((msat_make_constant(menv, decl),))
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    """Build the timed train/gate/controller network and the LTL property.

    Composes one Gate, one Controller and ``num_procs`` Trains, conjoins
    their init/trans formulas, synchronises the shared events on discrete
    steps (delta = 0) and returns ``(TermMap(curr2next), init, trans, ltl)``.
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    real_type = msat_get_rational_type(menv)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    curr2next = {delta: x_delta}
    # Timing constants handed to the components: a/b bound the train clock,
    # c/d bound the gate clock, e bounds the controller clock.
    a = msat_make_number(menv, "2")
    b = msat_make_number(menv, "5")
    c = msat_make_number(menv, "1")
    d = msat_make_number(menv, "2")
    e = msat_make_number(menv, "1")
    gate = Gate("gate", menv, enc, c, d, delta)
    controller = Controller("controller", menv, enc, e, num_procs + 1,
                            delta)
    trains = [Train("t{}".format(idx), menv, enc, a, b, delta)
              for idx in range(num_procs)]
    components = [gate, controller, *trains]
    # Collect every component's curr -> next symbol mapping; names must be
    # globally unique across components.
    for p in components:
        for s, x_s in p.symb2next.items():
            assert s not in curr2next
            curr2next[s] = x_s
    zero = msat_make_number(menv, "0")
    # Time elapse is never negative: delta >= 0 now and in the next state.
    init = msat_make_geq(menv, delta, zero)
    trans = msat_make_geq(menv, x_delta, zero)
    for p in components:
        init = msat_make_and(menv, init, p.init)
        trans = msat_make_and(menv, trans, p.trans)
    d_eq_0 = msat_make_equal(menv, delta, zero)
    # Interleaving: on a discrete step (delta = 0) at most one train moves;
    # every other train must take its stutter event.
    # BUG FIX: the accumulator was misspelled (`other_sutter` vs the
    # initialised `other_stutter`), so the `is None` test never turned false
    # and only the *last* other train's stutter event was constrained,
    # instead of the conjunction over all other trains.
    for idx0, t0 in enumerate(trains):
        other_stutter = None
        for idx1, t1 in enumerate(trains):
            if idx0 != idx1:
                if other_stutter is None:
                    other_stutter = t1.evt_stutter
                else:
                    other_stutter = msat_make_and(menv, other_stutter,
                                                  t1.evt_stutter)
        lhs = msat_make_and(menv, d_eq_0,
                            msat_make_not(menv, t0.evt_stutter))
        curr = msat_make_impl(menv, lhs, other_stutter)
        trans = msat_make_and(menv, trans, curr)
    # sync evt_lower: controller and gate lower together.
    trans = msat_make_and(menv, trans,
                          msat_make_impl(
                              menv, d_eq_0,
                              msat_make_iff(menv, controller.evt_lower,
                                            gate.evt_lower)))
    # sync evt_rise: controller and gate rise together.
    trans = msat_make_and(menv, trans,
                          msat_make_impl(
                              menv, d_eq_0,
                              msat_make_iff(menv, controller.evt_rise,
                                            gate.evt_rise)))
    # sync evt_approach: controller sees an approach iff some train approaches.
    train_approach = trains[0].evt_approach
    for t in trains[1:]:
        train_approach = msat_make_or(menv, train_approach, t.evt_approach)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(
                              menv, d_eq_0,
                              msat_make_iff(menv, controller.evt_approach,
                                            train_approach)))
    # sync evt_exit: controller sees an exit iff some train exits.
    train_exit = trains[0].evt_exit
    for t in trains[1:]:
        train_exit = msat_make_or(menv, train_exit, t.evt_exit)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(
                              menv, d_eq_0,
                              msat_make_iff(menv, controller.evt_exit,
                                            train_exit)))
    # Property: G ((gate.g0 & gate.g1') -> F (gate.g2 & gate.g3'))
    lhs = msat_make_and(menv, gate.g0, enc.make_X(gate.g1))
    rhs = msat_make_and(menv, gate.g2, enc.make_X(gate.g3))
    ltl = enc.make_G(msat_make_impl(menv, lhs, enc.make_F(rhs)))
    return TermMap(curr2next), init, trans, ltl
class Module:
    """Synchronous component.

    Base class for the network components: holds the component name, the
    MathSAT environment and LTL encoder, a map from current-state symbols to
    their next-state copies, and the ``init``/``trans`` formulas (both start
    as ``true`` and are strengthened by subclasses).
    """
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        self.name = name
        self.menv = menv
        self.enc = enc
        # current-state symbol -> next-state symbol; populated by subclasses.
        self.symb2next = {}
        true = msat_make_true(menv)
        self.init = true
        self.trans = true
    def _symb(self, v_name, v_type):
        """Declare a (curr, next) state-variable pair namespaced as
        "<component name>_<v_name>"."""
        v_name = "{}_{}".format(self.name, v_name)
        return decl_consts(self.menv, v_name, v_type)
    def _enum(self, v_name: str, enum_size: int):
        """Bit-blast an enum with `enum_size` values onto ceil(log2(n))
        boolean state variables.

        Returns ``(b_vars, vals, x_vals)``: `b_vars` is the list of
        (curr, next) boolean symbol pairs, and ``vals[i]`` / ``x_vals[i]``
        are the predicates "the enum holds value i" in the current / next
        state (a conjunction of i's bits, least-significant bit first).
        """
        bool_type = msat_get_bool_type(self.menv)
        num_bits = ceil(log(enum_size, 2))
        b_vars = []
        for idx in range(num_bits):
            c_name = "{}{}".format(v_name, idx)
            b_vars.append(tuple(self._symb(c_name, bool_type)))
        vals = []
        x_vals = []
        for enum_val in range(enum_size):
            # Binary encoding of enum_val, zero-padded to num_bits.
            bit_val = format(enum_val, '0{}b'.format(num_bits))
            assert len(bit_val) == num_bits
            assert all(c in {'0', '1'} for c in bit_val)
            # Positive or negated (curr, next) literal per bit position.
            assign = [b_vars[idx] if c == '1' else
                      (msat_make_not(self.menv, b_vars[idx][0]),
                       msat_make_not(self.menv, b_vars[idx][1]))
                      for idx, c in enumerate(reversed(bit_val))]
            pred = assign[0][0]
            x_pred = assign[0][1]
            for it in assign[1:]:
                pred = msat_make_and(self.menv, pred, it[0])
                x_pred = msat_make_and(self.menv, x_pred, it[1])
            vals.append(pred)
            x_vals.append(x_pred)
        assert len(vals) == enum_size
        assert len(x_vals) == enum_size
        return b_vars, vals, x_vals
class Train(Module):
    """Train module.

    Timed automaton over locations t0..t3 forming a cycle, with one real
    clock ``x``: t0 -> t1 on evt_approach (x reset), t1 -> t2 on evt_move
    with guard x > a, t2 -> t3 on evt_move, t3 -> t0 on evt_exit with
    guard x <= b.  Invariant: while the location is not t0, x <= b.
    Time may elapse (delta > 0) or the train may stutter, in which case
    the location is unchanged and x advances by delta.
    """
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 a, b, delta):
        super().__init__(name, menv, enc)
        # int_type = msat_get_integer_type(menv)
        real_type = msat_get_rational_type(menv)
        # loc, x_loc = self._symb("l", int_type)
        loc_symbs, locs, x_locs = self._enum("l", 4)
        # evt, x_evt = self._symb("evt", int_type)
        evt_symbs, evts, x_evts = self._enum("evt", 4)
        x, x_x = self._symb("x", real_type)
        self.symb2next = {x: x_x}
        for s, x_s in chain(evt_symbs, loc_symbs):
            assert s not in self.symb2next
            self.symb2next[s] = x_s
        # Event predicates (current / next state).
        self.evt_stutter = evts[0]
        self.evt_approach = evts[1]
        self.evt_exit = evts[2]
        self.evt_move = evts[3]
        x_evt_stutter = x_evts[0]
        x_evt_approach = x_evts[1]
        x_evt_exit = x_evts[2]
        x_evt_move = x_evts[3]
        # Location predicates (current / next state).
        self.t0 = locs[0]
        self.t1 = locs[1]
        self.t2 = locs[2]
        self.t3 = locs[3]
        self.x_t0 = x_locs[0]
        self.x_t1 = x_locs[1]
        self.x_t2 = x_locs[2]
        self.x_t3 = x_locs[3]
        # same_loc: l' = l, encoded bit-wise over the location booleans.
        same_loc = msat_make_iff(menv, loc_symbs[0][1], loc_symbs[0][0])
        for s, x_s in loc_symbs[1:]:
            same_loc = msat_make_and(menv, same_loc,
                                     msat_make_iff(menv, x_s, s))
        zero = msat_make_number(menv, "0")
        # l = t0 & x = 0
        self.init = msat_make_and(menv, self.t0,
                                  msat_make_equal(menv, x, zero))
        # bound l
        bound_l = msat_make_or(menv,
                               msat_make_or(menv, self.t0, self.t1),
                               msat_make_or(menv, self.t2, self.t3))
        self.init = msat_make_and(menv, self.init, bound_l)
        x_bound_l = msat_make_or(menv,
                                 msat_make_or(menv, self.x_t0, self.x_t1),
                                 msat_make_or(menv, self.x_t2, self.x_t3))
        self.trans = x_bound_l
        # bound evt
        bound_evt = msat_make_or(menv,
                                 msat_make_or(menv, self.evt_stutter,
                                              self.evt_approach),
                                 msat_make_or(menv, self.evt_exit,
                                              self.evt_move))
        self.init = msat_make_and(menv, self.init, bound_evt)
        x_bound_evt = msat_make_or(menv,
                                   msat_make_or(menv, x_evt_stutter,
                                                x_evt_approach),
                                   msat_make_or(menv, x_evt_exit,
                                                x_evt_move))
        self.trans = msat_make_and(menv, self.trans, x_bound_evt)
        # invars: l != t0 -> x <= b
        lhs = msat_make_not(menv, self.t0)
        rhs = msat_make_leq(menv, x, b)
        self.init = msat_make_and(menv, self.init,
                                  msat_make_impl(menv, lhs, rhs))
        # invars: l != t0 -> x <= b
        lhs = msat_make_not(menv, self.x_t0)
        rhs = msat_make_leq(menv, x_x, b)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # delta > 0 | stutter -> x' = x + delta & l' = l
        lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero),
                           self.evt_stutter)
        rhs = msat_make_and(menv,
                            msat_make_equal(menv, x_x,
                                            msat_make_plus(menv, x, delta)),
                            same_loc)
        self.trans = msat_make_and( menv, self.trans,
                                    msat_make_impl(menv, lhs, rhs))
        # disc_t: a discrete (zero-time, non-stutter) step.
        disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
                               msat_make_not(menv, self.evt_stutter))
        # (l = t0) -> (l' = t1 & evt_approach & x' = 0)
        lhs = msat_make_and(menv, disc_t, self.t0)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, self.x_t1,
                                          self.evt_approach),
                            msat_make_equal(menv, x_x, zero))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (l = t1) -> (l' = t2 & x > a & evt_move & x' = x)
        lhs = msat_make_and(menv, disc_t, self.t1)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, self.x_t2,
                                          msat_make_gt(menv, x, a)),
                            msat_make_and(menv, self.evt_move,
                                          msat_make_equal(menv, x_x, x)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (l = t2) -> (l' = t3 & evt_move & x' = x)
        lhs = msat_make_and(menv, disc_t, self.t2)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, self.x_t3, self.evt_move),
                            msat_make_equal(menv, x_x, x))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (l = t3) -> (l' = t0 & x <= b & evt_exit & x' = x)
        lhs = msat_make_and(menv, disc_t, self.t3)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, self.x_t0,
                                          msat_make_leq(menv, x, b)),
                            msat_make_and(menv, self.evt_exit,
                                          msat_make_equal(menv, x_x, x)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
class Gate(Module):
    """Gate module.

    Timed automaton over locations g0..g3 forming a cycle, with one real
    clock ``y``: g0 -> g1 on evt_lower (y reset), g1 -> g2 on evt_move with
    guard y <= c, g2 -> g3 on evt_rise (y reset), g3 -> g0 on evt_move with
    guard c <= y <= d.  Invariants: y <= c in g1 and y <= d in g3.  Time
    may elapse (delta > 0) or the gate may stutter, in which case the
    location is unchanged and y advances by delta.
    """
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 c, d, delta):
        super().__init__(name, menv, enc)
        real_type = msat_get_rational_type(menv)
        loc_symbs, locs, x_locs = self._enum("l", 4)
        evt_symbs, evts, x_evts = self._enum("evt", 4)
        y, x_y = self._symb("y", real_type)
        self.symb2next = {y: x_y}
        for s, x_s in chain(loc_symbs, evt_symbs):
            assert s not in self.symb2next
            self.symb2next[s] = x_s
        # Event predicates (current / next state).
        self.evt_stutter = evts[0]
        self.evt_lower = evts[1]
        self.evt_rise = evts[2]
        self.evt_move = evts[3]
        x_evt_stutter = x_evts[0]
        x_evt_lower = x_evts[1]
        x_evt_rise = x_evts[2]
        x_evt_move = x_evts[3]
        # Location predicates (current / next state).
        self.g0 = locs[0]
        self.g1 = locs[1]
        self.g2 = locs[2]
        self.g3 = locs[3]
        self.x_g0 = x_locs[0]
        self.x_g1 = x_locs[1]
        self.x_g2 = x_locs[2]
        self.x_g3 = x_locs[3]
        # same_loc: l' = l, encoded bit-wise over the location booleans.
        same_loc = msat_make_iff(menv, loc_symbs[0][1], loc_symbs[0][0])
        for s, x_s in loc_symbs[1:]:
            same_loc = msat_make_and(menv, same_loc,
                                     msat_make_iff(menv, x_s, s))
        zero = msat_make_number(menv, "0")
        # l = g0 & y = 0
        self.init = msat_make_and(menv, self.g0,
                                  msat_make_equal(menv, y, zero))
        # bound l
        bound_l = msat_make_or(menv,
                               msat_make_or(menv, self.g0, self.g1),
                               msat_make_or(menv, self.g2, self.g3))
        self.init = msat_make_and(menv, self.init, bound_l)
        x_bound_l = msat_make_or(menv,
                                 msat_make_or(menv, self.x_g0, self.x_g1),
                                 msat_make_or(menv, self.x_g2, self.x_g3))
        self.trans = x_bound_l
        # bound evt
        bound_evt = msat_make_or(menv,
                                 msat_make_or(menv, self.evt_stutter,
                                              self.evt_lower),
                                 msat_make_or(menv, self.evt_rise,
                                              self.evt_move))
        self.init = msat_make_and(menv, self.init, bound_evt)
        x_bound_evt = msat_make_or(menv,
                                   msat_make_or(menv, x_evt_stutter,
                                                x_evt_lower),
                                   msat_make_or(menv, x_evt_rise,
                                                x_evt_move))
        self.trans = msat_make_and(menv, self.trans, x_bound_evt)
        # invars: l = g1 -> y <= c; l = g3 -> y <= d
        lhs = self.g1
        rhs = msat_make_leq(menv, y, c)
        self.init = msat_make_and(menv, self.init,
                                  msat_make_impl(menv, lhs, rhs))
        lhs = self.g3
        rhs = msat_make_leq(menv, y, d)
        self.init = msat_make_and(menv, self.init,
                                  msat_make_impl(menv, lhs, rhs))
        # invars: l = g1 -> y <= c; l = g3 -> y <= d
        lhs = self.x_g1
        rhs = msat_make_leq(menv, x_y, c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        lhs = self.x_g3
        rhs = msat_make_leq(menv, x_y, d)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # delta > 0 | stutter -> y' = y + delta & l' = l
        lhs = msat_make_or(menv,
                           msat_make_gt(menv, delta, zero),
                           self.evt_stutter)
        rhs = msat_make_and(menv,
                            msat_make_equal(menv, x_y,
                                            msat_make_plus(menv, y, delta)),
                            same_loc)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # disc_t: a discrete (zero-time, non-stutter) step.
        disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
                               msat_make_not(menv, self.evt_stutter))
        # (l = g0) -> (l' = g1 & evt_lower & y' = 0)
        lhs = msat_make_and(menv, disc_t, self.g0)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, self.x_g1,
                                          self.evt_lower),
                            msat_make_equal(menv, x_y, zero))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (l = g1) -> (l' = g2 & y <= c & evt_move & y' = y)
        lhs = msat_make_and(menv, disc_t, self.g1)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, self.x_g2,
                                          self.evt_move),
                            msat_make_and(menv,
                                          msat_make_leq(menv, y, c),
                                          msat_make_equal(menv, x_y, y)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (l = g2) -> (l' = g3 & evt_rise & y' = 0)
        lhs = msat_make_and(menv, disc_t, self.g2)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, self.x_g3, self.evt_rise),
                            msat_make_equal(menv, x_y, zero))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (l = g3) -> (l' = g0 & y >= c & y <= d & evt_move & y' = y)
        lhs = msat_make_and(menv, disc_t, self.g3)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, self.x_g0,
                                          msat_make_geq(menv, y, c)),
                            msat_make_and(menv,
                                          msat_make_leq(menv, y, d),
                                          self.evt_move))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_y, y))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
class Controller(Module):
"""Controller module"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
e, N, delta):
super().__init__(name, menv, enc)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc_symbs, locs, x_locs = self._enum("l", 4)
evt_symbs, evts, x_evts = self._enum("evt", 5)
z, x_z = self._symb("z", real_type)
cnt, x_cnt = self._symb("cnt", int_type)
self.symb2next = {z: x_z, cnt: x_cnt}
for s, x_s in chain(loc_symbs, evt_symbs):
assert s not in self.symb2next
self.symb2next[s] = x_s
self.evt_stutter = evts[0]
self.evt_approach = evts[1]
self.evt_exit = evts[2]
self.evt_lower = evts[3]
self.evt_rise = evts[4]
x_evt_stutter = x_evts[0]
x_evt_approach = x_evts[1]
x_evt_exit = x_evts[2]
x_evt_lower = x_evts[3]
x_evt_rise = x_evts[4]
self.c0 = locs[0]
self.c1 = locs[1]
self.c2 = locs[2]
self.c3 = locs[3]
self.x_c0 = x_locs[0]
self.x_c1 = x_locs[1]
self.x_c2 = x_locs[2]
self.x_c3 = x_locs[3]
same_loc = msat_make_iff(menv, loc_symbs[0][1], loc_symbs[0][0])
for s, x_s in loc_symbs[1:]:
same_loc = msat_make_and(menv, same_loc,
msat_make_iff(menv, x_s, s))
nums = [msat_make_number(menv, str(i)) for i in range(N + 1)]
N = nums[-1]
# l = c0 & z = 0
self.init = msat_make_and(menv, self.c0,
msat_make_equal(menv, z, nums[0]))
# bound l
bound_l = msat_make_or(menv,
msat_make_or(menv, self.c0, self.c1),
msat_make_or(menv, self.c2, self.c3))
self.init = msat_make_and(menv, self.init, bound_l)
x_bound_l = msat_make_or(menv,
msat_make_or(menv, self.x_c0, self.x_c1),
msat_make_or(menv, self.x_c2, self.x_c3))
self.trans = x_bound_l
# bound evt
bound_evt = msat_make_or(
menv,
msat_make_or(menv, self.evt_stutter,
self.evt_approach),
msat_make_or(menv, self.evt_exit,
msat_make_or(menv, self.evt_lower,
self.evt_rise)))
self.init = msat_make_and(menv, self.init, bound_evt)
x_bound_evt = msat_make_or(
menv,
msat_make_or(menv, x_evt_stutter,
x_evt_approach),
msat_make_or(menv, x_evt_exit,
msat_make_or(menv, x_evt_lower,
x_evt_rise)))
self.trans = msat_make_and(menv, self.trans, x_bound_evt)
# bound cnt
bound_cnt = msat_make_equal(menv, cnt, nums[0])
x_bound_cnt = msat_make_equal(menv, x_cnt, nums[0])
for i in nums[1:]:
bound_cnt = msat_make_or(menv, bound_cnt,
msat_make_equal(menv, cnt, i))
x_bound_cnt = msat_make_or(menv, x_bound_cnt,
msat_make_equal(menv, x_cnt, i))
self.init = msat_make_and(menv, self.init, bound_cnt)
self.trans = msat_make_and(menv, self.trans, x_bound_cnt)
# invars: (l = c1 | l = c3) -> (z <= e)
lhs = msat_make_or(menv, self.c1, self.c3)
rhs = msat_make_leq(menv, z, e)
self.init = msat_make_and(menv, self.init,
msat_make_impl(menv, lhs, rhs))
# invars: (l = c1 | l = c3) -> (z <= e)
lhs = msat_make_or(menv, self.x_c1, self.x_c3)
rhs = msat_make_leq(menv, x_z, e)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# delta > 0 | stutter -> z' = z + delta & l' = l & cnt' = cnt
lhs = msat_make_or(menv, msat_make_gt(menv, delta, nums[0]),
self.evt_stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_z,
msat_make_plus(menv, z, delta)),
same_loc),
msat_make_equal(menv, x_cnt, cnt))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, msat_make_equal(menv, delta, nums[0]),
msat_make_not(menv, self.evt_stutter))
# (l = c0) -> (l' = c1 & evt_approach & z' = 0 & cnt' = 1)
lhs = msat_make_and(menv, disc_t, self.c0)
rhs = msat_make_and(menv,
msat_make_and(menv, self.x_c1,
self.evt_approach),
msat_make_and(menv,
msat_make_equal(menv, x_z, nums[0]),
msat_make_equal(menv, x_cnt,
nums[1])))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c1) -> ((l' = c1 | l' = c2) & z' = z)
lhs = msat_make_and(menv, disc_t, self.c1)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_z, z),
msat_make_or(menv, self.x_c1, self.x_c2))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c1 & l' = c1) -> ((evt_approach & cnt' = cnt + 1) |
# (evt_exit & cnt' = cnt - 1))
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c1, self.x_c1))
dec_cnt = msat_make_equal(menv, x_cnt,
msat_make_minus(menv, cnt, nums[1]))
inc_cnt = msat_make_equal(menv, x_cnt,
msat_make_plus(menv, cnt, nums[1]))
rhs = msat_make_or(menv,
msat_make_and(menv, self.evt_approach, inc_cnt),
msat_make_and(menv, self.evt_exit, dec_cnt))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c1 & l' = c2) -> (evt_lower & z = e & cnt' = cnt)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c1, self.x_c2))
rhs = msat_make_and(menv, self.evt_lower,
msat_make_and(menv,
msat_make_equal(menv, z, e),
msat_make_equal(menv, x_cnt, cnt)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c2) -> (l' = c2 | l' = c3)
lhs = msat_make_and(menv, disc_t, self.c2)
rhs = msat_make_or(menv, self.x_c2, self.x_c3)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# l = c2 & l' = c2) -> (z' = z & ((cnt > 1 & evt_exit & cnt' = cnt - 1) |
# (evt_approach & cnt' = cnt + 1)))
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c2, self.x_c2))
disj0 = msat_make_and(menv,
msat_make_gt(menv, cnt, nums[1]),
msat_make_and(menv, self.evt_exit,
dec_cnt))
rhs = msat_make_and(menv,
msat_make_equal(menv, x_z, z),
msat_make_or(menv, disj0,
msat_make_and(menv, self.evt_approach,
inc_cnt)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c2 & l' = c3) -> (cnt = 1 & evt_exit & z' = 0 & cnt' = 0)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c2, self.x_c3))
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, cnt, nums[1]),
self.evt_exit),
msat_make_and(menv,
msat_make_equal(menv, x_z, nums[0]),
msat_make_equal(menv, x_cnt,
nums[0])))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c3) -> ((l' = c2 | l' = c0) & z' = z)
lhs = msat_make_and(menv, disc_t, self.c3)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_z, z),
msat_make_or(menv, self.x_c2, self.x_c0))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c3 & l' = c2) -> (z <= e & evt_approach & cnt' = cnt + 1)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c3, self.x_c2))
rhs = msat_make_and(menv, inc_cnt,
msat_make_and(menv,
msat_make_leq(menv, z, e),
self.evt_approach))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (l = c3 & l' = c0) -> (z <= e & evt_rise & cnt' = cnt)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.c3, self.x_c0))
rhs = msat_make_and(menv,
msat_make_equal(menv, x_cnt, cnt),
msat_make_and(menv, self.evt_rise,
msat_make_leq(menv, z, e)))
| 43.527859 | 81 | 0.497541 |
cf29bcb55657ad0e6c4f4696b8c6f3b675a75be8 | 928 | py | Python | tests/integration/cartography/intel/aws/test_s3.py | eRaMvn/cartography | 807e9216700d32f23739e3b5d227b32fd81aee19 | [
"Apache-2.0"
] | 1 | 2022-03-31T03:24:37.000Z | 2022-03-31T03:24:37.000Z | tests/integration/cartography/intel/aws/test_s3.py | srics/cartography | 19a06766e304d657d956246179a2bb01a6d9aef6 | [
"Apache-2.0"
] | 1 | 2021-02-23T18:08:04.000Z | 2021-03-31T08:17:23.000Z | tests/integration/cartography/intel/aws/test_s3.py | srics/cartography | 19a06766e304d657d956246179a2bb01a6d9aef6 | [
"Apache-2.0"
] | 1 | 2021-07-12T16:05:55.000Z | 2021-07-12T16:05:55.000Z | import cartography.intel.aws.s3
import tests.data.aws.s3
# Fixture constants shared by the tests in this module.
TEST_ACCOUNT_ID = '000000000000'
TEST_REGION = 'us-east-1'
TEST_UPDATE_TAG = 123456789
def test_load_s3_buckets(neo4j_session, *args):
    """Loading the LIST_BUCKETS fixture should create one S3Bucket node per
    bucket, with both id and name set to the bucket name."""
    cartography.intel.aws.s3.load_s3_buckets(
        neo4j_session,
        tests.data.aws.s3.LIST_BUCKETS,
        TEST_ACCOUNT_ID,
        TEST_UPDATE_TAG,
    )
    expected_nodes = {
        (bucket, bucket)
        for bucket in ("bucket-1", "bucket-2", "bucket-3")
    }
    result = neo4j_session.run(
        """
        MATCH (s:S3Bucket) return s.id, s.name
        """,
    )
    actual_nodes = {(record['s.id'], record['s.name']) for record in result}
    assert actual_nodes == expected_nodes
5f8b970b038ea6d3a3c9f176aea0e8208424fc8d | 103,485 | py | Python | jax/lax.py | pcmoritz/jax | d065f8630ccd40a6369e6ff81325fb9db0195f14 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/lax.py | pcmoritz/jax | d065f8630ccd40a6369e6ff81325fb9db0195f14 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/lax.py | pcmoritz/jax | d065f8630ccd40a6369e6ff81325fb9db0195f14 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .util import partial
import itertools
import operator
import six
from six.moves import builtins, xrange
import string
import numpy as onp
from . import core
from . import ad_util
from . import linear_util as lu
from .core import Primitive
from .abstract_arrays import (UnshapedArray, ShapedArray, ConcreteArray,
array_types, make_shaped_array)
from .api_util import flatten_fun, tree_to_jaxtuples
from .interpreters import partial_eval as pe
from .interpreters import xla
from .interpreters import ad
from .interpreters import batching
from .util import curry, safe_zip, unzip2, prod
from .tree_util import build_tree
from .lib import xla_bridge
_max = builtins.max
_min = builtins.max
### traceables
# User-facing wrappers that bind the corresponding lax primitive to their
# arguments. Each returns the primitive's output (a tracer under
# transformation, a concrete value otherwise). Note that several of these
# (`round`, `complex`, `abs`, `pow`, `max`, `min`) intentionally shadow the
# Python builtins; see the `_max`/`_min` aliases near the top of the module.
def neg(x): return neg_p.bind(x)
def sign(x): return sign_p.bind(x)
def floor(x): return floor_p.bind(x)
def ceil(x): return ceil_p.bind(x)
def round(x): return round_p.bind(x)
def is_finite(x): return is_finite_p.bind(x)
def exp(x): return exp_p.bind(x)
def expm1(x): return expm1_p.bind(x)
def log(x): return log_p.bind(x)
def log1p(x): return log1p_p.bind(x)
def tanh(x): return tanh_p.bind(x)
def sin(x): return sin_p.bind(x)
def cos(x): return cos_p.bind(x)
def atan2(x, y): return atan2_p.bind(x, y)
def lgamma(x): return lgamma_p.bind(x)
def digamma(x): return digamma_p.bind(x)
def erf(x): return erf_p.bind(x)
def erfc(x): return erfc_p.bind(x)
def erf_inv(x): return erf_inv_p.bind(x)
def real(x): return real_p.bind(x)
def imag(x): return imag_p.bind(x)
# complex broadcasts its two real arguments against each other before binding.
def complex(x, y): return complex_p.bind(_brcast(x, y), _brcast(y, x))
# conj records the input dtype so its transpose rule can tell whether the
# original argument was real or complex.
def conj(x): return conj_p.bind(x, input_dtype=_dtype(x))
def abs(x): return abs_p.bind(x)
def pow(x, y): return pow_p.bind(x, y)
def bitwise_not(x): return not_p.bind(x)
def bitwise_and(x, y): return and_p.bind(x, y)
def bitwise_or(x, y): return or_p.bind(x, y)
def bitwise_xor(x, y): return xor_p.bind(x, y)
def add(x, y): return add_p.bind(x, y)
def sub(x, y): return sub_p.bind(x, y)
def mul(x, y): return mul_p.bind(x, y)
def div(x, y): return div_p.bind(x, y)
def rem(x, y): return rem_p.bind(x, y)
def max(x, y): return max_p.bind(x, y)
def min(x, y): return min_p.bind(x, y)
def shift_left(x, y): return shift_left_p.bind(x, y)
def shift_right_arithmetic(x, y): return shift_right_arithmetic_p.bind(x, y)
def shift_right_logical(x, y): return shift_right_logical_p.bind(x, y)
def eq(x, y): return eq_p.bind(x, y)
def ne(x, y): return ne_p.bind(x, y)
def ge(x, y): return ge_p.bind(x, y)
def gt(x, y): return gt_p.bind(x, y)
def le(x, y): return le_p.bind(x, y)
def lt(x, y): return lt_p.bind(x, y)
def convert_element_type(operand, new_dtype):
  """Elementwise cast of `operand` to `new_dtype`.

  The target dtype is canonicalized first; when it already matches the
  operand's dtype the operand is returned unchanged (no primitive is bound).
  """
  target = xla_bridge.canonicalize_dtype(new_dtype)
  source = _dtype(operand)
  if source == target:
    return operand
  return convert_element_type_p.bind(
      operand, new_dtype=target, old_dtype=source)
def bitcast_convert_type(operand, new_dtype):
  """Bitwise reinterpretation of `operand` as `new_dtype`.

  No-op (returns the operand itself) when the canonicalized target dtype
  equals the operand's current dtype.
  """
  target = xla_bridge.canonicalize_dtype(new_dtype)
  if _dtype(operand) == target:
    return operand
  return bitcast_convert_type_p.bind(operand, new_dtype=target)
def clamp(min, operand, max):
  # NOTE: the parameters deliberately shadow this module's `min`/`max`
  # functions inside the body; argument order matches XLA's Clamp.
  return clamp_p.bind(min, operand, max)
def concatenate(operands, dimension):
  # operand_shapes is captured here so downstream rules need not re-derive
  # the shapes from the (possibly traced) operands.
  return concatenate_p.bind(*operands, dimension=dimension,
                            operand_shapes=tuple(o.shape for o in operands))
def conv_general_dilated(lhs, rhs, window_strides, padding, lhs_dilation=None,
                         rhs_dilation=None, dimension_numbers=None):
  """General N-d convolution with optional LHS (transposed) and RHS (atrous)
  dilation.

  `padding` may be a padding-type string (e.g. 'SAME'/'VALID'), resolved here
  to explicit (low, high) pairs over the spatial dims, or explicit pairs
  directly. Dilations default to 1 per spatial dimension.
  `ConvDimensionNumbers`, `conv_dimension_numbers` and `padtype_to_pads` are
  presumably defined elsewhere in this file — not visible in this chunk.
  """
  if type(dimension_numbers) is not ConvDimensionNumbers:
    dimension_numbers = conv_dimension_numbers(
        lhs.shape, rhs.shape, dimension_numbers)
  if isinstance(padding, str):
    # Resolve the padding string using the *logical* (permuted) spatial dims.
    lhs_perm, rhs_perm, _ = dimension_numbers
    padding = padtype_to_pads(
        onp.take(lhs.shape, lhs_perm)[2:], onp.take(rhs.shape, rhs_perm)[2:],
        window_strides, padding)
  if lhs_dilation is None:
    lhs_dilation = (1,) * (lhs.ndim - 2)
  if rhs_dilation is None:
    rhs_dilation = (1,) * (rhs.ndim - 2)
  return conv_general_dilated_p.bind(
      lhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),
      lhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),
      dimension_numbers=dimension_numbers, lhs_shape=lhs.shape,
      rhs_shape=rhs.shape)
# Structural operations: each wrapper normalizes its parameters to hashable
# tuples (so they can serve as primitive params) and, where cheap, short-
# circuits to the operand when the op would be a no-op.
def dot(lhs, rhs): return dot_p.bind(lhs, rhs)
def dot_general(lhs, rhs, dimension_numbers):
  # dimension_numbers: ((lhs_contracting, rhs_contracting),
  #                     (lhs_batch, rhs_batch)); normalized to nested tuples.
  lhs_dims, rhs_dims = dimension_numbers
  dimension_numbers = (tuple(map(tuple, lhs_dims)), tuple(map(tuple, rhs_dims)))
  return dot_general_p.bind(lhs, rhs, dimension_numbers=dimension_numbers)
def broadcast(operand, sizes):
  # Prepends `sizes` as new leading dimensions.
  return broadcast_p.bind(operand, sizes=tuple(sizes))
def broadcast_in_dim(operand, shape, broadcast_dimensions):
  # No-op when the rank already matches and no dimensions are being mapped.
  if operand.ndim == len(shape) and not len(broadcast_dimensions):
    return operand
  else:
    return broadcast_in_dim_p.bind(
        operand, shape=tuple(shape),
        broadcast_dimensions=tuple(broadcast_dimensions))
def reshape(operand, new_sizes, dimensions=None):
  # `dimensions`, if given, permutes the axes before reshaping. Returns the
  # operand unchanged when both the shape and the permutation are trivial.
  same_shape = onp.shape(operand) == tuple(new_sizes)
  same_dims = dimensions is None or tuple(dimensions) == tuple(range(onp.ndim(operand)))
  if same_shape and same_dims:
    return operand
  else:
    return reshape_p.bind(
        operand, new_sizes=tuple(new_sizes),
        dimensions=None if dimensions is None else tuple(dimensions),
        old_sizes=onp.shape(operand))
def pad(operand, padding_value, padding_config):
  # padding_config: per-dimension (low, high, interior) triples.
  return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config))
def rev(operand, dimensions):
  return rev_p.bind(operand, dimensions=tuple(dimensions))
def select(pred, on_true, on_false):
  return select_p.bind(pred, on_true, on_false)
# NOTE: `slice` shadows the Python builtin within this module.
def slice(operand, start_indices, limit_indices, strides=None):
  return slice_p.bind(operand, start_indices=tuple(start_indices),
                      limit_indices=tuple(limit_indices),
                      strides=None if strides is None else tuple(strides),
                      operand_shape=operand.shape)
def dynamic_slice(operand, start_indices, slice_sizes):
  # `_dynamic_slice_indices` is presumably defined elsewhere in this file
  # (not visible in this chunk); it normalizes the start indices.
  start_indices = _dynamic_slice_indices(operand, start_indices)
  return dynamic_slice_p.bind(
      operand, start_indices, slice_sizes=tuple(slice_sizes),
      operand_shape=operand.shape)
def dynamic_update_slice(operand, update, start_indices):
  start_indices = _dynamic_slice_indices(operand, start_indices)
  return dynamic_update_slice_p.bind(operand, update, start_indices,
                                     update_shape=update.shape)
def index_take(src, idxs, axes):
  """Gather: pick rows of `src` at the positions given by `idxs` along `axes`.

  Traces `_index_take` to a jaxpr so the loop can be staged out as a single
  primitive. `_abstractify` is presumably defined elsewhere in this file.
  """
  pvals = [_abstractify(arg) for arg in (src,) + idxs]
  jaxpr, _, consts = pe.trace_unwrapped_to_jaxpr(partial(_index_take, axes), pvals)
  return index_take_p.bind(src, *idxs, axes=tuple(axes),
                           input_shape=src.shape, jaxpr=jaxpr, consts=consts)
def _index_take(axes, src, *idxs):
  # Reference implementation of index_take as a fori_loop of dynamic slices:
  # iteration i reads the i-th index from each array in `idxs`, slices the
  # corresponding element out of `src`, and writes it to row i of `out`.
  # `subvals` (substitute values at given positions) is presumably defined
  # elsewhere in this file — not visible in this chunk.
  n = idxs[0].shape[0]
  slice_sizes = subvals(src.shape, zip(axes, [1] * len(axes)))
  def body_fun(i, state):
    src, idxs, out = state
    src_ind = (dynamic_index_in_dim(x, i, 0, False) for x in idxs)
    start_indices = subvals([0] * src.ndim, zip(axes, src_ind))
    update = dynamic_slice(src, start_indices, slice_sizes)
    update = reshape(update, (1,) + out.shape[1:])
    out = dynamic_update_slice(out, update, [i] + [0] * (out.ndim - 1))
    return src, idxs, out
  out = full_like(src, 0, shape=(n,) + tuple(onp.delete(src.shape, axes)))
  init_val = src, idxs, out
  _, _, out = fori_loop(0, n, body_fun, init_val)
  return out
def index_untake(src, dst, idxs, axes):
  """Scatter-add: the transpose of index_take — adds rows of `src` into `dst`
  at the positions given by `idxs` along `axes`."""
  pvals = [_abstractify(arg) for arg in (src, dst) + idxs]
  jaxpr, _, consts = pe.trace_unwrapped_to_jaxpr(partial(_index_untake, axes), pvals)
  return index_untake_p.bind(src, dst, *idxs, axes=tuple(axes),
                             jaxpr=jaxpr, consts=consts)
def _index_untake(axes, src, dst, *idxs):
  # Reference implementation: iteration i reads row i of `src` and adds it
  # into `dst` at the indexed position (accumulating, not overwriting).
  n = idxs[0].shape[0]
  slice_sizes = subvals(dst.shape, zip(axes, [1] * len(axes)))
  def body_fun(i, state):
    src, dst, idxs = state
    vals = dynamic_slice(src, [i] + [0] * (src.ndim - 1), (1,) + src.shape[1:])
    vals = reshape(vals, subvals(dst.shape, zip(axes, [1] * len(axes))))
    dst_ind = (dynamic_index_in_dim(x, i, 0, False) for x in idxs)
    start_indices = subvals([0] * dst.ndim, zip(axes, dst_ind))
    update = add(vals, dynamic_slice(dst, start_indices, slice_sizes))
    dst = dynamic_update_slice(dst, update, start_indices)
    return src, dst, idxs
  init_val = src, dst, idxs
  _, dst, _ = fori_loop(0, n, body_fun, init_val)
  return dst
def transpose(operand, permutation):
  """Permute the axes of `operand` by `permutation`.

  Returns `operand` itself when the permutation is the identity, avoiding a
  needless primitive bind.
  """
  perm = tuple(permutation)
  is_identity = all(axis == pos for pos, axis in enumerate(perm))
  if is_identity:
    return operand
  return transpose_p.bind(operand, permutation=perm)
def reduce(operand, init_value, computation, dimensions):
  """General reduction over `dimensions` with a user `computation`.

  When (computation, init_value) is a recognized monoid (sum/max/min/or/and
  with its identity element), dispatch to the dedicated reducer primitive;
  otherwise trace the computation to a jaxpr and bind the generic reduce_p.
  """
  monoid_reducer = _get_monoid_reducer(computation, init_value)
  if monoid_reducer:
    return monoid_reducer(operand, dimensions)
  else:
    jaxpr, consts = _reduction_jaxpr(computation, init_value)
    return reduce_p.bind(operand, init_value, computation=computation,
                         jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))
def _reduction_jaxpr(computation, init_value):
  # Trace the binary `computation` to a jaxpr, using the abstract value of
  # `init_value` as the exemplar for both arguments.
  pval = _abstractify(init_value)
  jaxpr, _, consts = pe.trace_unwrapped_to_jaxpr(computation, (pval, pval))
  return jaxpr, consts
def _get_monoid_reducer(monoid_op, x):
  # Returns the specialized reducer when `x` is a concrete scalar equal to
  # the identity element of `monoid_op`, else a falsy value. The
  # `condition and reducer` idiom yields the reducer function only when the
  # identity matches.
  aval = core.get_aval(x)
  if (type(aval) is ConcreteArray) and aval.shape == ():
    if monoid_op is add:
      return aval.val == 0 and _reduce_sum
    elif monoid_op is max:
      return aval.val == _get_max_identity(aval.dtype) and _reduce_max
    elif monoid_op is min:
      return aval.val == _get_min_identity(aval.dtype) and _reduce_min
    elif monoid_op is bitwise_or and aval.dtype == onp.bool_:
      return aval.val == _get_max_identity(aval.dtype) and _reduce_or
    elif monoid_op is bitwise_and and aval.dtype == onp.bool_:
      return aval.val == _get_min_identity(aval.dtype) and _reduce_and
def _get_max_identity(dtype):
if onp.issubdtype(dtype, onp.floating):
return onp.array(-onp.inf, dtype)
elif onp.issubdtype(dtype, onp.integer):
return onp.array(onp.iinfo(dtype).min, dtype)
elif onp.issubdtype(dtype, onp.bool_):
return onp.array(False, onp.bool_)
def _get_min_identity(dtype):
if onp.issubdtype(dtype, onp.floating):
return onp.array(onp.inf, dtype)
elif onp.issubdtype(dtype, onp.integer):
return onp.array(onp.iinfo(dtype).max, dtype)
elif onp.issubdtype(dtype, onp.bool_):
return onp.array(True, onp.bool_)
# Specialized monoid reducers bound by _get_monoid_reducer above; each maps
# directly to a dedicated primitive.
def _reduce_sum(operand, axes):
  # input_shape is carried so the transpose rule can rebuild the full shape.
  return reduce_sum_p.bind(operand, axes=tuple(axes), input_shape=operand.shape)
def _reduce_max(operand, axes):
  return reduce_max_p.bind(operand, axes=tuple(axes))
def _reduce_min(operand, axes):
  return reduce_min_p.bind(operand, axes=tuple(axes))
def _reduce_or(operand, axes):
  return reduce_or_p.bind(operand, axes=tuple(axes))
def _reduce_and(operand, axes):
  return reduce_and_p.bind(operand, axes=tuple(axes))
def reduce_window(operand, init_value, computation, window_dimensions,
                  window_strides, padding):
  """Windowed reduction (XLA ReduceWindow).

  Mirrors `reduce`: recognized (computation, identity) monoids dispatch to
  dedicated window-reducer primitives; anything else goes through the generic
  reduce_window_p with a traced jaxpr.
  """
  monoid_reducer = _get_monoid_window_reducer(computation, init_value)
  if monoid_reducer:
    return monoid_reducer(operand, window_dimensions, window_strides, padding)
  else:
    jaxpr, consts = _reduction_jaxpr(computation, init_value)
    return reduce_window_p.bind(
        operand, init_value, jaxpr=jaxpr, consts=consts,
        window_dimensions=tuple(window_dimensions),
        window_strides=tuple(window_strides), padding=padding)
def _get_monoid_window_reducer(monoid_op, x):
  # Same `identity-match and reducer` idiom as _get_monoid_reducer.
  aval = core.get_aval(x)
  if (type(aval) is ConcreteArray) and aval.shape == ():
    if monoid_op is add:
      return aval.val == 0 and _reduce_window_sum
    elif monoid_op is max:
      return aval.val == _get_max_identity(aval.dtype) and _reduce_window_max
    elif monoid_op is min:
      return aval.val == _get_min_identity(aval.dtype) and _reduce_window_min
def _reduce_window_sum(operand, window_dimensions, window_strides, padding):
  # input_shape is carried for the transpose rule.
  return reduce_window_sum_p.bind(
      operand, window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding,
      input_shape=operand.shape)
def _reduce_window_max(operand, window_dimensions, window_strides, padding):
  return reduce_window_max_p.bind(
      operand, window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _reduce_window_min(operand, window_dimensions, window_strides, padding):
  return reduce_window_min_p.bind(
      operand, window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _select_and_scatter(operand, select, window_dimensions, window_strides,
                        padding, source, init_value, scatter):
  """Windowed select-and-scatter (XLA SelectAndScatter).

  `select` picks one element per window of `operand`; `scatter` combines the
  corresponding `source` value into the output, starting from `init_value`.

  Fix: `_reduction_jaxpr` takes (computation, init_value) — the example value
  is needed to abstractify the traced arguments — but was being called with
  only the computation, which raised a TypeError on every call. `init_value`
  is now passed for both the select and scatter computations.
  """
  select_jaxpr, select_consts = _reduction_jaxpr(select, init_value)
  scatter_jaxpr, scatter_consts = _reduction_jaxpr(scatter, init_value)
  return select_and_scatter_p.bind(
      operand, source, init_value, select_jaxpr=select_jaxpr,
      select_consts=select_consts, scatter_jaxpr=scatter_jaxpr,
      scatter_consts=scatter_consts, window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
# Specialized select-and-scatter/gather variants used internally (e.g. by
# autodiff rules for windowed max/min); `select_prim` is the comparison
# primitive (gt_p / lt_p) that determines the selected element per window.
def _select_and_scatter_add(source, operand, select_prim, window_dimensions,
                            window_strides, padding):
  return select_and_scatter_add_p.bind(
      source, operand, select_prim=select_prim,
      window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _select_and_gather_add(tangents, operand, select_prim, window_dimensions,
                           window_strides, padding):
  return select_and_gather_add_p.bind(
      tangents, operand, select_prim=select_prim,
      window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def sort(operand, dimension=-1):
  # Sorts along `dimension` (default: last axis).
  return sort_p.bind(operand, dimension=dimension)
def sort_key_val(keys, values, dimension=-1):
  # Co-sorts `values` by `keys` along `dimension`.
  # TODO new sort_key_val is variadic
  result = sort_key_val_p.bind(keys, values, dimension=dimension)
  sorted_keys, sorted_values = result
  return sorted_keys, sorted_values
def _while_loop(cond_fun, body_fun, init_val):
  """While loop: repeatedly apply `body_fun` while `cond_fun` holds.

  The pytree `init_val` is flattened, both functions are traced to jaxprs on
  the flat representation, and a single while_p primitive is bound. The body
  must return a value with exactly the same tree structure as its input.
  `while_p` is presumably defined elsewhere in this file.
  """
  init_val_flat, in_tree = tree_to_jaxtuples(init_val)
  flat_body_fun, out_tree = flatten_fun(lu.wrap_init(body_fun), (in_tree,))
  flat_cond_fun, _ = flatten_fun(lu.wrap_init(cond_fun), (in_tree,))
  pval_flat = _abstractify(init_val_flat)
  cond_jaxpr, _, cond_consts = pe.trace_to_jaxpr(flat_cond_fun, (pval_flat,))
  body_jaxpr, pvout, body_consts = pe.trace_to_jaxpr(flat_body_fun, (pval_flat,))
  abs_out, _ = pvout
  # Bundle the traced pieces into a single hashable param (see OpaqueParam).
  params = OpaqueParam((abs_out, cond_jaxpr, cond_consts, body_jaxpr, body_consts))
  out_flat = while_p.bind(init_val_flat, opaque_params=params)
  if out_tree() != in_tree:
    raise TypeError("body_fun input and output must have identical structure")
  return build_tree(out_tree(), out_flat)

class OpaqueParam(object):
  # Wrapper giving arbitrary (possibly unhashable) values a stable, unique
  # hash — a fresh id from opaque_param_ids — so they can be used as
  # primitive parameters. Equality stays object identity.
  __slots__ = ["val", "id"]
  def __init__(self, val):
    self.val = val
    self.id = next(opaque_param_ids)
  def __hash__(self):
    return self.id
# Global counter supplying unique ids for OpaqueParam instances.
opaque_param_ids = itertools.count()
### convenience wrappers around traceables
def conv(lhs, rhs, window_strides, padding):
  """N-d convolution with no dilation.

  `padding` is a padding-type string ('SAME'/'VALID'), resolved here to
  explicit per-spatial-dimension (low, high) pairs.

  Fix: the computed `pads` were previously discarded (dead local) and the raw
  string was forwarded instead; now the explicit pads are passed through,
  which is equivalent but avoids re-deriving them in conv_general_dilated.
  """
  pads = padtype_to_pads(lhs.shape[2:], rhs.shape[2:], window_strides, padding)
  return conv_general_dilated(lhs, rhs, window_strides, pads)
def conv_with_general_padding(lhs, rhs, window_strides, padding,
                              lhs_dilation, rhs_dilation):
  # Thin wrapper: explicit padding plus both dilation factors.
  return conv_general_dilated(
      lhs, rhs, window_strides, padding, lhs_dilation=lhs_dilation,
      rhs_dilation=rhs_dilation)
def full_like(x, fill_value, dtype=None, shape=None):
  """Create a full array like np.full based on the example array `x`.
  Args:
    x: example array-like, used for shape and dtype information.
    fill_value: a scalar value to fill the entries of the output array.
    dtype: optional, a dtype parameter for the output ndarray.
    shape: optional, a shape parameter for the output ndarray.
  Returns:
    An ndarray with the same shape as `x` with its entries set equal to
    `fill_value`, similar to the output of np.full.
  """
  shape = onp.shape(x) if shape is None else shape
  return broadcast(onp.array(fill_value, dtype or _dtype(x)), shape)
def collapse(operand, start_dimension, stop_dimension):
  # Flatten dimensions [start_dimension, stop_dimension) into one dimension
  # whose size is their product.
  lo, hi = start_dimension, stop_dimension
  size = prod(operand.shape[lo:hi])
  new_shape = operand.shape[:lo] + (size,) + operand.shape[hi:]
  return reshape(operand, new_shape)
def slice_in_dim(operand, start_index, limit_index, stride=1, axis=0):
  """Convenience wrapper around `slice` restricted to a single dimension.

  All other dimensions are taken in full with stride 1.
  """
  ndim = operand.ndim
  starts = [0] * ndim
  limits = list(operand.shape)
  steps = [1] * ndim
  starts[axis], limits[axis], steps[axis] = start_index, limit_index, stride
  return slice(operand, starts, limits, steps)
def index_in_dim(operand, index, axis=0, keepdims=True):
  """Integer-index into `axis` of `operand` via a length-1 slice.

  Negative indices are wrapped Python-style; out-of-range indices raise
  IndexError. With keepdims=False the indexed axis is dropped via reshape.
  """
  size = operand.shape[axis]
  idx = index + size if index < 0 else index
  if not 0 <= idx < size:
    msg = 'index {} is out of bounds for axis {} with size {}'
    raise IndexError(msg.format(index, axis, size))
  sliced = slice_in_dim(operand, idx, idx + 1, 1, axis)
  if not keepdims:
    return reshape(sliced, onp.delete(operand.shape, axis))
  return sliced
def dynamic_slice_in_dim(operand, start_index, slice_size, axis=0):
  """Convenience wrapper around dynamic_slice applying to one dimension."""
  # Start index is taken modulo the axis size (wrap-around semantics); the
  # per-dimension starts are concatenated into one index vector.
  start_indices = [onp.array([0])] * operand.ndim
  slice_sizes = list(operand.shape)
  start_indices[axis] = reshape(rem(start_index, operand.shape[axis]), [1])
  slice_sizes[axis] = slice_size
  start_indices = concatenate(start_indices, 0)
  return dynamic_slice(operand, start_indices, slice_sizes)
def dynamic_index_in_dim(operand, index, axis=0, keepdims=True):
  """Convenience wrapper around dynamic_slice to perform int indexing."""
  result = dynamic_slice_in_dim(operand, index, 1, axis)
  if keepdims:
    return result
  else:
    return reshape(result, onp.delete(operand.shape, axis))
def dynamic_update_slice_in_dim(operand, update, start_index, axis):
  # `_ndim` is presumably defined elsewhere in this file (not visible here).
  start_indices = [0] * _ndim(operand)
  start_indices[axis] = start_index % operand.shape[axis]
  return dynamic_update_slice(operand, update, start_indices)
def dynamic_update_index_in_dim(operand, update, index, axis):
  # If `update` is one rank lower than `operand`, insert a singleton axis at
  # `axis` so ranks match before delegating.
  if _ndim(update) != _ndim(operand):
    assert _ndim(update) + 1 == _ndim(operand)
    ax = axis % _ndim(operand)
    update = reshape(update, operand.shape[:ax] + (1,) + operand.shape[ax:])
  return dynamic_update_slice_in_dim(operand, update, index, axis)
def fori_loop(lower, upper, body_fun, init_val):
  """Loop from `lower` to `upper` by reduction to `while_loop`.
  Arguments:
    lower: loop index lower bound (inclusive)
    upper: loop index upper bound (exclusive)
    body_fun: function of type (int, T) -> T, where T is the type of `init_val`
    init_val: initial loop value, of type T
  Returns:
    Loop value from the final iteration, of type T.
  """
  # state: (upper limit, index, loop value)
  # The `lt` and `add` functions are added to the namespace programmatically.
  _, _, result = _while_loop(
      lambda upper_i_x: lt(upper_i_x[1], upper_i_x[0]),
      lambda upper_i_x: (upper_i_x[0], add(upper_i_x[1], 1),
                         body_fun(upper_i_x[1], upper_i_x[2])),
      (upper, lower, init_val))
  return result
def foreach_loop(sequence, body_fun, init_val):
  """Loop over `sequence` by reduction to `while_loop`.
  Arguments:
    sequence: tuple of loop items, each of type U
    body_fun: function of type (U, T) -> T, where T is the type of `init_val`
    init_val: initial loop value, of type T
  Returns:
    Loop value from the final iteration, of type T.
  """
  # state threaded through fori_loop: (sequence, loop value); only the loop
  # value is returned.
  _, result = fori_loop(
      0, len(sequence),
      lambda i, seq_val: body_fun(seq_val[0][i], seq_val[1]),
      (sequence, init_val))
  return result
def batch_matmul(lhs, rhs):
  """Batch matrix multiplication.

  Contracts the last dim of `lhs` with the second-to-last dim of `rhs`;
  all leading dimensions are treated as batch dimensions.
  """
  # NOTE(review): `_min` is bound to `builtins.max` at the top of this module,
  # so as written this rank check only fires when the *larger* rank is < 2 —
  # verify the `_min` alias.
  if _min(lhs.ndim, rhs.ndim) < 2:
    raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}'
                     .format(lhs.ndim, rhs.ndim))
  if lhs.ndim != rhs.ndim:
    raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}'
                     .format(lhs.ndim, rhs.ndim))
  lhs_contract = (lhs.ndim - 1,)
  rhs_contract = (rhs.ndim - 2,)
  batch = tuple(range(lhs.ndim - 2))
  return dot_general(lhs, rhs, [(lhs_contract, rhs_contract), (batch, batch)])
# These trig functions also exist in the XLA client library, but we treat them
# as non-primitive to maintain a smaller set of autodiff primitives.
# Derived elementwise math functions, expressed in terms of the primitives
# above so autodiff only needs rules for the smaller primitive set.
def sqrt(x):
  return pow(x, _const(x, 0.5))
def rsqrt(x):
  return pow(x, _const(x, -0.5))
def square(x):
  return mul(x, x)
def reciprocal(x):
  return div(_const(x, 1), x)
def tan(x):
  return div(sin(x), cos(x))
def asin(x):
  # half-angle form: asin(x) = 2 * atan2(x, 1 + sqrt(1 - x**2))
  return mul(_const(x, 2),
             atan2(x, add(_const(x, 1), sqrt(sub(_const(x, 1), square(x))))))
def acos(x):
  # half-angle form: acos(x) = 2 * atan2(sqrt(1 - x**2), 1 + x)
  return mul(_const(x, 2),
             atan2(sqrt(sub(_const(x, 1), square(x))), add(_const(x, 1), x)))
def atan(x):
  return atan2(x, _const(x, 1))
def sinh(x):
  # sinh(x) = (e**x - e**-x) / 2
  return mul(_const(x, 0.5), sub(exp(x), exp(neg(x))))
def cosh(x):
  # cosh(x) = (e**x + e**-x) / 2
  return mul(_const(x, 0.5), add(exp(x), exp(neg(x))))
def asinh(x):
  # asinh(x) = log(x + sqrt(x**2 + 1))
  return log(add(x, sqrt(add(mul(x, x), _const(x, 1)))))
def acosh(x):
  # acosh(x) = log(x + sqrt((x + 1) * (x - 1)))
  return log(add(x, mul(sqrt(add(x, _const(x, 1))),
                        sqrt(sub(x, _const(x, 1))))))
def atanh(x):
  # atanh(x) = 0.5 * log((1 + x) / (1 - x))
  return mul(_const(x, 0.5), log(div(add(_const(x, 1), x),
                                     sub(_const(x, 1), x))))
# Add some methods to ShapedArray that rely on lax primitives
ShapedArray.broadcast = core.aval_method(broadcast)
ShapedArray.transpose = core.aval_method(transpose)  # clobbered by lax_numpy
ShapedArray.reshape = core.aval_method(reshape)  # clobbered by lax_numpy
def _iter(tracer):
  # Iteration protocol for traced arrays: yields per-row slices along axis 0,
  # matching numpy's error for 0-d arrays.
  if tracer.ndim == 0:
    raise TypeError("iteration over a 0-d array")  # same as numpy error
  else:
    n = tracer.shape[0]
    return (index_in_dim(tracer, i, keepdims=False) for i in xrange(n))
ShapedArray._iter = staticmethod(_iter)
# Add some ad handlers that use (or could use) lax primitives
def zeros_like_array(x):
  # Zero cotangent for an array: a zero broadcast to x's shape, in the
  # canonicalized dtype.
  dtype = xla_bridge.canonicalize_dtype(_dtype(x))
  return onp.broadcast_to(onp.zeros((), dtype), onp.shape(x))
# Register adders / zero-makers for all concrete array types, including
# device-resident arrays.
for t in itertools.chain(array_types, [xla.DeviceArray]):
  ad_util.jaxval_adders[t] = add
  ad_util.jaxval_zeros_likers[t] = zeros_like_array
batching.pytype_aval_mappings[xla.DeviceArray] = make_shaped_array
### primitives
# Dtype-rule helpers for the primitive constructors below:
#   _input_dtype: result dtype follows the first argument's dtype.
#   _fixed_dtype: result dtype is a fixed, canonicalized dtype.
#   _complex_basetype: maps a complex dtype to its real base (via onp.abs).
_input_dtype = lambda *args, **_: xla_bridge.canonicalize_dtype(args[0].dtype)
_fixed_dtype = lambda dtype: lambda *args, **kwargs: xla_bridge.canonicalize_dtype(dtype)
_complex_basetype = lambda dtype: onp.abs(onp.zeros((), dtype)).dtype
def identity(x): return x
def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None):
  """Construct a Primitive wired with impl, abstract-eval, and XLA lowering.

  When no translation_rule is given, the XLA op is derived from `name` by
  CamelCasing it (see standard_translate).
  """
  prim = Primitive(name)
  prim.def_impl(partial(xla.apply_primitive, prim))
  prim.def_abstract_eval(partial(standard_abstract_eval, shape_rule, dtype_rule))
  xla.translations[prim] = translation_rule or partial(standard_translate, name)
  return prim
def standard_abstract_eval(shape_rule, dtype_rule, *args, **kwargs):
  # Picks the least-specialized abstraction level among the argument avals
  # and produces an output aval at (at most) that level.
  # NOTE(review): the ConcreteArray and ShapedArray branches are currently
  # identical — both yield a ShapedArray (no constant folding here).
  assert all(isinstance(arg, UnshapedArray) for arg in args), args
  least_specialized = _max(
      map(type, args), key=operator.attrgetter('array_abstraction_level'))
  if least_specialized is ConcreteArray:
    return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
  elif least_specialized is ShapedArray:
    return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
  elif least_specialized is UnshapedArray:
    return UnshapedArray(dtype_rule(*args, **kwargs))
  else:
    raise TypeError(args, least_specialized)
def standard_translate(name, c, *args, **kwargs):
  # 'convert_element_type' -> c.ConvertElementType(...), etc.
  xla_opname = ''.join(term.capitalize() for term in name.split('_'))
  return getattr(c, xla_opname)(*args, **kwargs)
def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs):
  # Validates that the argument's dtype is a subtype of one of the accepted
  # scalar-type classes, then applies `result_dtype` to compute the output.
  if not any(onp.issubdtype(aval.dtype, t) for t in accepted_dtypes):
    msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.'
    typename = str(onp.dtype(aval.dtype).name)
    accepted_typenames = (str(onp.dtype(t).name) for t in accepted_dtypes)
    raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames)))
  return result_dtype(aval.dtype)
def unop(result_dtype, accepted_dtypes, name):
  # Unary primitive constructor: shape passes through, dtype per the rule
  # above; vectorized batching is registered automatically.
  dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
  prim = standard_primitive(_attrgetter('shape'), dtype_rule, name)
  batching.defvectorized(prim)
  return prim
# standard_unop: unops whose output dtype equals the input dtype.
standard_unop = partial(unop, identity)
_attrgetter = lambda name: lambda x, **kwargs: getattr(x, name)
def binop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs):
  # Per-position dtype validation: accepted_dtypes[i] is the set of scalar-
  # type classes allowed for argument i. `_check_same_dtypes` is presumably
  # defined elsewhere in this file (not visible in this chunk).
  aval_dtypes = [aval.dtype for aval in avals]
  for i, (aval_dtype, types) in enumerate(zip(aval_dtypes, accepted_dtypes)):
    if not any(onp.issubdtype(aval_dtype, t) for t in types):
      msg = ('{} does not accept dtype {} at position {}. '
             'Accepted dtypes at position {} are subtypes of {}.')
      typename = str(onp.dtype(aval_dtype).name)
      typenames = ', '.join(str(onp.dtype(t).name) for t in types)
      raise TypeError(msg.format(name, typename, i, i, typenames))
  _check_same_dtypes(name, False, *aval_dtypes)
  return result_dtype(*avals)
def broadcasting_shape_rule(name, *avals):
  """Shape rule for rank-matched broadcasting binops.

  Scalars (shape ()) are ignored; the remaining shapes must share a rank and
  each dimension must either match the result or be 1. A zero-size dimension
  in any operand forces a zero-size result dimension. Raises TypeError (with
  `name` in the message) on rank or shape mismatch.
  """
  all_shapes = onp.array([a.shape for a in avals if a.shape])
  if not all_shapes.size:
    return ()
  ranks = {len(s) for s in all_shapes}
  if len(ranks) != 1:
    msg = '{} got arrays of different rank: {}.'
    raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, all_shapes)))))
  smallest = onp.min(all_shapes, axis=0)
  largest = onp.max(all_shapes, axis=0)
  result = onp.where(smallest == 0, 0, largest)
  compatible = (all_shapes == result) | (all_shapes == 1)
  if not onp.all(compatible):
    msg = '{} got incompatible shapes for broadcasting: {}.'
    raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, all_shapes)))))
  return tuple(result)
def binop(result_dtype, accepted_dtypes, name):
  # Binary primitive constructor: shapes follow the broadcasting rule above,
  # dtypes are validated per position; broadcasting batching is registered.
  dtype_rule = partial(binop_dtype_rule, result_dtype, accepted_dtypes, name)
  shape_rule = partial(broadcasting_shape_rule, name)
  prim = standard_primitive(shape_rule, dtype_rule, name)
  batching.defbroadcasting(prim)
  return prim
# standard_binop: binops whose output dtype equals the first input's dtype.
standard_binop = partial(binop, _input_dtype)
# NOTE(mattjj): this isn't great for orchestrate fwd mode because it means JVPs
# get two extra ops in them: a reshape and a broadcast_in_dim (or sometimes just
# a broadcast). but saving the shape info with the primitives isn't great either
# because then we can't trace these ops without shape data.
def _brcast(x, *others):
  # used in jvprules to make binop broadcasting explicit for transposability.
  # requires shape info during jvp tracing, which isn't strictly necessary.
  # Computes the broadcasted target shape (elementwise max over non-scalar
  # shapes) and broadcasts x up to it if needed.
  shapes = list(filter(None, map(onp.shape, (x,) + others)))
  shape = tuple(shapes and onp.max(shapes, axis=0))
  if onp.shape(x) != shape:
    return _brcast_to(x, shape)
  else:
    return x
def _brcast_to(x, shape):
  # Broadcast x to `shape`. For a non-scalar x of equal rank: size-1 axes are
  # squeezed out via reshape, then mapped back with broadcast_in_dim. A
  # scalar x is broadcast directly.
  x_shape = onp.shape(x)
  assert x_shape != shape
  if x_shape:
    assert len(x_shape) == len(shape)
    broadcast_dimensions, = onp.where(onp.equal(x_shape, shape))
    squeezed_dimensions, = onp.where(onp.not_equal(x_shape, shape))
    inshape = onp.delete(x_shape, squeezed_dimensions)
    return broadcast_in_dim(reshape(x, inshape), shape, broadcast_dimensions)
  else:
    return broadcast(x, shape)
# Accepted-dtype sets for the unop/binop constructors. Entries are scalar
# type "classes" (onp.integer, onp.floating, ...) matched via onp.issubdtype.
_f32 = {onp.float32}
_float = {onp.floating}
_complex = {onp.complex64}
_int = {onp.integer}
_bool = {onp.bool_}
_num = _int | _float | _complex
_any = _int | _float | _complex | _bool
# Elementwise primitive definitions with their JVP / transpose / batching
# rules. Helpers such as `_one`, `_two`, `_zeros`, `_ones`, `_const` and
# `_iscomplex` appear to be defined elsewhere in this file (not visible in
# this chunk).
neg_p = standard_unop(_num, 'neg')
ad.deflinear(neg_p, lambda t: [neg(t)])
batching.defvectorized(neg_p)
# Piecewise-constant ops have zero derivative everywhere they are defined.
sign_p = standard_unop(_num, 'sign')
ad.defjvp_zero(sign_p)
floor_p = standard_unop(_float, 'floor')
ad.defjvp_zero(floor_p)
ceil_p = standard_unop(_float, 'ceil')
ad.defjvp_zero(ceil_p)
round_p = standard_unop(_float, 'round')
ad.defjvp_zero(round_p)
is_finite_p = unop(_fixed_dtype(onp.bool_), _float, 'is_finite')
ad.defjvp_zero(is_finite_p)
# defjvp2 rules also receive the primal answer `ans`, letting e.g. exp reuse
# its own output: d(exp x) = g * exp(x) = g * ans.
exp_p = standard_unop(_float | _complex, 'exp')
ad.defjvp2(exp_p, lambda g, ans, x: mul(g, ans))
log_p = standard_unop(_float | _complex, 'log')
ad.defjvp(log_p, lambda g, x: div(g, x))
expm1_p = standard_unop(_float | _complex, 'expm1')
ad.defjvp2(expm1_p, lambda g, ans, x: mul(g, add(ans, _one(ans))))
log1p_p = standard_unop(_float | _complex, 'log1p')
ad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x))))
# d(tanh x) = g / cosh(x)**2
tanh_p = standard_unop(_float | _complex, 'tanh')
ad.defjvp(tanh_p, lambda g, x: div(g, pow(cosh(x), _two(x))))
sin_p = standard_unop(_float | _complex, 'sin')
ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))
cos_p = standard_unop(_float | _complex, 'cos')
ad.defjvp(cos_p, lambda g, x: neg(mul(g, sin(x))))
atan2_p = standard_binop([_float, _float], 'atan2')
lgamma_p = standard_unop(_float, 'lgamma')
ad.defjvp(lgamma_p, lambda g, x: mul(g, digamma(x)))
digamma_p = standard_unop(_float, 'digamma')
# d(erf x) = g * 2/sqrt(pi) * exp(-x**2); erfc is its negation.
erf_p = standard_unop(_float, 'erf')
ad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
                                  mul(g, exp(neg(square(x))))))
erfc_p = standard_unop(_float, 'erfc')
ad.defjvp(erfc_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
                                   mul(neg(g), exp(neg(square(x))))))
erf_inv_p = standard_unop(_float, 'erf_inv')
ad.defjvp2(erf_inv_p, lambda g, ans, x: mul(_const(x, onp.sqrt(onp.pi) / 2.),
                                            mul(g, exp(square(ans)))))
# Complex component ops are linear; their transposes rebuild complex values.
real_p = unop(_fixed_dtype(onp.float32), _complex, 'real')
ad.deflinear(real_p, lambda t: [complex(t, onp.zeros((), onp.float32))])
imag_p = unop(_fixed_dtype(onp.float32), _complex, 'imag')
ad.deflinear(imag_p, lambda t: [complex(onp.zeros((), onp.float32), t)])
complex_p = binop(_fixed_dtype(onp.complex64), [_f32, _f32], 'complex')
ad.deflinear(complex_p, lambda t: [real(t), imag(t)])
conj_p = unop(_fixed_dtype(onp.complex64), _float | _complex, 'conj')
def conj_transpose_rule(t, x, input_dtype):
  # Linear op: primal input must be symbolic. Transposing conj over a real
  # input drops the imaginary part.
  assert x is None
  if onp.issubdtype(input_dtype, onp.complexfloating):
    return [conj(t)]
  else:
    return [real(t)]
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = conj_transpose_rule
# d|x| = Re(g * conj(x)) / |x|, guarding against division by zero.
abs_p = unop(_complex_basetype, _num, 'abs')
ad.defjvp2(abs_p,
           lambda g, ans, x:
           div(_maybe_real(mul(g, _maybe_conj(x))), _replace_zero(ans)))
_maybe_conj = lambda x: conj(x) if _iscomplex(x) else x
_maybe_real = lambda x: real(x) if _iscomplex(x) else x
# TODO handle broadcasting
# pow JVPs: wrt x it is y*x**(y-1) (with the y==0 case guarded); wrt y it is
# log(x)*x**y (with log's argument guarded against zero).
pow_p = standard_binop([_float | _complex, _float | _complex], 'pow')
ad.defjvp(pow_p,
          lambda g, x, y: mul(_brcast(g, y), mul(y, pow(x, select(
              eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))),
          lambda g, x, y: mul(_brcast(g, x),
                              mul(log(_replace_zero(x)), pow(x, y))))
_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)
# Bitwise/logical ops are non-differentiable: zero JVPs.
not_p = standard_unop(_int | _bool, 'not')
and_p = standard_binop([_any, _any], 'and')
ad.defjvp_zero(and_p)
or_p = standard_binop([_any, _any], 'or')
ad.defjvp_zero(or_p)
xor_p = standard_binop([_any, _any], 'xor')
ad.defjvp_zero(xor_p)
def add_transpose(t, x, y):
  # Transpose of addition passes the cotangent to both inputs.
  assert x is None and y is None  # computation must be linear, not affine
  return [t, t]
add_p = standard_binop([_num, _num], 'add')
ad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x))
ad.primitive_transposes[add_p] = add_transpose
sub_p = standard_binop([_num, _num], 'sub')
ad.defjvp(sub_p,
          lambda g, x, y: _brcast(g, y),
          lambda g, x, y: _brcast(neg(g), x))
mul_p = standard_binop([_num, _num], 'mul')
ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)  # TODO
def div_transpose_rule(cotangent, x, y):
  # div is linear only in its numerator, so the denominator must be known
  # (y is not None) and receives no cotangent.
  assert x is None and y is not None
  res = ad_util.zero if cotangent is ad_util.zero else div(cotangent, y)
  return res, None
div_p = standard_binop([_num, _num], 'div')
# d(x/y) wrt x is g/y; wrt y it is -x*g / y**2.
ad.defjvp(div_p,
          lambda g, x, y: div(_brcast(g, y), y),
          lambda g, x, y: div(mul(neg(_brcast(g, x)), x), pow(y, _two(y))))
ad.primitive_transposes[div_p] = div_transpose_rule
rem_p = standard_binop([_num, _num], 'rem')
# d(rem(x, y)) wrt x is g; wrt y it is -g * floor(x/y).
ad.defjvp(rem_p,
          lambda g, x, y: _brcast(g, y),
          lambda g, x, y: mul(neg(g), floor(div(x, y))))
# max/min route the tangent to the winning argument; `_balanced_eq` appears
# to split ties (defined elsewhere in this file, not visible in this chunk).
max_p = standard_binop([_any, _any], 'max')
ad.defjvp2(max_p,
           lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
           lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
min_p = standard_binop([_any, _any], 'min')
ad.defjvp2(min_p,
           lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
           lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
# Integer shifts and comparisons: all have zero derivatives. Comparisons
# produce bool regardless of input dtype.
shift_left_p = standard_binop([_int, _int], 'shift_left')
ad.defjvp_zero(shift_left_p)
shift_right_arithmetic_p = standard_binop([_int, _int], 'shift_right_arithmetic')
ad.defjvp_zero(shift_right_arithmetic_p)
shift_right_logical_p = standard_binop([_int, _int], 'shift_right_logical')
ad.defjvp_zero(shift_right_logical_p)
eq_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'eq')
ad.defjvp_zero(eq_p)
ne_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'ne')
ad.defjvp_zero(ne_p)
ge_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'ge')
ad.defjvp_zero(ge_p)
gt_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'gt')
ad.defjvp_zero(gt_p)
le_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'le')
ad.defjvp_zero(le_p)
lt_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'lt')
ad.defjvp_zero(lt_p)
def convert_element_type_shape_rule(operand, new_dtype, old_dtype):
  """Shape rule for convert_element_type: conversion is elementwise."""
  return operand.shape
def convert_element_type_dtype_rule(operand, new_dtype, old_dtype):
  """Dtype rule: the output dtype is the requested new_dtype."""
  return new_dtype
def convert_element_type_translation_rule(c, operand, new_dtype, old_dtype):
  """XLA translation: lower to an XLA ConvertElementType op."""
  new_etype = xla_bridge.dtype_to_etype(new_dtype)
  return c.ConvertElementType(operand, new_element_type=new_etype)
convert_element_type_p = standard_primitive(
    convert_element_type_shape_rule, convert_element_type_dtype_rule,
    'convert_element_type', convert_element_type_translation_rule)
# convert_element_type is linear; its transpose converts the cotangent back
# to the original dtype.
ad.deflinear(
    convert_element_type_p,
    lambda t, new_dtype, old_dtype: [convert_element_type(t, old_dtype)])
batching.defvectorized(convert_element_type_p)
def bitcast_convert_type_shape_rule(operand, new_dtype):
  """Shape rule for bitcast_convert_type: shape is unchanged."""
  return operand.shape
def bitcast_convert_type_dtype_rule(operand, new_dtype):
  """Dtype rule: the output dtype is the requested new_dtype."""
  return new_dtype
def bitcast_convert_type_translation_rule(c, operand, new_dtype):
  """XLA translation: lower to an XLA BitcastConvertType op."""
  new_etype = xla_bridge.dtype_to_etype(new_dtype)
  return c.BitcastConvertType(operand, new_element_type=new_etype)
bitcast_convert_type_p = standard_primitive(
    bitcast_convert_type_shape_rule, bitcast_convert_type_dtype_rule,
    'bitcast_convert_type', bitcast_convert_type_translation_rule)
# Bit-level reinterpretation is not differentiable: zero JVP.
ad.defjvp_zero(bitcast_convert_type_p)
batching.defvectorized(bitcast_convert_type_p)
def conv_general_dilated_shape_rule(
    lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, **unused_kwargs):
  """Shape rule for conv_general_dilated.

  Permutes lhs/rhs into canonical order, applies the dilation factors, and
  computes the windowed output shape before permuting back to the
  user-specified output layout.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  lhs_perm, rhs_perm, out_perm = dimension_numbers
  lhs_trans = _dilate_shape(onp.take(lhs.shape, lhs_perm), lhs_dilation)
  rhs_trans = _dilate_shape(onp.take(rhs.shape, rhs_perm), rhs_dilation)
  out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding)
  # onp.argsort(out_perm) is the inverse of the output permutation.
  return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def conv_general_dilated_dtype_rule(
    lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, **unused_kwargs):
  """Dtype rule: both operands must be f32; output keeps the input dtype."""
  return binop_dtype_rule(_input_dtype, [_f32, _f32], 'conv_general_dilated',
                          lhs, rhs)
# Helpers over a conv dimension spec: swap its first two entries, and take
# the trailing spatial dimensions.
_conv_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]
_conv_sdims = lambda spec: spec[2:]
def conv_general_dilated_transpose_lhs(
    g, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, lhs_shape, rhs_shape):
  """Transpose w.r.t. lhs: convolve the cotangent with spatially reversed
  weights, with padding/dilation chosen so the result has lhs's shape."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  t_rhs_spec = _conv_transpose(rhs_spec)
  trans_dimension_numbers = ConvDimensionNumbers(lhs_spec, t_rhs_spec, out_spec)
  padding = _conv_general_vjp_lhs_padding(
      onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),
      window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
      rhs_dilation)
  revd_weights = rev(rhs, rhs_sdims)
  # Note strides and lhs_dilation swap roles in the transposed convolution.
  return conv_general_dilated(
      g, revd_weights, window_strides=lhs_dilation, padding=padding,
      lhs_dilation=window_strides, rhs_dilation=rhs_dilation,
      dimension_numbers=trans_dimension_numbers)
def conv_general_dilated_transpose_rhs(
    g, lhs, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, lhs_shape, rhs_shape):
  """Transpose w.r.t. rhs: convolve lhs with the cotangent, with roles of
  window_strides and rhs_dilation swapped."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
  lhs_trans, rhs_trans, out_trans = map(_conv_transpose, dimension_numbers)
  trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans)
  padding = _conv_general_vjp_rhs_padding(
      onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),
      window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
      rhs_dilation)
  return conv_general_dilated(
      lhs, g, window_strides=rhs_dilation, padding=padding,
      lhs_dilation=lhs_dilation, rhs_dilation=window_strides,
      dimension_numbers=trans_dimension_numbers)
def conv_general_dilated_translation_rule(
    c, lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, **unused_kwargs):
  """XLA translation: lower to an XLA ConvGeneralDilated op."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  dimension_numbers = _conv_general_proto(dimension_numbers)
  return c.ConvGeneralDilated(lhs, rhs, window_strides, padding, lhs_dilation,
                              rhs_dilation, dimension_numbers)
conv_general_dilated_p = standard_primitive(
    conv_general_dilated_shape_rule, conv_general_dilated_dtype_rule,
    'conv_general_dilated', conv_general_dilated_translation_rule)
# Convolution is bilinear in (lhs, rhs).
ad.defbilinear(conv_general_dilated_p,
               conv_general_dilated_transpose_lhs,
               conv_general_dilated_transpose_rhs)
def dot_shape_rule(lhs, rhs):
  """Shape rule for `dot`, covering rank-1/rank-2 operand combinations.

  Raises:
    TypeError: if either operand has rank 0 or rank > 2, or if the
      contracted dimensions disagree.
  """
  if lhs.ndim == 0 or rhs.ndim == 0:
    raise TypeError("Dot only supports rank 1 or above, got shapes {} and {}."
                    .format(lhs.shape, rhs.shape))
  if lhs.ndim > 2 or rhs.ndim > 2:
    raise TypeError("Dot only supports rank 2 or less, got shapes {} and {}."
                    .format(lhs.shape, rhs.shape))

  def require(shape_cond):
    # Every shape-mismatch failure shares one error message.
    if not shape_cond:
      raise TypeError("Incompatible shapes for dot: got {} and {}."
                      .format(lhs.shape, rhs.shape))

  if lhs.ndim == rhs.ndim == 1:
    # vector . vector -> scalar
    require(lhs.shape == rhs.shape)
    return ()
  if lhs.ndim == rhs.ndim == 2:
    # matrix . matrix
    require(lhs.shape[1] == rhs.shape[0])
    return (lhs.shape[0], rhs.shape[1])
  if rhs.ndim == 1:
    # matrix . vector
    require(lhs.shape[-1] == rhs.shape[0])
    return lhs.shape[:-1]
  # vector . matrix
  require(lhs.shape[-1] == rhs.shape[-2])
  return lhs.shape[:-1] + rhs.shape[:-2] + rhs.shape[-1:]
def dot_transpose_lhs(t, rhs):
  """Transpose of dot w.r.t. its lhs, dispatching on the operand ranks."""
  if onp.ndim(t) == onp.ndim(rhs) == 2:
    return dot(t, transpose(rhs, (1, 0)))
  elif onp.ndim(t) == 1 and onp.ndim(rhs) == 2:
    return dot(rhs, t)
  elif onp.ndim(t) == onp.ndim(rhs) == 1:
    return _outer(t, rhs)
  elif onp.ndim(t) == 0 or onp.ndim(rhs) == 0:
    return mul(t, rhs)
  else:
    raise TypeError
def dot_transpose_rhs(t, lhs):
  """Transpose of dot w.r.t. its rhs, dispatching on the operand ranks."""
  if onp.ndim(lhs) == onp.ndim(t) == 2:
    return dot(transpose(lhs, (1, 0)), t)
  elif onp.ndim(lhs) == 2 and onp.ndim(t) == 1:
    return dot(t, lhs)
  elif onp.ndim(t) == onp.ndim(lhs) == 1:
    return _outer(lhs, t)
  elif onp.ndim(t) == 0 or onp.ndim(lhs) == 0:
    return mul(t, lhs)
  else:
    raise TypeError
def _outer(x, y):
  """Outer product of two rank-1 arrays via column-times-row multiply."""
  assert onp.ndim(x) == onp.ndim(y) == 1
  return mul(reshape(x, (x.shape[0], 1)), reshape(y, (1, y.shape[0])))
def dot_batch_rule(batched_args, batch_dims):
  """Batching rule for `dot`.

  Args:
    batched_args: pair (lhs, rhs) of operands; at most one may be unbatched.
    batch_dims: pair (lbd, rbd) of batch dimension indices, with None
      marking an unbatched operand.

  Returns:
    A pair (batched_result, output_batch_dim).
  """
  lhs, rhs = batched_args
  lbd, rbd = batch_dims
  T = lambda x: transpose(x, onp.arange(onp.ndim(x))[::-1])
  # Record batch sizes before any axis moves, so the broadcasts below read
  # the operands in their original layout.
  lhs_bsize = None if lbd is None else lhs.shape[lbd]
  rhs_bsize = None if rbd is None else rhs.shape[rbd]

  # in some cases, we can call dot instead of dot_general
  if max(onp.ndim(lhs), onp.ndim(rhs)) <= 2:
    if rbd is None:
      assert lbd in (0, 1)
      if lbd == 0:
        return dot(lhs, rhs), 0
      else:
        return dot(T(rhs), lhs), 1

    if lbd is None:
      assert rbd in (0, 1)
      if rbd == onp.ndim(rhs) - 1:
        return dot(lhs, rhs), 1
      else:
        return dot(rhs, T(lhs)), 0

    assert lbd is not None and rbd is not None
    assert lhs.ndim == rhs.ndim == 2  # dot only supports rank 1 and above
    # Bug fix: move_dim_to_front is not in-place -- its result was being
    # discarded -- and the output batch dimension was not being returned.
    if lbd != 0:
      lhs = batching.move_dim_to_front(lhs, lbd)
    if rbd != 0:
      rhs = batching.move_dim_to_front(rhs, rbd)
    return dot_general(lhs, rhs, [((1,), (1,)), ((0,), (0,))]), 0

  if lbd is None:
    assert rbd is not None
    lhs = broadcast(lhs, (rhs_bsize,))
  else:
    lhs = batching.move_dim_to_front(lhs, lbd)
  lhs_batch = (0,)
  lhs_contracting = (onp.ndim(lhs) - 1,)

  if rbd is None:
    assert lbd is not None
    # Bug fix: use the pre-move batch size; indexing the already-moved lhs
    # with the original lbd was wrong whenever lbd != 0.
    rhs = broadcast(rhs, (lhs_bsize,))
  else:
    rhs = batching.move_dim_to_front(rhs, rbd)
  rhs_batch = (0,)
  rhs_contracting = (onp.arange(1, onp.ndim(rhs))[-2:][0],)

  dim_nums = [(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch)]
  return dot_general(lhs, rhs, dim_nums), 0
dot_dtype_rule = partial(binop_dtype_rule, _input_dtype, [_num, _num], 'dot')
dot_p = standard_primitive(dot_shape_rule, dot_dtype_rule, 'dot')
# dot is bilinear: its transposes are expressed via dot itself.
ad.defbilinear(dot_p, dot_transpose_lhs, dot_transpose_rhs)
batching.primitive_batchers[dot_p] = dot_batch_rule
def dot_general_shape_rule(lhs, rhs, dimension_numbers):
  """Shape rule for dot_general.

  Validates the dimension_numbers structure -- identical leading batch
  dimensions on both sides, exactly one contracting dimension per side with
  matching sizes, and at most one remaining ("tensored") dimension per
  side -- then returns batch + lhs-tensored + rhs-tensored dims.
  """
  (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
  if len(lhs_batch) != len(rhs_batch):
    msg = ("dot_general requires equal numbers of lhs_batch and rhs_batch "
           "dimensions, got lhs_batch {} and rhs_batch {}.")
    raise TypeError(msg.format(lhs_batch, rhs_batch))
  if not onp.all(onp.equal(lhs_batch, rhs_batch)):
    msg = ("dot_general requires same lhs and rhs batch dimension numbers, "
           "got {} and {}.")
    raise TypeError(msg.format(lhs_batch, rhs_batch))
  lhs_batch_shape = onp.take(lhs.shape, lhs_batch)
  rhs_batch_shape = onp.take(rhs.shape, rhs_batch)
  if not onp.all(onp.equal(lhs_batch_shape, rhs_batch_shape)):
    msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions "
           "to have the same shape, got {} and {}.")
    raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
  if tuple(sorted(lhs_batch)) != tuple(range(len(lhs_batch))):
    msg = ("dot_general requires lhs batch dimensions to precede contracting "
           "and non-contracting dimensions, got lhs_batch {}.")
    raise TypeError(msg.format(lhs_batch))
  if tuple(sorted(rhs_batch)) != tuple(range(len(rhs_batch))):
    msg = ("dot_general requires rhs batch dimensions to precede contracting "
           "and non-contracting dimensions, got rhs_batch {}.")
    raise TypeError(msg.format(rhs_batch))
  if not len(lhs_contracting) == len(rhs_contracting) == 1:
    msg = ("dot_general accepts exactly one lhs_contracting and "
           "rhs_contracting dimension, got {} and {}.")
    raise TypeError(msg.format(lhs_contracting, rhs_contracting))
  lhs_contracting_shape = onp.take(lhs.shape, lhs_contracting)
  rhs_contracting_shape = onp.take(rhs.shape, rhs_contracting)
  if not onp.all(onp.equal(lhs_contracting_shape, rhs_contracting_shape)):
    msg = ("dot_general requires contracting dimensions to have the same "
           "shape, got {} and {}.")
    raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
  if lhs.ndim > len(lhs_batch) + len(lhs_contracting) + 1:
    msg = ("dot_general requires either one or zero non-batch non-contracting "
           "lhs dimension, got {}.")
    diff = lhs.ndim - len(lhs_batch) - len(lhs_contracting)
    raise TypeError(msg.format(diff))
  if rhs.ndim > len(rhs_batch) + len(rhs_contracting) + 1:
    msg = ("dot_general requires either one or zero non-batch non-contracting "
           "rhs dimension, got {}.")
    diff = rhs.ndim - len(rhs_batch) - len(rhs_contracting)
    raise TypeError(msg.format(diff))
  batch_shape = tuple(onp.take(lhs.shape, lhs_batch))
  lhs_contract_or_batch = tuple(lhs_contracting) + tuple(lhs_batch)
  lhs_tensored_shape = tuple(onp.delete(lhs.shape, lhs_contract_or_batch))
  rhs_contract_or_batch = tuple(rhs_contracting) + tuple(rhs_batch)
  rhs_tensored_shape = tuple(onp.delete(rhs.shape, rhs_contract_or_batch))
  return batch_shape + lhs_tensored_shape + rhs_tensored_shape
def dot_general_dtype_rule(lhs, rhs, dimension_numbers):
  """Dtype rule: both operands numeric with a matching dtype."""
  return binop_dtype_rule(_input_dtype, [_num, _num], 'dot_general', lhs, rhs)
def dot_general_transpose_lhs(g, y, dimension_numbers, swap_ans=False):
  """Transpose of dot_general w.r.t. its lhs.

  Contracts the cotangent g with y over y's kept dimensions, then transposes
  the result back into the original lhs dimension order.
  """
  (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
  # Recover the (unseen) lhs rank from g's rank and the dimension numbers.
  x_ndim = g.ndim - y.ndim + len(x_batch) + 2 * len(x_contract)
  x_kept = remaining(range(x_ndim), x_contract, x_batch)
  y_kept = remaining(range(y.ndim), y_contract, y_batch)
  if swap_ans:
    ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept)
  else:
    ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept)
  dims = ((ans_y, y_kept), (ans_batch, y_batch))
  x_contract_sorted_by_y = list(onp.take(x_contract, onp.argsort(y_contract)))
  out_axes = onp.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)
  return transpose(dot_general(g, y, dims), tuple(out_axes))
def dot_general_transpose_rhs(g, x, dimension_numbers):
  """Transpose w.r.t. rhs: reuse the lhs rule with the operand roles swapped."""
  (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
  swapped_dimension_numbers = ((y_contract, x_contract), (y_batch, x_batch))
  return dot_general_transpose_lhs(g, x, swapped_dimension_numbers, True)
def dot_general_batch_rule(batched_args, batch_dims, dimension_numbers):
  """Batching rule: make the vmapped axis a leading batch dimension and
  shift all existing dimension numbers right by one."""
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  lhs, rhs = batched_args
  lbd, rbd = batch_dims
  assert lbd is not None or rbd is not None
  if lbd is not None:
    if lbd != 0:
      lhs = batching.move_dim_to_front(lhs, lbd)
      lbd = 0
  else:
    assert rbd is not None
    lhs = broadcast(lhs, (rhs.shape[rbd],))
  lhs_contract = tuple(onp.add(1, lhs_contract))
  lhs_batch = (0,) + tuple(onp.add(1, lhs_batch))
  if rbd is not None:
    if rbd != 0:
      rhs = batching.move_dim_to_front(rhs, rbd)
      rbd = 0
  else:
    assert lbd is not None
    rhs = broadcast(rhs, (lhs.shape[lbd],))
  rhs_contract = tuple(onp.add(1, rhs_contract))
  rhs_batch = (0,) + tuple(onp.add(1, rhs_batch))
  new_dimension_numbers = [(lhs_contract, rhs_contract), (lhs_batch, rhs_batch)]
  batched_out = dot_general(lhs, rhs, new_dimension_numbers)
  return batched_out, 0
dot_general_p = standard_primitive(dot_general_shape_rule,
                                   dot_general_dtype_rule, 'dot_general')
ad.defbilinear(dot_general_p,
               dot_general_transpose_lhs, dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = dot_general_batch_rule
def broadcast_shape_rule(operand, sizes):
  """Shape rule for broadcast: prepend `sizes` to the operand's shape."""
  _check_shapelike('broadcast', 'sizes', sizes)
  return tuple(sizes) + operand.shape
def broadcast_batch_rule(batched_args, batch_dims, sizes):
  """Batching rule: the batch dim shifts right by the number of new dims."""
  operand, = batched_args
  bdim, = batch_dims
  new_bdim = None if bdim is None else bdim + len(sizes)
  return broadcast(operand, sizes), new_bdim
broadcast_p = standard_primitive(
    broadcast_shape_rule, _input_dtype, 'broadcast')
# broadcast is linear; its transpose sums over the broadcast dimensions.
ad.deflinear(broadcast_p, lambda t, sizes: [_reduce_sum(t, range(len(sizes)))])
batching.primitive_batchers[broadcast_p] = broadcast_batch_rule
def broadcast_in_dim_shape_rule(operand, shape, broadcast_dimensions):
  """Shape rule for broadcast_in_dim: the output shape is `shape`.

  Validates that broadcast_dimensions has one entry per operand dimension
  and that every entry names a valid output dimension.

  Raises:
    TypeError: if the arguments are malformed or inconsistent.
  """
  _check_shapelike('broadcast_in_dim', 'shape', shape)
  _check_shapelike('broadcast_in_dim', 'broadcast_dimensions',
                   broadcast_dimensions)
  if operand.ndim != len(broadcast_dimensions):
    # Bug fix: the message lacked a placeholder for broadcast_dimensions, so
    # the first format argument was printed as the ndim and the real ndim
    # was silently dropped.
    msg = ('broadcast_in_dim broadcast_dimensions must have length equal to '
           'operand ndim, got broadcast_dimensions {} for operand ndim {}.')
    raise TypeError(msg.format(broadcast_dimensions, operand.ndim))
  if not set(broadcast_dimensions).issubset(set(range(len(shape)))):
    msg = ('broadcast_in_dim broadcast_dimensions must be a subset of output '
           'dimensions, got {} for operand ndim {} and shape {}.')
    raise TypeError(msg.format(broadcast_dimensions, operand.ndim, shape))
  return shape
def broadcast_in_dim_transpose_rule(t, shape, broadcast_dimensions):
  """Transpose: sum the cotangent over dims not mapped from the operand."""
  axes = tuple(onp.delete(range(len(shape)), broadcast_dimensions))
  return [_reduce_sum(t, axes)]
def broadcast_in_dim_batch_rule(batched_args, batch_dims, shape,
                                broadcast_dimensions):
  """Batching rule for broadcast_in_dim."""
  operand, = batched_args
  bdim, = batch_dims
  new_shape = list(shape)
  new_shape.insert(bdim, operand.shape[bdim])
  # NOTE(review): `bdim` indexes the batched *operand*, while entries of
  # broadcast_dimensions index the *output*; the comparison below assumes
  # those two coordinate spaces coincide at the batch dim -- confirm.
  new_broadcast_dimensions = [d if d < bdim else d + 1 for d in broadcast_dimensions]
  new_broadcast_dimensions.insert(bdim, bdim)
  return broadcast_in_dim(operand, new_shape, new_broadcast_dimensions), bdim
broadcast_in_dim_p = standard_primitive(
    broadcast_in_dim_shape_rule, _input_dtype, 'broadcast_in_dim')
ad.deflinear(broadcast_in_dim_p, broadcast_in_dim_transpose_rule)
batching.primitive_batchers[broadcast_in_dim_p] = broadcast_in_dim_batch_rule
def clamp_shape_rule(min, operand, max):
  """Shape rule for `clamp`: each bound is a scalar or matches the operand.

  Returns the operand's shape; raises TypeError on a shape mismatch.
  """
  template = ("clamp requires {0}.shape == operand.shape or "
              "{0}.shape == (), got {1}.")
  # Check the bounds in the same order as before: min first, then max.
  for bound_name, bound in (("min", min), ("max", max)):
    if bound.shape and bound.shape != operand.shape:
      raise TypeError(template.format(bound_name, bound.shape))
  return operand.shape
clamp_dtype_rule = partial(binop_dtype_rule, _input_dtype, [_any, _any, _any],
                           'clamp')
clamp_p = standard_primitive(clamp_shape_rule, clamp_dtype_rule, 'clamp')
# JVP of clamp w.r.t. (min, operand, max): each argument's tangent flows
# through only on the region where that argument determines the output.
ad.defjvp(clamp_p,
          lambda g, min, operand, max:
          select(bitwise_and(gt(min, operand), lt(min, max)),
                 _brcast(g, operand), _zeros(operand)),
          lambda g, min, operand, max:
          select(bitwise_and(gt(operand, min), lt(operand, max)),
                 g, _zeros(operand)),
          lambda g, min, operand, max:
          select(lt(max, operand), _brcast(g, operand), _zeros(operand)))
def concatenate_shape_rule(*operands, **kwargs):
  """Shape rule for concatenate: sizes sum along `dimension`, match elsewhere."""
  dimension = kwargs.pop('dimension')
  if not operands:
    msg = "concatenate expects at least one operand, got 0."
    raise TypeError(msg)
  if not all(isinstance(operand, UnshapedArray) for operand in operands):
    msg = "All objects to concatenate must be arrays, got {}."
    op = next(op for op in operands if not isinstance(op, UnshapedArray))
    raise TypeError(msg.format(type(op)))
  if len(set(operand.ndim for operand in operands)) != 1:
    msg = "Cannot concatenate arrays with different ranks, got {}."
    raise TypeError(msg.format(", ".join(str(o.ndim) for o in operands)))
  shapes = onp.array([operand.shape for operand in operands])
  if not 0 <= dimension < shapes.shape[1]:
    msg = "concatenate dimension out of bounds: dimension {} for shapes {}."
    raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
  if not onp.all(onp.delete(shapes[0] == shapes, dimension, axis=1)):
    msg = ("Cannot concatenate arrays with shapes that differ in dimensions "
           "other than the one being concatenated: dimension {} for shapes {}.")
    raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
  concat_size = sum(o.shape[dimension] for o in operands)
  ex_shape = operands[0].shape
  return ex_shape[:dimension] + (concat_size,) + ex_shape[dimension+1:]
def concatenate_dtype_rule(*operands, **kwargs):
  """Dtype rule: all operands must share one dtype."""
  _check_same_dtypes('concatenate', False, *(o.dtype for o in operands))
  return operands[0].dtype
def concatenate_translation_rule(c, *operands, **kwargs):
  """XLA translation: lower to an XLA Concatenate op."""
  dimension = kwargs.pop('dimension')
  return c.Concatenate(operands, dimension=dimension)
def concatenate_transpose_rule(t, *operands, **kwargs):
  """Transpose: slice the cotangent apart at each operand's offsets.

  An operand passed as None marks a position whose cotangent is needed;
  non-None (constant) operands get None back.
  """
  dimension = kwargs.pop('dimension')
  operand_shapes = kwargs.pop('operand_shapes')
  if t is ad_util.zero:
    return [ad_util.zero if o is None else None for o in operands]
  else:
    # Per-operand start/limit index matrices along the concatenated dim.
    limit_points = onp.cumsum([shape[dimension] for shape in operand_shapes])
    starts = onp.zeros((len(operands), t.ndim), dtype=int)
    starts[1:, dimension] = limit_points[:-1]
    limits = onp.tile(t.shape, (len(operands), 1))
    limits[:, dimension] = limit_points
    # `slice` here is the lax slice primitive, shadowing the builtin.
    return [slice(t, start, limit) if o is None else None
            for o, start, limit in zip(operands, starts, limits)]
def concatenate_batch_rule(batched_args, batch_dims, dimension, operand_shapes):
  """Batching rule: align batch dims at the front, concatenate at dim + 1."""
  size = next(op.shape[bdim] for op, bdim in zip(batched_args, batch_dims)
              if bdim is not None)
  operands = [batching.move_dim_to_front(op, bdim) if bdim is not None
              else broadcast(op, (size,))
              for op, bdim in zip(batched_args, batch_dims)]
  return concatenate(operands, dimension + 1), 0
concatenate_p = standard_primitive(
    concatenate_shape_rule, concatenate_dtype_rule, 'concatenate',
    concatenate_translation_rule)
ad.deflinear(concatenate_p, concatenate_transpose_rule)
# NOTE(review): the transpose is registered both via deflinear above and by
# the direct assignment below (which overwrites it) -- confirm which of the
# two calling conventions concatenate_transpose_rule actually implements.
ad.primitive_transposes[concatenate_p] = concatenate_transpose_rule
batching.primitive_batchers[concatenate_p] = concatenate_batch_rule
def pad_shape_rule(operand, padding_value, padding_config):
  """Shape rule for `pad`.

  Each output dim is lo + hi + size + interior * (size - 1), where
  (lo, hi, interior) comes from padding_config.
  """
  if operand.dtype != padding_value.dtype:
    raise TypeError(
        "pad operand and padding_value must be same dtype: got {} and {}."
        .format(operand.dtype, padding_value.dtype))
  lo, hi, interior = zip(*padding_config)
  edge = onp.add(operand.shape, onp.add(lo, hi))
  dilation = onp.multiply(interior, onp.subtract(operand.shape, 1))
  return tuple(onp.add(edge, dilation))
def pad_transpose(t, operand, padding_value, padding_config):
  """Transpose of pad: un-pad the cotangent; the padding value's cotangent
  is the total mass that fell on the padded region."""
  lo, hi, interior = zip(*padding_config)
  if onp.any(onp.less(lo, 0)) or onp.any(onp.less(hi, 0)):
    msg = "pad transpose not implemented for negative padding, got {}."
    raise NotImplementedError(msg.format(padding_config))
  total = lambda x: _reduce_sum(x, list(range(t.ndim)))
  # Strided slice extracting the entries of t at the original operand sites.
  t_op = lambda: slice(t, lo, onp.subtract(t.shape, hi), onp.add(interior, 1))
  t_operand = t_op() if operand is None else None
  if padding_value is None:
    t_operand = t_op() if t_operand is None else t_operand
    t_padv = sub(total(t), total(t_operand))
  else:
    t_padv = None
  return [t_operand, t_padv]
def pad_batch_rule(batched_args, batch_dims, padding_config):
  """Batching rule: give the batch dimension a (0, 0, 0) padding entry."""
  operand, padding_value = batched_args
  operand_bdim, padding_value_bdim = batch_dims
  if padding_value_bdim is None:
    assert operand_bdim is not None
    padding_config = list(padding_config)
    padding_config.insert(operand_bdim, (0, 0, 0))
    return pad(operand, padding_value, padding_config), operand_bdim
  else:
    raise NotImplementedError  # loop and stack
pad_p = standard_primitive(pad_shape_rule, _input_dtype, 'pad')
ad.deflinear(pad_p, pad_transpose)
# NOTE(review): as with concatenate, the transpose is registered twice; the
# direct assignment below overwrites the deflinear registration -- confirm
# which convention pad_transpose implements.
ad.primitive_transposes[pad_p] = pad_transpose
batching.primitive_batchers[pad_p] = pad_batch_rule
def reshape_shape_rule(operand, new_sizes, dimensions, **unused_kwargs):
  """Shape rule for reshape.

  Checks that new_sizes are nonnegative, that the total element count is
  preserved, and that `dimensions` (an optional fused transpose) is a
  permutation of the operand's dimensions.

  Raises:
    TypeError: if any check fails.
  """
  if not onp.all(onp.greater_equal(new_sizes, 0)):
    # Bug fix: the check admits zero-sized dims, so the message must say
    # "nonnegative" rather than "positive".
    msg = 'reshape new_sizes must all be nonnegative, got {}.'
    raise TypeError(msg.format(new_sizes))
  if prod(onp.shape(operand)) != prod(new_sizes):
    msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'
    raise TypeError(msg.format(new_sizes, onp.shape(operand)))
  if dimensions is not None:
    if set(dimensions) != set(range(onp.ndim(operand))):
      msg = ('reshape dimensions must be a permutation of operand dimensions, '
             'got dimensions {} for shape {}.')
      raise TypeError(msg.format(dimensions, onp.shape(operand)))
  return tuple(new_sizes)
def reshape_dtype_rule(operand, new_sizes, dimensions, **unused_kwargs):
  """Dtype rule: reshape preserves the operand's dtype."""
  return operand.dtype
def reshape_translation_rule(c, operand, new_sizes, dimensions, old_sizes):
  """XLA translation: lower to an XLA Reshape op."""
  del old_sizes # Unused.
  return c.Reshape(operand, new_sizes=new_sizes, dimensions=dimensions)
def reshape_transpose_rule(t, new_sizes, dimensions, old_sizes):
  """Transpose: reshape the cotangent back, undoing any fused transpose."""
  out = reshape(t, old_sizes)
  if dimensions is None:
    return [out]
  else:
    return [transpose(out, onp.argsort(dimensions))]
def reshape_batch_rule(batched_args, batch_dims, new_sizes, dimensions, **unused):
  """Batching rule for reshape: move the batch dim to the front and reshape
  the remaining dims.

  A reshape with a fused transpose (`dimensions` not None) is not supported
  under batching yet.
  """
  operand, = batched_args
  bdim, = batch_dims
  operand = batching.move_dim_to_front(operand, bdim)
  if dimensions is not None:
    raise NotImplementedError  # TODO(mattjj): handle reshape w/ dimensions
  # Bug fix: the old code unconditionally evaluated
  # `(0,) + tuple(onp.add(1, dimensions))` here with dimensions == None,
  # raising a TypeError on every call; dimensions is always None at this
  # point, so pass it through unchanged.
  return reshape(operand, operand.shape[:1] + new_sizes, dimensions), 0
reshape_p = standard_primitive(reshape_shape_rule, reshape_dtype_rule,
                               'reshape', reshape_translation_rule)
ad.deflinear(reshape_p, reshape_transpose_rule)
batching.primitive_batchers[reshape_p] = reshape_batch_rule
def rev_shape_rule(operand, dimensions):
  """Shape rule for rev: reversing dimensions leaves the shape unchanged."""
  _check_shapelike('rev', 'dimensions', dimensions)
  if len(set(dimensions)) != len(dimensions):
    msg = 'rev dimensions must be unique, got {}.'
    raise TypeError(msg.format(dimensions))
  if not _max(dimensions) < operand.ndim:
    msg = ('rev dimensions must all be less than operand ndim, got dimensions '
           '{} for operand ndim {}.')
    raise TypeError(msg.format(dimensions, operand.ndim))
  return operand.shape
rev_p = standard_primitive(rev_shape_rule, _input_dtype, 'rev')
# rev is linear and self-inverse: its transpose is rev itself.
ad.deflinear(rev_p, lambda t, dimensions: [rev(t, dimensions)])
def transpose_shape_rule(operand, permutation):
  """Shape rule for `transpose`: permute the operand's dimensions.

  Raises:
    TypeError: if permutation has the wrong type or is not a permutation
      of range(operand.ndim).
  """
  if not isinstance(permutation, (tuple, list, onp.ndarray)):
    raise TypeError("transpose permutation must be a tuple/list/ndarray, "
                    "got {}.".format(type(permutation)))
  if tuple(sorted(permutation)) != tuple(range(operand.ndim)):
    raise TypeError(
        ("transpose permutation isn't a permutation of operand dimensions, "
         "got permutation {} for operand shape {}.")
        .format(permutation, operand.shape))
  return tuple(operand.shape[axis] for axis in permutation)
def transpose_batch_rule(batched_args, batch_dims, permutation):
  """Batching rule for transpose: shift the permutation by one and map the
  batch dim to output position bdim."""
  operand, = batched_args
  bdim, = batch_dims
  # NOTE(review): onp.insert places the batch dim (0) at output index
  # `bdim`, yet 0 is reported as the output batch dim below; this looks
  # suspect unless bdim is always 0 here -- confirm.
  perm = tuple(onp.insert(onp.add(permutation, 1), bdim, 0))
  return transpose(operand, perm), 0
transpose_p = standard_primitive(transpose_shape_rule, _input_dtype,
                                 'transpose')
# transpose is linear; its AD transpose applies the inverse permutation.
ad.deflinear(transpose_p,
             lambda t, permutation: [transpose(t, onp.argsort(permutation))])
batching.primitive_batchers[transpose_p] = transpose_batch_rule
def select_shape_rule(pred, on_true, on_false):
  """Shape rule for `select`.

  Both branches must share a shape; pred must be a scalar or match them.
  Returns the branches' shape.
  """
  if on_true.shape != on_false.shape:
    raise TypeError(
        "select on_true and on_false must have the same shape, got {} and {}."
        .format(on_true.shape, on_false.shape))
  pred_is_scalar = not pred.shape
  if not pred_is_scalar and pred.shape != on_true.shape:
    raise TypeError(
        ("select pred must be scalar or have the same shape as on_true and "
         "on_false, got pred shape {} for on_true and on_false of shape {}.")
        .format(pred.shape, on_true.shape))
  return on_true.shape
def select_dtype_rule(pred, on_true, on_false):
  """Dtype rule: the branches share one dtype; pred must be boolean."""
  _check_same_dtypes("select", False, on_true.dtype, on_false.dtype)
  if not onp.issubdtype(pred.dtype, onp.bool_):
    msg = "select pred must be boolean type, got {}."
    raise TypeError(msg.format(pred.dtype))
  return on_true.dtype
def select_transpose_rule(t, pred, on_true, on_false):
  """Transpose: route the cotangent to the branch that was selected.

  A None operand marks the differentiated input; pred is nondifferentiable
  and always gets None.
  """
  return [None,
          select(pred, t, _zeros(on_false)) if on_true is None else None,
          select(pred, _zeros(on_true), t) if on_false is None else None]
def select_batch_rule(batched_args, batch_dims, **unused_kwargs):
  """Batching rule for `select`.

  Only the case where every batched argument shares the predicate's batch
  dimension is handled; the branches are broadcast up against the predicate.
  """
  pred, on_true, on_false, = batched_args
  pred_bdim, ot_bdim, of_bdim = batch_dims
  unsupported = (ot_bdim not in {None, pred_bdim}
                 or of_bdim not in {None, pred_bdim})
  if unsupported:
    raise NotImplementedError  # TODO(schsam, mattjj): Handle more cases.
  # TODO(schsam, mattjj): Switch to using broadcast_in_dim.
  true_branch = _ones(pred) * on_true
  false_branch = _ones(pred) * on_false
  return select(pred, true_branch, false_branch), pred_bdim
select_p = standard_primitive(select_shape_rule, select_dtype_rule, 'select')
# JVP: pred is nondifferentiable (None); each branch's tangent flows through
# exactly where that branch is taken.
ad.defjvp(select_p,
          None,
          lambda g, b, x, y: select(b, g, _zeros(g)),
          lambda g, b, x, y: select(b, _zeros(g), g))
ad.primitive_transposes[select_p] = select_transpose_rule
batching.primitive_batchers[select_p] = select_batch_rule
def slice_shape_rule(operand, start_indices, limit_indices, strides,
                     operand_shape):
  """Shape rule for slice: ceil((limit - start) / stride) per dimension."""
  _check_shapelike("slice", "start_indices", start_indices)
  _check_shapelike("slice", "limit_indices", limit_indices)
  if operand.ndim != len(start_indices):
    msg = ("slice start_indices must have length equal to the number of "
           "dimensions of the operand, got indices {} for operand shape {}.")
    raise TypeError(msg.format(start_indices, operand.shape))
  if len(start_indices) != len(limit_indices):
    msg = ("slice limit_indices must have the same length as start_indices, "
           "got start_inidices {} and limit_indices {}.")
    raise TypeError(msg.format(start_indices, limit_indices))
  if not onp.all(onp.less_equal(limit_indices, operand.shape)):
    msg = ("slice limit_indices must be less than or equal to operand shape, "
           "got limit_indices {} for operand shape {}.")
    raise TypeError(msg.format(limit_indices, operand.shape))
  if not onp.all(onp.greater_equal(start_indices, 0)):
    msg = ("slice start_indices must be greater than or equal to zero, "
           "got start_indices of {}.")
    raise TypeError(msg.format(start_indices))
  if not onp.all(onp.greater_equal(limit_indices, start_indices)):
    msg = ("slice limit_indices must be greater than or equal to start_indices,"
           " got start_indices {} and limit_indices {}.")
    raise TypeError(msg.format(start_indices, limit_indices))
  if strides is None:
    strides = onp.ones(operand.ndim, onp.int32)
  else:
    _check_shapelike("slice", "strides", strides)
    if len(strides) != operand.ndim:
      msg = ("slice strides must have length equal to the number of dimensions "
             "of the operand, got strides {} for operand shape {}.")
      raise TypeError(msg.format(strides, operand.shape))
    if not onp.all(onp.greater(strides, 0)):
      msg = "slice strides must be positive, got {}"
      raise TypeError(msg.format(strides))
  # Ceiling division: (limit - start + stride - 1) // stride.
  result_shape = onp.floor_divide(
      onp.add(onp.subtract(limit_indices, start_indices), strides) - 1, strides)
  return tuple(result_shape)
def slice_translation_rule(c, operand, start_indices, limit_indices, strides,
                           operand_shape):
  """XLA translation: lower to an XLA Slice op."""
  return c.Slice(operand, start_indices, limit_indices, strides)
def slice_transpose_rule(t, start_indices, limit_indices, strides,
                         operand_shape):
  """Transpose of slice: pad the cotangent back to the operand's shape.

  Strided slices are undone with interior (dilating) padding of stride - 1.
  """
  if strides is None or onp.all(onp.equal(strides, 1)):
    pads = zip(start_indices, onp.subtract(operand_shape, limit_indices),
               (0,) * len(start_indices))
  else:
    real_limits = onp.add(onp.add(start_indices, 1),
                          onp.multiply(onp.subtract(t.shape, 1), strides))
    pads = zip(start_indices, onp.subtract(operand_shape, real_limits),
               onp.subtract(strides, 1))
  result = pad(t, _const(t, 0), pads)
  assert result.shape == operand_shape
  return [result]
def slice_batching_rule(batched_args, batch_dims, start_indices, limit_indices,
                        strides, **unused_kwargs):
  """Batching rule: take the whole batch dimension (start 0, stride 1)."""
  operand, = batched_args
  bdim, = batch_dims
  new_start_indices = list(start_indices)
  new_start_indices.insert(bdim, 0)
  new_limit_indices = list(limit_indices)
  new_limit_indices.insert(bdim, operand.shape[bdim])
  if strides is None:
    new_strides = None
  else:
    new_strides = list(strides)
    new_strides.insert(bdim, 1)
  out = slice(operand, new_start_indices, new_limit_indices, new_strides)
  return out, bdim
slice_p = standard_primitive(slice_shape_rule, _input_dtype, 'slice',
                             slice_translation_rule)
ad.deflinear(slice_p, slice_transpose_rule)
batching.primitive_batchers[slice_p] = slice_batching_rule
def dynamic_slice_shape_rule(operand, start_indices, slice_sizes,
                             operand_shape):
  """Shape rule for dynamic_slice: the output shape is slice_sizes."""
  if operand.ndim != len(start_indices):
    msg = ("dynamic_slice start_indices must have length equal to the number "
           "of dimensions of the operand, got indices {} for operand shape {}.")
    raise TypeError(msg.format(start_indices, operand.shape))
  if len(start_indices) != len(slice_sizes):
    msg = ("dynamic_slice slice_sizes must have the same length as "
           "start_indices, got start_inidices length {} and slice_sizes {}.")
    raise TypeError(msg.format(len(start_indices), slice_sizes))
  if not onp.all(onp.less_equal(slice_sizes, operand.shape)):
    msg = ("slice slice_sizes must be less than or equal to operand shape, "
           "got slice_sizes {} for operand shape {}.")
    raise TypeError(msg.format(slice_sizes, operand.shape))
  if not onp.all(onp.greater_equal(slice_sizes, 0)):
    msg = ("slice slice_sizes must be greater than or equal to zero, "
           "got slice_sizes of {}.")
    raise TypeError(msg.format(slice_sizes))
  return tuple(slice_sizes)
def dynamic_slice_translation_rule(c, operand, start_indices, slice_sizes,
                                   operand_shape):
  """XLA translation: lower to an XLA DynamicSlice op."""
  return c.DynamicSlice(operand, start_indices, slice_sizes)
def dynamic_slice_jvp_rule(g, operand, start_indices, slice_sizes,
                           operand_shape):
  """JVP: slice the tangent at the same (nondifferentiable) indices."""
  return dynamic_slice(g, start_indices, slice_sizes)
def dynamic_slice_transpose_rule(t, operand, start_indices, slice_sizes,
                                 operand_shape):
  """Transpose: scatter the cotangent into zeros at the slice's position."""
  assert operand is None
  zeros = broadcast(_const(t, 0), operand_shape)
  return [dynamic_update_slice(zeros, t, start_indices), ad_util.zero]
dynamic_slice_p = standard_primitive(
    dynamic_slice_shape_rule, _input_dtype, 'dynamic_slice',
    dynamic_slice_translation_rule)
# start_indices are nondifferentiable (second JVP rule is None).
ad.defjvp(dynamic_slice_p, dynamic_slice_jvp_rule, None)
ad.primitive_transposes[dynamic_slice_p] = dynamic_slice_transpose_rule
def dynamic_update_slice_shape_rule(operand, update, start_indices,
                                    update_shape):
  """Shape rule for dynamic_update_slice: output has the operand's shape.

  Raises:
    TypeError: if the update's rank or per-dimension sizes are incompatible
      with the operand, or start_indices has the wrong length.
  """
  if operand.ndim != update.ndim:
    raise TypeError(
        ("dynamic_update_slice update must have the same rank as operand, "
         "got update shape {} for operand shape {}.")
        .format(update.shape, operand.shape))
  if operand.ndim != len(start_indices):
    raise TypeError(
        ("dynamic_update_slice start_indices must have length equal to the "
         "rank of operand, got indices {} for operand shape {}.")
        .format(start_indices, operand.shape))
  # Ranks match here, so zip covers every dimension.
  if not all(u <= o for u, o in zip(update.shape, operand.shape)):
    raise TypeError(
        ("dynamic_update_slice update shape must be smaller than operand "
         "shape, got update shape {} for operand shape {}.")
        .format(update.shape, operand.shape))
  return operand.shape
def dynamic_update_slice_dtype_rule(operand, update, start_indices,
                                    update_shape):
  """Dtype rule: operand and update must share one dtype."""
  _check_same_dtypes("dynamic_update_slice", False, operand.dtype, update.dtype)
  return operand.dtype
def dynamic_update_slice_jvp(primals, tangents, update_shape):
  """JVP: apply the same update to the tangents (indices nondifferentiable)."""
  operand, update, start_indices = primals
  g_operand, g_update, g_start_indices = tangents
  assert g_start_indices is ad_util.zero
  val_out = dynamic_update_slice(operand, update, start_indices)
  if g_operand is ad_util.zero and g_update is ad_util.zero:
    tangent_out = ad_util.zero
  else:
    # Materialize symbolic zeros so the two tangents can be combined.
    g_operand = ad.instantiate_zeros(operand, g_operand)
    g_update = ad.instantiate_zeros(update, g_update)
    tangent_out = dynamic_update_slice(g_operand, g_update, start_indices)
  return val_out, tangent_out
def dynamic_update_slice_transpose_rule(t, operand, update, start_indices,
                                        update_shape):
  """Transpose: zero out the updated window for the operand's cotangent and
  slice that window out for the update's cotangent."""
  assert start_indices is not None
  dus = dynamic_update_slice
  ds = dynamic_slice
  zeros = _zeros(t, shape=update_shape)
  operand_t = dus(t, zeros, start_indices) if operand is None else None
  update_t = ds(t, start_indices, update_shape) if update is None else None
  return [operand_t, update_t, None]
def dynamic_update_slice_translation_rule(c, operand, update, start_indices,
                                          update_shape):
  """XLA translation: lower to an XLA DynamicUpdateSlice op."""
  return c.DynamicUpdateSlice(operand, update, start_indices)
dynamic_update_slice_p = standard_primitive(
    dynamic_update_slice_shape_rule, dynamic_update_slice_dtype_rule,
    'dynamic_update_slice', dynamic_update_slice_translation_rule)
ad.primitive_jvps[dynamic_update_slice_p] = dynamic_update_slice_jvp
ad.primitive_transposes[dynamic_update_slice_p] = \
    dynamic_update_slice_transpose_rule
def index_take_shape_rule(src, *idxs, **kwargs):
  """Shape rule for index_take.

  The result stacks one gathered slice per index: its leading dimension is
  the number of indices and the rest are src's non-indexed dimensions.
  """
  axes = kwargs['axes']
  num_indices = idxs[0].shape[0]
  kept = tuple(onp.delete(src.shape, axes))
  return (num_indices,) + kept
def index_take_translation_rule(c, src, *idxs, **kwargs):
  """XLA translation: compile the recorded jaxpr into a computation and call
  it on (src, *idxs)."""
  jaxpr = kwargs['jaxpr']
  consts = kwargs['consts']
  shapes = map(c.GetShape, (src,) + idxs)
  xla_computation = xla.jaxpr_computation(jaxpr, consts, (), *shapes)
  return c.Call(xla_computation, (src,) + idxs)
def index_take_jvp(primals, tangents, axes, input_shape, jaxpr, consts):
  """JVP rule: index_take is linear in src, so the tangent is the same
  gather applied to the (instantiated) tangent of src."""
  src = primals[0]
  idxs = tuple(primals[1:])
  g = ad.instantiate_zeros(src, tangents[0])
  return index_take(src, idxs, axes), index_take(g, idxs, axes)
def index_take_transpose_rule(t, src, *idxs, **kwargs):
  """Transpose rule: scatter the cotangent back into a zero array of the
  original input shape via index_untake; indices get no cotangent."""
  assert src is None
  axes = kwargs['axes']
  input_shape = kwargs['input_shape']
  t_src = index_untake(t, _zeros(t, shape=input_shape), idxs, axes)
  return [t_src] + [None] * len(idxs)
# Primitive registration and autodiff wiring for index_take.
index_take_p = standard_primitive(index_take_shape_rule, _input_dtype,
                                  'index_take', index_take_translation_rule)
ad.primitive_jvps[index_take_p] = index_take_jvp
ad.primitive_transposes[index_take_p] = index_take_transpose_rule
def index_untake_shape_rule(src, dst, *idxs, **kwargs):
  """Shape rule for index_untake: the output takes the destination's shape."""
  del src, idxs, kwargs  # only the destination determines the result shape
  return dst.shape
def index_untake_translation_rule(c, src, dst, *idxs, **kwargs):
  """XLA translation: compile the recorded jaxpr into a computation and call
  it on (src, dst, *idxs)."""
  jaxpr = kwargs['jaxpr']
  consts = kwargs['consts']
  shapes = map(c.GetShape, (src, dst) + idxs)
  xla_computation = xla.jaxpr_computation(jaxpr, consts, (), *shapes)
  return c.Call(xla_computation, (src, dst) + idxs)
def index_untake_jvp(primals, tangents, axes, jaxpr, consts):
  """JVP rule: index_untake is linear in both src and dst, so the tangent is
  the same scatter applied to their (instantiated) tangents."""
  src, dst = primals[0], primals[1]
  idxs = tuple(primals[2:])
  g_src, g_dst = tangents[0], tangents[1]
  g_src = ad.instantiate_zeros(src, g_src)
  g_dst = ad.instantiate_zeros(dst, g_dst)
  val_out = index_untake(src, dst, idxs, axes)
  tangent_out = index_untake(g_src, g_dst, idxs, axes)
  return val_out, tangent_out
def index_untake_transpose_rule(t, src, dst, *idxs, **kwargs):
  """Transpose rule: the src cotangent gathers `t` at the scatter locations
  (index_take); the dst cotangent is `t` itself; indices get none."""
  axes = kwargs['axes']
  t_src = t_dst = None
  if src is None:
    t_src = index_take(t, idxs, axes)
  if dst is None:
    t_dst = t
  return [t_src, t_dst] + [None] * len(idxs)
# Primitive registration and autodiff wiring for index_untake.
index_untake_p = standard_primitive(
    index_untake_shape_rule, _input_dtype, 'index_untake',
    index_untake_translation_rule)
ad.primitive_jvps[index_untake_p] = index_untake_jvp
ad.primitive_transposes[index_untake_p] = index_untake_transpose_rule
def reduce_shape_rule(operand, init_value, computation, jaxpr, consts, dimensions):
  """Shape rule for the general reduce primitive.

  The output drops every dimension listed in `dimensions` from the operand's
  shape; the remaining arguments only matter for evaluation, not shape.
  """
  del init_value, computation, jaxpr, consts  # shape depends on operand only
  kept_dims = onp.delete(operand.shape, dimensions)
  return tuple(kept_dims)
def reduce_translation_rule(c, operand, init_value, computation, jaxpr, consts, dimensions):
  """XLA translation: compile the reducer jaxpr and emit Reduce."""
  xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
  return c.Reduce(operand, init_value, xla_computation, dimensions)
def reduce_batch_rule(batched_args, batch_dims, computation, jaxpr, consts, dimensions):
  """Batching rule: shift the reduced dimensions past the batch dimension and
  reduce the underlying operand; a batched init_value is not yet supported."""
  operand, init_value = batched_args
  operand_bdim, init_value_bdim = batch_dims
  if init_value_bdim is None:
    assert operand_bdim is not None
    # Reduced dims at or past the batch dim shift right by one.
    new_dimensions = [d + bool(d >= operand_bdim) for d in dimensions]
    new_operand_bdim = operand_bdim - onp.sum(onp.less(dimensions, operand_bdim))
    return reduce(operand, init_value, computation, new_dimensions), new_operand_bdim
  else:
    raise NotImplementedError  # loop and stack
def _reduction_computation(c, jaxpr, consts, init_value):
  """Compile a jaxpr into an XLA reducer computation taking two scalars of
  the init_value's shape."""
  shape = c.GetShape(init_value)
  return xla.jaxpr_computation(jaxpr, consts, (), shape, shape)
reduce_p = standard_primitive(reduce_shape_rule, _input_dtype, 'reduce',
                              reduce_translation_rule)
# batching.primitive_batchers[reduce_p] = reduce_batch_rule # TODO(mattjj): test
def reduce_sum_shape_rule(operand, axes, input_shape):
  """Shape rule for reduce_sum: the summed axes are dropped.

  `input_shape` is threaded through for the transpose rule; here it is only
  sanity-checked against the operand's actual shape.
  """
  assert operand.shape == input_shape, (
      '{} != {}'.format(operand.shape, input_shape))
  kept = onp.delete(operand.shape, axes)
  return tuple(kept)
def reduce_sum_translation_rule(c, operand, axes, input_shape):
  """XLA translation: Reduce with a scalar add computation and zero init."""
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_bridge.Shape.array_shape(dtype, ())
  return c.Reduce(operand, c.Constant(onp.array(0, dtype)),
                  xla.primitive_computation(add_p, scalar, scalar),
                  axes)
def reduce_sum_transpose_rule(cotangent, input_shape, axes):
  """Transpose rule: sum-reduction is linear; its transpose broadcasts the
  cotangent back over the reduced axes."""
  broadcast_dimensions = tuple(onp.delete(onp.arange(len(input_shape)), axes))
  result = broadcast_in_dim(cotangent, input_shape, broadcast_dimensions)
  assert result.shape == input_shape
  return [result]
# Primitive registration and autodiff/batching wiring for reduce_sum.
reduce_sum_p = standard_primitive(reduce_sum_shape_rule, _input_dtype,
                                  'reduce_sum', reduce_sum_translation_rule)
ad.deflinear(reduce_sum_p, reduce_sum_transpose_rule)
batching.defreducer(reduce_sum_p)
def reduce_chooser_shape_rule(operand, axes):
  """Shape rule shared by reduce_max/reduce_min: drop the reduced axes."""
  kept = onp.delete(operand.shape, axes)
  return tuple(kept)
def reduce_chooser_translation_rule(prim, identity, c, operand, axes):
  """XLA translation shared by reduce_min/reduce_max: Reduce with the scalar
  comparison primitive `prim` and its identity element."""
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_bridge.Shape.array_shape(dtype, ())
  return c.Reduce(operand, c.Constant(identity(dtype)),
                  xla.primitive_computation(prim, scalar, scalar), axes)
def reduce_chooser_jvp_rule(g, ans, operand, axes):
  # TODO(mattjj): an alternative is to use variadic reduce to compute the chosen
  # locations in a single pass (rather than comparing equality) and use a
  # gather, and/or even push along the chosen elements of g (b/112040122)
  # JVP: average the tangents of all elements tied for the min/max so ties
  # split the derivative evenly.
  shape = [1 if i in axes else d for i, d in enumerate(operand.shape)]
  location_indicators = convert_element_type(
      _eq_meet(operand, reshape(ans, shape)), g.dtype)
  counts = _reduce_sum(location_indicators, axes)
  return div(_reduce_sum(mul(g, location_indicators), axes), counts)
# reduce_max / reduce_min primitive registrations sharing the rules above.
reduce_max_translation_rule = partial(reduce_chooser_translation_rule, max_p,
                                      _get_max_identity)
reduce_max_p = standard_primitive(reduce_chooser_shape_rule, _input_dtype,
                                  'reduce_max', reduce_max_translation_rule)
ad.defjvp2(reduce_max_p, reduce_chooser_jvp_rule)
batching.defreducer(reduce_max_p)
reduce_min_translation_rule = partial(
    reduce_chooser_translation_rule, min_p, _get_min_identity)
reduce_min_p = standard_primitive(reduce_chooser_shape_rule, _input_dtype,
                                  'reduce_min', reduce_min_translation_rule)
ad.defjvp2(reduce_min_p, reduce_chooser_jvp_rule)
batching.defreducer(reduce_min_p)
def reduce_logical_shape_rule(operand, axes):
  """Shape rule for reduce_and/reduce_or: require a boolean operand and drop
  the reduced axes from its shape."""
  if operand.dtype != onp.bool_:
    raise TypeError(
        "logical reduction requires operand dtype bool, got {}.".format(
            operand.dtype))
  kept = onp.delete(operand.shape, axes)
  return tuple(kept)
def reduce_logical_translation_rule(prim, identity, c, operand, axes):
  """XLA translation shared by reduce_and/reduce_or over booleans."""
  scalar = xla_bridge.Shape.array_shape(onp.bool_, ())
  return c.Reduce(operand, c.Constant(identity(onp.bool_)),
                  xla.primitive_computation(prim, scalar, scalar), axes)
# reduce_or uses the max identity (False) and reduce_and the min identity
# (True) as their respective neutral elements.
reduce_or_translation_rule = partial(reduce_logical_translation_rule,
                                     or_p, _get_max_identity)
reduce_or_p = standard_primitive(reduce_logical_shape_rule, _fixed_dtype(onp.bool_),
                                 'reduce_or', reduce_or_translation_rule)
batching.defreducer(reduce_or_p)
reduce_and_translation_rule = partial(reduce_logical_translation_rule,
                                      and_p, _get_min_identity)
reduce_and_p = standard_primitive(reduce_logical_shape_rule, _fixed_dtype(onp.bool_),
                                  'reduce_and', reduce_and_translation_rule)
batching.defreducer(reduce_and_p)
def reduce_window_shape_rule(operand, init_value, jaxpr, consts,
                             window_dimensions, window_strides, padding):
  """Shape rule for reduce_window: operand and init dtypes must match; the
  output shape follows the common windowed-reduction rule."""
  if operand.dtype != init_value.dtype:
    msg = ("reduce_window got inconsistent dtypes for operand and init_value: "
           " got operand dtype {} and init_value dtype {}.")
    raise TypeError(msg.format(operand.dtype, init_value.dtype))
  return common_reduce_window_shape_rule(operand, window_dimensions,
                                         window_strides, padding)
def reduce_window_translation_rule(c, operand, init_value, jaxpr, consts,
                                   window_dimensions, window_strides, padding):
  """XLA translation: compile the reducer jaxpr and emit ReduceWindow."""
  xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
  return c.ReduceWindow(operand, init_value, xla_computation, window_dimensions,
                        window_strides, padding)
reduce_window_p = standard_primitive(
    reduce_window_shape_rule, _input_dtype, 'reduce_window',
    reduce_window_translation_rule)
def reduce_window_sum_shape_rule(operand, window_dimensions, window_strides,
                                 padding, input_shape):
  """Shape rule for reduce_window_sum (input_shape is carried for transpose)."""
  return common_reduce_window_shape_rule(operand, window_dimensions,
                                         window_strides, padding)
def reduce_window_sum_translation_rule(c, operand, window_dimensions,
                                       window_strides, padding, input_shape):
  """XLA translation: ReduceWindow with a scalar add computation, zero init."""
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_bridge.Shape.array_shape(dtype, ())
  return c.ReduceWindow(operand, c.Constant(onp.array(0, dtype)),
                        xla.primitive_computation(add_p, scalar, scalar),
                        window_dimensions, window_strides, padding)
def reduce_window_sum_transpose_rule(cotangent, window_dimensions,
                                     window_strides, padding, input_shape):
  """Transpose rule: windowed sum is linear; its transpose is another
  windowed sum over a padded and interior-dilated cotangent, reusing the
  convolution LHS-gradient padding computation."""
  in_pads = padtype_to_pads(input_shape, window_dimensions, window_strides,
                            padding)
  ones = [1] * len(input_shape)
  pads = _conv_general_vjp_lhs_padding(
      input_shape, window_dimensions, window_strides, cotangent.shape, in_pads,
      ones, ones)
  # Interior padding by (stride - 1) undoes the forward op's striding.
  padding_config = [(lo, hi, stride - 1)
                    for (lo, hi), stride in zip(pads, window_strides)]
  pad_cotangent = pad(cotangent, _zero(cotangent), padding_config)
  result = _reduce_window_sum(pad_cotangent, window_dimensions, ones,
                              xla_bridge.get_xla_client().PaddingType.VALID)
  assert result.shape == input_shape
  return [result]
reduce_window_sum_p = standard_primitive(
    reduce_window_sum_shape_rule, _input_dtype, 'reduce_window_sum',
    reduce_window_sum_translation_rule)
ad.deflinear(reduce_window_sum_p, reduce_window_sum_transpose_rule)
def reduce_window_chooser_translation_rule(
    prim, identity, c, operand, window_dimensions, window_strides, padding):
  """XLA translation shared by windowed min/max reductions."""
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_bridge.Shape.array_shape(dtype, ())
  return c.ReduceWindow(operand, c.Constant(identity(dtype)),
                        xla.primitive_computation(prim, scalar, scalar),
                        window_dimensions, window_strides, padding)
def reduce_window_chooser_jvp_rule(prim, g, operand, window_dimensions,
                                   window_strides, padding):
  """JVP for windowed min/max: gather the tangent of the selected element in
  each window via the fused select_and_gather_add primitive."""
  assert prim is max_p or prim is min_p
  select_prim = ge_p if prim is max_p else le_p
  return _select_and_gather_add(g, operand, select_prim, window_dimensions,
                                window_strides, padding)
def common_reduce_window_shape_rule(operand, window_dimensions, window_strides,
                                    padding):
  """Validate window arguments and compute the windowed-reduction shape."""
  _check_shapelike("reduce_window", "window_dimensions", window_dimensions)
  _check_shapelike("reduce_window", "window_strides", window_strides)
  if operand.ndim != len(window_dimensions):
    msg = ("reduce_window got the wrong number of window_dimensions for "
           "operand: got operand shape {} with window_dimensions {}.")
    raise TypeError(msg.format(operand.shape, window_dimensions))
  if len(window_strides) != len(window_dimensions):
    msg = ("reduce_window got inconsistent window_strides and "
           "window_dimensions: got window_strides {} and window_dimensions {}.")
    raise TypeError(msg.format(window_strides, window_dimensions))
  return reduce_window_shape_tuple(operand.shape, window_dimensions,
                                   window_strides, padding)
def reduce_window_shape_tuple(operand_shape, window_dimensions, window_strides,
                              padding):
  """Output shape: floor((padded_size - window) / stride) + 1 per dimension."""
  pads = padtype_to_pads(operand_shape, window_dimensions, window_strides, padding)
  operand_padded = onp.add(operand_shape, onp.add(*zip(*pads)))
  t = onp.floor_divide(
      onp.subtract(operand_padded, window_dimensions), window_strides) + 1
  return tuple(t)
# reduce_window_max / reduce_window_min registrations sharing the rules above.
reduce_window_max_translation_rule = partial(
    reduce_window_chooser_translation_rule, max_p, _get_max_identity)
reduce_window_max_p = standard_primitive(
    common_reduce_window_shape_rule, _input_dtype, 'reduce_window_max',
    reduce_window_max_translation_rule)
ad.defjvp(reduce_window_max_p, partial(reduce_window_chooser_jvp_rule, max_p))
reduce_window_min_translation_rule = partial(
    reduce_window_chooser_translation_rule, min_p, _get_min_identity)
reduce_window_min_p = standard_primitive(
    common_reduce_window_shape_rule, _input_dtype, 'reduce_window_min',
    reduce_window_min_translation_rule)
ad.defjvp(reduce_window_min_p, partial(reduce_window_chooser_jvp_rule, min_p))
def select_and_scatter_shape_rule(
    operand, source, init_value, select_jaxpr, select_consts, scatter_jaxpr,
    scatter_consts, window_dimensions, window_strides, padding):
  """Shape rule for select_and_scatter: validates the window arguments; the
  result (a scatter back into the operand) has the operand's shape."""
  _check_shapelike("select_and_scatter", "window_dimensions", window_dimensions)
  _check_shapelike("select_and_scatter", "window_strides", window_strides)
  if len(window_dimensions) != len(window_strides):
    msg = ("select_and_scatter got inconsistent window_strides and "
           "window_dimensions: got window_strides {} and window_dimensions {}.")
    raise TypeError(msg.format(window_strides, window_dimensions))
  return operand.shape
def select_and_scatter_translation(c, operand, source, init_value, select_jaxpr,
                                   select_consts, scatter_jaxpr, scatter_consts,
                                   window_dimensions, window_strides, padding):
  """XLA translation: compile select/scatter jaxprs and emit SelectAndScatter."""
  select = _reduction_computation(c, select_jaxpr, select_consts, init_value)
  scatter = _reduction_computation(c, scatter_jaxpr, scatter_consts, init_value)
  return c.SelectAndScatter(operand, select, window_dimensions, window_strides,
                            padding, source, init_value, scatter)
select_and_scatter_p = standard_primitive(
    select_and_scatter_shape_rule, _input_dtype, 'select_and_scatter',
    select_and_scatter_translation)
def select_and_scatter_add_shape_rule(
    source, operand, select_prim, window_dimensions, window_strides, padding):
  """Shape rule for select_and_scatter_add: values from `source` are scattered
  back into the operand, so the result has the operand's shape."""
  del source, select_prim, window_dimensions, window_strides, padding
  return operand.shape
def select_and_scatter_add_translation(
    c, source, operand, select_prim, window_dimensions, window_strides,
    padding):
  """XLA translation: SelectAndScatter with a scalar comparison selector, a
  scalar add scatterer, and a zero init value."""
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_bridge.Shape.array_shape(dtype, ())
  select = xla.primitive_computation(select_prim, scalar, scalar)
  scatter = xla.primitive_computation(add_p, scalar, scalar)
  zero = c.Constant(onp.array(0, dtype))
  return c.SelectAndScatter(operand, select, window_dimensions, window_strides,
                            padding, source, zero, scatter)
def select_and_scatter_add_transpose(
    t, source, operand, select_prim, window_dimensions, window_strides,
    padding):
  """Transpose rule: the source cotangent gathers `t` at the selected window
  positions — select_and_gather_add is this op's linear transpose."""
  assert source is None and operand is not None
  result = _select_and_gather_add(t, operand, select_prim, window_dimensions,
                                  window_strides, padding)
  return [result, None]
select_and_scatter_add_p = standard_primitive(
    select_and_scatter_add_shape_rule, _input_dtype, 'select_and_scatter_add',
    select_and_scatter_add_translation)
ad.primitive_transposes[select_and_scatter_add_p] = \
    select_and_scatter_add_transpose
def select_and_gather_add_shape_rule(
    tangents, operand, select_prim, window_dimensions, window_strides, padding):
  """Shape rule: tangents must match the operand's shape; the output follows
  the common windowed-reduction shape rule."""
  if tangents.shape != operand.shape:
    msg = ("select_and_gather_add tangents and operand shapes must match, "
           "got {} and {}.")
    raise TypeError(msg.format(tangents.shape, operand.shape))
  return common_reduce_window_shape_rule(operand, window_dimensions,
                                         window_strides, padding)
def select_and_gather_add_translation(
    c, tangents, operand, select_prim, window_dimensions, window_strides,
    padding):
  # No direct XLA lowering is provided for this fused primitive.
  raise NotImplementedError("No efficient translation.")
def select_and_gather_add_transpose(
    t, tangents, operand, select_prim, window_dimensions, window_strides,
    padding):
  """Transpose rule: scatter `t` back to the selected positions via
  select_and_scatter_add (the two primitives are mutual transposes)."""
  assert tangents is None and operand is not None
  result = _select_and_scatter_add(t, operand, select_prim, window_dimensions,
                                   window_strides, padding)
  return [result, None]
select_and_gather_add_p = standard_primitive(
    select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add',
    select_and_gather_add_translation)
ad.primitive_transposes[select_and_gather_add_p] = \
    select_and_gather_add_transpose
# sort: shape-preserving; its JVP carries tangents along by sorting them
# keyed on the operand (via sort_key_val).
sort_shape = lambda operand, dimension: operand.shape
def sort_jvp_rule(g, operand, dimension):
  """JVP for sort: permute the tangents with the operand's sort order."""
  _, g_out = sort_key_val(operand, g, dimension)
  return g_out
sort_p = standard_primitive(sort_shape, _input_dtype, 'sort')
ad.defjvp(sort_p, sort_jvp_rule)
def sort_key_val_abstract_eval(keys, values, dimension):
  """Abstract eval: the result is a (keys, values) pair with unchanged avals."""
  return core.AbstractTuple((keys, values))
def sort_key_val_impl(keys, values, dimension):
  """Eager implementation: dispatch to XLA and repack the result pair."""
  out = xla.apply_primitive(sort_key_val_p, keys, values, dimension=dimension)
  sorted_keys, sorted_values = out
  return core.pack((sorted_keys, sorted_values))
def sort_key_val_jvp(primals, tangents, dimension):
  # NOTE(mattjj): this re-sorts three times, but if we had a variadic
  # sort_key_val, or if we could apply a fixed permutation efficiently, we could
  # implement this jvp rule with a single sort. The apply_permutation primitive
  # would make the jvp (and corresponding transpose rule) faster and easier.
  # This would also be cleaner if we didn't get the sorted keys out.
  # TODO(mattjj): make sort_key_val variadic, no sorted keys out by default
  keys, values = primals
  keys_tangents, values_tangents = tangents
  val_out = sort_key_val(keys, values, dimension)
  if keys_tangents is ad_util.zero:
    keys_tangents_out = ad_util.zero
  else:
    # Both tangent streams are permuted by the keys' sort order.
    keys_tangents_out = sort_jvp_rule(keys_tangents, keys, dimension)
  if values_tangents is ad_util.zero:
    values_tangents_out = ad_util.zero
  else:
    values_tangents_out = sort_jvp_rule(values_tangents, keys, dimension)
  tangents_out = keys_tangents_out, values_tangents_out
  return core.pack(val_out), ad.TangentTuple(tangents_out)
def sort_key_val_transpose_rule(t, keys, values, dimension):
  """Transpose rule: recover the sort permutation by sorting an iota keyed on
  `keys`, then apply it to route the values cotangent back."""
  t_keys, t_values = t
  assert t_keys is ad_util.zero
  broadcasted_iota = broadcast_in_dim(
      onp.arange(keys.shape[dimension]), keys.shape, [dimension % keys.ndim])
  _, perm = sort_key_val(keys, broadcasted_iota)
  keys_result = ad_util.zero if keys is None else None
  values_result = sort_key_val(perm, t_values)[1] if values is None else None
  return [keys_result, values_result]
# Primitive registration and rule wiring for sort_key_val.
sort_key_val_p = Primitive('sort_key_val')
sort_key_val_p.def_impl(sort_key_val_impl)
sort_key_val_p.def_abstract_eval(sort_key_val_abstract_eval)
xla.translations[sort_key_val_p] = partial(standard_translate, 'sort_key_val')
ad.primitive_jvps[sort_key_val_p] = sort_key_val_jvp
ad.primitive_transposes[sort_key_val_p] = sort_key_val_transpose_rule
def while_loop_abstract_eval(init_val, opaque_params):
  """Abstract eval: the loop carry's abstract value is recorded by the
  tracing machinery as the first element of opaque_params."""
  abs_out = opaque_params.val[0]
  return maybe_tracer_tuple_to_abstract_tuple(abs_out)
def while_loop_translation_rule(c, init_val, opaque_params):
  """XLA translation: compile the cond/body jaxprs against the carry shape
  and emit a While op."""
  shape = c.GetShape(init_val)
  abs_out, cond_jaxpr, cond_consts, body_jaxpr, body_consts = opaque_params.val
  cond_computation = xla.jaxpr_computation(cond_jaxpr, cond_consts, (), shape)
  body_computation = xla.jaxpr_computation(body_jaxpr, body_consts, (), shape)
  return c.While(cond_computation, body_computation, init_val)
# Primitive registration for while: eager eval dispatches through XLA.
while_p = Primitive('while')
while_p.def_impl(partial(xla.apply_primitive, while_p))
while_p.def_abstract_eval(while_loop_abstract_eval)
xla.translations[while_p] = while_loop_translation_rule
### util
def _ndim(x):
  """Return the rank (number of dimensions) of array-like `x`."""
  return x.ndim
def _dilate_shape(shape, dilation):
"""Utility function for computing the shape resulting from a dilation."""
if not onp.all(onp.greater(dilation, 0)):
msg = "All dilations must be positive, got {}."
raise TypeError(msg.format(dilation))
dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)
return onp.multiply(dilation, onp.subtract(shape, 1)) + 1
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
  """Convert padding string to list of pairs of pad values."""
  PaddingType = xla_bridge.get_xla_client().PaddingType
  if isinstance(padding, str):
    mapping = {'VALID': PaddingType.VALID, 'SAME': PaddingType.SAME}
    try:
      padding = mapping[padding.upper()]
    except KeyError:
      msg = "Unrecognized padding type: expected 'VALID' or 'SAME', got {}."
      raise RuntimeError(msg.format(padding))
  if padding == PaddingType.SAME:
    # SAME: output size is ceil(in / stride); pad the total deficit, split as
    # evenly as possible with the extra unit (if any) on the high side.
    out_shape = onp.ceil(onp.true_divide(in_shape, window_strides)).astype(int)
    pad_sizes = [_max((out_size - 1) * stride + window_shape - in_size, 0)
                 for out_size, stride, window_shape, in_size
                 in zip(out_shape, window_strides, window_shape, in_shape)]
    return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]
  elif padding == PaddingType.VALID:
    return [(0, 0)] * len(in_shape)
  else:
    msg = "Unknown padding type: {}."
    raise TypeError(msg.format(padding))
def _check_same_dtypes(name, ignore_fp_precision, *dtypes):
  """Check that dtypes agree, possibly ignoring float precision."""
  # the `ignore_fp_precision` flag exists because the XLA shape inference logic
  # allows mixed floating point precision, but the HLO verifier often rejects it
  dtypes = list(map(onp.dtype, dtypes))  # canonicalize
  if ignore_fp_precision:
    # Collapse float (resp. complex) dtypes to an abstract kind so that e.g.
    # float32 vs float64 compare equal under this flag.
    dtypes = [
        onp.floating if onp.issubdtype(dtype, onp.floating)
        else onp.complexfloating if onp.issubdtype(dtype, onp.complexfloating)
        else dtype for dtype in dtypes]
  if len({xla_bridge.canonicalize_dtype(t) for t in dtypes}) != 1:
    if ignore_fp_precision:
      msg = ("{} requires arguments to have same dtypes up to floating point "
             "precision, got {}.")
    else:
      msg = "{} requires arguments to have the same dtypes, got {}."
    raise TypeError(msg.format(name, ", ".join(map(str, dtypes))))
def _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):
  """Check that conv shapes are valid and are consistent with window_strides."""
  if len(lhs_shape) != len(rhs_shape):
    msg = "Arguments to {} must have same rank, got {} and {}."
    raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
  if len(lhs_shape) < 2:
    msg = "Arguments to {} must have rank at least 2, got {} and {}."
    raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
  if lhs_shape[1] != rhs_shape[1]:
    # Canonical layout: lhs is (N, C, *spatial), rhs is (O, I, *spatial), so
    # index 1 is the input-feature dimension of both.
    msg = "Arguments to {} must agree on input feature size, got {} and {}."
    raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))
  _check_shapelike(name, "window_strides", window_strides)
  if not onp.all(onp.greater(window_strides, 0)):
    msg = "All elements of window_strides must be positive, got {}."
    raise TypeError(msg.format(window_strides))
  if len(window_strides) != len(lhs_shape) - 2:
    msg = "{} window_strides has wrong length: expected {}, got {}."
    expected_length = len(lhs_shape) - 2
    raise TypeError(msg.format(name, expected_length, len(window_strides)))
def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads):
  """Compute the shape tuple of a conv given input shapes in canonical order.

  `lhs_shape` is (batch, features, *spatial) and `rhs_shape` is
  (out_features, in_features, *spatial). `pads` is either a padding-type
  string or an explicit list of (low, high) pairs, one per spatial dim.
  """
  num_spatial = len(lhs_shape) - 2
  if isinstance(pads, str):
    pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
  if len(pads) != num_spatial:
    raise TypeError(
        "Wrong number of explicit pads for convolution: expected {}, got {}."
        .format(num_spatial, len(pads)))
  pad_totals = onp.add(*zip(*pads))
  padded_spatial = onp.add(lhs_shape[2:], pad_totals)
  spatial_out = onp.floor_divide(
      onp.subtract(padded_spatial, rhs_shape[2:]), strides) + 1
  spatial_out = onp.maximum(0, spatial_out)
  return tuple((lhs_shape[0], rhs_shape[0]) + tuple(spatial_out))
def conv_general_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
                             dimension_numbers):
  """Conv output shape for arbitrary dimension_numbers: permute the inputs to
  the canonical layout, compute the shape there, and permute back."""
  lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
  lhs_trans = onp.take(lhs_shape, lhs_perm)
  rhs_trans = onp.take(rhs_shape, rhs_perm)
  out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding)
  # argsort inverts out_perm, mapping the canonical shape back to user order.
  return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def _check_shapelike(fun_name, arg_name, obj):
"""Check that `obj` is a shape-like value (e.g. tuple of nonnegative ints)."""
if not isinstance(obj, (tuple, list, onp.ndarray)):
msg = "{} {} must be of type tuple/list/ndarray, got {}."
raise TypeError(msg.format(fun_name, arg_name, type(obj)))
# bool(obj) for an ndarray raises an error, so we check len
if not len(obj): # pylint: disable=g-explicit-length-test
return
obj_arr = onp.array(obj)
if obj_arr.ndim != 1:
msg = "{} {} must be rank 1, got {}."
raise TypeError(msg.format(obj_arr.ndim))
if not onp.issubdtype(obj_arr.dtype, onp.integer):
msg = "{} {} must have every element be an integer type, got {}."
raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj))))
if not (obj_arr >= 0).all():
msg = "{} {} must have every element be nonnegative, got {}."
raise TypeError(msg.format(fun_name, arg_name, obj))
def _dynamic_slice_indices(operand, start_indices):
  """Pack per-dimension start indices into one 1-D array and reduce each via
  `rem` by the corresponding operand dimension size."""
  if isinstance(start_indices, (tuple, list)):
    start_indices = concatenate([reshape(i, [1]) for i in start_indices], 0)
  return rem(start_indices, onp.array(operand.shape, start_indices.dtype))
# Small constant/fill helpers shared by the rules above: arrays (or scalars,
# for the singular names) filled with 0, 1, or 2 matching an example's dtype.
_const = lambda example, val: onp.array(val, _dtype(example))
_zeros = partial(full_like, fill_value=0)
_zero = partial(full_like, shape=(), fill_value=0)
_ones = partial(full_like, fill_value=1)
_one = partial(full_like, shape=(), fill_value=1)
_twos = partial(full_like, fill_value=2)
_two = partial(full_like, shape=(), fill_value=2)
_dtype = onp.result_type
_iscomplex = lambda x: onp.issubdtype(_dtype(x), onp.complexfloating)
def ranges_like(*xs):
  """Yield consecutive index ranges partitioned by the lengths of `xs`.

  For example, ranges_like([a, b], [c]) yields range(0, 2) then range(2, 3).
  """
  offset = 0
  for x in xs:
    end = offset + len(x)
    yield range(offset, end)
    offset = end
def remaining(original, *removed_lists):
  """Return the elements of `original` absent from every list in
  `removed_lists`, preserving their original order."""
  removed = set()
  for lst in removed_lists:
    removed.update(lst)
  return [x for x in original if x not in removed]
# Canonical container for convolution dimension permutations (one permutation
# per operand: input, kernel, output).
ConvDimensionNumbers = collections.namedtuple(
    "ConvDimensionNumbers", ["lhs_spec", "rhs_spec", "out_spec"])
def conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers):
  """Convert from user spec of dimension_numbers to ConvDimensionNumbers.
  Args:
    lhs_shape: tuple of nonnegative integers, shape of the convolution input.
    rhs_shape: tuple of nonnegative integers, shape of the convolution kernel.
    dimension_numbers: None or a tuple/list of strings, following the
      convolution dimension number specification format in xla_client.py.
  Returns:
    A ConvDimensionNumbers namedtuple representing dimension_numbers in a
    canonical form that is handled by internal lax functions.
  """
  if len(lhs_shape) != len(rhs_shape):
    msg = "convolution requires lhs and rhs ndim to be equal, got {} and {}."
    raise TypeError(msg.format(len(lhs_shape), len(rhs_shape)))
  if dimension_numbers is None:
    # Default: inputs are already in the canonical order, so each
    # permutation is the identity.
    iota = tuple(range(len(lhs_shape)))
    return ConvDimensionNumbers(iota, iota, iota)
  elif isinstance(dimension_numbers, (list, tuple)):
    if len(dimension_numbers) != 3:
      msg = "convolution dimension_numbers list/tuple must be length 3, got {}."
      raise TypeError(msg.format(len(dimension_numbers)))
    if not all(isinstance(elt, str) for elt in dimension_numbers):
      msg = "convolution dimension_numbers elements must be strings, got {}."
      raise TypeError(msg.format(tuple(map(type, dimension_numbers))))
    msg = ("convolution dimension_numbers[{}] must have len equal to the ndim "
           "of lhs and rhs, got {} for lhs and rhs shapes {} and {}.")
    for i, elt in enumerate(dimension_numbers):
      if len(elt) != len(lhs_shape):
        raise TypeError(msg.format(i, len(elt), lhs_shape, rhs_shape))
    lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)
    return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
  else:
    msg = "convolution dimension_numbers must be tuple/list or None, got {}."
    raise TypeError(msg.format(type(dimension_numbers)))
def conv_general_permutations(dimension_numbers):
  """Utility for convolution dimension permutations relative to Conv HLO.

  Args:
    dimension_numbers: a triple of strings (lhs_spec, rhs_spec, out_spec).
      The lhs/out specs must contain 'N' and 'C' exactly once and the rhs
      spec 'O' and 'I' exactly once; the remaining characters name spatial
      dimensions and must be the same set across all three specs.

  Returns:
    A triple (lhs_perm, rhs_perm, out_perm) of index permutations that map
    each spec to a canonical layout: batch/output-feature first, then
    channel/input-feature, then the spatial dims in rhs_spec order.

  Raises:
    TypeError: if the specs are malformed or mutually inconsistent.
  """
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  lhs_char, rhs_char, out_char = charpairs = ("N", "C"), ("O", "I"), ("N", "C")
  for i, (a, b) in enumerate(charpairs):
    if not dimension_numbers[i].count(a) == dimension_numbers[i].count(b) == 1:
      # Bug fix: corrected "exatly" -> "exactly" in the error message.
      msg = ("convolution dimension_numbers[{}] must contain the characters "
             "'{}' and '{}' exactly once, got {}.")
      raise TypeError(msg.format(i, a, b, dimension_numbers[i]))
    if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):
      msg = ("convolution dimension_numbers[{}] cannot have duplicate "
             "characters, got {}.")
      raise TypeError(msg.format(i, dimension_numbers[i]))
  if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
          set(out_spec) - set(out_char)):
    msg = ("convolution dimension_numbers elements must each have the same "
           "set of spatial characters, got {}.")
    raise TypeError(msg.format(dimension_numbers))
  def getperm(spec, charpair):
    # Spatial dims follow the order in which they appear in rhs_spec so that
    # all three permutations agree on spatial ordering.
    spatial = (i for i, c in enumerate(spec) if c not in charpair)
    if spec is not rhs_spec:
      spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
    return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)
  lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)
  return lhs_perm, rhs_perm, out_perm
def _conv_general_proto(dimension_numbers):
  """Serialize canonical ConvDimensionNumbers into the XLA protobuf form."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  proto = xla_bridge.xla_data_pb2.ConvolutionDimensionNumbers()
  proto.input_batch_dimension = lhs_spec[0]
  proto.input_feature_dimension = lhs_spec[1]
  proto.output_batch_dimension = out_spec[0]
  proto.output_feature_dimension = out_spec[1]
  proto.kernel_output_feature_dimension = rhs_spec[0]
  proto.kernel_input_feature_dimension = rhs_spec[1]
  proto.input_spatial_dimensions.extend(lhs_spec[2:])
  proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
  proto.output_spatial_dimensions.extend(out_spec[2:])
  return proto
def _conv_general_vjp_lhs_padding(
    in_shape, window_dimensions, window_strides, out_shape, padding,
    lhs_dilation, rhs_dilation):
  """Padding for the transposed convolution computing the LHS gradient.

  Returns per-dimension (lo, hi) pads chosen so the gradient convolution
  produces the (lhs-dilated) input shape from the (stride-dilated) output.
  """
  lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
  out_dilated_shape = _dilate_shape(out_shape, window_strides)
  pad_before = onp.subtract(window_dimensions, [lo for lo, _ in padding]) - 1
  pad_after = (onp.add(lhs_dilated_shape, window_dimensions) - 1
               - out_dilated_shape - pad_before)
  return zip(pad_before, pad_after)
def _conv_general_vjp_rhs_padding(
    in_shape, window_dimensions, window_strides, out_shape, padding,
    lhs_dilation, rhs_dilation):
  """Padding for the convolution computing the RHS (kernel) gradient.

  Keeps the forward low pads and assigns the remainder of the required total
  padding to the high side of each dimension.
  """
  lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
  rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
  out_dilated_shape = _dilate_shape(out_shape, window_strides)
  total_in_pad = out_dilated_shape + rhs_dilated_shape - lhs_dilated_shape - 1
  return [(pad[0], tot - pad[0]) for pad, tot in zip(padding, total_in_pad)]
def _balanced_eq(x, z, y):
  """Indicator of x == z, halved wherever y == z as well — used to split a
  tie's gradient evenly between the two arguments of min/max."""
  return div(select(_eq_meet(x, z), _ones(z), _zeros(z)),
             select(_eq_meet(y, z), _twos(z), _ones(z)))
def _eq_meet(a, b):
  """Equality comparison after converting the wider operand down to the
  narrower dtype (the 'meet'), so mixed-dtype comparisons are well-defined."""
  a_dtype, b_dtype = _dtype(a), _dtype(b)
  if a_dtype != b_dtype:
    higher_dtype = onp.promote_types(a_dtype, b_dtype)
    if higher_dtype == a_dtype:
      a = convert_element_type(a, b_dtype)
    else:
      b = convert_element_type(b, a_dtype)
  return eq(a, b)
def maybe_tracer_tuple_to_abstract_tuple(tup):
  """Recursively normalize a tracer tuple / abstract value / None into an
  AbstractTuple (None becomes the empty tuple)."""
  if isinstance(tup, pe.JaxprTracerTuple):
    return core.AbstractTuple(list(map(maybe_tracer_tuple_to_abstract_tuple, tup)))
  elif isinstance(tup, core.AbstractValue):
    return tup
  elif tup is None:
    return core.AbstractTuple(())  # TODO(dougalm): check this
  else:
    raise TypeError(tup)
def subvals(lst, replace):
  """Return `lst` as a tuple with (index, value) substitutions applied."""
  out = list(lst)
  for index, value in replace:
    out[index] = value
  return tuple(out)
def _abstractify(x):
  """Wrap `x` as a PartialVal pairing its abstract value with the unit constant."""
  # abstractify wrapper used internally for primitives like _while_loop
  if isinstance(x, core.Tracer):
    # TODO(mattjj,dougalm): check that it's at least ShapedArray
    return pe.PartialVal((x.aval, core.unit))
  else:
    return pe.PartialVal((xla.abstractify(x), core.unit))
| 40.935522 | 92 | 0.71851 |
112b01f069ed1a5f515e8ec07a81dc18195e2583 | 4,082 | py | Python | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/list_postgresql_databases_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/list_postgresql_databases_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/list_postgresql_databases_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListPostgresqlDatabasesResponse(SdkResponse):
    """
    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values must be masked ("****") in to_dict() output.
    sensitive_list = []
    # Attribute name -> declared OpenAPI type (used by the SDK (de)serializer).
    openapi_types = {
        'databases': 'list[PostgresqlListDatabase]',
        'total_count': 'int'
    }
    # Attribute name -> JSON key on the wire.
    attribute_map = {
        'databases': 'databases',
        'total_count': 'total_count'
    }
    def __init__(self, databases=None, total_count=None):
        """ListPostgresqlDatabasesResponse - a model defined in huaweicloud sdk"""
        super(ListPostgresqlDatabasesResponse, self).__init__()
        # Backing fields for the properties below; set only when arguments are given.
        self._databases = None
        self._total_count = None
        self.discriminator = None
        if databases is not None:
            self.databases = databases
        if total_count is not None:
            self.total_count = total_count
    @property
    def databases(self):
        """Gets the databases of this ListPostgresqlDatabasesResponse.
        Each element of the list describes one database.
        :return: The databases of this ListPostgresqlDatabasesResponse.
        :rtype: list[PostgresqlListDatabase]
        """
        return self._databases
    @databases.setter
    def databases(self, databases):
        """Sets the databases of this ListPostgresqlDatabasesResponse.
        Each element of the list describes one database.
        :param databases: The databases of this ListPostgresqlDatabasesResponse.
        :type: list[PostgresqlListDatabase]
        """
        self._databases = databases
    @property
    def total_count(self):
        """Gets the total_count of this ListPostgresqlDatabasesResponse.
        Total number of databases.
        :return: The total_count of this ListPostgresqlDatabasesResponse.
        :rtype: int
        """
        return self._total_count
    @total_count.setter
    def total_count(self, total_count):
        """Sets the total_count of this ListPostgresqlDatabasesResponse.
        Total number of databases.
        :param total_count: The total_count of this ListPostgresqlDatabasesResponse.
        :type: int
        """
        self._total_count = total_count
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes; emit everything else as-is.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding so non-ASCII dumps cleanly.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListPostgresqlDatabasesResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.746479 | 84 | 0.593092 |
c5a0b2c9d857d12cf58dee276eb040622c15c800 | 2,042 | py | Python | envs/classic_control.py | bvanberl/cs885-project | 486f549b354c58a60d45204b71ebd6a3d6f17de9 | [
"MIT"
] | null | null | null | envs/classic_control.py | bvanberl/cs885-project | 486f549b354c58a60d45204b71ebd6a3d6f17de9 | [
"MIT"
] | null | null | null | envs/classic_control.py | bvanberl/cs885-project | 486f549b354c58a60d45204b71ebd6a3d6f17de9 | [
"MIT"
] | null | null | null | import gym
import cv2
from gym import wrappers
import numpy as np
class ClassicControlEnv(gym.Env):
    """Pixel-observation wrapper around a classic-control Gym environment.

    Renders the wrapped env to an RGB array each step, converts the frame to
    resized grayscale, and keeps a rolling stack of ``n_frames`` frames as
    the observation (newest frame at index -1).
    """
    def __init__(self, env_name, seed=0, max_steps=1000, n_frames=1, action_repeat=1, image_size=(84,84)):
        self._env = gym.make(env_name)
        self._env.seed(seed)
        self.max_steps = max_steps          # episode is truncated after this many inner steps
        self.action_repeat = action_repeat  # each step() repeats the action this many times
        self.image_size = image_size        # frame size; used as numpy shape and cv2.resize dsize
        # Rolling frame stack; the newest grayscale frame is written to index -1.
        self.observation = np.zeros((n_frames, image_size[0], image_size[1]))
        if self._env.spec:
            self.spec = self._env.spec
        else:
            self.spec = None
    def reset(self):
        """Reset the wrapped env and return the (re-used, mutable) frame stack."""
        self.t = 0
        state = self._env.reset()
        image = self._env.render(mode='rgb_array')
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        image = cv2.resize(image, tuple(self.image_size), interpolation=cv2.INTER_AREA)
        # NOTE(review): only the newest slot is overwritten here; older frames
        # from a previous episode remain in the stack -- confirm this is intended.
        self.observation[-1] = image
        return self.observation
    def step(self, action):
        """Apply *action* ``action_repeat`` times; return (obs, reward, done, info).

        Reward is summed over the repeats; ``info['concepts']`` carries the
        wrapped env's raw state from the last executed repeat.
        """
        reward = 0
        for i in range(self.action_repeat):
            state, reward_i, done, info = self._env.step(action)
            image = self._env.render(mode='rgb_array')
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            image = cv2.resize(image, tuple(self.image_size), interpolation=cv2.INTER_AREA)
            reward += reward_i
            self.t += 1
            # Episode ends either on the env's own `done` or on the step budget.
            terminal = done or self.t == self.max_steps
            if done:
                break
        # Shift the stack left by one frame and append the newest frame.
        self.observation = np.roll(self.observation, -1, axis=0)
        self.observation[-1] = image
        return self.observation, reward, terminal, {'concepts': state}
    def close(self):
        return self._env.close()
    def render(self, mode):
        return self._env.render(mode)
    @property
    def observation_space(self):
        # Box for a single grayscale frame; note it does not include the
        # n_frames stacking dimension -- presumably consumers handle frames
        # separately (TODO confirm against training code).
        image_box = gym.spaces.Box(0, 255, (self.image_size[0], self.image_size[1]), dtype=np.uint8)
        return image_box
    @property
    def action_space(self):
        return self._env.action_space
79b29106836cf3e6a4c1b6805272187b74383f71 | 5,253 | py | Python | SignalExtractor/ModifiedSignal.py | Janga-Lab/Penguin-1 | f6162be3549c470416da0fab590ae7d04c74bfa5 | [
"MIT"
] | 1 | 2021-04-25T19:37:05.000Z | 2021-04-25T19:37:05.000Z | SignalExtractor/ModifiedSignal.py | Janga-Lab/Penguin-1 | f6162be3549c470416da0fab590ae7d04c74bfa5 | [
"MIT"
] | 9 | 2021-06-19T08:46:23.000Z | 2021-08-28T19:01:20.000Z | SignalExtractor/ModifiedSignal.py | Janga-Lab/Penguin-1 | f6162be3549c470416da0fab590ae7d04c74bfa5 | [
"MIT"
] | 1 | 2021-03-04T18:19:45.000Z | 2021-03-04T18:19:45.000Z | #!/usr/bin/env python
import getopt
import argparse
import sys, getopt
import h5py, numpy as np, os, sys
import itertools
# import matplotlib.pyplot as plt
import SignalExtractor.eventHelper as eH
from SignalExtractor.eventHelper import f_events
# ########################################### file control ########################################
def extract_signal(IdFile, modCoordFile, fast5path=None):
    """Extract raw nanopore signal slices around modified coordinates.

    :param IdFile: whitespace-separated file; per the inline comment the
        columns are path / file / id / run -- TODO confirm exact schema.
    :param modCoordFile: whitespace-separated modification-coordinate file;
        its 4th column is matched against the ids read from *IdFile*.
    :param fast5path: unused -- fast5 paths are taken from *IdFile* instead.

    Side effects: writes ./Data/post_pseudo_signals.txt and
    ./testing/fastObject.txt, and prints progress to stdout.

    NOTE(review): the indentation of the tail of this function (from the
    "Shifted to helper function module" banner onward) appears damaged --
    that code sits at the same level as the ``for mod_row`` loop, so it runs
    once with last-iteration variables, and the final three lines are
    over-indented relative to the ``sig=`` assignment. Verify against the
    original repository before relying on this function.
    """
    #coord file
    inp = modCoordFile
    #id file
    inp2 = IdFile
    id_dict=dict()
    '''
    for fileob in os.listdir(inp2):
        fname = inp2 + fileob
    '''
    #ids
    with open(inp2, 'r') as f:
        for i in f:
            i1=i.split( )
            #path/file/id/run
            print(i1[2])
            # id -> [path, filename, run]
            id_dict[i1[2]] = [i1[0]]
            id_dict[i1[2]].append(i1[1])
            id_dict[i1[2]].append(i1[3])
    cnt=0
    # NOTE: this `f` shadows the id-file handle above (that one is closed).
    with open("./Data/post_pseudo_signals.txt",'w+') as f:
        with open("./testing/fastObject.txt", 'w+') as ob:
            #coords
            mod_file=open(inp,'r')
            for mod_row in mod_file:
                mod_coord_col=mod_row.split( )
                if mod_coord_col[3] in id_dict.keys():
                    print("match in id_dict")
                    #path to fast5 file
                    fast5File=id_dict[mod_coord_col[3]][0]+'/'+id_dict[mod_coord_col[3]][1]
                    ob.write(fast5File)
                    hdf = h5py.File(fast5File,'r')
                    #### Extract signal of modified fast5 file
                    try:
                        raw_data=list(hdf['/Raw/Reads/'].values())[0]
                        raw_signal=raw_data['Signal'].value
                        print("sig ", raw_signal)
                        ### Extract events
                        events_data=hdf.get('/Analyses/Basecall_1D_001/BaseCalled_template/Events/')
                        events=events_data.value
                        ### Extract start time
                        start_time=hdf.get('Raw/Reads/')
                        sas=''.join(list(start_time.keys()))
                        start_t=hdf.get('Raw/Reads/'+sas+'/')
                        start_t=start_t.attrs['start_time']
                        ### Extract duration
                        Du_time=hdf.get('Raw/Reads/'+sas+'/')
                        Du_time=Du_time.attrs['duration']
                        ### Extract Fastq
                        Fastq_data=hdf.get('/Analyses/Basecall_1D_001/BaseCalled_template/Fastq/')
                        summary_data=hdf.get('/Analyses/Basecall_1D_000/Summary/basecall_1d_template/')
                        ### Extract frequency
                        c_freq = hdf.get('/UniqueGlobalKey/context_tags/')
                        c_freq = (c_freq.attrs['sample_frequency']).decode('utf-8')
                        raw_fastq=(Fastq_data.value).decode('utf-8')
                        fastq_decoded=raw_fastq.split('\n')
                        ob.write("raw_data \n")
                        # NOTE(review): these write h5py objects / numpy arrays, not
                        # strings; a TypeError here is NOT caught by the AttributeError
                        # handler below -- confirm intended behavior.
                        ob.write(raw_data)
                        ob.write(raw_signal)
                        ob.write(events_data)
                        ob.write(events)
                        ob.write(start_t)
                        ob.write(Du_time)
                        ob.write(Fastq_data)
                        ob.write(c_freq)
                        ob.write(raw_fastq)
                    except AttributeError:
                        continue
            # #################### Shifted to helper function module ################################
            # NOTE(review): from here on, `events`, `fastq_decoded` and `mod_row`
            # are the values from the LAST loop iteration (see docstring note).
            final_eves=eH.event_scrapper(events)
            seq_no=3
            for e, i in enumerate(final_eves):
                seq_no=seq_no+int(i[1])
            len_seq=len(''.join(fastq_decoded[1]))
            # NOTE(review): mod_row is the raw line string, so mod_row[2] etc.
            # index single characters -- likely meant mod_coord_col; verify.
            a_seq_no=(len_seq-int(mod_row[2]))+1
            ##### Tail pass
            if a_seq_no > 2 and a_seq_no < len(final_eves)-2:
                if seq_no == a_seq_no:
                    f_e=f_events(final_eves,e)
                    if len(f_e) != 0:
                        start=[]
                        end=[]
                        for s in f_e:
                            print(s)
                            for t in range(5):
                                start.append(s[t][2])
                                end.append(s[t][4])
                            print(s[t][2])
                            print(start[2])
                            print(end[2])
                        min_st=min(start)
                        max_stl=(max(end)-min(start))+1
                        sig='_'.join(map(str, raw_signal[min_st:][:max_stl]))
                            print(id_dict[mod_row[3]][1]+' '+i[0]+' '+mod_row[1]+'_'+mod_row[2]+' '+sig)
                            print("write ")
                            f.write(id_dict[mod_row[3]][1]+' '+i[0]+' '+mod_row[1]+'_'+mod_row[2]+' '+sig+'\n')
| 40.72093 | 119 | 0.4091 |
63746e7d3358ac3ff6b0e748c82d68d476e540c9 | 2,495 | py | Python | vkwave/api/methods/stats.py | krasnovmv/vkwave | e0db86cc16f97797765aadfb811ec87ff7945b1f | [
"MIT"
] | null | null | null | vkwave/api/methods/stats.py | krasnovmv/vkwave | e0db86cc16f97797765aadfb811ec87ff7945b1f | [
"MIT"
] | null | null | null | vkwave/api/methods/stats.py | krasnovmv/vkwave | e0db86cc16f97797765aadfb811ec87ff7945b1f | [
"MIT"
] | null | null | null | from vkwave.types.responses import *
from ._category import Category
from ._utils import get_params
class Stats(Category):
    """VK API ``stats.*`` method wrappers (community/application statistics)."""
    async def get(
        self,
        return_raw_response: bool = False,
        group_id: typing.Optional[int] = None,
        app_id: typing.Optional[int] = None,
        timestamp_from: typing.Optional[int] = None,
        timestamp_to: typing.Optional[int] = None,
        interval: typing.Optional[str] = None,
        intervals_count: typing.Optional[int] = None,
        filters: typing.Optional[typing.List[str]] = None,
        stats_groups: typing.Optional[typing.List[str]] = None,
        extended: typing.Optional[BaseBoolInt] = None,
    ) -> typing.Union[dict, StatsGetResponse]:
        """
        :param group_id: - Community ID.
        :param app_id: - Application ID.
        :param timestamp_from:
        :param timestamp_to:
        :param interval:
        :param intervals_count:
        :param filters:
        :param stats_groups:
        :param extended:
        :param return_raw_response: - return result at dict
        :return:
        """
        # get_params() builds the request payload from this function's locals(),
        # so the parameter names above presumably mirror the VK API parameter
        # names -- do not rename them.
        params = get_params(locals())
        raw_result = await self.api_request("get", params)
        if return_raw_response:
            return raw_result
        result = StatsGetResponse(**raw_result)
        return result
    async def get_post_reach(
        self,
        owner_id: str,
        post_ids: typing.List[int],
        return_raw_response: bool = False,
    ) -> typing.Union[dict, StatsGetPostReachResponse]:
        """
        :param owner_id: - post owner community id. Specify with "-" sign.
        :param post_ids: - wall posts id
        :param return_raw_response: - return result at dict
        :return:
        """
        # Payload built from locals(); parameter names must stay as-is.
        params = get_params(locals())
        raw_result = await self.api_request("getPostReach", params)
        if return_raw_response:
            return raw_result
        result = StatsGetPostReachResponse(**raw_result)
        return result
    async def track_visitor(
        self, id: str, return_raw_response: bool = False,
    ) -> typing.Union[dict, BaseOkResponse]:
        """
        :param id:
        :param return_raw_response: - return result at dict
        :return:
        """
        # NOTE: the parameter name `id` shadows the builtin, but it is required
        # to match the API parameter name because of get_params(locals()).
        params = get_params(locals())
        raw_result = await self.api_request("trackVisitor", params)
        if return_raw_response:
            return raw_result
        result = BaseOkResponse(**raw_result)
        return result
| 30.426829 | 74 | 0.612425 |
be1d8783a1a82eadd2be0fefd039c5f8e9bbd33f | 1,868 | py | Python | flightservices/migrations/0001_initial.py | ashutoshacharya24/FlightReservation | b4cb991ba07fb3537b54b56e697ccc22b0f98c45 | [
"MIT"
] | null | null | null | flightservices/migrations/0001_initial.py | ashutoshacharya24/FlightReservation | b4cb991ba07fb3537b54b56e697ccc22b0f98c45 | [
"MIT"
] | null | null | null | flightservices/migrations/0001_initial.py | ashutoshacharya24/FlightReservation | b4cb991ba07fb3537b54b56e697ccc22b0f98c45 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-10-31 11:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the flightservices app: Flight, Passenger, and a
    # Reservation linking exactly one Flight to exactly one Passenger.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Flight',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('flightNumber', models.CharField(max_length=20)),
                ('operatingAirlines', models.CharField(max_length=20)),
                ('departureCity', models.CharField(max_length=20)),
                ('arrivalCity', models.CharField(max_length=20)),
                ('dateOfDeparture', models.DateField()),
                ('estimatedTimeOfDeparture', models.TimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Passenger',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstName', models.CharField(max_length=20)),
                ('lastName', models.CharField(max_length=20)),
                # NOTE(review): 'middletName' looks like a typo for 'middleName',
                # but it must mirror the model field; fix the model and add a
                # rename migration rather than editing this (applied) migration.
                ('middletName', models.CharField(max_length=20)),
                ('email', models.CharField(max_length=20)),
                ('phone', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Reservation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # OneToOneField: each flight/passenger can appear in at most one
                # reservation row.
                ('flight', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='flightservices.flight')),
                ('passenger', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='flightservices.passenger')),
            ],
        ),
    ]
| 39.744681 | 128 | 0.581906 |
4f55c3956f3ddbb5daa3ff03c386ddfacbc522c5 | 1,701 | py | Python | app/core/migrations/0001_initial.py | SachinI001/recipe-app-api | d18331b4150f7f2958e75a9e6d97e079f6bdc2cc | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | SachinI001/recipe-app-api | d18331b4150f7f2958e75a9e6d97e079f6bdc2cc | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | SachinI001/recipe-app-api | d18331b4150f7f2958e75a9e6d97e079f6bdc2cc | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2021-12-30 14:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema for the custom User model (email login, no username);
    # depends on django.contrib.auth for Group/Permission M2M tables.
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Email is the unique login identifier for this model.
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.029412 | 266 | 0.637272 |
54e3b717b771ad42a25b162c422d29ae5bb70bc4 | 3,410 | py | Python | pyquest/benchmarks.py | hillmich/quantum-benchmarks-1 | e7ab97e004f638d8681b5ee9cbbe662d64bc3378 | [
"MIT"
] | 79 | 2019-07-03T01:54:30.000Z | 2021-04-19T12:28:08.000Z | pyquest/benchmarks.py | hillmich/quantum-benchmarks-1 | e7ab97e004f638d8681b5ee9cbbe662d64bc3378 | [
"MIT"
] | 28 | 2019-07-16T21:03:49.000Z | 2021-02-14T14:59:45.000Z | pyquest/benchmarks.py | hillmich/quantum-benchmarks-1 | e7ab97e004f638d8681b5ee9cbbe662d64bc3378 | [
"MIT"
] | 21 | 2019-07-04T05:21:53.000Z | 2021-02-22T18:59:47.000Z | import numpy as np
from pyquest_cffi import quest
import pytest
env = quest.createQuESTEnv()
nqubits_list = range(4,26)
def entangler(circuit, qubits, pairs):
    # NOTE(review): dead code -- this circuit-object variant is shadowed by the
    # quest-based `entangler(qubits, nqubits, pairs)` defined later in this
    # file; nothing can call this definition. Kept for reference only.
    for a, b in pairs:
        circuit.cx(qubits[a], qubits[b])
    return circuit
def first_rotation(qubits, nqubits):
    """Apply a random-angle Rx followed by a random-angle Rz on every qubit."""
    for wire in range(nqubits):
        quest.rotateX(qubits, wire, np.random.rand())
        quest.rotateZ(qubits, wire, np.random.rand())
def mid_rotation(qubits, nqubits):
    """Apply a random-angle Rz-Rx-Rz Euler rotation on every qubit."""
    for wire in range(nqubits):
        quest.rotateZ(qubits, wire, np.random.rand())
        quest.rotateX(qubits, wire, np.random.rand())
        quest.rotateZ(qubits, wire, np.random.rand())
def last_rotation(qubits, nqubits):
    """Apply a random-angle Rz followed by a random-angle Rx on every qubit."""
    for wire in range(nqubits):
        quest.rotateZ(qubits, wire, np.random.rand())
        quest.rotateX(qubits, wire, np.random.rand())
def entangler(qubits, nqubits, pairs):
    """Apply a CNOT for each (control, target) pair.

    *nqubits* is unused but kept for signature parity with the rotation layers.
    """
    for control, target in pairs:
        quest.controlledNot(qubits, control, target)
def run_qft(qubits, nqubits):
    """Apply a quantum Fourier transform circuit to *qubits* and return it."""
    # Hadamard + controlled phase cascade, highest wire first.
    for target in reversed(range(nqubits)):
        quest.hadamard(qubits, target)
        for control in range(target):
            quest.controlledPhaseShift(qubits, control, target, np.pi / (2 ** (target - control)))
    # Final bit-reversal via swaps.
    for low in range(nqubits // 2):
        quest.swapGate(qubits, low, nqubits - low - 1)
    return qubits
def run_qcbm(qubits, nqubits, depth, pairs):
    """QCBM ansatz: rotation layer + entangler, repeated *depth* times, then a final rotation layer."""
    first_rotation(qubits, nqubits)
    entangler(qubits, nqubits, pairs)
    for _layer in range(depth):
        mid_rotation(qubits, nqubits)
        entangler(qubits, nqubits, pairs)
    last_rotation(qubits, nqubits)
    return qubits
def run_bench(benchmark, gate, nqubits, args):
    """Allocate an *nqubits* register and benchmark *gate* applied to it with *args*."""
    register = quest.createQureg(nqubits, env)
    benchmark(gate, register, *args)
@pytest.mark.parametrize('nqubits', nqubits_list)
def test_X(benchmark, nqubits):
    # Benchmark a single Pauli-X gate on qubit 3.
    benchmark.group = "X"
    run_bench(benchmark, quest.pauliX, nqubits, (3, ))
@pytest.mark.parametrize('nqubits', nqubits_list)
def test_H(benchmark, nqubits):
    # Benchmark a single Hadamard gate on qubit 3.
    benchmark.group = "H"
    run_bench(benchmark, quest.hadamard, nqubits, (3, ))
@pytest.mark.parametrize('nqubits', nqubits_list)
def test_T(benchmark, nqubits):
    # Benchmark a single T gate on qubit 3.
    benchmark.group = "T"
    run_bench(benchmark, quest.tGate, nqubits, (3, ))
@pytest.mark.parametrize('nqubits', nqubits_list)
def test_Rx(benchmark, nqubits):
    # Benchmark a single Rx(0.5) rotation on qubit 3.
    benchmark.group = "Rx"
    run_bench(benchmark, quest.rotateX, nqubits, (3, 0.5))
@pytest.mark.parametrize('nqubits', nqubits_list)
def test_Rz(benchmark, nqubits):
    # Benchmark a single Rz(0.5) rotation on qubit 3.
    benchmark.group = "Rz"
    run_bench(benchmark, quest.rotateZ, nqubits, (3, 0.5))
@pytest.mark.parametrize('nqubits', nqubits_list)
def test_CNOT(benchmark, nqubits):
    # Benchmark a single CNOT (control 2, target 3).
    benchmark.group = "CNOT"
    run_bench(benchmark, quest.controlledNot, nqubits, (2, 3))
@pytest.mark.parametrize('nqubits', nqubits_list)
def test_Toffoli(benchmark, nqubits):
    # Benchmark a Toffoli built as a double-controlled X unitary
    # (controls [0, 1], target 2); X is passed as (real, imag) element pairs.
    benchmark.group = "Toffoli"
    X = ((0.0, 0.0), (1.0, 0.0), (1.0, 0.0), (0.0, 0.0))
    qubits = quest.createQureg(nqubits, env)
    benchmark(quest.multiControlledUnitary, qubits, [0, 1], 2, 2, X)
@pytest.mark.parametrize('nqubits', nqubits_list)
def test_QFT(benchmark, nqubits):
    # Benchmark the full QFT circuit (see run_qft).
    benchmark.group = "QFT"
    qubits = quest.createQureg(nqubits, env)
    benchmark(run_qft, qubits, nqubits)
@pytest.mark.parametrize('nqubits', nqubits_list)
def test_QCBM(benchmark, nqubits):
    # Benchmark a depth-9 QCBM ansatz with ring-topology entangling pairs.
    benchmark.group = "QCBM"
    qubits = quest.createQureg(nqubits, env)
    pairs = [(i, (i + 1) % nqubits) for i in range(nqubits)]
    benchmark(run_qcbm, qubits, nqubits, 9, pairs)
| 31.869159 | 76 | 0.682698 |
4d2e66ab5b9ff8e9d2e2f8ccdf8913a21853d61b | 7,910 | py | Python | exchange_server/tests/common.py | brentm5/integrations-core | 5cac8788c95d8820435ef9c5d32d6a5463cf491d | [
"BSD-3-Clause"
] | 4 | 2021-06-21T19:21:49.000Z | 2021-06-23T21:21:55.000Z | exchange_server/tests/common.py | brentm5/integrations-core | 5cac8788c95d8820435ef9c5d32d6a5463cf491d | [
"BSD-3-Clause"
] | null | null | null | exchange_server/tests/common.py | brentm5/integrations-core | 5cac8788c95d8820435ef9c5d32d6a5463cf491d | [
"BSD-3-Clause"
] | 1 | 2021-06-21T19:21:51.000Z | 2021-06-21T19:21:51.000Z | # (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# Minimal check instance: local host only.
MINIMAL_INSTANCE = {
    'host': '.',
}
# Name of the check under test.
CHECK_NAME = 'exchange_server'
# Expected perf-counter instance names for the ESE database counters.
DATABASE_INSTANCES = [
    'Information Store/_Total',
    'Information Store - Mailbox Database 1266275882/_Total',
    'edgetransport/_Total',
    'edgetransport/Transport Mail Database',
    'edgetransport/IP Filtering Database',
]
# Expected per-process instance names for the AD-access process counters.
EXCHANGE_PROCESSES = [
    'MSExchangeHMHost MSExchangeHM 2212',
    'Microsoft.Exchange.Directory.TopologyService',
    'umservice',
    'MSExchangeFrontendTransport',
    'MSExchangeTransportLogSearch LogSearchSvc 4932',
    'Microsoft.Exchange.Store.Service',
    'ForefrontActiveDirectoryConnector',
    'MSExchangeMailboxAssistants',
    'MSExchangeMailboxReplication MSExchMbxRepl 3832',
    'MSExchangeDelivery',
    'msexchangerepl',
    'Microsoft.Exchange.RpcClientAccess.Service',
    'Microsoft.Exchange.ServiceHost EMS 4360',
    'MSExchangeSubmission',
    'MSExchangeThrottling',
    'MSExchangeHMWorker ExHMWorker 4668',
    'Microsoft.Exchange.UM.CallRouter',
    'noderunner noderunner 3876',
    'noderunner noderunner 3376',
    'noderunner noderunner 3736',
    'noderunner noderunner 3956',
    'MSExchangeTransport',
    'EdgeTransport Transport 5732',
    'w3wp EWS 1656',
    'w3wp',
    'w3wp ECP 7404',
    'w3wp AirSync 7704',
    'w3wp OWA 7648',
    'w3wp',
    'w3wp',
    'w3wp RemotePS 8932',
    'w3wp',
    'Microsoft.Exchange.EdgeSyncSvc',
    'Microsoft.Exchange.Store.Worker',
    'w3wp UNKNOWN 9332',
    'powershell EMS 9000',
    'umservice',
    'UMWorkerProcess UM 4304',
    'Microsoft.Exchange.Search.Service',
    'MSExchangeHMHost MSExchangeHM _Total',
    'MSExchangeTransportLogSearch LogSearchSvc _Total',
    'MSExchangeMailboxReplication MSExchMbxRepl _Total',
    'Microsoft.Exchange.ServiceHost EMS _Total',
    'MSExchangeHMWorker ExHMWorker _Total',
    'noderunner noderunner _Total',
    'EdgeTransport Transport _Total',
    'w3wp EWS _Total',
    'w3wp ECP _Total',
    'w3wp AirSync _Total',
    'w3wp OWA _Total',
    'w3wp RemotePS _Total',
    'w3wp UNKNOWN _Total',
    'powershell EMS _Total',
    'UMWorkerProcess UM _Total',
]
# Expected instance names for the HTTP proxy counters.
PROXY_INSTANCES = [
    'remoteps',
    'ews',
    'ecp',
    'oab',
    'autodiscover',
    'eas',
    'owa',
    'unknown',
    'win-k2olfvr52p5',
    'rpchttp'
]
# Expected IIS web-site instance names.
WEB_SITE_INSTANCES = [
    '_Total',
    'Default Web Site',
    'Exchange Back End',
]
# Expected instance names for the workload-management counters.
WORKLOAD_INSTANCES = [
    'msexchangemailboxreplication_mailboxreplicationservicehighpriority',
    'msexchangemailboxreplication_mailboxreplicationservice',
    'msexchangemailboxassistants_sitemailboxassistant_site mailbox assistant',
    'msexchangemailboxassistants_peoplerelevanceassistant',
    'msexchangemailboxassistants_oabgeneratorassistant',
    'msexchangemailboxassistants_publicfolderassistant',
    'msexchangemailboxassistants_directoryprocessorassistant',
    'msexchangemailboxassistants_storemaintenanceassistant_storedsmaintenanceassistant',
    'msexchangemailboxassistants_storemaintenanceassistant',
    'msexchangemailboxassistants_umreportingassistant',
    'msexchangemailboxassistants_calendarsyncassistant',
    'msexchangemailboxassistants_topnassistant_topnwordsassistant',
    'msexchangemailboxassistants_sharingpolicyassistant',
    'msexchangemailboxassistants_calendarrepairassistant',
    'msexchangemailboxassistants_junkemailoptionscommitterassistant',
    'msexchangemailboxassistants_elcassistant',
]
# Expected instance names for the Information Store per-client-type counters.
CLIENT_TYPE_INSTANCES = [
    'ediscoverysearch',
    'publicfoldersystem',
    'simplemigration',
    'loadgen',
    'storeactivemonitoring',
    'teammailbox',
    'sms',
    'inference',
    'maintenance',
    'ha',
    'transportsync',
    'migration',
    'momt',
    'timebasedassistants',
    'approvalapi',
    'webservices',
    'unifiedmessaging',
    'monitoring',
    'management',
    'elc',
    'availabilityservice',
    'contentindexing',
    'rpchttp',
    'popimap',
    'owa',
    'eventbasedassistants',
    'airsync',
    'transport',
    'user',
    'administrator',
    'system',
    '_total',
]
# Metric name -> expected perf-counter instances; None presumably marks a
# single/unnamed-instance counter (TODO confirm against the check code).
METRIC_INSTANCES = {
    'exchange.adaccess_domain_controllers.ldap_read': ['win-k2olfvr52p5.croissant.datad0g.com'],
    'exchange.adaccess_domain_controllers.ldap_search': ['win-k2olfvr52p5.croissant.datad0g.com'],
    'exchange.adaccess_processes.ldap_read': EXCHANGE_PROCESSES,
    'exchange.adaccess_processes.ldap_search': EXCHANGE_PROCESSES,
    'exchange.processor.cpu_time': None,
    'exchange.processor.cpu_user': None,
    'exchange.processor.cpu_privileged': None,
    'exchange.processor.queue_length': None,
    'exchange.memory.available': None,
    'exchange.memory.committed': None,
    'exchange.network.outbound_errors': ['AWS PV Network Device', 'isatap.{C7BAFAFE-DBF4-4C76-B406-8A25283E4CF9}'],
    'exchange.network.tcpv6.connection_failures': None,
    # NOTE(review): duplicate key -- the second entry silently wins in a dict
    # literal; one of these was probably meant to be a different metric.
    'exchange.network.tcpv4.conns_reset': None,
    'exchange.network.tcpv4.conns_reset': None,
    'exchange.netlogon.semaphore_waiters': ['_Total'],
    'exchange.netlogon.semaphore_holders': ['_Total'],
    'exchange.netlogon.semaphore_acquires': ['_Total'],
    'exchange.netlogon.semaphore_timeouts': ['_Total'],
    'exchange.netlogon.semaphore_hold_time': ['_Total'],
    # Database counters
    'exchange.database.io_reads_avg_latency': DATABASE_INSTANCES,
    'exchange.database.io_writes_avg_latency': DATABASE_INSTANCES,
    'exchange.database.io_log_writes_avg_latency': DATABASE_INSTANCES,
    'exchange.database.io_db_reads_recovery_avg_latency': DATABASE_INSTANCES,
    'exchange.database.io_db_writes_recovery_avg_latency': DATABASE_INSTANCES,
    'exchange.database.io_db_reads_attached_persec': DATABASE_INSTANCES,
    'exchange.database.io_db_writes_attached_persec': DATABASE_INSTANCES,
    'exchange.database.io_log_writes_persec': DATABASE_INSTANCES,
    'exchange.activemanager.database_mounted': None,
    # RPC Client Access Counters
    'exchange.rpc.averaged_latency': None,
    'exchange.rpc.requests': None,
    'exchange.rpc.active_user_count': None,
    'exchange.rpc.conn_count': None,
    'exchange.rpc.ops_persec': None,
    'exchange.rpc.user_count': None,
    # HTTP Proxy Counters
    'exchange.httpproxy.server_locator_latency': PROXY_INSTANCES,
    'exchange.httpproxy.avg_auth_latency': PROXY_INSTANCES,
    'exchange.httpproxy.clientaccess_processing_latency': PROXY_INSTANCES,
    'exchange.httpproxy.mailbox_proxy_failure_rate': PROXY_INSTANCES,
    'exchange.httpproxy.outstanding_requests': PROXY_INSTANCES,
    'exchange.httpproxy.proxy_requests_persec': PROXY_INSTANCES,
    'exchange.httpproxy.requests_persec': PROXY_INSTANCES,
    # Information Store Counters
    'exchange.is.store.rpc_requests': ['mailbox database 1266275882', '_total'],
    'exchange.is.clienttype.rpc_latency': CLIENT_TYPE_INSTANCES,
    'exchange.is.store.rpc_latency': ['mailbox database 1266275882', '_total'],
    'exchange.is.store.rpc_ops_persec': ['mailbox database 1266275882', '_total'],
    'exchange.is.clienttype.rpc_ops_persec': CLIENT_TYPE_INSTANCES,
    # Client Access Server Counters
    'exchange.activesync.requests_persec': None,
    'exchange.activesync.ping_pending': None,
    'exchange.activesync.sync_persec': None,
    'exchange.owa.unique_users': None,
    'exchange.owa.requests_persec': None,
    'exchange.autodiscover.requests_persec': None,
    'exchange.ws.requests_persec': None,
    'exchange.ws.current_connections_total': None,
    'exchange.ws.current_connections_default_website': WEB_SITE_INSTANCES,
    'exchange.ws.connection_attempts': None,
    'exchange.ws.other_attempts': None,
    # Workload Management Counters
    'exchange.workload_management.active_tasks': WORKLOAD_INSTANCES,
    'exchange.workload_management.completed_tasks': WORKLOAD_INSTANCES,
    'exchange.workload_management.queued_tasks': WORKLOAD_INSTANCES,
}
| 34.541485 | 115 | 0.739317 |
d049e85105f525dc6b01e2a9f5148b01fd26c3fe | 7,943 | py | Python | kopf/reactor/causation.py | MarkusH/kopf | caaf399c46af0fcb10730eec2db303484d31b38b | [
"MIT"
] | 1,038 | 2019-03-26T16:32:56.000Z | 2022-03-27T09:15:03.000Z | kopf/reactor/causation.py | MarkusH/kopf | caaf399c46af0fcb10730eec2db303484d31b38b | [
"MIT"
] | 393 | 2019-03-26T13:43:42.000Z | 2020-09-14T13:18:14.000Z | kopf/reactor/causation.py | MarkusH/kopf | caaf399c46af0fcb10730eec2db303484d31b38b | [
"MIT"
] | 116 | 2019-03-31T23:01:09.000Z | 2022-03-18T16:44:58.000Z | """
Detection of the event causes, based on the resource state.
The low-level watch-events are highly limited in information on what
caused them, and they only notify that the object was changed somehow:
* ``ADDED`` for the newly created objects (or for the first-time listing).
* ``MODIFIED`` for the changes of any field, be that metadata, spec, or status.
* ``DELETED`` for the actual deletion of the object post-factum.
The conversion of low-level *events* to high level *causes* is done by
checking the object's state and comparing it to the saved last-seen state.
This allows to track which specific fields were changed, and if are those
changes are important enough to call the handlers: e.g. the ``status`` changes
are ignored, so as some selected system fields of the ``metadata``.
For deletion, the cause is detected when the object is just marked for deletion,
not when it is actually deleted (as the events notify): so that the handlers
could execute on the yet-existing object (and its children, if created).
"""
import dataclasses
import logging
import warnings
from typing import Any, Optional, Union, TypeVar
from kopf.storage import finalizers
from kopf.structs import bodies
from kopf.structs import configuration
from kopf.structs import containers
from kopf.structs import diffs
from kopf.structs import handlers
from kopf.structs import patches
from kopf.structs import primitives
from kopf.structs import resources
@dataclasses.dataclass
class BaseCause:
    """Base class for all cause objects; carries the contextual logger."""
    logger: Union[logging.Logger, logging.LoggerAdapter]
@dataclasses.dataclass
class ActivityCause(BaseCause):
    """Cause of an operator activity, with the operator settings attached."""
    activity: handlers.Activity
    settings: configuration.OperatorSettings
@dataclasses.dataclass
class ResourceCause(BaseCause):
    """Base for all causes tied to a specific resource object."""
    resource: resources.Resource  # which resource kind this cause belongs to
    patch: patches.Patch          # patch accumulated for the object (applied by callers)
    body: bodies.Body             # the object's body as currently known
    memo: containers.Memo         # per-object scratchpad passed to handlers
@dataclasses.dataclass
class ResourceWatchingCause(ResourceCause):
    """
    The raw event received from the API.
    It is a read-only mapping with some extra properties and methods.
    """
    type: bodies.RawEventType  # 'ADDED' / 'MODIFIED' / 'DELETED' (see module docstring)
    raw: bodies.RawEvent       # the original watch-event payload
@dataclasses.dataclass
class ResourceSpawningCause(ResourceCause):
    """
    An internal daemon is spawning: tasks, threads, timers.
    Used only on the first appearance of a resource as a container for resource-
    specific objects (loggers, etc).
    """
    # NOTE(review): exact semantics not evident here -- presumably "restart the
    # daemons from scratch"; confirm at the spawning call sites.
    reset: bool
@dataclasses.dataclass
class ResourceChangingCause(ResourceCause):
    """
    The cause is what has caused the whole reaction as a chain of handlers.
    Unlike the low-level Kubernetes watch-events, the cause is aware
    of actual field changes, including multi-handler changes.
    """
    initial: bool                 # object seen in the operator's first listing (see module docstring)
    reason: handlers.Reason       # high-level reason: CREATE/UPDATE/DELETE/RESUME/...
    diff: diffs.Diff = diffs.EMPTY                    # essence diff between old and new
    old: Optional[bodies.BodyEssence] = None          # last-seen essence, if any
    new: Optional[bodies.BodyEssence] = None          # current essence, if any
    @property
    def event(self) -> handlers.Reason:
        # Deprecated alias for .reason, kept for backward compatibility.
        warnings.warn("cause.event is deprecated; use cause.reason.", DeprecationWarning)
        return self.reason
    @property
    def deleted(self) -> bool:
        """ Used to conditionally skip/select the @on.resume handlers if the object is deleted. """
        return finalizers.is_deletion_ongoing(self.body)
@dataclasses.dataclass
class DaemonCause(ResourceCause):
    """
    An exceptional case of a container for daemon invocation kwargs.
    Regular causes are usually short-term, triggered by a watch-stream event,
    and disappear once the event is processed. The processing includes
    daemon spawning: the original cause and its temporary watch-event
    should not be remembered though the whole life cycle of a daemon.
    Instead, a new artificial daemon-cause is used (this class), which
    passes the kwarg values to the invocation routines. It only contains
    the long-living kwargs: loggers, per-daemon stoppers, body-views
    (with only the latest bodies as contained values), etc.
    Unlike other causes, it is created not in the processing routines once
    per event, but in the daemon spawning routines once per daemon (or a timer).
    Therefore, it is not "detected", but is created directly as an instance.
    """
    # Constructed directly by the daemon-spawning code, not by a detect_*() function.
    stopper: primitives.DaemonStopper  # a signaller for the termination and its reason.
def detect_resource_watching_cause(
        raw_event: bodies.RawEvent,
        body: bodies.Body,
        **kwargs: Any,
) -> ResourceWatchingCause:
    """Wrap a low-level watch-event into a ResourceWatchingCause."""
    event_type = raw_event['type']
    return ResourceWatchingCause(
        type=event_type,
        raw=raw_event,
        body=body,
        **kwargs)
def detect_resource_spawning_cause(
        body: bodies.Body,
        **kwargs: Any,
) -> ResourceSpawningCause:
    """Build a ResourceSpawningCause for daemon/timer spawning on this object."""
    return ResourceSpawningCause(body=body, **kwargs)
def detect_resource_changing_cause(
        *,
        finalizer: str,
        raw_event: bodies.RawEvent,
        body: bodies.Body,
        old: Optional[bodies.BodyEssence] = None,
        new: Optional[bodies.BodyEssence] = None,
        diff: Optional[diffs.Diff] = None,
        initial: bool = False,
        **kwargs: Any,
) -> ResourceChangingCause:
    """
    Detect the cause of the event to be handled.
    This is a purely computational function with no side-effects.
    The causes are then consumed by `custom_object_handler`,
    which performs the actual handler invocation, logging, patching,
    and other side-effects.

    The guard clauses below are ORDER-SENSITIVE: GONE, then FREE/DELETE,
    then CREATE, then RESUME/NOOP, then UPDATE. Do not reorder them.

    :param finalizer: the operator's finalizer id, used to check blocking.
    :param raw_event: the low-level watch-event (only its ``type`` is used).
    :param old/new/diff: last-seen vs. current essence and their diff.
    :param initial: whether the object came from the operator's first listing.
    """
    # Put them back to the pass-through kwargs (to avoid code duplication).
    kwargs.update(body=body, old=old, new=new, initial=initial)
    if diff is not None:
        kwargs.update(diff=diff)
    # The object was really deleted from the cluster. But we do not care anymore.
    if raw_event['type'] == 'DELETED':
        return ResourceChangingCause(reason=handlers.Reason.GONE, **kwargs)
    # The finalizer has been just removed. We are fully done.
    deletion_is_ongoing = finalizers.is_deletion_ongoing(body=body)
    deletion_is_blocked = finalizers.is_deletion_blocked(body=body, finalizer=finalizer)
    if deletion_is_ongoing and not deletion_is_blocked:
        return ResourceChangingCause(reason=handlers.Reason.FREE, **kwargs)
    if deletion_is_ongoing:
        return ResourceChangingCause(reason=handlers.Reason.DELETE, **kwargs)
    # For an object seen for the first time (i.e. just-created), call the creation handlers,
    # then mark the state as if it was seen when the creation has finished.
    # Creation never mixes with resuming, even if an object is detected on startup (first listing).
    if old is None:  # i.e. we have no essence stored
        kwargs['initial'] = False
        return ResourceChangingCause(reason=handlers.Reason.CREATE, **kwargs)
    # Cases with no essence changes are usually ignored (NOOP). But for the not-yet-resumed objects,
    # we simulate a fake cause to invoke the resuming handlers. For cases with the essence changes,
    # the resuming handlers will be mixed-in to the regular cause handling ("cuckoo-style")
    # due to the ``initial=True`` flag on the cause, regardless of the reason.
    if not diff and initial:
        return ResourceChangingCause(reason=handlers.Reason.RESUME, **kwargs)
    # The previous step triggers one more patch operation without actual changes. Ignore it.
    # Either the last-seen state or the status field has changed.
    if not diff:
        return ResourceChangingCause(reason=handlers.Reason.NOOP, **kwargs)
    # And what is left, is the update operation on one of the useful fields of the existing object.
    return ResourceChangingCause(reason=handlers.Reason.UPDATE, **kwargs)
# Preserves the concrete cause subclass through enrich_cause().
_CT = TypeVar('_CT', bound=BaseCause)
def enrich_cause(
        cause: _CT,
        **kwargs: Any,
) -> _CT:
    """
    Produce a new derived cause with some fields modified.
    Usually, those are the old/new/diff fields, and used when a field-handler
    is invoked (the old/new/diff refer to the field's values only).
    """
    return dataclasses.replace(cause, **kwargs)
| 35.779279 | 100 | 0.724286 |
7c254b41be74cb251fbf9f724a7a0b874ee40321 | 1,352 | py | Python | sandbox/src1/TCSE3-3rd-examples/src/py/regex/swap3.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | 5 | 2016-05-28T14:12:28.000Z | 2021-04-22T10:23:12.000Z | sandbox/src1/TCSE3-3rd-examples/src/py/regex/swap3.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | null | null | null | sandbox/src1/TCSE3-3rd-examples/src/py/regex/swap3.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | 2 | 2015-07-13T10:04:10.000Z | 2021-04-22T10:23:23.000Z | #!/usr/bin/env python
import re, glob, string
# as swap1.py except that we here use a regex with comments and compile it:
arg = r'[^,]+'
call = re.compile(r"""
(?P<start> # preserve start of function string
superLibFunc # name of function to match
\s* # optional whitespace
\( # parenthesis before argument list
\s* # optional whitespace
) # end of <start>
(?P<arg1>.+?) # a C variable name, non-greedy
(?P<middle> # preserve middle of function string
\s*,\s* # comma with optional surrounding whitespace
) # end of <middle>
(?P<arg2>.+?) # a C variable name, non-greedy
(?P<end> # preserve end of function string
\s* # optional whitespace
\) # closing parenthesis
) # End of <end>
""" , re.VERBOSE | re.DOTALL)
cfiles = ['.test1.c']
for cfile in cfiles:
print 'Treating',cfile
file = open(cfile, 'r')
filestr = file.read() # load all lines into a string
file.close()
filestr = call.sub(r'superLibFunc(\g<arg2>\g<middle>\g<arg1>\g<end>)',
filestr)
file = open(cfile + '.tmp', 'w')
file.write(filestr) # print everything into cfile.tmp
| 40.969697 | 75 | 0.527367 |
44bc6a45f1b18c3ca564931bc13a5b1c60d0729c | 606 | py | Python | leetcode/404.py | windniw/just-for-fun | 54e5c2be145f3848811bfd127f6a89545e921570 | [
"Apache-2.0"
] | 1 | 2019-08-28T23:15:25.000Z | 2019-08-28T23:15:25.000Z | leetcode/404.py | windniw/just-for-fun | 54e5c2be145f3848811bfd127f6a89545e921570 | [
"Apache-2.0"
] | null | null | null | leetcode/404.py | windniw/just-for-fun | 54e5c2be145f3848811bfd127f6a89545e921570 | [
"Apache-2.0"
] | null | null | null | """
link: https://leetcode.com/problems/sum-of-left-leaves
problem: 求树的所有左叶子的和。
solution: 递归。
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def sumOfLeftLeaves(self, root: TreeNode) -> int:
if not root:
return 0
t = 0
if root.left and not root.left.left and not root.left.right:
t = root.left.val
return t + self.sumOfLeftLeaves(root.left) + self.sumOfLeftLeaves(root.right)
| 24.24 | 85 | 0.613861 |
b8098e4639a3270505b13b79a38370689e5f1e20 | 1,582 | py | Python | tools/util_test.py | riginding/deno | 08303b78bf487fbcd10c1450288b2cc51ab119b7 | [
"MIT"
] | 1 | 2020-08-17T07:01:51.000Z | 2020-08-17T07:01:51.000Z | tools/util_test.py | riginding/deno | 08303b78bf487fbcd10c1450288b2cc51ab119b7 | [
"MIT"
] | null | null | null | tools/util_test.py | riginding/deno | 08303b78bf487fbcd10c1450288b2cc51ab119b7 | [
"MIT"
] | null | null | null | # Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import os
from test_util import DenoTestCase, run_tests
from util import (parse_exit_code, shell_quote_win, parse_wrk_output,
root_path)
class TestUtil(DenoTestCase):
def test_parse_exit_code(self):
assert parse_exit_code('hello_error54_world') == 54
assert parse_exit_code('hello_error_world') == 1
assert parse_exit_code('hello_world') == 0
def test_shell_quote_win(self):
assert shell_quote_win('simple') == 'simple'
assert shell_quote_win(
'roof/\\isoprojection') == 'roof/\\isoprojection'
assert shell_quote_win('with space') == '"with space"'
assert shell_quote_win('embedded"quote') == '"embedded""quote"'
assert shell_quote_win(
'a"b""c\\d\\"e\\\\') == '"a""b""""c\\d\\\\""e\\\\\\\\"'
def test_parse_wrk_output(self):
f = open(os.path.join(root_path, "tools/testdata/wrk1.txt"))
stats = parse_wrk_output(f.read())
assert stats['req_per_sec'] == 1837
assert stats['max_latency'] == 6.25
f2 = open(os.path.join(root_path, "tools/testdata/wrk2.txt"))
stats2 = parse_wrk_output(f2.read())
assert stats2['req_per_sec'] == 53435
assert stats2['max_latency'] == 6.22
f3 = open(os.path.join(root_path, "tools/testdata/wrk3.txt"))
stats3 = parse_wrk_output(f3.read())
assert stats3['req_per_sec'] == 96037
assert stats3['max_latency'] == 6.36
if __name__ == '__main__':
run_tests()
| 36.790698 | 73 | 0.634008 |
913999083229481dd648d31306ce85f216ee6eb9 | 12,363 | py | Python | backdoor/backdoor-svhn/curves.py | Bhaskers-Blu-Org1/model-sanitization | 1eff7e9f35e4fd194ffc83a55e4f6688ca9bb5c3 | [
"Apache-2.0"
] | 15 | 2020-05-04T15:28:36.000Z | 2021-12-31T02:34:12.000Z | backdoor/backdoor-svhn/curves.py | Bhaskers-Blu-Org1/model-sanitization | 1eff7e9f35e4fd194ffc83a55e4f6688ca9bb5c3 | [
"Apache-2.0"
] | 1 | 2020-05-14T04:56:22.000Z | 2020-05-27T14:10:12.000Z | error-injection/injection_cifar/curves.py | IBM/model-sanitization | 1eff7e9f35e4fd194ffc83a55e4f6688ca9bb5c3 | [
"Apache-2.0"
] | 4 | 2020-06-29T15:18:57.000Z | 2022-03-27T17:04:07.000Z | import numpy as np
import math
import torch
import torch.nn.functional as F
from torch.nn import Module, Parameter
from torch.nn.modules.utils import _pair
from scipy.special import binom
class Bezier(Module):
def __init__(self, num_bends):
super(Bezier, self).__init__()
self.register_buffer(
'binom',
torch.Tensor(binom(num_bends - 1, np.arange(num_bends), dtype=np.float32))
)
self.register_buffer('range', torch.arange(0, float(num_bends)))
self.register_buffer('rev_range', torch.arange(float(num_bends - 1), -1, -1))
def forward(self, t):
return self.binom * \
torch.pow(t, self.range) * \
torch.pow((1.0 - t), self.rev_range)
class PolyChain(Module):
def __init__(self, num_bends):
super(PolyChain, self).__init__()
self.num_bends = num_bends
self.register_buffer('range', torch.arange(0, float(num_bends)))
def forward(self, t):
t_n = t * (self.num_bends - 1)
return torch.max(self.range.new([0.0]), 1.0 - torch.abs(t_n - self.range))
class CurveModule(Module):
def __init__(self, fix_points, parameter_names=()):
super(CurveModule, self).__init__()
self.fix_points = fix_points
self.num_bends = len(self.fix_points)
self.parameter_names = parameter_names
self.l2 = 0.0
def compute_weights_t(self, coeffs_t):
w_t = [None] * len(self.parameter_names)
self.l2 = 0.0
for i, parameter_name in enumerate(self.parameter_names):
for j, coeff in enumerate(coeffs_t):
parameter = getattr(self, '%s_%d' % (parameter_name, j))
if parameter is not None:
if w_t[i] is None:
w_t[i] = parameter * coeff
else:
w_t[i] += parameter * coeff
if w_t[i] is not None:
self.l2 += torch.sum(w_t[i] ** 2)
return w_t
class Linear(CurveModule):
def __init__(self, in_features, out_features, fix_points, bias=True):
super(Linear, self).__init__(fix_points, ('weight', 'bias'))
self.in_features = in_features
self.out_features = out_features
self.l2 = 0.0
for i, fixed in enumerate(self.fix_points):
self.register_parameter(
'weight_%d' % i,
Parameter(torch.Tensor(out_features, in_features), requires_grad=not fixed)
)
for i, fixed in enumerate(self.fix_points):
if bias:
self.register_parameter(
'bias_%d' % i,
Parameter(torch.Tensor(out_features), requires_grad=not fixed)
)
else:
self.register_parameter('bias_%d' % i, None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.in_features)
for i in range(self.num_bends):
getattr(self, 'weight_%d' % i).data.uniform_(-stdv, stdv)
bias = getattr(self, 'bias_%d' % i)
if bias is not None:
bias.data.uniform_(-stdv, stdv)
def forward(self, input, coeffs_t):
weight_t, bias_t = self.compute_weights_t(coeffs_t)
return F.linear(input, weight_t, bias_t)
class Conv2d(CurveModule):
def __init__(self, in_channels, out_channels, kernel_size, fix_points, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(Conv2d, self).__init__(fix_points, ('weight', 'bias'))
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
for i, fixed in enumerate(self.fix_points):
self.register_parameter(
'weight_%d' % i,
Parameter(
torch.Tensor(out_channels, in_channels // groups, *kernel_size),
requires_grad=not fixed
)
)
for i, fixed in enumerate(self.fix_points):
if bias:
self.register_parameter(
'bias_%d' % i,
Parameter(torch.Tensor(out_channels), requires_grad=not fixed)
)
else:
self.register_parameter('bias_%d' % i, None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
for i in range(self.num_bends):
getattr(self, 'weight_%d' % i).data.uniform_(-stdv, stdv)
bias = getattr(self, 'bias_%d' % i)
if bias is not None:
bias.data.uniform_(-stdv, stdv)
def forward(self, input, coeffs_t):
weight_t, bias_t = self.compute_weights_t(coeffs_t)
return F.conv2d(input, weight_t, bias_t, self.stride,
self.padding, self.dilation, self.groups)
class _BatchNorm(CurveModule):
_version = 2
def __init__(self, num_features, fix_points, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True):
super(_BatchNorm, self).__init__(fix_points, ('weight', 'bias'))
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
self.l2 = 0.0
for i, fixed in enumerate(self.fix_points):
if self.affine:
self.register_parameter(
'weight_%d' % i,
Parameter(torch.Tensor(num_features), requires_grad=not fixed)
)
else:
self.register_parameter('weight_%d' % i, None)
for i, fixed in enumerate(self.fix_points):
if self.affine:
self.register_parameter(
'bias_%d' % i,
Parameter(torch.Tensor(num_features), requires_grad=not fixed)
)
else:
self.register_parameter('bias_%d' % i, None)
if self.track_running_stats:
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
else:
self.register_parameter('running_mean', None)
self.register_parameter('running_var', None)
self.register_parameter('num_batches_tracked', None)
self.reset_parameters()
def reset_running_stats(self):
if self.track_running_stats:
self.running_mean.zero_()
self.running_var.fill_(1)
self.num_batches_tracked.zero_()
def reset_parameters(self):
self.reset_running_stats()
if self.affine:
for i in range(self.num_bends):
getattr(self, 'weight_%d' % i).data.uniform_()
getattr(self, 'bias_%d' % i).data.zero_()
def _check_input_dim(self, input):
raise NotImplementedError
def forward(self, input, coeffs_t):
self._check_input_dim(input)
exponential_average_factor = 0.0
if self.training and self.track_running_stats:
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / self.num_batches_tracked.item()
else: # use exponential moving average
exponential_average_factor = self.momentum
weight_t, bias_t = self.compute_weights_t(coeffs_t)
return F.batch_norm(
input, self.running_mean, self.running_var, weight_t, bias_t,
self.training or not self.track_running_stats,
exponential_average_factor, self.eps)
def extra_repr(self):
return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
'track_running_stats={track_running_stats}'.format(**self.__dict__)
def _load_from_state_dict(self, state_dict, prefix, metadata, strict,
missing_keys, unexpected_keys, error_msgs):
version = metadata.get('version', None)
if (version is None or version < 2) and self.track_running_stats:
# at version 2: added num_batches_tracked buffer
# this should have a default value of 0
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key not in state_dict:
state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)
super(_BatchNorm, self)._load_from_state_dict(
state_dict, prefix, metadata, strict,
missing_keys, unexpected_keys, error_msgs)
class BatchNorm2d(_BatchNorm):
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
class CurveNet(Module):
def __init__(self, num_classes, curve, architecture, num_bends, fix_start=True, fix_end=True,
architecture_kwargs={}):
super(CurveNet, self).__init__()
self.num_classes = num_classes
self.num_bends = num_bends
self.fix_points = [fix_start] + [False] * (self.num_bends - 2) + [fix_end]
self.curve = curve
self.architecture = architecture
self.l2 = 0.0
self.coeff_layer = self.curve(self.num_bends)
self.net = self.architecture(num_classes, fix_points=self.fix_points, **architecture_kwargs)
self.curve_modules = []
for module in self.net.modules():
if issubclass(module.__class__, CurveModule):
self.curve_modules.append(module)
def import_base_parameters(self, base_model, index):
aa = list(self.net.parameters())
parameters = list(self.net.parameters())[index::self.num_bends]
base_parameters = base_model.parameters()
for parameter, base_parameter in zip(parameters, base_parameters):
parameter.data.copy_(base_parameter.data)
def import_base_buffers(self, base_model):
for buffer, base_buffer in zip(self.net._all_buffers(), base_model._all_buffers()):
buffer.data.copy_(base_buffer.data)
def export_base_parameters(self, base_model, index):
parameters = list(self.net.parameters())[index::self.num_bends]
base_parameters = base_model.parameters()
for parameter, base_parameter in zip(parameters, base_parameters):
base_parameter.data.copy_(parameter.data)
def init_linear(self):
parameters = list(self.net.parameters())
for i in range(0, len(parameters), self.num_bends):
weights = parameters[i:i+self.num_bends]
for j in range(1, self.num_bends - 1):
alpha = j * 1.0 / (self.num_bends - 1)
weights[j].data.copy_(alpha * weights[-1].data + (1.0 - alpha) * weights[0].data)
def weights(self, t):
coeffs_t = self.coeff_layer(t)
weights = []
for module in self.curve_modules:
weights.extend([w for w in module.compute_weights_t(coeffs_t) if w is not None])
return np.concatenate([w.detach().cpu().numpy().ravel() for w in weights])
def _compute_l2(self):
self.l2 = sum(module.l2 for module in self.curve_modules)
def forward(self, input, t=None):
if t is None:
t = input.data.new(1).uniform_(0.0,1.0)
coeffs_t = self.coeff_layer(t)
output = self.net(input, coeffs_t)
self._compute_l2()
return output
def l2_regularizer(weight_decay):
return lambda model: 0.5 * weight_decay * model.l2
| 38.514019 | 100 | 0.603333 |
ddb0cf854187d8f3dd0e62860cdc375315d49dc9 | 202 | py | Python | app/compile_translations.py | YuliyaSinkevich/fastogt_site_new | 9e4c98a24d1d7594a302dc58bbb862a1e6b204b7 | [
"BSD-3-Clause"
] | null | null | null | app/compile_translations.py | YuliyaSinkevich/fastogt_site_new | 9e4c98a24d1d7594a302dc58bbb862a1e6b204b7 | [
"BSD-3-Clause"
] | 1 | 2019-01-03T16:35:01.000Z | 2019-01-03T16:35:01.000Z | app/compile_translations.py | YuliyaSinkevich/fastogt_site_new | 9e4c98a24d1d7594a302dc58bbb862a1e6b204b7 | [
"BSD-3-Clause"
] | 5 | 2018-12-04T17:56:37.000Z | 2020-03-06T18:04:55.000Z | #!/usr/bin/env python3
import constants as constants
import subprocess
# pybabel compile -d translations
if __name__ == '__main__':
subprocess.call(['pybabel', 'compile', '-d', 'translations'])
| 18.363636 | 65 | 0.712871 |
2b3f6555f4ebed61fb8fc4b91090508677700fb0 | 23,310 | py | Python | plugins/php/index.py | basoro/SLEMP | ad3865e441cb5a872d01348749e79bb515152055 | [
"Apache-2.0"
] | null | null | null | plugins/php/index.py | basoro/SLEMP | ad3865e441cb5a872d01348749e79bb515152055 | [
"Apache-2.0"
] | null | null | null | plugins/php/index.py | basoro/SLEMP | ad3865e441cb5a872d01348749e79bb515152055 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
import sys
import io
import os
import time
import re
import json
import shutil
reload(sys)
sys.setdefaultencoding('utf8')
sys.path.append(os.getcwd() + "/class/core")
sys.path.append("/usr/local/lib/python2.7/site-packages")
import slemp
app_debug = False
if slemp.isAppleSystem():
app_debug = True
def getPluginName():
return 'php'
def getPluginDir():
return slemp.getPluginDir() + '/' + getPluginName()
def getServerDir():
return slemp.getServerDir() + '/' + getPluginName()
def getInitDFile(version):
if app_debug:
return '/tmp/' + getPluginName()
return '/etc/init.d/' + getPluginName() + version
def getArgs():
args = sys.argv[3:]
tmp = {}
args_len = len(args)
if args_len == 1:
t = args[0].strip('{').strip('}')
t = t.split(':')
tmp[t[0]] = t[1]
elif args_len > 1:
for i in range(len(args)):
t = args[i].split(':')
tmp[t[0]] = t[1]
return tmp
def checkArgs(data, ck=[]):
for i in range(len(ck)):
if not ck[i] in data:
return (False, slemp.returnJson(False, '参数:(' + ck[i] + ')没有!'))
return (True, slemp.returnJson(True, 'ok'))
def getConf(version):
path = getServerDir() + '/' + version + '/etc/php.ini'
return path
def status(version):
cmd = "ps -ef|grep 'php/" + version + \
"' |grep -v grep | grep -v python | awk '{print $2}'"
data = slemp.execShell(cmd)
if data[0] == '':
return 'stop'
return 'start'
def contentReplace(content, version):
service_path = slemp.getServerDir()
content = content.replace('{$ROOT_PATH}', slemp.getRootDir())
content = content.replace('{$SERVER_PATH}', service_path)
content = content.replace('{$PHP_VERSION}', version)
content = content.replace('{$LOCAL_IP}', slemp.getLocalIp())
if slemp.isAppleSystem():
# user = slemp.execShell(
# "who | sed -n '2, 1p' |awk '{print $1}'")[0].strip()
content = content.replace('{$PHP_USER}', 'nobody')
content = content.replace('{$PHP_GROUP}', 'nobody')
rep = 'listen.owner\s*=\s*(.+)\r?\n'
val = ';listen.owner = nobody\n'
content = re.sub(rep, val, content)
rep = 'listen.group\s*=\s*(.+)\r?\n'
val = ';listen.group = nobody\n'
content = re.sub(rep, val, content)
rep = 'user\s*=\s*(.+)\r?\n'
val = ';user = nobody\n'
content = re.sub(rep, val, content)
rep = r'[^\.]group\s*=\s*(.+)\r?\n'
val = ';group = nobody\n'
content = re.sub(rep, val, content)
else:
content = content.replace('{$PHP_USER}', 'www')
content = content.replace('{$PHP_GROUP}', 'www')
return content
def makeOpenrestyConf():
phpversions = ['00', '52', '53', '54', '55', '56',
'70', '71', '72', '73', '74', '80']
if slemp.isInstalledWeb():
sdir = slemp.getServerDir()
d_pathinfo = sdir + '/openresty/nginx/conf/pathinfo.conf'
if not os.path.exists(d_pathinfo):
s_pathinfo = getPluginDir() + '/conf/pathinfo.conf'
shutil.copyfile(s_pathinfo, d_pathinfo)
info = getPluginDir() + '/info.json'
content = slemp.readFile(info)
content = json.loads(content)
versions = content['versions']
tpl = getPluginDir() + '/conf/enable-php.conf'
tpl_content = slemp.readFile(tpl)
for x in phpversions:
dfile = sdir + '/openresty/nginx/conf/enable-php-' + x + '.conf'
if not os.path.exists(dfile):
if x == '00':
slemp.writeFile(dfile, '')
else:
w_content = contentReplace(tpl_content, x)
slemp.writeFile(dfile, w_content)
# php-fpm status
for version in phpversions:
dfile = sdir + '/openresty/nginx/conf/php_status/phpfpm_status_' + version + '.conf'
tpl = getPluginDir() + '/conf/phpfpm_status.conf'
if not os.path.exists(dfile):
content = slemp.readFile(tpl)
content = contentReplace(content, version)
slemp.writeFile(dfile, content)
slemp.restartWeb()
def phpPrependFile(version):
app_start = getServerDir() + '/app_start.php'
if not os.path.exists(app_start):
tpl = getPluginDir() + '/conf/app_start.php'
content = slemp.readFile(tpl)
content = contentReplace(content, version)
slemp.writeFile(app_start, content)
def phpFpmReplace(version):
desc_php_fpm = getServerDir() + '/' + version + '/etc/php-fpm.conf'
if not os.path.exists(desc_php_fpm):
tpl_php_fpm = getPluginDir() + '/conf/php-fpm.conf'
content = slemp.readFile(tpl_php_fpm)
content = contentReplace(content, version)
slemp.writeFile(desc_php_fpm, content)
else:
if version == '52':
tpl_php_fpm = tpl_php_fpm = getPluginDir() + '/conf/php-fpm-52.conf'
content = slemp.readFile(tpl_php_fpm)
slemp.writeFile(desc_php_fpm, content)
def phpFpmWwwReplace(version):
service_php_fpm_dir = getServerDir() + '/' + version + '/etc/php-fpm.d/'
if not os.path.exists(service_php_fpm_dir):
os.mkdir(service_php_fpm_dir)
service_php_fpslempww = service_php_fpm_dir + '/www.conf'
if not os.path.exists(service_php_fpslempww):
tpl_php_fpslempww = getPluginDir() + '/conf/www.conf'
content = slemp.readFile(tpl_php_fpslempww)
content = contentReplace(content, version)
slemp.writeFile(service_php_fpslempww, content)
def makePhpIni(version):
d_ini = slemp.getServerDir() + '/php/' + version + '/etc/php.ini'
if not os.path.exists(d_ini):
s_ini = getPluginDir() + '/conf/php' + version[0:1] + '.ini'
# shutil.copyfile(s_ini, d_ini)
content = slemp.readFile(s_ini)
if version == '52':
content = content + "auto_prepend_file=/home/slemp/server/php/app_start.php"
slemp.writeFile(d_ini, content)
def initReplace(version):
makeOpenrestyConf()
makePhpIni(version)
initD_path = getServerDir() + '/init.d'
if not os.path.exists(initD_path):
os.mkdir(initD_path)
file_bin = initD_path + '/php' + version
if not os.path.exists(file_bin):
file_tpl = getPluginDir() + '/init.d/php.tpl'
if version == '52':
file_tpl = getPluginDir() + '/init.d/php52.tpl'
content = slemp.readFile(file_tpl)
content = contentReplace(content, version)
slemp.writeFile(file_bin, content)
slemp.execShell('chmod +x ' + file_bin)
phpPrependFile(version)
phpFpmWwwReplace(version)
phpFpmReplace(version)
session_path = '/tmp/session'
if not os.path.exists(session_path):
os.mkdir(session_path)
if not slemp.isAppleSystem():
slemp.execShell('chown -R www:www ' + session_path)
upload_path = '/tmp/upload'
if not os.path.exists(upload_path):
os.mkdir(upload_path)
if not slemp.isAppleSystem():
slemp.execShell('chown -R www:www ' + upload_path)
return file_bin
def phpOp(version, method):
file = initReplace(version)
data = slemp.execShell(file + ' ' + method)
if data[1] == '':
return 'ok'
return data[1]
def start(version):
return phpOp(version, 'start')
def stop(version):
return phpOp(version, 'stop')
def restart(version):
return phpOp(version, 'restart')
def reload(version):
return phpOp(version, 'reload')
def initdStatus(version):
if not app_debug:
if slemp.isAppleSystem():
return "Apple Computer does not support"
initd_bin = getInitDFile(version)
if os.path.exists(initd_bin):
return 'ok'
return 'fail'
def initdInstall(version):
import shutil
if not app_debug:
if slemp.isAppleSystem():
return "Apple Computer does not support"
source_bin = initReplace(version)
initd_bin = getInitDFile(version)
shutil.copyfile(source_bin, initd_bin)
slemp.execShell('chmod +x ' + initd_bin)
slemp.execShell('chkconfig --add ' + getPluginName() + version)
return 'ok'
def initdUinstall(version):
if not app_debug:
if slemp.isAppleSystem():
return "Apple Computer does not support"
slemp.execShell('chkconfig --del ' + getPluginName())
initd_bin = getInitDFile(version)
os.remove(initd_bin)
return 'ok'
def fpmLog(version):
return getServerDir() + '/' + version + '/var/log/php-fpm.log'
def fpmSlowLog(version):
return getServerDir() + '/' + version + '/var/log/www-slow.log'
def getPhpConf(version):
gets = [
{'name': 'short_open_tag', 'type': 1, 'ps': 'Short tag support'},
{'name': 'asp_tags', 'type': 1, 'ps': 'ASP tag support'},
{'name': 'max_execution_time', 'type': 2, 'ps': 'Maximum script run time'},
{'name': 'max_input_time', 'type': 2, 'ps': 'Maximum input time'},
{'name': 'memory_limit', 'type': 2, 'ps': 'Script memory limit'},
{'name': 'post_max_size', 'type': 2, 'ps': 'POST data maximum size'},
{'name': 'file_uploads', 'type': 1, 'ps': 'Whether to allow uploading of files'},
{'name': 'upload_max_filesize', 'type': 2, 'ps': 'Maximum size allowed to upload files'},
{'name': 'max_file_uploads', 'type': 2, 'ps': 'Maximum number of files allowed to be uploaded at the same time'},
{'name': 'default_socket_timeout', 'type': 2, 'ps': 'Socket timeout'},
{'name': 'error_reporting', 'type': 3, 'ps': 'Error level'},
{'name': 'display_errors', 'type': 1, 'ps': 'Whether to output detailed error information'},
{'name': 'cgi.fix_pathinfo', 'type': 0, 'ps': 'Whether to enable pathinfo'},
{'name': 'date.timezone', 'type': 3, 'ps': 'Time zone'}
]
phpini = slemp.readFile(getServerDir() + '/' + version + '/etc/php.ini')
result = []
for g in gets:
rep = g['name'] + '\s*=\s*([0-9A-Za-z_& ~]+)(\s*;?|\r?\n)'
tmp = re.search(rep, phpini)
if not tmp:
continue
g['value'] = tmp.groups()[0]
result.append(g)
return slemp.getJson(result)
def submitPhpConf(version):
gets = ['display_errors', 'cgi.fix_pathinfo', 'date.timezone', 'short_open_tag',
'asp_tags', 'max_execution_time', 'max_input_time', 'memory_limit',
'post_max_size', 'file_uploads', 'upload_max_filesize', 'max_file_uploads',
'default_socket_timeout', 'error_reporting']
args = getArgs()
filename = getServerDir() + '/' + version + '/etc/php.ini'
phpini = slemp.readFile(filename)
for g in gets:
if g in args:
rep = g + '\s*=\s*(.+)\r?\n'
val = g + ' = ' + args[g] + '\n'
phpini = re.sub(rep, val, phpini)
slemp.writeFile(filename, phpini)
slemp.execShell(getServerDir() + '/init.d/php' + version + ' reload')
return slemp.returnJson(True, 'Set successfully')
def getLimitConf(version):
fileini = getServerDir() + "/" + version + "/etc/php.ini"
phpini = slemp.readFile(fileini)
filefpm = getServerDir() + "/" + version + "/etc/php-fpm.conf"
phpfpm = slemp.readFile(filefpm)
# print fileini, filefpm
data = {}
try:
rep = "upload_max_filesize\s*=\s*([0-9]+)M"
tmp = re.search(rep, phpini).groups()
data['max'] = tmp[0]
except:
data['max'] = '50'
try:
rep = "request_terminate_timeout\s*=\s*([0-9]+)\n"
tmp = re.search(rep, phpfpm).groups()
data['maxTime'] = tmp[0]
except:
data['maxTime'] = 0
try:
rep = r"\n;*\s*cgi\.fix_pathinfo\s*=\s*([0-9]+)\s*\n"
tmp = re.search(rep, phpini).groups()
if tmp[0] == '1':
data['pathinfo'] = True
else:
data['pathinfo'] = False
except:
data['pathinfo'] = False
return slemp.getJson(data)
def setMaxTime(version):
args = getArgs()
data = checkArgs(args, ['time'])
if not data[0]:
return data[1]
time = args['time']
if int(time) < 30 or int(time) > 86400:
return slemp.returnJson(False, 'Please fill in the value between 30-86400!')
filefpm = getServerDir() + "/" + version + "/etc/php-fpm.conf"
conf = slemp.readFile(filefpm)
rep = "request_terminate_timeout\s*=\s*([0-9]+)\n"
conf = re.sub(rep, "request_terminate_timeout = " + time + "\n", conf)
slemp.writeFile(filefpm, conf)
fileini = getServerDir() + "/" + version + "/etc/php.ini"
phpini = slemp.readFile(fileini)
rep = "max_execution_time\s*=\s*([0-9]+)\r?\n"
phpini = re.sub(rep, "max_execution_time = " + time + "\n", phpini)
rep = "max_input_time\s*=\s*([0-9]+)\r?\n"
phpini = re.sub(rep, "max_input_time = " + time + "\n", phpini)
slemp.writeFile(fileini, phpini)
return slemp.returnJson(True, 'Set successfully!')
def setMaxSize(version):
args = getArgs()
if not 'max' in args:
return 'missing time args!'
max = args['max']
if int(max) < 2:
return slemp.returnJson(False, 'Upload size limit cannot be less than 2MB!')
path = getServerDir() + '/' + version + '/etc/php.ini'
conf = slemp.readFile(path)
rep = u"\nupload_max_filesize\s*=\s*[0-9]+M"
conf = re.sub(rep, u'\nupload_max_filesize = ' + max + 'M', conf)
rep = u"\npost_max_size\s*=\s*[0-9]+M"
conf = re.sub(rep, u'\npost_max_size = ' + max + 'M', conf)
slemp.writeFile(path, conf)
msg = slemp.getInfo('Set PHP-{1} maximum upload size to [{2}MB]!', (version, max,))
slemp.writeLog('Plugin Management [PHP]', msg)
return slemp.returnJson(True, 'Set successfully!')
def getFpmConfig(version):
filefpm = getServerDir() + '/' + version + '/etc/php-fpm.d/www.conf'
conf = slemp.readFile(filefpm)
data = {}
rep = "\s*pm.max_children\s*=\s*([0-9]+)\s*"
tmp = re.search(rep, conf).groups()
data['max_children'] = tmp[0]
rep = "\s*pm.start_servers\s*=\s*([0-9]+)\s*"
tmp = re.search(rep, conf).groups()
data['start_servers'] = tmp[0]
rep = "\s*pm.min_spare_servers\s*=\s*([0-9]+)\s*"
tmp = re.search(rep, conf).groups()
data['min_spare_servers'] = tmp[0]
rep = "\s*pm.max_spare_servers \s*=\s*([0-9]+)\s*"
tmp = re.search(rep, conf).groups()
data['max_spare_servers'] = tmp[0]
rep = "\s*pm\s*=\s*(\w+)\s*"
tmp = re.search(rep, conf).groups()
data['pm'] = tmp[0]
return slemp.getJson(data)
def setFpmConfig(version):
args = getArgs()
# if not 'max' in args:
# return 'missing time args!'
version = args['version']
max_children = args['max_children']
start_servers = args['start_servers']
min_spare_servers = args['min_spare_servers']
max_spare_servers = args['max_spare_servers']
pm = args['pm']
file = getServerDir() + '/' + version + '/etc/php-fpm.d/www.conf'
conf = slemp.readFile(file)
rep = "\s*pm.max_children\s*=\s*([0-9]+)\s*"
conf = re.sub(rep, "\npm.max_children = " + max_children, conf)
rep = "\s*pm.start_servers\s*=\s*([0-9]+)\s*"
conf = re.sub(rep, "\npm.start_servers = " + start_servers, conf)
rep = "\s*pm.min_spare_servers\s*=\s*([0-9]+)\s*"
conf = re.sub(rep, "\npm.min_spare_servers = " +
min_spare_servers, conf)
rep = "\s*pm.max_spare_servers \s*=\s*([0-9]+)\s*"
conf = re.sub(rep, "\npm.max_spare_servers = " +
max_spare_servers + "\n", conf)
rep = "\s*pm\s*=\s*(\w+)\s*"
conf = re.sub(rep, "\npm = " + pm + "\n", conf)
slemp.writeFile(file, conf)
reload(version)
msg = slemp.getInfo('Set PHP-{1} concurrency settings,max_children={2},start_servers={3},min_spare_servers={4},max_spare_servers={5}', (version, max_children,
start_servers, min_spare_servers, max_spare_servers,))
slemp.writeLog('Plugin management [PHP]', msg)
return slemp.returnJson(True, 'Set successfully!')
def checkFpmStatusFile(version):
if slemp.isInstalledWeb():
sdir = slemp.getServerDir()
dfile = sdir + '/openresty/nginx/conf/php_status/phpfpm_status_' + version + '.conf'
if not os.path.exists(dfile):
tpl = getPluginDir() + '/conf/phpfpm_status.conf'
content = slemp.readFile(tpl)
content = contentReplace(content, version)
slemp.writeFile(dfile, content)
slemp.restartWeb()
def getFpmStatus(version):
checkFpmStatusFile(version)
result = slemp.httpGet(
'http://127.0.0.1/phpfpm_status_' + version + '?json')
tmp = json.loads(result)
fTime = time.localtime(int(tmp['start time']))
tmp['start time'] = time.strftime('%Y-%m-%d %H:%M:%S', fTime)
return slemp.getJson(tmp)
def getDisableFunc(version):
filename = slemp.getServerDir() + '/php/' + version + '/etc/php.ini'
if not os.path.exists(filename):
return slemp.returnJson(False, 'The specified PHP version does not exist!')
phpini = slemp.readFile(filename)
data = {}
rep = "disable_functions\s*=\s{0,1}(.*)\n"
tmp = re.search(rep, phpini).groups()
data['disable_functions'] = tmp[0]
return slemp.getJson(data)
def setDisableFunc(version):
filename = slemp.getServerDir() + '/php/' + version + '/etc/php.ini'
if not os.path.exists(filename):
return slemp.returnJson(False, 'The specified PHP version does not exist!')
args = getArgs()
disable_functions = args['disable_functions']
phpini = slemp.readFile(filename)
rep = "disable_functions\s*=\s*.*\n"
phpini = re.sub(rep, 'disable_functions = ' +
disable_functions + "\n", phpini)
msg = slemp.getInfo('Modify the disabled function of PHP-{1} to [{2}]', (version, disable_functions,))
slemp.writeLog('Plugin management [PHP]', msg)
slemp.writeFile(filename, phpini)
reload(version)
return slemp.returnJson(True, 'Set successfully!')
def checkPhpinfoFile(v):
    """Make sure the nginx conf serving this version's phpinfo page exists.

    Renders the plugin template and restarts the web server only when the
    conf file is missing and the web server is installed.
    """
    if not slemp.isInstalledWeb():
        return
    conf_path = (slemp.getServerDir() +
                 '/openresty/nginx/conf/php_status/phpinfo_' + v + '.conf')
    if os.path.exists(conf_path):
        return
    template = getPluginDir() + '/conf/phpinfo.conf'
    rendered = contentReplace(slemp.readFile(template), v)
    slemp.writeFile(conf_path, rendered)
    slemp.restartWeb()
def getPhpinfo(v):
    # Serve a throwaway phpinfo() page through the local web server and
    # return its HTML; the temporary directory is removed again afterwards.
    checkPhpinfoFile(v)
    sPath = slemp.getRootDir() + '/phpinfo/' + v
    # Recreate the phpinfo dir from scratch so stale files cannot leak in.
    slemp.execShell("rm -rf " + slemp.getRootDir() + '/phpinfo')
    slemp.execShell("mkdir -p " + sPath)
    slemp.writeFile(sPath + '/phpinfo.php', '<?php phpinfo(); ?>')
    url = 'http://127.0.0.1/' + v + '/phpinfo.php'
    phpinfo = slemp.httpGet(url)
    os.system("rm -rf " + slemp.getRootDir() + '/phpinfo')
    return phpinfo
def get_php_info(args):
    # Thin API wrapper: `args` carries the target PHP version.
    return getPhpinfo(args['version'])
def getLibConf(version):
    # List the PHP extensions known to the plugin for this version, flagging
    # which are already enabled in php.ini and which have a pending
    # install/uninstall background task.
    fname = slemp.getServerDir() + '/php/' + version + '/etc/php.ini'
    if not os.path.exists(fname):
        return slemp.returnJson(False, 'The specified PHP version does not exist!')
    phpini = slemp.readFile(fname)
    libpath = getPluginDir() + '/versions/phplib.conf'
    phplib = json.loads(slemp.readFile(libpath))
    libs = []
    # Unfinished tasks (status != '1'); names look like "[<lib>-<phpver>] ...".
    tasks = slemp.M('tasks').where(
        "status!=?", ('1',)).field('status,name').select()
    for lib in phplib:
        # '1' means "no pending task" until a matching task is found below.
        lib['task'] = '1'
        for task in tasks:
            tmp = slemp.getStrBetween('[', ']', task['name'])
            if not tmp:
                continue
            tmp1 = tmp.split('-')
            if tmp1[0].lower() == lib['name'].lower():
                lib['task'] = task['status']
                lib['phpversions'] = []
                lib['phpversions'].append(tmp1[1])
        # An extension counts as enabled when its check marker is in php.ini.
        if phpini.find(lib['check']) == -1:
            lib['status'] = False
        else:
            lib['status'] = True
        libs.append(lib)
    return slemp.returnJson(True, 'OK!', libs)
def installLib(version):
    # Queue a background task that runs the extension's install shell script;
    # the task runner executes `execstr` later.
    args = getArgs()
    data = checkArgs(args, ['name'])
    if not data[0]:
        return data[1]
    name = args['name']
    execstr = "cd " + getPluginDir() + '/versions/' + version + " && /bin/bash " + \
        name + '.sh' + ' install ' + version
    rettime = time.strftime('%Y-%m-%d %H:%M:%S')
    # Columns: id, name, type, status ('0' = pending), addtime, execstr.
    insert_info = (None, 'Install [' + name + '-' + version + ']',
                   'execshell', '0', rettime, execstr)
    slemp.M('tasks').add('id,name,type,status,addtime,execstr', insert_info)
    return slemp.returnJson(True, 'Added download task to queue!')
def uninstallLib(version):
    """Run the extension's uninstall script synchronously and report the result.

    Unlike installLib this does not queue a background task; the shell script
    runs immediately and empty stdout+stderr is treated as success.
    """
    args = getArgs()
    data = checkArgs(args, ['name'])
    if not data[0]:
        return data[1]
    name = args['name']
    execstr = "cd " + getPluginDir() + '/versions/' + version + " && /bin/bash " + \
        name + '.sh' + ' uninstall ' + version
    data = slemp.execShell(execstr)
    if data[0] == '' and data[1] == '':
        return slemp.returnJson(True, 'Uninstalled successfully!')
    else:
        # Bug fix: data[1] is stderr (channel 1); it was mislabelled as
        # "[Channel 0]" a second time.
        return slemp.returnJson(False, 'Uninstall info! [Channel 0]:' + data[0] + "[Channel 1]:" + data[1])
def getConfAppStart():
    """Return the path of the shared PHP app-start script."""
    return slemp.getServerDir() + '/php/app_start.php'
if __name__ == "__main__":
    # CLI entry point: <script> <command> <php-version>
    if len(sys.argv) < 3:
        # Single-argument print(...) behaves identically on Python 2 and 3,
        # unlike the Python-2-only `print x` statements used before.
        print('missing parameters')
        exit(0)
    func = sys.argv[1]
    version = sys.argv[2]
    # Dispatch table instead of a long if/elif chain.  Every handler takes
    # the PHP version, except app_start which ignores its argument.
    handlers = {
        'status': status,
        'start': start,
        'stop': stop,
        'restart': restart,
        'reload': reload,
        'initd_status': initdStatus,
        'initd_install': initdInstall,
        'initd_uninstall': initdUinstall,
        'fpm_log': fpmLog,
        'fpm_slow_log': fpmSlowLog,
        'conf': getConf,
        'app_start': lambda _v: getConfAppStart(),
        'get_php_conf': getPhpConf,
        'submit_php_conf': submitPhpConf,
        'get_limit_conf': getLimitConf,
        'set_max_time': setMaxTime,
        'set_max_size': setMaxSize,
        'get_fpm_conf': getFpmConfig,
        'set_fpm_conf': setFpmConfig,
        'get_fpm_status': getFpmStatus,
        'get_disable_func': getDisableFunc,
        'set_disable_func': setDisableFunc,
        'get_phpinfo': getPhpinfo,
        'get_lib_conf': getLibConf,
        'install_lib': installLib,
        'uninstall_lib': uninstallLib,
    }
    if func in handlers:
        print(handlers[func](version))
    else:
        print("fail")
| 32.420028 | 172 | 0.59172 |
8a2512f83aa697e3d419712733c2826c3672b075 | 2,255 | py | Python | tests/test_hbridge.py | duyenle1312/Salvius | 27d9e72dfbdaf1bc87f75dece2aacaf641468647 | [
"MIT"
] | 74 | 2015-07-11T21:13:27.000Z | 2021-09-26T01:22:05.000Z | tests/test_hbridge.py | devjewel01/Salvius | 27d9e72dfbdaf1bc87f75dece2aacaf641468647 | [
"MIT"
] | 18 | 2015-09-27T18:12:05.000Z | 2020-05-15T21:08:35.000Z | tests/test_hbridge.py | devjewel01/Salvius | 27d9e72dfbdaf1bc87f75dece2aacaf641468647 | [
"MIT"
] | 26 | 2015-08-06T00:02:47.000Z | 2021-07-23T04:33:39.000Z | from unittest import TestCase
from zorg.test import MockAdaptor
from salvius.hbridge import RelayHBridge, ServoHBridge
class HBridgeTestCase(TestCase):
    """Shared fixture: a mock adaptor exposing the write methods drivers use."""

    def setUp(self):
        super(HBridgeTestCase, self).setUp()
        # The drivers under test only call these two adaptor methods.
        self.connection = MockAdaptor({
            'methods': ['servo_write', 'digital_write']
        })
        self.options = {}
class RelayHBridgeTestCase(HBridgeTestCase):
    """Tests for the relay-based H-bridge driver."""

    def setUp(self):
        super(RelayHBridgeTestCase, self).setUp()
        # A relay H-bridge requires exactly four control pins.
        self.options['pins'] = [1, 2, 3, 4]
        self.driver = RelayHBridge(self.options, self.connection)

    def test_command_method_exists(self):
        """
        Check that each command listed has a corresponding
        method on the driver class.
        """
        for command in self.driver.commands:
            self.assertIn(command, dir(self.driver))

    def test_four_pins_not_given(self):
        self.options['pins'] = []
        with self.assertRaises(RelayHBridge.RelayException):
            RelayHBridge(self.options, self.connection)

    def test_turn_off(self):
        self.driver.turn_off()
        self.assertEqual(self.driver.state, 0)

    def test_rotate_clockwise(self):
        # Only the resulting state matters; the unused `value` local is gone.
        self.driver.rotate_clockwise()
        self.assertEqual(self.driver.state, 1)

    def test_rotate_counterclockwise(self):
        self.driver.rotate_counterclockwise()
        self.assertEqual(self.driver.state, -1)
class ServoHBridgeTestCase(HBridgeTestCase):
    """Tests for the servo-based H-bridge driver (no pin options required)."""

    def setUp(self):
        super(ServoHBridgeTestCase, self).setUp()
        self.driver = ServoHBridge(self.options, self.connection)

    def test_command_method_exists(self):
        """
        Check that each command listed has a corresponding
        method on the driver class.
        """
        for command in self.driver.commands:
            self.assertIn(command, dir(self.driver))

    def test_turn_off(self):
        self.driver.turn_off()
        self.assertEqual(self.driver.state, 0)

    def test_rotate_clockwise(self):
        # Only the resulting state matters; the unused `value` local is gone.
        self.driver.rotate_clockwise()
        self.assertEqual(self.driver.state, 1)

    def test_rotate_counterclockwise(self):
        self.driver.rotate_counterclockwise()
        self.assertEqual(self.driver.state, -1)
13a8e92f7421fd4eea77f2ac3b1c71dee67066a9 | 1,637 | py | Python | src/bitcoin_acks/migrations/versions/ac1f880ead3a_add_high_priority_fields.py | benthecarman/wasabi-acks | e9663d845e8f63f06e5e49737966fafa5e8a1eb4 | [
"MIT"
] | 43 | 2018-04-29T03:30:18.000Z | 2021-02-11T05:24:49.000Z | src/bitcoin_acks/migrations/versions/ac1f880ead3a_add_high_priority_fields.py | benthecarman/wasabi-acks | e9663d845e8f63f06e5e49737966fafa5e8a1eb4 | [
"MIT"
] | 46 | 2018-05-02T01:27:34.000Z | 2022-03-26T13:29:55.000Z | src/bitcoin_acks/migrations/versions/ac1f880ead3a_add_high_priority_fields.py | benthecarman/wasabi-acks | e9663d845e8f63f06e5e49737966fafa5e8a1eb4 | [
"MIT"
] | 11 | 2018-05-15T23:47:47.000Z | 2021-01-27T14:57:54.000Z | """add high priority fields
Revision ID: ac1f880ead3a
Revises: 7bb40ce4cd80
Create Date: 2019-06-11 20:16:46.699366
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ac1f880ead3a'
down_revision = '7bb40ce4cd80'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # New table tracking toots announcing pull requests (one toot per PR).
    op.create_table('toots',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('timestamp', sa.DateTime(), nullable=False),
    sa.Column('pull_request_id', sa.Integer(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('pull_request_id')
    )
    # Timestamps recording when a PR entered/left the high-priority list.
    op.add_column('pull_requests', sa.Column('added_to_high_priority', sa.DateTime(), nullable=True))
    op.add_column('pull_requests', sa.Column('removed_from_high_priority', sa.DateTime(), nullable=True))
    op.add_column('pull_requests', sa.Column('toot_id', sa.Integer(), nullable=True))
    # None -> let the dialect's naming convention name the constraint.
    op.create_unique_constraint(None, 'pull_requests', ['toot_id'])
    op.drop_column('pull_requests', 'head_repository_url')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('pull_requests', sa.Column('head_repository_url', sa.VARCHAR(), autoincrement=False, nullable=True))
    # NOTE(review): drop_constraint with name=None is auto-generated
    # boilerplate — Alembic needs the real constraint name here; verify this
    # downgrade actually runs against the target database.
    op.drop_constraint(None, 'pull_requests', type_='unique')
    op.drop_column('pull_requests', 'toot_id')
    op.drop_column('pull_requests', 'removed_from_high_priority')
    op.drop_column('pull_requests', 'added_to_high_priority')
    op.drop_table('toots')
    # ### end Alembic commands ###
| 36.377778 | 118 | 0.717166 |
a30a81664b095d89a9bcae0c1456883d5e0516c6 | 546 | py | Python | .history/postImages/index_20201006184017.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | 2 | 2020-10-21T22:14:15.000Z | 2020-10-21T22:14:16.000Z | .history/postImages/index_20201006184017.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | null | null | null | .history/postImages/index_20201006184017.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | null | null | null | import csv
import requests
# Load the bridge records, skipping the CSV header row.
df = open("bridgeData3.csv", 'r').readlines()
url = "https://b2ptc.herokuapp.com/bridges/"
finalCsv = df[1:]

# Index rows by bridge id (first column), keeping the project code and the
# before/after image URLs.  The original work-in-progress snapshot had two
# syntax errors here (`obj - {}` and an unfinished dict literal) and indexed
# characters of the row string instead of splitting it into fields.
obj = {}
for row in finalCsv:
    fields = row.strip().split(',')
    obj[fields[0]] = {
        'projectCode': fields[1],
        'before_image': fields[2],
        'after_image': fields[3],
    }
print(finalCsv)
# for i in finalCsv:
#     x = i.split(',')
#     requests.put(url + x[0], data={'before': x[2], 'after': x[3]})
# pull each id,before image and after from df
# for each data item do a put request with the id as the param id
# and then put the before and after image in an dict and place it as the data for the put request
3c237374920407f2103ab40169b97633a0b41364 | 3,273 | py | Python | test/srcchange.py | kurazsi/scons | 348f8d81b6aa49d8798eac268700806d121c5f4f | [
"MIT"
] | 1 | 2020-03-21T05:24:47.000Z | 2020-03-21T05:24:47.000Z | test/srcchange.py | kurazsi/scons | 348f8d81b6aa49d8798eac268700806d121c5f4f | [
"MIT"
] | null | null | null | test/srcchange.py | kurazsi/scons | 348f8d81b6aa49d8798eac268700806d121c5f4f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test changing the C source files based on an always-executed revision
extraction and substitution.
This makes sure we evaluate the content of intermediate files as
expected. This relies on the default behavior being the equivalent
of Decider('content').
"""
import os.path
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('getrevision', r"""
from __future__ import print_function
with open('revnum.in', 'r') as f:
print(f.read().strip(), end='')
""")
test.write('SConstruct', r"""
import re
def subrevision(target, source ,env):
orig = target[0].get_text_contents()
new = re.sub(r'\$REV.*?\$',
'$REV: %%s$'%%source[0].get_text_contents().strip(),
target[0].get_text_contents())
with open(str(target[0]),'w') as outf:
outf.write(new)
SubRevision = Action(subrevision)
env=Environment()
content_env=env.Clone()
content_env.Command('revision.in', [], r'%(_python_)s getrevision > $TARGET')
content_env.AlwaysBuild('revision.in')
env.Precious('main.c')
env.Command('main.c', 'revision.in', SubRevision)
exe = env.Program('main.c')
env.Default(exe)
""" % locals())
test.write('main.c', r"""
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
int
main(int argc, char *argv[])
{
printf("Revision $REV$\n");
exit (0);
}
""", mode='w')
test.write('revnum.in', '3.2\n')
program_name = 'main' + TestSCons._exe
light_build = test.wrap_stdout("""\
%(_python_)s getrevision > revision.in
""" % locals())
test.run(arguments='.')
test.must_exist(program_name)
test.run(program=test.workpath(program_name), stdout='Revision $REV: 3.2$\n')
test.run(arguments='.', stdout=light_build)
test.must_exist(program_name)
test.run(arguments='.', stdout=light_build)
test.must_exist(program_name)
test.write('revnum.in', '3.3\n', mode='w')
test.run(arguments='.')
test.must_exist(program_name)
test.run(program=test.workpath(program_name), stdout='Revision $REV: 3.3$\n')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 27.974359 | 77 | 0.721051 |
a1f886dfcc028820a3a57c16c5976e23eb147fbf | 11,491 | py | Python | tests/scripts/thread-cert/border_routing/test_dnssd_server.py | lab11/openthread | 3bb025a7924b8f3281445f3c6c500230f0d41784 | [
"BSD-3-Clause"
] | null | null | null | tests/scripts/thread-cert/border_routing/test_dnssd_server.py | lab11/openthread | 3bb025a7924b8f3281445f3c6c500230f0d41784 | [
"BSD-3-Clause"
] | 5 | 2020-08-31T04:22:48.000Z | 2021-01-26T11:54:29.000Z | tests/scripts/thread-cert/border_routing/test_dnssd_server.py | lab11/openthread | 3bb025a7924b8f3281445f3c6c500230f0d41784 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ipaddress
import json
import logging
import unittest
import config
import thread_cert
# Test description:
# This test verifies DNS-SD server works on a Duckhorn BR and is accessible from a Host.
#
# Topology:
# ----------------(eth)--------------------
# | |
# BR1 (Leader, Server) HOST
# / \
# CLIENT1 CLIENT2
# Node ids used by the TOPOLOGY below; the border router doubles as the
# SRP/DNS-SD server.
SERVER = BR1 = 1
CLIENT1, CLIENT2 = 2, 3
HOST = 4
# DNS queries ("dig") are issued from the non-Thread host on the infra link.
DIGGER = HOST

DOMAIN = 'default.service.arpa.'
SERVICE = '_testsrv._udp'
SERVICE_FULL_NAME = f'{SERVICE}.{DOMAIN}'

VALID_SERVICE_NAMES = [
    '_abc._udp.default.service.arpa.',
    '_abc._tcp.default.service.arpa.',
]

# Malformed/unknown names that the server must answer with NXDOMAIN.
WRONG_SERVICE_NAMES = [
    '_testsrv._udp.default.service.xxxx.',
    '_testsrv._txp,default.service.arpa.',
]
class TestDnssdServerOnBr(thread_cert.TestCase):
    """Verify the DNS-SD server on a border router is reachable from a host.

    Two SRP clients register services with the BR; the host then issues
    PTR/SRV/TXT/AAAA queries against the BR and the answers are checked.
    """
    USE_MESSAGE_FACTORY = False

    TOPOLOGY = {
        BR1: {
            'name': 'SERVER',
            'is_otbr': True,
            'version': '1.2',
            'router_selection_jitter': 1,
        },
        CLIENT1: {
            'name': 'CLIENT1',
            'router_selection_jitter': 1,
        },
        CLIENT2: {
            'name': 'CLIENT2',
            'router_selection_jitter': 1,
        },
        HOST: {
            'name': 'Host',
            'is_host': True
        },
    }

    def test(self):
        # Bring up the host, the border router (leader + SRP server) and the
        # two clients, in that order.
        self.nodes[HOST].start(start_radvd=False)
        self.simulator.go(5)

        self.nodes[BR1].start()
        self.simulator.go(5)
        self.assertEqual('leader', self.nodes[BR1].get_state())
        self.nodes[SERVER].srp_server_set_enabled(True)

        self.nodes[CLIENT1].start()
        self.simulator.go(5)
        self.assertEqual('router', self.nodes[CLIENT1].get_state())

        self.nodes[CLIENT2].start()
        self.simulator.go(5)
        self.assertEqual('router', self.nodes[CLIENT2].get_state())

        self.simulator.go(10)

        # Router1 can ping to/from the Host on infra link.
        self.assertTrue(self.nodes[BR1].ping(self.nodes[HOST].get_ip6_address(config.ADDRESS_TYPE.ONLINK_ULA)[0],
                                             backbone=True))
        self.assertTrue(self.nodes[HOST].ping(self.nodes[BR1].get_ip6_address(config.ADDRESS_TYPE.OMR)[0],
                                              backbone=True))

        # Register one service per client, each host advertising its ML-EID
        # and OMR address.
        client1_addrs = [
            self.nodes[CLIENT1].get_mleid(), self.nodes[CLIENT1].get_ip6_address(config.ADDRESS_TYPE.OMR)[0]
        ]
        self._config_srp_client_services(CLIENT1, 'ins1', 'host1', 11111, 1, 1, client1_addrs)

        client2_addrs = [
            self.nodes[CLIENT2].get_mleid(), self.nodes[CLIENT2].get_ip6_address(config.ADDRESS_TYPE.OMR)[0]
        ]
        self._config_srp_client_services(CLIENT2, 'ins2', 'host2', 22222, 2, 2, client2_addrs)

        ins1_full_name = f'ins1.{SERVICE_FULL_NAME}'
        ins2_full_name = f'ins2.{SERVICE_FULL_NAME}'
        host1_full_name = f'host1.{DOMAIN}'
        host2_full_name = f'host2.{DOMAIN}'
        server_addr = self.nodes[SERVER].get_ip6_address(config.ADDRESS_TYPE.OMR)[0]

        # check if PTR query works
        dig_result = self.nodes[DIGGER].dns_dig(server_addr, SERVICE_FULL_NAME, 'PTR')

        self._assert_dig_result_matches(
            dig_result, {
                'QUESTION': [(SERVICE_FULL_NAME, 'IN', 'PTR')],
                'ANSWER': [(SERVICE_FULL_NAME, 'IN', 'PTR', f'ins1.{SERVICE_FULL_NAME}'),
                           (SERVICE_FULL_NAME, 'IN', 'PTR', f'ins2.{SERVICE_FULL_NAME}')],
                'ADDITIONAL': [
                    (ins1_full_name, 'IN', 'SRV', 1, 1, 11111, host1_full_name),
                    (ins1_full_name, 'IN', 'TXT', '""'),
                    (host1_full_name, 'IN', 'AAAA', client1_addrs[0]),
                    (host1_full_name, 'IN', 'AAAA', client1_addrs[1]),
                    (ins2_full_name, 'IN', 'SRV', 2, 2, 22222, host2_full_name),
                    (ins2_full_name, 'IN', 'TXT', '""'),
                    (host2_full_name, 'IN', 'AAAA', client2_addrs[0]),
                    (host2_full_name, 'IN', 'AAAA', client2_addrs[1]),
                ],
            })

        # check if SRV query works
        dig_result = self.nodes[DIGGER].dns_dig(server_addr, ins1_full_name, 'SRV')
        self._assert_dig_result_matches(
            dig_result, {
                'QUESTION': [(ins1_full_name, 'IN', 'SRV')],
                'ANSWER': [(ins1_full_name, 'IN', 'SRV', 1, 1, 11111, host1_full_name),],
                'ADDITIONAL': [
                    (host1_full_name, 'IN', 'AAAA', client1_addrs[0]),
                    (host1_full_name, 'IN', 'AAAA', client1_addrs[1]),
                ],
            })

        dig_result = self.nodes[DIGGER].dns_dig(server_addr, ins2_full_name, 'SRV')
        self._assert_dig_result_matches(
            dig_result, {
                'QUESTION': [(ins2_full_name, 'IN', 'SRV')],
                'ANSWER': [(ins2_full_name, 'IN', 'SRV', 2, 2, 22222, host2_full_name),],
                'ADDITIONAL': [
                    (host2_full_name, 'IN', 'AAAA', client2_addrs[0]),
                    (host2_full_name, 'IN', 'AAAA', client2_addrs[1]),
                ],
            })

        # check if TXT query works
        dig_result = self.nodes[DIGGER].dns_dig(server_addr, ins1_full_name, 'TXT')
        self._assert_dig_result_matches(dig_result, {
            'QUESTION': [(ins1_full_name, 'IN', 'TXT')],
            'ANSWER': [(ins1_full_name, 'IN', 'TXT', '""'),],
        })

        dig_result = self.nodes[DIGGER].dns_dig(server_addr, ins2_full_name, 'TXT')
        self._assert_dig_result_matches(dig_result, {
            'QUESTION': [(ins2_full_name, 'IN', 'TXT')],
            'ANSWER': [(ins2_full_name, 'IN', 'TXT', '""'),],
        })

        # check if AAAA query works
        dig_result = self.nodes[DIGGER].dns_dig(server_addr, host1_full_name, 'AAAA')
        self._assert_dig_result_matches(
            dig_result, {
                'QUESTION': [(host1_full_name, 'IN', 'AAAA'),],
                'ANSWER': [
                    (host1_full_name, 'IN', 'AAAA', client1_addrs[0]),
                    (host1_full_name, 'IN', 'AAAA', client1_addrs[1]),
                ],
            })

        dig_result = self.nodes[DIGGER].dns_dig(server_addr, host2_full_name, 'AAAA')
        self._assert_dig_result_matches(
            dig_result, {
                'QUESTION': [(host2_full_name, 'IN', 'AAAA'),],
                'ANSWER': [
                    (host2_full_name, 'IN', 'AAAA', client2_addrs[0]),
                    (host2_full_name, 'IN', 'AAAA', client2_addrs[1]),
                ],
            })

        # check some invalid queries
        for qtype in ['A', 'CNAME']:
            dig_result = self.nodes[DIGGER].dns_dig(server_addr, host1_full_name, qtype)
            self._assert_dig_result_matches(dig_result, {
                'status': 'NOTIMP',
                'QUESTION': [(host1_full_name, 'IN', qtype)],
            })

        for service_name in WRONG_SERVICE_NAMES:
            dig_result = self.nodes[DIGGER].dns_dig(server_addr, service_name, 'PTR')
            self._assert_dig_result_matches(dig_result, {
                'status': 'NXDOMAIN',
                'QUESTION': [(service_name, 'IN', 'PTR')],
            })

    def _config_srp_client_services(self, client, instancename, hostname, port, priority, weight, addrs):
        # Register `instancename.SERVICE` for `hostname` via SRP and wait for
        # the registration to be acknowledged.
        self.nodes[client].netdata_show()
        srp_server_port = self.nodes[client].get_srp_server_port()

        self.nodes[client].srp_client_start(self.nodes[SERVER].get_mleid(), srp_server_port)
        self.nodes[client].srp_client_set_host_name(hostname)
        self.nodes[client].srp_client_set_host_address(*addrs)
        self.nodes[client].srp_client_add_service(instancename, SERVICE, port, priority, weight)

        self.simulator.go(5)
        self.assertEqual(self.nodes[client].srp_client_get_host_state(), 'Registered')

    def _assert_have_question(self, dig_result, question):
        self.assertIn(question, dig_result['QUESTION'], (question, dig_result))

    def _assert_have_answer(self, dig_result, record, additional=False):
        # A record "matches" when it equals an answer with its TTL removed;
        # AAAA values are compared as parsed addresses, not strings.
        for dig_answer in dig_result['ANSWER' if not additional else 'ADDITIONAL']:
            dig_answer = list(dig_answer)
            dig_answer[1:2] = []  # remove TTL from answer

            record = list(record)

            # convert IPv6 addresses to `ipaddress.IPv6Address` before matching
            if dig_answer[2] == 'AAAA':
                dig_answer[3] = ipaddress.IPv6Address(dig_answer[3])

            if record[2] == 'AAAA':
                record[3] = ipaddress.IPv6Address(record[3])

            if dig_answer == record:
                return

        self.fail((record, dig_result))

    def _assert_dig_result_matches(self, dig_result, expected_result):
        # Compare status/opcode, then section sizes, then each expected record.
        self.assertEqual(dig_result['opcode'], expected_result.get('opcode', 'QUERY'), dig_result)
        self.assertEqual(dig_result['status'], expected_result.get('status', 'NOERROR'), dig_result)

        self.assertEqual(len(dig_result['QUESTION']), len(expected_result.get('QUESTION', [])), dig_result)
        self.assertEqual(len(dig_result['ANSWER']), len(expected_result.get('ANSWER', [])), dig_result)
        self.assertEqual(len(dig_result['ADDITIONAL']), len(expected_result.get('ADDITIONAL', [])), dig_result)

        for question in expected_result.get('QUESTION', []):
            self._assert_have_question(dig_result, question)

        for record in expected_result.get('ANSWER', []):
            self._assert_have_answer(dig_result, record, additional=False)

        for record in expected_result.get('ADDITIONAL', []):
            self._assert_have_answer(dig_result, record, additional=True)

        logging.info("dig result matches:\r%s", json.dumps(dig_result, indent=True))
# Allow running this scenario directly as a script.
if __name__ == '__main__':
    unittest.main()
| 40.748227 | 113 | 0.605604 |
27c74f26ed9b06ca9e4dc6f6b59dd58490f44126 | 545 | py | Python | setup.py | baidalala/PAV-Healthcare | 3bcd2fe0a272643b480e538a836827c41ba84445 | [
"MIT"
] | null | null | null | setup.py | baidalala/PAV-Healthcare | 3bcd2fe0a272643b480e538a836827c41ba84445 | [
"MIT"
] | null | null | null | setup.py | baidalala/PAV-Healthcare | 3bcd2fe0a272643b480e538a836827c41ba84445 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in pav_healthcare/__init__.py
from pav_healthcare import __version__ as version
setup(
name='pav_healthcare',
version=version,
description='PAV Healthcare',
author='Partner Consulting Solutions',
author_email='baida@partner-cons.com',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 25.952381 | 69 | 0.776147 |
2be4ccf08a199832f2683715bb513b38bcba5a2b | 2,862 | py | Python | deepnlpf/helpers/mongodb.py | deepnlpf/deepnlpf | 6508ab1e8fd395575d606ee20223f25591541e25 | [
"Apache-2.0"
] | 3 | 2020-04-11T14:12:45.000Z | 2020-05-30T16:31:06.000Z | deepnlpf/helpers/mongodb.py | deepnlpf/deepnlpf | 6508ab1e8fd395575d606ee20223f25591541e25 | [
"Apache-2.0"
] | 34 | 2020-03-20T19:36:40.000Z | 2022-03-20T13:00:32.000Z | deepnlpf/helpers/mongodb.py | deepnlpf/deepnlpf | 6508ab1e8fd395575d606ee20223f25591541e25 | [
"Apache-2.0"
] | 1 | 2020-09-05T06:44:15.000Z | 2020-09-05T06:44:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Path: deepnlpf/conn/
File: mongodb.py
Class: ConnectMongoDB
Description:
Date: 23/03/2019
"""
from mongoengine import *
from pymongo import MongoClient
from deepnlpf.config import database
class ConnectMongoDB(object):
    """Thin wrapper around a pymongo connection for CRUD on one database.

    All methods follow this module's convention of returning the caught
    exception object (rather than raising) when an operation fails.
    """

    def __init__(self):
        # maxPoolSize bounds the number of concurrent sockets to the server.
        self.client = MongoClient(database.DB['hostname'], database.DB['port'], maxPoolSize=200)
        #self.client.authenticate(database.DB['username'], database.DB['password'])
        self.db = self.client[database.DB['database']]
        self.conn()

    def conn(self):
        """Return the database handle."""
        return self.db

    def insert_document(self, collection_name, document):
        """Insert one document and return its generated _id.

        @param collection_name
        @param document
        @return document_id (or the exception on failure)
        """
        try:
            collection = self.db[collection_name]
            return collection.insert_one(document).inserted_id
        except Exception as err:
            return err

    def select_document(self, collection_name, key):
        """Return the first document matching ``key``, or False if none.

        @param collection_name
        @param key
        """
        try:
            result = self.db[collection_name].find_one(key)
            if result:
                return result
            return False
        except Exception as err:
            return err

    def select_document_all_key(self, collection_name, key):
        """Return all documents matching ``key`` as a list.

        @param collection_name
        @param key
        """
        try:
            # Materialize the cursor directly instead of an append loop.
            return list(self.db[collection_name].find(key))
        except Exception as err:
            return err

    def select_document_all(self, collection_name):
        """Return every document in the collection as a list."""
        try:
            return list(self.db[collection_name].find())
        except Exception as err:
            return err

    def update(self, collection_name, key, document):
        """Update the first document matching ``key``; return pymongo's result.

        @param collection_name
        @param key
        @param document
        """
        try:
            return self.db[collection_name].update_one(key, document)
        except Exception as err:
            return err

    def delete(self, collection_name, key):
        """Delete the first document matching ``key``; return pymongo's result.

        @param collection_name
        @param key
        """
        try:
            return self.db[collection_name].delete_one(key)
        except Exception as err:
            return err
| 24.461538 | 96 | 0.540881 |
15d530d59739f46f7d5f42f5827305073976203a | 924 | py | Python | novaclient/tests/functional/v2/test_fixedips.py | alvarolopez/python-novaclient | ef7cb1d44d47a1273810603fd96d982d7f0bd7d6 | [
"Apache-1.1"
] | null | null | null | novaclient/tests/functional/v2/test_fixedips.py | alvarolopez/python-novaclient | ef7cb1d44d47a1273810603fd96d982d7f0bd7d6 | [
"Apache-1.1"
] | null | null | null | novaclient/tests/functional/v2/test_fixedips.py | alvarolopez/python-novaclient | ef7cb1d44d47a1273810603fd96d982d7f0bd7d6 | [
"Apache-1.1"
] | null | null | null | # Copyright 2015 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.tests.functional.v2.legacy import test_fixedips
class TestFixedIPsNovaClientV24(test_fixedips.TestFixedIPsNovaClient):
    """FixedIPs functional tests for v2.4 nova-api microversion."""

    COMPUTE_API_VERSION = '2.4'

    def test_fixedip_get(self):
        # With microversion 2.4 the test expects the 'reserved' field in the
        # fixed IP details (expect_reserved=True), unlike the legacy base test.
        self._test_fixedip_get(expect_reserved=True)
| 38.5 | 78 | 0.745671 |
84791d9dd8426753ced1361f6eacf7576e446a76 | 8,115 | py | Python | xpf/__init__.py | dooglewoogle/xpf | 46b38f8096f22ae2e5c00e86b57f190b3c8b8249 | [
"MIT"
] | 1 | 2020-06-17T10:20:27.000Z | 2020-06-17T10:20:27.000Z | xpf/__init__.py | dooglewoogle/xpf | 46b38f8096f22ae2e5c00e86b57f190b3c8b8249 | [
"MIT"
] | 1 | 2021-01-15T02:06:44.000Z | 2021-01-15T02:06:44.000Z | xpf/__init__.py | dooglewoogle/xpf | 46b38f8096f22ae2e5c00e86b57f190b3c8b8249 | [
"MIT"
] | 3 | 2019-09-27T08:51:02.000Z | 2021-01-17T20:32:21.000Z | """
#Overview
Xpf is an interface to perforce which offers the following benefits:
* Pure python, no compiled dependencies (allow for use in Python 2, Python 3, Max, Maya & Motion Builder etc)
* Failsafe calls - this module will not raise errors if the perforce server is inaccessible.
* PyPi distribution making it easier for sharing tools with outsourcers
* Easy form injection
#Why not P4Python
Perforce offers its own p4Python module which is a well established and
stable library.
The downside to P4Python is that it's a compiled library and therefore requires
different distributions if you're jumping between Python 2.x and Python 3.x
This is further exasperated when wanting to utilise perforce integrations into
tools built within embedded interpreters such as Autodesk Maya/Max/Motionbuilder
which are all compiled against different compiler versions than the native
python distributions.
The other benefit xpf brings is that its has a soft requirement on perforce.
It's commonplace to want to embed perforce support into tools within
applications such as Max/Maya but it opens the challenge of allowing your tools
to still operate outside of your studio environments when the Perforce server
is inaccessible.
Xpf resolves this by ensuring all the perforce calls can return default
variable types in circumstances where the Perforce server is unreachable. This
allows for tools to operate outside the main studio environment, and is made
easier by xpf being freely accessible on PyPi.
##Xpf Direct
The library tries to make it easy to use for those that are used to utilising
P4Python or the perforce command line. With that in mind you can utilise the
the ```xpf.direct``` module which mimics the types of calls and interface of
P4Python.
In this regard you're given access to functions for each perforce command and
are able to pass in any arguments you want to give. As per P4Python conventions
you will be returned a list of dictionaries representing the results.
Some examples of this would be:
```python
import xpf
# -- Sync to the head revision of a directory
xpf.direct.sync(
[
'/usr/my_files/...'
]
)
# -- Force sync, passing the usual perforce argument switches
xpf.direct.sync(
[
'/usr/my_files/...'
],
'-f',
)
```
As well as the defined functions (which are auto-generated) there is also a
more generic run function which you can utilise directly:
```python
import xpf
# -- Sync to the head revision of a directory
result = xpf.direct.run(
'describe',
13569, # -- Change list to describe
'Os', # -- Shorten output by excluding client workspace data
'Rc', # -- Limit output to files mapped into the current workspace.
)
```
Using ```xpf.direct``` should feel very familiar to anyone who has utilised
P4Python or the perforce commandline.
## Xpf Assist
Working at the ```xpf.direct``` level makes a lot of sense in a lot of
situations, however there are various circumstances which call for multiple
queries to be carried out in order to answer slightly higher level questions.
Examples of these might be to add a file to a changelist regardless of whether
its an add or edit operation. Another example might be where you want to manage
the changelist descriptions a little easier.
The ```xpf.assist``` module aims to give higher level functionality whereby the
function in question will carry out multiple calls and wrangle data to solve
a particular request.
Examples of these are:
```python
import xpf
# -- Given a changelist description, find the most likely changelist
# -- number for the current user. In this case, if that changelist
# -- does not exist it will be created for you
result = xpf.assist.get_changelist('My change description')
```
The following example exposes a method of submitting which forces all
files being submitted to be added to a changelist with the supplied
description and submitted together:
```python
import xpf
xpf.assist.submit_files(
[
'/usr/my_files/...'
],
description='My submission description',
)
```
## Xpf Variables
Xpf works at a module level. It is not class based and it wraps the perforce
command line. With this in mind it has some variables which are considered
global, which are queried for on the first run (based on p4 set) but can be
altered by you.
```python
import xpf
# -- Get the host
host = xpf.variables.get_host()
# -- Set the host to something specific
xpf.variables.set_host(host)
```
Variables which can be retrieved and set in this way include:
* host
* client
* user
* port (server)
* timeout
A special variable which can be turned on/off is the `debugging` variable.
When debugging is turned on xpf will print every command line it's about to
process in the final format its constructed in. This is particularly useful
if you're getting a result you do not expect and want to recreate the steps
using the commandline.
To enable this option you do:
```python
import xpf
xpf.variables.set_debug(True)
```
## Failsafes
One of the big advantages of xpf is that includes in-built failsafe mechanisms
to protect functionality whenever the server is in-accessible. During the first
xpf call (regardless of whether that is through ```xpf.direct``` or
```xpf.assist```) a ```p4 info``` query is run. If this times out then the
xpf variable is set to mark the server as inaccessible.
When a server is inaccessible all functions will return a default value which
is defined by their failsafe decorator. This allows your code to continue
without having to handle server failure directly.
In many situations when calling functions within ```xpf.direct``` they will
return an empty list upon server failure - this is because their functions
usually return a list. With that in mind, whilst you don't have to handle server
failure you should handle being given empty data of the correct (expected)
type.
A good example of this would be:
```python
description = xpf.direct.describe(13569)
if not description:
pass
...
```
In the example above we do have to handle not being given a description but
we do not have to handle unexpected exceptions because of an inaccessible
perforce server.
Fundamentally this means you can safely embed an xpf dependency in your tool
to give a rich user experience knowing that the tool will work even if its taken
off-site.
## Timeouts
Xpf offers the ability to define a timeout on all perforce queries. By default
this timeout is exposed through ```xpf.set_timeout(value)``` and is defaulted
to one second. If your server is particularly slow you can use that call to
increase the global timeout.
Alternatively, you can set the timeout on a per-call basis too. This is
particularly useful when you know your call will take a longer than expected
time. This is done with the `timeout` argument as shown here:
```python
import xpf
xpf.direct.info(timeout=10)
```
## Marshalling
By default all queries run through xpf are marshaled, and therefore return
pythonic objects. If you want raw output (strings) rather than lists of
dictionaries you can set the marshaling to false on a per call basis as
shown here:
```python
string_dump = xpf.direct.run('set', marshal=False) # -- (Equivalent to p4 set)
```
## Forms
Forms are used in perforce to deliver multiple pieces of user input. When
running perforce through the commandline this typically pops open a text
editor - which is not particularly useful when interacting with perforce
via a python library.
Therefore all forms that are requested by a particular p4 command can be
given in the form of a dictionary. This is shown here:
```python
import xpf
result = xpf.direct.changelist(
'-i',
form={
'Change': 'new',
'Status': 'new',
'Description': 'My New Changelist Description',
},
**kwargs
)
```
# Compatibility
Xpf has been tested under Python 2.7 and Python 3.7 on Windows.
"""
from . import assist
from . import direct
from . import contexts
from . import variables
from . import connection
| 30.507519 | 113 | 0.754405 |
62eb06670ccc32cdd015f000af42037d10f4f016 | 520 | py | Python | ratter/tests/test_stack.py | fa-me/ratter | dce438b047d818aca92d47aa72a0268d48aab2d9 | [
"BSD-3-Clause"
] | null | null | null | ratter/tests/test_stack.py | fa-me/ratter | dce438b047d818aca92d47aa72a0268d48aab2d9 | [
"BSD-3-Clause"
] | null | null | null | ratter/tests/test_stack.py | fa-me/ratter | dce438b047d818aca92d47aa72a0268d48aab2d9 | [
"BSD-3-Clause"
] | null | null | null | import unittest
from ratter.material import Material
from ratter.stack import Layerstack
from ratter.layer import Layer
class LayerstackTests(unittest.TestCase):
    """Basic construction tests for :class:`Layerstack`."""

    def setUp(self):
        # Build a minimal two-layer stack (air over water) for every test.
        top = Layer("layer1", Material("air"))
        bottom = Layer("layer2", Material("water"))
        self.layerstack = Layerstack([top, bottom])

    def test_layerstack_exists(self):
        # A freshly built stack must expose its layer list.
        assert self.layerstack.layers is not None

    def test_transfer_matrix_exists(self):
        # A freshly built stack must be able to produce a transfer matrix.
        assert self.layerstack.transfer_matrix() is not None
69b0b4d551f1371e3761671b529ac5d0f70690f6 | 9,264 | py | Python | financeAPI/financeAPI.py | tirthajyoti/Finance-with-Python | d64d0cbec3533ef93e506c9097ca91ac610eb531 | [
"MIT"
] | 63 | 2020-06-07T04:00:17.000Z | 2022-03-29T08:59:01.000Z | financeAPI/financeAPI.py | vegatek/Finance-with-Python | d64d0cbec3533ef93e506c9097ca91ac610eb531 | [
"MIT"
] | 3 | 2020-06-14T11:54:36.000Z | 2022-02-26T16:03:30.000Z | financeAPI/financeAPI.py | vegatek/Finance-with-Python | d64d0cbec3533ef93e506c9097ca91ac610eb531 | [
"MIT"
] | 63 | 2020-06-06T21:43:16.000Z | 2022-03-21T11:57:58.000Z | """
A finance API class using the API from https://financialmodelingprep.com/
NOTE: You need to register on their website to obtain your own secret API key
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
from urllib.request import urlopen
class FinanceAPI():
"""
Core class
"""
def __init__(self):
"""
Initiates the object
"""
self.key = ''
self.key_registered = False
pass
def registerKey_(self,key):
"""
Registers a secret API key to the object
"""
assert type(key) is str,"Key must be a string"
self.key = str(key)
self.key_registered = True
def __repr__(self):
return ("This is a finance API class.\n")
def profile_data_(self,symbol):
"""
Pulls the metrics data from the API for the given ticker symbol
Parameters
----------
symbol : A ticker symbol (str) e.g. 'MSFT','FB','AAPL', or 'TWTR'
Returns
-------
None. Updates the self.profile with the data.
"""
if not self.key_registered:
print("API key not registered yet.")
return None
url = "https://financialmodelingprep.com/api/v3/"+\
"company/profile/"+str(symbol)+'?apikey='+self.key
response = urlopen(url)
data = response.read().decode("utf-8")
self.profile = json.loads(data)
def metrics_data_(self,symbol):
"""
Pulls the metrics data from the API for the given ticker symbol
Parameters
----------
symbol : A ticker symbol (str) e.g. 'MSFT','FB','AAPL', or 'TWTR'
Returns
-------
None. Updates the self.metrics with the data.
"""
if not self.key_registered:
print("API key not registered yet.")
return None
url = "https://financialmodelingprep.com/api/v3/"+\
"company-key-metrics/"+str(symbol)+'?apikey='+self.key
response = urlopen(url)
data = response.read().decode("utf-8")
self.metrics = json.loads(data)
def ratios_data_(self,symbol):
"""
Pulls the ratios data from the API for the given ticker symbol
Parameters
----------
symbol : A ticker symbol (str) e.g. 'MSFT','FB','AAPL', or 'TWTR'
Returns
-------
None. Updates the self.ratios with the data.
"""
if not self.key_registered:
print("API key not registered yet.")
return None
url = "https://financialmodelingprep.com/api/v3/"+\
"ratios/"+str(symbol)+'?apikey='+self.key
response = urlopen(url)
data = response.read().decode("utf-8")
self.ratios = json.loads(data)
def build_dict(self,symbol):
"""
Builds a dictionary with a given ticker symbols
Parameters
----------
symbol : A ticker symbol (str) e.g. 'MSFT','FB','AAPL', or 'TWTR'
Returns
-------
A dictionary with all the profile and metrics data pulled from the API
"""
# Pull data
if not self.key_registered:
print("API key not registered yet.")
return None
self.profile_data_(symbol)
self.metrics_data_(symbol)
self.ratios_data_(symbol)
# Empty dict
data_dict = {}
# Symbol
data_dict['symbol']=symbol
# Profile data
for k in self.profile['profile'].keys():
data_dict[k]=self.profile['profile'][k]
# Metrics data
for k in self.metrics['metrics'][0].keys():
data_dict[k]=self.metrics['metrics'][0][k]
# Ratios data
c=[(k,v) for k,v in self.ratios[0].items()]
for k in c[2:]:
data_dict[k[0]]=k[1]
return data_dict
def available_data(self,data_type='profile'):
"""
Prints a list of data items that are available
Arguments
----------
data_type: One of 'profile','metrics',or 'ratios'
Returns
--------
None. Prints a list.
"""
assert str(data_type) in ['profile','metrics','ratios'], \
"Data type not recognized. Should be one of \'profile\',\'metrics\',or \'ratios\'"
available_data = []
if data_type=='profile':
for k in self.profile['profile'].keys():
if self.profile['profile'][k] is not None:
available_data.append(k)
print("Following data are available for profile")
print("="*60)
for d in available_data[:-1]:
print(d,end=', ')
print(available_data[-1],end='.')
if data_type=='metrics':
for k in self.metrics['metrics'][0].keys():
if self.metrics['metrics'][0][k] is not None:
available_data.append(k)
print("Following data are available for metrics")
print("="*60)
for d in available_data[:-1]:
print(d,end=', ')
print(available_data[-1],end='.')
if data_type=='ratios':
c=[(k,v) for k,v in self.ratios[0].items()]
for k in c[2:]:
if k[1] is not None:
available_data.append(k[0])
print("Following data are available for ratios")
print("="*60)
for d in available_data[:-1]:
print(d,end=', ')
print(available_data[-1],end='.')
def cols_numeric_(self):
"""
Transforms columns to numeric (float) wherever applicable
"""
for c in self.df.columns:
try:
self.df[c] = self.df[c].apply(float)
except ValueError:
pass
def replace_None_(self):
"""
Replaces NoneType data by np.nan in the DataFrame
"""
self.df.fillna(value=pd.np.nan, inplace=True)
def build_dataframe(self,lst):
"""
Builds a DataFrame with a given list of ticker symbols
Parameters
----------
lst : A list of ticker symbols (str)
e.g. ['MSFT','FB','AAPL','TWTR']
Returns
-------
A Pandas DataFrame with all the data pulled from the API,
indexed by the symbol (company)
"""
if not self.key_registered:
print("API key not registered yet.")
return None
data_companies = []
for c in lst:
data_companies.append(self.build_dict(c))
# Build the DataFrame
self.df = pd.DataFrame(data_companies)
# Convert to numeric columns wherever applicable
self.replace_None_()
self.cols_numeric_()
return self.df
def bar_chart(self,var='price',**kwargs):
"""
Plots a bar chart of the given variable
Parameters
----------
var : Name of the variable to be plotted (str)
e.g. 'price', 'PE ratio', 'Book Value per Share'
Returns
-------
None
"""
plt.figure(figsize=(10,4))
plt.title("{}".format(var),fontsize=18)
plt.bar(x=self.df['companyName'],height=self.df[var],**kwargs)
plt.xticks(fontsize=14,rotation=45)
plt.yticks(fontsize=14)
plt.ylabel(var,fontsize=16)
plt.show()
def scatter(self,varX,varY,sizeZ=None,**kwargs):
"""
Plots a scatter chart of the given variable pair,
and (optionally) size the as per a third variable
Parameters
----------
varX : Name of the x-axis variable (str), e.g. 'price', 'PE ratio', 'Book Value per Share'
varY : Name of the x-axis variable (str), e.g. 'price', 'PE ratio', 'Book Value per Share'
sizeZ : Name of the variable (str) for sizing, e.g. 'price', 'PE ratio', 'Book Value per Share'
Returns
-------
None
"""
if sizeZ is not None:
scale = 1000/(self.df[sizeZ].max()**2)
plt.figure(figsize=(10,4))
plt.title("{} vs. {}".format(varX,varY),fontsize=18)
if sizeZ is not None:
plt.scatter(x=self.df[varX],y=self.df[varY],s=scale*self.df[sizeZ]**2,**kwargs)
else:
plt.scatter(x=self.df[varX],y=self.df[varY],s=200,**kwargs)
for i in (range(len(self.df))):
plt.annotate(s=self.df['symbol'][i],
xy=(self.df[varX][i],
self.df[varY][i]))
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(varX,fontsize=16)
plt.ylabel(varY,fontsize=16)
plt.show() | 32.734982 | 104 | 0.505289 |
095f0153e74f2529cd5fa99c0dd298a30f7504ba | 896 | py | Python | pymtl3/passes/PlaceholderPass.py | tancheng/pymtl3 | 9e3a582c805a1aa3d9c12a208e907bc73f2514d5 | [
"BSD-3-Clause"
] | 1 | 2022-01-03T06:22:11.000Z | 2022-01-03T06:22:11.000Z | pymtl3/passes/PlaceholderPass.py | tancheng/pymtl3 | 9e3a582c805a1aa3d9c12a208e907bc73f2514d5 | [
"BSD-3-Clause"
] | null | null | null | pymtl3/passes/PlaceholderPass.py | tancheng/pymtl3 | 9e3a582c805a1aa3d9c12a208e907bc73f2514d5 | [
"BSD-3-Clause"
] | null | null | null | #=========================================================================
# PlaceholderPass.py
#=========================================================================
# Author : Peitian Pan
# Date : Jan 27, 2020
from pymtl3 import Placeholder
from pymtl3.passes.BasePass import BasePass, PassMetadata
from pymtl3.passes.errors import PlaceholderConfigError
class PlaceholderPass( BasePass ):
  """Recursively process Placeholder components in a component tree."""

  def __call__( s, m ):
    is_placeholder = isinstance( m, Placeholder )

    # A placeholder config on a non-Placeholder component is a user error.
    if hasattr( m, 'config_placeholder' ) and not is_placeholder:
      raise PlaceholderConfigError(m,
          "the given object is not a Placeholder but has `config_placeholder` attribute!")

    if is_placeholder:
      s.visit_placeholder( m )

    # Recurse into every child of the component hierarchy.
    for child in m.get_child_components():
      s( child )

  def visit_placeholder( s, m ):
    # Attach pass metadata exactly once; reuse it on subsequent visits.
    if not hasattr( m, '_placeholder_meta' ):
      m._placeholder_meta = PassMetadata()
30255b664bd660a30aaebb390ee3e537c6182204 | 17,191 | py | Python | desktop/core/src/desktop/api2_tests.py | sbaudoin/hue | 55c125f389915b23608c825a98ca2e41b702f32a | [
"Apache-2.0"
] | null | null | null | desktop/core/src/desktop/api2_tests.py | sbaudoin/hue | 55c125f389915b23608c825a98ca2e41b702f32a | [
"Apache-2.0"
] | null | null | null | desktop/core/src/desktop/api2_tests.py | sbaudoin/hue | 55c125f389915b23608c825a98ca2e41b702f32a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import json
import re
from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal, assert_raises
from useradmin.models import get_default_user_group
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access
from desktop.models import Document2, User
class TestApi2(object):
  """Checks the generic document search API endpoints."""

  def setUp(self):
    self.client = make_logged_in_client(username="api2_user", groupname="default", recreate=True, is_superuser=False)
    self.user = User.objects.get(username="api2_user")

    grant_access(self.user.username, self.user.username, "desktop")

  def test_search_entities_interactive_xss(self):
    # A document whose name/description contain a script tag: the search
    # endpoint must escape the markup while keeping <em> highlight tags.
    payload = '<script>alert(5)</script>'
    doc = Document2.objects.create(
      name=payload,
      description=payload,
      type='query-hive',
      owner=self.user
    )

    try:
      response = self.client.post('/desktop/api/search/entities_interactive/', data={
        'sources': json.dumps(['documents']),
        'query_s': json.dumps('alert')
      })
      results = json.loads(response.content)['results']
      assert_true(results)

      serialized = json.dumps(results)
      # No raw angle brackets other than <em> highlight markers...
      assert_false(re.match('<(?!em)', serialized), serialized)
      assert_false(re.match('(?!em)>', serialized), serialized)
      assert_false('<script>' in serialized, serialized)
      assert_false('</script>' in serialized, serialized)
      # ...but the highlight markup itself must be present.
      assert_true('<' in serialized, serialized)
      assert_true('>' in serialized, serialized)
    finally:
      doc.delete()
class TestDocumentApiSharingPermissions(object):
  """Checks sharing a document by user, by group and by public link.

  The original test bodies repeated the same share/assert/unshare sequence
  eight times; the sequences are factored into small helpers so each test
  reads as the scenario it covers. Test method names and the behavior they
  verify are unchanged.
  """

  def setUp(self):
    self.client = make_logged_in_client(username="perm_user", groupname="default", recreate=True, is_superuser=False)
    self.client_not_me = make_logged_in_client(username="not_perm_user", groupname="default", recreate=True, is_superuser=False)

    self.user = User.objects.get(username="perm_user")
    self.user_not_me = User.objects.get(username="not_perm_user")

    grant_access(self.user.username, self.user.username, "desktop")
    grant_access(self.user_not_me.username, self.user_not_me.username, "desktop")

  def _add_doc(self, name):
    # Create a Hive query document owned by the main test user.
    return Document2.objects.create(
        name=name,
        type='query-hive',
        owner=self.user
    )

  def share_doc(self, doc, permissions, client=None):
    # POST the permission payload to the share endpoint, acting as `client`
    # (defaults to the owner's client).
    if client is None:
      client = self.client

    return client.post("/desktop/api2/doc/share", {
        'uuid': json.dumps(doc.uuid),
        'data': json.dumps(permissions)
    })

  def share_link_doc(self, doc, perm, is_on=False, client=None):
    # Toggle a public link ('read' or 'write') on the document.
    if client is None:
      client = self.client

    return client.post("/desktop/api2/doc/share/link", {
        'uuid': json.dumps(doc.uuid),
        'data': json.dumps({'name': 'link_%s' % perm, 'is_link_on': is_on})
    })

  def _perms(self, read_user_ids=None, read_group_ids=None, write_user_ids=None, write_group_ids=None):
    # Build the permission payload expected by the share endpoint.
    return {
        'read': {
            'user_ids': read_user_ids or [],
            'group_ids': read_group_ids or []
        },
        'write': {
            'user_ids': write_user_ids or [],
            'group_ids': write_group_ids or []
        }
    }

  def _assert_access(self, doc, other_can_read=False, other_can_write=False):
    # The owner always keeps full access; the other user's rights depend on
    # the current sharing state.
    assert_true(doc.can_read(self.user))
    assert_true(doc.can_write(self.user))

    if other_can_read:
      assert_true(doc.can_read(self.user_not_me))
    else:
      assert_false(doc.can_read(self.user_not_me))

    if other_can_write:
      assert_true(doc.can_write(self.user_not_me))
    else:
      assert_false(doc.can_write(self.user_not_me))

  def _assert_home_listing(self, visible_to_other, url='/desktop/api2/docs/'):
    # The owner always sees the document in the home listing.
    response = self.client.get(url)
    assert_true(json.loads(response.content)['documents'])

    response = self.client_not_me.get(url)
    if visible_to_other:
      assert_true(json.loads(response.content)['documents'])
    else:
      assert_false(json.loads(response.content)['documents'])

  def _share_and_check(self, doc, permissions, other_can_read, other_can_write):
    # Share as the owner, then verify the resulting access and home listing.
    response = self.share_doc(doc, permissions)
    assert_equal(0, json.loads(response.content)['status'], response.content)

    self._assert_access(doc, other_can_read, other_can_write)
    self._assert_home_listing(visible_to_other=True)

  def _unshare_and_check(self, doc):
    # Remove every shared permission, then verify only the owner has access.
    response = self.share_doc(doc, self._perms())
    assert_equal(0, json.loads(response.content)['status'], response.content)

    self._assert_access(doc)
    self._assert_home_listing(visible_to_other=False)

  def test_update_permissions(self):
    doc = self._add_doc('test_update_permissions')

    response = self.share_doc(
        doc,
        {
          'read': {
            'user_ids': [self.user_not_me.id],
            'group_ids': []
          }
        }
    )

    assert_equal(0, json.loads(response.content)['status'], response.content)

  def test_share_document_permissions(self):
    # No document exists yet: neither user sees anything in home.
    response = self.client.get('/desktop/api2/docs/')
    assert_false(json.loads(response.content)['documents'])

    response = self.client_not_me.get('/desktop/api2/docs/')
    assert_false(json.loads(response.content)['documents'])

    # A new document is visible and writable only by its owner.
    doc = self._add_doc('test_update_permissions')

    self._assert_home_listing(visible_to_other=False)
    self._assert_access(doc)

    default_group = get_default_user_group()

    # Share read permission directly with the other user, then revoke it.
    self._share_and_check(doc, self._perms(read_user_ids=[self.user_not_me.id]),
                          other_can_read=True, other_can_write=False)
    self._unshare_and_check(doc)

    # Share write permission with the default group (implies read access).
    self._share_and_check(doc, self._perms(write_group_ids=[default_group.id]),
                          other_can_read=True, other_can_write=True)
    self._unshare_and_check(doc)

    # Share write permission directly with the other user.
    self._share_and_check(doc, self._perms(write_user_ids=[self.user_not_me.id]),
                          other_can_read=True, other_can_write=True)
    self._unshare_and_check(doc)

    # Share write permission with the group once more.
    self._share_and_check(doc, self._perms(write_group_ids=[default_group.id]),
                          other_can_read=True, other_can_write=True)
    self._unshare_and_check(doc)

  def test_update_permissions_cannot_escalate_privileges(self):
    doc = self._add_doc('test_update_permissions_cannot_escape_privileges')

    # Share read permissions
    response = self.share_doc(doc, self._perms(read_user_ids=[self.user_not_me.id]))
    assert_equal(0, json.loads(response.content)['status'], response.content)

    self._assert_access(doc, other_can_read=True, other_can_write=False)

    # Try, and fail, to grant ourselves write access as the non-owner.
    response = self.share_doc(
        doc,
        self._perms(
            read_user_ids=[self.user_not_me.id],
            write_user_ids=[self.user_not_me.id]
        ),
        self.client_not_me
    )

    content = json.loads(response.content)
    assert_equal(content['status'], -1)
    assert_true("Document does not exist or you don\'t have the permission to access it." in content['message'], content['message'])

    # Permissions must be unchanged.
    self._assert_access(doc, other_can_read=True, other_can_write=False)

  def test_link_sharing_permissions(self):
    doc = self._add_doc('test_link_sharing_permissions')
    doc_id = '%s' % doc.id
    home_url = '/desktop/api2/docs/?text=test_link_sharing_permissions'

    def assert_doc_fetch(other_status):
      # The owner can always fetch the document directly; the other user's
      # status code depends on the link-sharing state.
      response = self.client.get('/desktop/api2/doc/?uuid=%s' % doc_id)
      assert_equal(0, json.loads(response.content)['status'], response.content)

      response = self.client_not_me.get('/desktop/api2/doc/?uuid=%s' % doc_id)
      assert_equal(other_status, json.loads(response.content)['status'], response.content)

    # Initially only the owner can list and fetch the document.
    self._assert_home_listing(visible_to_other=False, url=home_url)
    assert_doc_fetch(other_status=-1)
    self._assert_access(doc)

    # Read link on: direct read access for the other user, but link sharing
    # does not list the document in their home, only direct access works.
    response = self.share_link_doc(doc, perm='read', is_on=True)
    assert_equal(0, json.loads(response.content)['status'], response.content)

    self._assert_access(doc, other_can_read=True)
    self._assert_home_listing(visible_to_other=False, url=home_url)
    assert_doc_fetch(other_status=0)

    # Read link off again.
    response = self.share_link_doc(doc, perm='read', is_on=False)
    assert_equal(0, json.loads(response.content)['status'], response.content)

    self._assert_access(doc)
    self._assert_home_listing(visible_to_other=False, url=home_url)
    assert_doc_fetch(other_status=-1)

    # Write link on: read and write direct access for the other user.
    response = self.share_link_doc(doc, perm='write', is_on=True)
    assert_equal(0, json.loads(response.content)['status'], response.content)

    self._assert_access(doc, other_can_read=True, other_can_write=True)
    self._assert_home_listing(visible_to_other=False, url=home_url)
    assert_doc_fetch(other_status=0)

    # Write link off again.
    response = self.share_link_doc(doc, perm='write', is_on=False)
    assert_equal(0, json.loads(response.content)['status'], response.content)

    self._assert_access(doc)
    self._assert_home_listing(visible_to_other=False, url=home_url)
    assert_doc_fetch(other_status=-1)
| 32.68251 | 132 | 0.662963 |
4ec8f2bd358a9b667c7613d8933616928ab19ce4 | 1,949 | py | Python | setup.py | alexmojaki/more-itertools | 97f501415c5efd96a0a4af56ffdc0a524cca03b6 | [
"MIT"
] | null | null | null | setup.py | alexmojaki/more-itertools | 97f501415c5efd96a0a4af56ffdc0a524cca03b6 | [
"MIT"
] | null | null | null | setup.py | alexmojaki/more-itertools | 97f501415c5efd96a0a4af56ffdc0a524cca03b6 | [
"MIT"
] | 1 | 2020-05-05T03:17:42.000Z | 2020-05-05T03:17:42.000Z | from re import sub
from setuptools import setup
from more_itertools import __version__
def get_long_description():
    """Build the PyPI long description: README plus the version history.

    Reads ``README.rst`` and ``docs/versions.rst``, stripping RST markup
    (``automodule`` directives and ``:func:`` roles) that PyPI's renderer
    cannot display.

    Returns:
        str: README text, a blank line, then the cleaned version history.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original `open(...).read()` relied on GC to close it).
    with open('README.rst') as readme_file:
        readme = readme_file.read()
    version_lines = []
    with open('docs/versions.rst') as infile:
        next(infile)  # skip the page title line
        for line in infile:
            line = line.rstrip().replace('.. automodule:: more_itertools', '')
            version_lines.append(line)
    version_history = '\n'.join(version_lines)
    # PyPI cannot resolve Sphinx roles; reduce :func:`name` to plain name.
    version_history = sub(r':func:`([a-zA-Z0-9._]+)`', r'\1', version_history)
    return readme + '\n\n' + version_history
# Build/packaging configuration for the more-itertools distribution.
setup(
    name='more-itertools',
    # Single-sourced from more_itertools.__version__ (imported above).
    version=__version__,
    description='More routines for operating on iterables, beyond itertools',
    # Long description is assembled from README + docs/versions.rst at build time.
    long_description=get_long_description(),
    author='Erik Rose',
    author_email='erikrose@grinchcentral.com',
    license='MIT',
    packages=['more_itertools'],
    # Ship the PEP 561 marker and stub files so type checkers see the package as typed.
    package_data={'more_itertools': ['py.typed', '*.pyi']},
    include_package_data=True,
    python_requires='>=3.5',
    url='https://github.com/more-itertools/more-itertools',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries'],
    keywords=['itertools', 'iterator', 'iteration', 'filter', 'peek',
              'peekable', 'collate', 'chunk', 'chunked'],
)
| 35.436364 | 78 | 0.63058 |
df17aefc323f7004d7def63ad4ba2ecba2181e42 | 2,794 | py | Python | external/anomaly/anomaly_segmentation/configs/stfpm/configuration.py | ntyukaev/training_extensions | c897d42e50828fea853ceda0795e1f0e7d6e9909 | [
"Apache-2.0"
] | 44 | 2018-10-27T15:28:19.000Z | 2019-02-26T12:50:39.000Z | external/anomaly/anomaly_segmentation/configs/stfpm/configuration.py | ntyukaev/training_extensions | c897d42e50828fea853ceda0795e1f0e7d6e9909 | [
"Apache-2.0"
] | 31 | 2018-11-09T20:33:47.000Z | 2019-02-28T09:58:22.000Z | external/anomaly/anomaly_segmentation/configs/stfpm/configuration.py | ntyukaev/training_extensions | c897d42e50828fea853ceda0795e1f0e7d6e9909 | [
"Apache-2.0"
] | 27 | 2018-11-05T21:59:34.000Z | 2019-02-28T14:28:50.000Z | """
Configurable parameters for STFPM anomaly classification task
"""
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from attr import attrs
from ote_anomalib.configs.configuration import BaseAnomalyConfig
from ote_anomalib.configs.configuration_enums import EarlyStoppingMetrics
from ote_sdk.configuration.elements import (
ParameterGroup,
add_parameter_group,
configurable_integer,
selectable,
string_attribute,
)
from ote_sdk.configuration.model_lifecycle import ModelLifecycle
@attrs
class STFPMAnomalySegmentationConfig(BaseAnomalyConfig):
    """
    Configurable parameters for the STFPM anomaly segmentation task.
    """

    # Display metadata shown in the configuration UI.
    header = string_attribute("Configuration for STFPM")
    description = header

    @attrs
    class ModelParameters(ParameterGroup):
        """
        Parameter group for the training model.
        """

        header = string_attribute("Model Parameters")
        description = header

        @attrs
        class EarlyStoppingParameters(ParameterGroup):
            """
            Early stopping parameters.
            """

            header = string_attribute("Early Stopping Parameters")
            description = header

            # Which validation metric drives the early-stopping decision.
            metric = selectable(
                default_value=EarlyStoppingMetrics.IMAGE_F1,
                header="Early Stopping Metric",
                description="The metric used to determine if the model should stop training",
            )

            # Epochs without improvement tolerated before training stops.
            patience = configurable_integer(
                default_value=10,
                min_value=1,
                max_value=100,
                header="Early Stopping Patience",
                description="Number of epochs to wait for an improvement in the monitored metric. If the metric has "
                "not improved for this many epochs, the training will stop and the best model will be "
                "returned.",
                warning="Setting this value too low might lead to underfitting. Setting the value too high will "
                "increase the training time and might lead to overfitting.",
                affects_outcome_of=ModelLifecycle.TRAINING,
            )

        early_stopping = add_parameter_group(EarlyStoppingParameters)

    model = add_parameter_group(ModelParameters)
| 34.493827 | 117 | 0.681818 |
269d2d0e9bcda1619fdd0fb9004ecff5e1f8a871 | 8,587 | py | Python | chia/wallet/cc_wallet/cc_utils.py | ForestCrazy/chia-blockchain-remote-plot | 0ba838b7a8ea2b5410d438ac70295df699a30dae | [
"Apache-2.0"
] | 6 | 2021-06-15T00:43:35.000Z | 2021-11-01T02:32:37.000Z | chia/wallet/cc_wallet/cc_utils.py | ForestCrazy/chia-blockchain-remote-plot | 0ba838b7a8ea2b5410d438ac70295df699a30dae | [
"Apache-2.0"
] | 33 | 2021-09-28T10:17:59.000Z | 2022-03-29T10:13:18.000Z | chia/wallet/cc_wallet/cc_utils.py | ForestCrazy/chia-blockchain-remote-plot | 0ba838b7a8ea2b5410d438ac70295df699a30dae | [
"Apache-2.0"
] | 1 | 2021-07-01T05:31:02.000Z | 2021-07-01T05:31:02.000Z | import dataclasses
from typing import List, Optional, Tuple
from blspy import AugSchemeMPL, G2Element
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program, INFINITE_COST
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.condition_opcodes import ConditionOpcode
from chia.types.spend_bundle import CoinSpend, SpendBundle
from chia.util.condition_tools import conditions_dict_for_solution
from chia.util.ints import uint64
from chia.wallet.puzzles.cc_loader import CC_MOD, LOCK_INNER_PUZZLE
from chia.wallet.puzzles.genesis_by_coin_id_with_0 import (
genesis_coin_id_for_genesis_coin_checker,
lineage_proof_for_coin,
lineage_proof_for_genesis,
lineage_proof_for_zero,
)
NULL_SIGNATURE = G2Element()
ANYONE_CAN_SPEND_PUZZLE = Program.to(1) # simply return the conditions
# information needed to spend a cc
# if we ever support more genesis conditions, like a re-issuable coin,
# we may need also to save the `genesis_coin_mod` or its hash
@dataclasses.dataclass
class SpendableCC:
    """Information needed to spend a coloured coin (cc)."""

    coin: Coin  # the cc coin being spent
    genesis_coin_id: bytes32  # id of the genesis coin defining this colour
    inner_puzzle: Program  # inner puzzle wrapped by the cc mod
    lineage_proof: Program  # proof linking this coin to a valid cc parent
def cc_puzzle_for_inner_puzzle(mod_code, genesis_coin_checker, inner_puzzle) -> Program:
    """Wrap ``inner_puzzle`` with the cc mod to produce the full cc puzzle."""
    mod_hash = mod_code.get_tree_hash()
    return mod_code.curry(mod_hash, genesis_coin_checker, inner_puzzle)
def cc_puzzle_hash_for_inner_puzzle_hash(mod_code, genesis_coin_checker, inner_puzzle_hash) -> bytes32:
    """Compute the full cc puzzle hash for a given inner puzzle hash."""
    checker_hash = genesis_coin_checker.get_tree_hash()
    curried = mod_code.curry(mod_code.get_tree_hash(), checker_hash, inner_puzzle_hash)
    # Pass the pre-hashed arguments through so they are not re-hashed.
    return curried.get_tree_hash(checker_hash, inner_puzzle_hash)
def lineage_proof_for_cc_parent(parent_coin: Coin, parent_inner_puzzle_hash: bytes32) -> Program:
    """Build the lineage proof for a coin whose parent was a genuine cc."""
    parent_info = [parent_coin.parent_coin_info, parent_inner_puzzle_hash, parent_coin.amount]
    return Program.to((1, parent_info))
def subtotals_for_deltas(deltas) -> List[int]:
    """
    Given a list of deltas corresponding to input coins, create the "subtotals" list
    needed in solutions spending those coins.

    The running subtotal *before* each coin is computed, then shifted so the
    smallest value is 0.

    Returns [] for an empty input. (The previous implementation raised
    ``ValueError`` via ``min([])`` in that case.)
    """
    if not deltas:
        return []
    subtotals = []
    subtotal = 0
    for delta in deltas:
        subtotals.append(subtotal)
        subtotal += delta

    # tweak the subtotals so the smallest value is 0
    subtotal_offset = min(subtotals)
    return [_ - subtotal_offset for _ in subtotals]
def coin_spend_for_lock_coin(
    prev_coin: Coin,
    subtotal: int,
    coin: Coin,
) -> CoinSpend:
    """Create the spend of the ephemeral 0-value "lock" coin created by ``coin``."""
    puzzle_reveal = LOCK_INNER_PUZZLE.curry(prev_coin.as_list(), subtotal)
    # A distinct local name avoids rebinding the `coin` parameter.
    lock_coin = Coin(coin.name(), puzzle_reveal.get_tree_hash(), uint64(0))
    return CoinSpend(lock_coin, puzzle_reveal, Program.to(0))
def bundle_for_spendable_cc_list(spendable_cc: SpendableCC) -> Program:
    """Pack a SpendableCC's coin and lineage proof into a Program pair."""
    return Program.to((spendable_cc.coin.as_list(), spendable_cc.lineage_proof))
def spend_bundle_for_spendable_ccs(
    mod_code: Program,
    genesis_coin_checker: Program,
    spendable_cc_list: List[SpendableCC],
    inner_solutions: List[Program],
    sigs: Optional[List[G2Element]] = None,
) -> SpendBundle:
    """
    Given a list of `SpendableCC` objects and inner solutions for those objects, create a
    `SpendBundle` that spends all those coins.

    Note that the signature is not calculated here; the caller is responsible for
    providing `sigs` (they are aggregated) or fixing up the bundle afterwards.

    Raises ValueError if the solution/coin lists differ in length or the coin
    amounts do not balance.
    """
    # `sigs` previously defaulted to a mutable `[]`; use None to avoid the
    # shared-mutable-default pitfall. Behavior is unchanged: None and [] were
    # already treated identically below.
    N = len(spendable_cc_list)

    if len(inner_solutions) != N:
        raise ValueError("spendable_cc_list and inner_solutions are different lengths")

    input_coins = [_.coin for _ in spendable_cc_list]

    # figure out what the output amounts are by running the inner puzzles & solutions
    output_amounts = []
    for cc_spend_info, inner_solution in zip(spendable_cc_list, inner_solutions):
        error, conditions, cost = conditions_dict_for_solution(
            cc_spend_info.inner_puzzle, inner_solution, INFINITE_COST
        )
        total = 0
        if conditions:
            for _ in conditions.get(ConditionOpcode.CREATE_COIN, []):
                total += Program.to(_.vars[1]).as_int()
        output_amounts.append(total)

    coin_spends = []

    deltas = [input_coins[_].amount - output_amounts[_] for _ in range(N)]
    subtotals = subtotals_for_deltas(deltas)

    if sum(deltas) != 0:
        raise ValueError("input and output amounts don't match")

    bundles = [bundle_for_spendable_cc_list(_) for _ in spendable_cc_list]

    for index in range(N):
        cc_spend_info = spendable_cc_list[index]

        puzzle_reveal = cc_puzzle_for_inner_puzzle(mod_code, genesis_coin_checker, cc_spend_info.inner_puzzle)

        # Each solution references its ring neighbours so the puzzles can verify
        # that amounts balance across the whole spend.
        prev_index = (index - 1) % N
        next_index = (index + 1) % N
        prev_bundle = bundles[prev_index]
        my_bundle = bundles[index]
        next_bundle = bundles[next_index]

        solution = [
            inner_solutions[index],
            prev_bundle,
            my_bundle,
            next_bundle,
            subtotals[index],
        ]
        coin_spend = CoinSpend(input_coins[index], puzzle_reveal, Program.to(solution))
        coin_spends.append(coin_spend)

    if not sigs:
        return SpendBundle(coin_spends, NULL_SIGNATURE)
    return SpendBundle(coin_spends, AugSchemeMPL.aggregate(sigs))
def is_cc_mod(inner_f: Program):
    """
    Return True iff `inner_f` is the supported `CC_MOD` template.

    You may want to generalize this if different `CC_MOD` templates are supported.
    """
    return inner_f == CC_MOD
def check_is_cc_puzzle(puzzle: Program):
    """Return True iff `puzzle` is a curried instance of the cc mod."""
    uncurried = puzzle.uncurry()
    if uncurried is None:
        return False
    inner_f, _args = uncurried
    return is_cc_mod(inner_f)
def uncurry_cc(puzzle: Program) -> Optional[Tuple[Program, Program, Program]]:
    """
    Return the triple `(mod_hash, genesis_coin_checker, inner_puzzle)` when
    `puzzle` is a curried `CC_MOD` cc, or `None` when it is not.
    """
    uncurried = puzzle.uncurry()
    if uncurried is None:
        return None
    inner_f, curried_args = uncurried
    if not is_cc_mod(inner_f):
        return None
    mod_hash, genesis_coin_checker, inner_puzzle = list(curried_args.as_iter())
    return mod_hash, genesis_coin_checker, inner_puzzle
def get_lineage_proof_from_coin_and_puz(parent_coin, parent_puzzle):
    """Derive the lineage proof for a child of `parent_coin` / `parent_puzzle`."""
    uncurried = uncurry_cc(parent_puzzle)
    if uncurried:
        # Parent is itself a cc: prove lineage via its inner puzzle hash.
        _mod_hash, _genesis_checker, inner_puzzle = uncurried
        return lineage_proof_for_cc_parent(parent_coin, inner_puzzle.get_tree_hash())
    if parent_coin.amount == 0:
        return lineage_proof_for_zero(parent_coin)
    return lineage_proof_for_genesis(parent_coin)
def spendable_cc_list_from_coin_spend(coin_spend: CoinSpend, hash_to_puzzle_f) -> List[SpendableCC]:

    """
    Given a `CoinSpend`, extract out a list of `SpendableCC` objects.

    Since `SpendableCC` needs to track the inner puzzles and a `Coin` only includes
    puzzle hash, we also need a `hash_to_puzzle_f` function that turns puzzle hashes into
    the corresponding puzzles. This is generally either a `dict` or some kind of DB
    (if it's large or persistent).
    """

    spendable_cc_list = []

    # Compute the lineage proof once from the *parent* spend; it is shared by
    # every cc child created by this spend.
    coin = coin_spend.coin
    puzzle = Program.from_bytes(bytes(coin_spend.puzzle_reveal))
    r = uncurry_cc(puzzle)
    if r:
        mod_hash, genesis_coin_checker, inner_puzzle = r
        lineage_proof = lineage_proof_for_cc_parent(coin, inner_puzzle.get_tree_hash())
    else:
        lineage_proof = lineage_proof_for_coin(coin)

    # Walk the coins created by this spend and keep only recognizable cc coins.
    for new_coin in coin_spend.additions():
        puzzle = hash_to_puzzle_f(new_coin.puzzle_hash)
        if puzzle is None:
            # we don't recognize this puzzle hash, skip it
            continue
        r = uncurry_cc(puzzle)
        if r is None:
            # this isn't a cc puzzle
            continue

        mod_hash, genesis_coin_checker, inner_puzzle = r

        genesis_coin_id = genesis_coin_id_for_genesis_coin_checker(genesis_coin_checker)

        # TODO: address hint error and remove ignore
        #       error: Argument 2 to "SpendableCC" has incompatible type "Optional[bytes32]"; expected "bytes32"
        #       [arg-type]
        cc_spend_info = SpendableCC(new_coin, genesis_coin_id, inner_puzzle, lineage_proof)  # type: ignore[arg-type]

        spendable_cc_list.append(cc_spend_info)

    return spendable_cc_list
516ef50a25bae41e00d159f0b6d672a5b30d5ee9 | 9,982 | py | Python | src/cool_compiler/parser/cool_parser.py | matcom-school/cool-compiler-2021 | 0a982f1708ed948a45610035a597d6ff12bab22b | [
"MIT"
] | null | null | null | src/cool_compiler/parser/cool_parser.py | matcom-school/cool-compiler-2021 | 0a982f1708ed948a45610035a597d6ff12bab22b | [
"MIT"
] | null | null | null | src/cool_compiler/parser/cool_parser.py | matcom-school/cool-compiler-2021 | 0a982f1708ed948a45610035a597d6ff12bab22b | [
"MIT"
] | null | null | null | from sly import Parser
from .__dependency import CoolTokens
from .factory_decored import NodesName
class CoolParser(Parser):
    """sly-based parser for the COOL language.

    Grammar rules are declared via sly's ``@_`` decorator; each rule builds an
    AST node through the injected ``factory`` and reports syntax errors through
    ``cool_error``. Fix applied: the ``Dispatch`` rule used a bare ``except:``;
    it now catches ``AttributeError`` only, consistent with the parallel
    ``CastingDispatch`` and ``StaticDispatch`` rules (an ``AttributeError`` is
    raised when the optional ``expr_list`` production is absent).
    """

    tokens = CoolTokens.tokens
    start = 'program'

    # Operator precedence, lowest to highest (COOL reference grammar).
    precedence = (
        ('right', 'ARROW'),
        ('left', 'NOT'),
        ('nonassoc', '=', '<', 'LESS_OR'),
        ('left', '+', '-'),
        ('left', '*', '/'),
        ('left', "ISVOID"),
        ('left', '~'),
        ('left', '@'),
        ('right', 'IN'),
        ('left', '.'),
    )

    def __init__(self, factory, errors):
        self.factory = factory
        self.cool_error = errors
        # Last token already reported, to avoid duplicate error messages.
        self.lte = None

    def error(self, token):
        tok = next(self.tokens, None)
        if self.lte is None or not self.lte == token:
            if token is None:
                # EOF reached while parsing; report position of last symbol.
                try:
                    tok = self.symstack[-1]
                    self.cool_error(tok.lineno, tok.index)
                except AttributeError:
                    self.cool_error.pos = (0, 0)
                self.cool_error.add_syntactic(f"ERROR at or near EOF")
                return
            else:
                char = token.value
                self.cool_error(token.lineno, token.index)
                self.cool_error.add_syntactic(f"ERROR at or near {char}")
            self.lte = tok
        return tok

    @_("")
    def epsilon(self, prod):
        pass

    @_('class_list')
    def program(self, prod):
        return self.factory( NodesName.Program, prod.class_list )

    @_("cclass epsilon")
    def class_list(self, prod):
        return [prod.cclass]

    @_('cclass class_list')
    def class_list(self, prod):
        return [prod.cclass] + prod.class_list

    @_('CLASS TYPE "{" class_feature "}" ";" ')
    def cclass(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Class, prod.TYPE, None, prod.class_feature )

    @_('CLASS TYPE INHERITS TYPE "{" class_feature "}" ";"')
    def cclass(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Class, prod.TYPE0, prod.TYPE1, prod.class_feature )

    @_('def_atr ";" class_feature')
    def class_feature(self, prod):
        return [prod.def_atr] + prod.class_feature

    @_('def_func ";" class_feature')
    def class_feature(self, prod):
        return [prod.def_func] + prod.class_feature

    @_('epsilon')
    def class_feature(self, prod):
        return []

    @_('ID ":" TYPE')
    def def_atr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.DefAtr, prod.ID, prod.TYPE, None )

    @_('ID ":" TYPE ARROW expr')
    def def_atr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.DefAtr, prod.ID, prod.TYPE, prod.expr )

    @_('ID "(" param_list ")" ":" TYPE "{" expr "}"')
    def def_func(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.DefFunc, prod.ID, prod.param_list, prod.TYPE, prod.expr )

    @_('ID "(" ")" ":" TYPE "{" expr "}"')
    def def_func(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.DefFunc, prod.ID, [], prod.TYPE, prod.expr )

    @_('ID ":" TYPE "," param_list')
    def param_list(self, prod):
        return [( prod.ID, prod.TYPE )] + prod.param_list

    @_('ID ":" TYPE')
    def param_list(self, prod):
        return [( prod.ID, prod.TYPE )]

    @_('ID ARROW expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Assing, prod.ID, prod.expr)

    @_('expr "@" TYPE "." ID "(" expr_list ")"', 'expr "@" TYPE "." ID "(" ")"' )
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        # prod.expr_list is absent for the empty-argument alternative.
        try:
            return self.factory( NodesName.CastingDispatch, prod.expr, prod.TYPE, prod.ID, prod.expr_list)
        except AttributeError:
            return self.factory( NodesName.CastingDispatch, prod.expr, prod.TYPE, prod.ID, [])

    @_('expr "." ID "(" expr_list ")"', 'expr "." ID "(" ")"')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        # Was a bare `except:`; narrowed to AttributeError (missing expr_list),
        # consistent with the CastingDispatch rule above.
        try:
            return self.factory( NodesName.Dispatch, prod.expr, prod.ID, prod.expr_list)
        except AttributeError:
            return self.factory( NodesName.Dispatch, prod.expr, prod.ID, [])

    @_('ID "(" expr_list ")"', 'ID "(" ")"')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        try:
            return self.factory( NodesName.StaticDispatch, prod.ID, prod.expr_list )
        except AttributeError:
            return self.factory( NodesName.StaticDispatch, prod.ID, [] )

    @_('expr "," expr_list')
    def expr_list(self, prod):
        return [prod.expr] + prod.expr_list

    @_('expr')
    def expr_list(self, prod):
        return [prod.expr]

    @_('IF expr THEN expr ELSE expr FI')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.IfThenElse, prod.expr0, prod.expr1, prod.expr2)

    @_('WHILE expr LOOP expr POOL')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.While, prod.expr0, prod.expr1)

    @_('"{" block_list "}"')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Block, prod.block_list)

    @_('expr ";" block_list')
    def block_list(self, prod):
        return [prod.expr] + prod.block_list

    @_('expr ";" epsilon')
    def block_list(self, prod):
        return [prod.expr]

    @_('LET let_list IN expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.LetIn, prod.let_list, prod.expr)

    @_('let_assign "," let_list')
    def let_list(self, prod):
        return [prod.let_assign] + prod.let_list

    @_('let_assign epsilon')
    def let_list(self, prod):
        return [prod.let_assign]

    @_('ID ":" TYPE ARROW expr')
    def let_assign(self, prod):
        return (prod.ID, prod.TYPE, prod.expr)

    @_('ID ":" TYPE')
    def let_assign(self, prod):
        return (prod.ID, prod.TYPE, None)

    @_('CASE expr OF case_list ESAC')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Case, prod.case_list, prod.expr)

    @_('ID ":" TYPE LOGICAR expr ";" case_list')
    def case_list(self, prod):
        return [( prod.ID, prod.TYPE, prod.expr )] + prod.case_list

    @_('ID ":" TYPE LOGICAR expr ";"')
    def case_list(self, prod):
        return [( prod.ID, prod.TYPE, prod.expr )]

    @_('NEW TYPE')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.New, prod.TYPE )

    @_('ISVOID expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.IsVoid, prod.expr )

    @_('expr "+" expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Sum, prod.expr0, prod.expr1 )

    @_('expr "-" expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Rest, prod.expr0, prod.expr1 )

    @_('expr "*" expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Mult, prod.expr0, prod.expr1 )

    @_('expr "/" expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Div, prod.expr0, prod.expr1 )

    @_('"~" expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Complement, prod.expr )

    @_('expr "<" expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Less, prod.expr0, prod.expr1 )

    @_('expr LESS_OR expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.LessOrEquals, prod.expr0, prod.expr1 )

    @_('expr "=" expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Equals, prod.expr0, prod.expr1 )

    @_('NOT expr')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Neg, prod.expr )

    @_('"(" expr ")"')
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return prod.expr

    @_("ID")
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.IdExpr, prod.ID)

    @_("NUMBER")
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Int, prod.NUMBER)

    @_("STRING")
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Str, prod.STRING)

    @_("TRUE")
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Bool, prod.TRUE)

    @_("FALSE")
    def expr(self, prod):
        self.factory.get_pos_to_errors(prod.lineno, prod.index)
        return self.factory( NodesName.Bool, prod.FALSE)
f7ae0c26027712ff2b57fc030224fdd6939d3aad | 1,911 | py | Python | simplemonitor/Alerters/pushbullet.py | cgroschupp/simplemonitor | 0d4cb4823193bdd93c3f9176eea3bfab07007be1 | [
"BSD-3-Clause"
] | 373 | 2015-12-21T02:39:21.000Z | 2022-03-08T10:49:43.000Z | simplemonitor/Alerters/pushbullet.py | cgroschupp/simplemonitor | 0d4cb4823193bdd93c3f9176eea3bfab07007be1 | [
"BSD-3-Clause"
] | 910 | 2015-10-13T08:16:38.000Z | 2022-03-29T12:16:52.000Z | simplemonitor/Alerters/pushbullet.py | cgroschupp/simplemonitor | 0d4cb4823193bdd93c3f9176eea3bfab07007be1 | [
"BSD-3-Clause"
] | 196 | 2015-03-24T19:15:42.000Z | 2022-02-06T22:39:55.000Z | """
SimpleMonitor alerts via pushbullet
"""
from typing import cast
import requests
import requests.auth
from ..Monitors.monitor import Monitor
from .alerter import Alerter, AlertLength, AlertType, register
@register
class PushbulletAlerter(Alerter):
    """Alerter that delivers notifications through the Pushbullet API."""

    alerter_type = "pushbullet"

    def __init__(self, config_options: dict) -> None:
        super().__init__(config_options)
        token = self.get_config_option("token", required=True, allow_empty=False)
        self.pushbullet_token = cast(str, token)
        self.support_catchup = True

    def send_pushbullet_notification(self, subject: str, body: str) -> None:
        """Push a single note via the Pushbullet REST API."""
        note = {"type": "note", "title": subject, "body": body}
        credentials = requests.auth.HTTPBasicAuth(self.pushbullet_token, "")
        response = requests.post(
            "https://api.pushbullet.com/v2/pushes", data=note, auth=credentials
        )
        if response.status_code != requests.codes.ok:
            raise RuntimeError("Unable to send Pushbullet notification")

    def send_alert(self, name: str, monitor: Monitor) -> None:
        """Compose the notification for `monitor` and send it (or log it in dry-run mode)."""
        alert_type = self.should_alert(monitor)
        if alert_type == AlertType.NONE:
            return
        subject = self.build_message(AlertLength.NOTIFICATION, alert_type, monitor)
        body = self.build_message(AlertLength.FULL, alert_type, monitor)
        if self._dry_run:
            self.alerter_logger.info("dry_run: would send push notification: %s", body)
            return
        try:
            self.send_pushbullet_notification(subject, body)
        except Exception:
            self.alerter_logger.exception("Couldn't send push notification")

    def _describe_action(self) -> str:
        return "posting to pushbullet"
| 32.389831 | 87 | 0.658817 |
2d0a18e0eaa274379d7aefa3648d3db294c3bd9f | 6,292 | py | Python | tests/test_bootstraphistogram.py | davehadley/bootstraphistogram | 4f4c4e73c0209a3508a56abfe4acb4cb0e85b1bd | [
"MIT"
] | 1 | 2020-06-26T14:29:29.000Z | 2020-06-26T14:29:29.000Z | tests/test_bootstraphistogram.py | davehadley/bootstraphistogram | 4f4c4e73c0209a3508a56abfe4acb4cb0e85b1bd | [
"MIT"
] | 4 | 2021-11-09T17:11:38.000Z | 2022-02-07T11:22:05.000Z | tests/test_bootstraphistogram.py | davehadley/bootstraphistogram | 4f4c4e73c0209a3508a56abfe4acb4cb0e85b1bd | [
"MIT"
] | null | null | null | import pickle
import unittest
from typing import Optional
import boost_histogram as bh
import numpy as np
from bootstraphistogram import BootstrapHistogram
def _standard_error_mean(size, sigma=1.0):
return sigma / np.sqrt(size)
def _standard_error_std(size, sigma=1.0):
return np.sqrt(sigma ** 2 / (2.0 * size))
class TestBootstrapHistogram1D(unittest.TestCase):
    """Unit tests for BootstrapHistogram with a single regular axis."""

    def assertArrayEqual(
        self, actual: np.ndarray, expected: np.ndarray, msg: Optional[str] = None
    ) -> None:
        # Element-wise exact equality for arrays.
        return self.assertTrue(np.array_equal(actual, expected), msg=msg)

    def assertArrayAlmostEqual(
        self,
        actual: np.ndarray,
        expected: np.ndarray,
        delta: float,
        msg: Optional[str] = None,
    ) -> None:
        # Element-wise comparison within an absolute tolerance `delta`.
        return self.assertTrue(np.all(np.abs(actual - expected) < delta), msg=msg)

    # NOTE(review): method name has a typo ("contructor"); kept since renaming
    # would change the test id.
    def test_contructor(self):
        # check constructor works without raising error
        BootstrapHistogram(bh.axis.Regular(100, -1.0, 1.0), rng=1234)
        return

    def test_fill(self):
        hist = BootstrapHistogram(
            bh.axis.Regular(100, -5.0, 5.0), numsamples=10, rng=1234
        )
        size = 100000
        data = np.random.normal(loc=0.0, scale=1.0, size=size)
        hist.fill(data)
        x = hist.axes[0].centers
        y = hist.view()[:, np.random.randint(0, hist.numsamples)]
        mean = np.average(x, weights=y)
        # NOTE(review): this is the weighted *variance*, not the standard
        # deviation (no sqrt); the assertion still holds because both equal
        # 1.0 for a standard normal — confirm intent.
        std = np.average((x - mean) ** 2, weights=y)
        binwidth = hist.axes[0].edges[1] - hist.axes[0].edges[0]
        self.assertAlmostEqual(
            mean, 0.0, delta=5.0 * _standard_error_mean(size=size) + binwidth
        )
        self.assertAlmostEqual(
            std, 1.0, delta=5.0 * _standard_error_std(size=size) + binwidth
        )
        return

    def test_samples(self):
        # Per-bin mean/spread over bootstrap samples should match Poisson
        # expectations for uniformly distributed data.
        numsamples = 100
        hist = BootstrapHistogram(
            bh.axis.Regular(100, 0.0, 1.0), numsamples=numsamples, rng=1234
        )
        size = 100000
        data = np.random.uniform(size=size)
        hist.fill(data)
        y = hist.view()
        mean = np.average(y, axis=1)
        std = np.std(y, axis=1)
        nbins = len(hist.axes[0])
        self.assertArrayAlmostEqual(
            mean, size / nbins, delta=5.0 * np.sqrt(size / nbins)
        )
        self.assertArrayAlmostEqual(
            std,
            np.sqrt(size / nbins),
            delta=5.0
            * _standard_error_std(size=numsamples, sigma=np.sqrt(size / nbins)),
        )
        return

    def test_numsamples_property(self):
        numsamples = 100
        hist = BootstrapHistogram(
            bh.axis.Regular(100, -5.0, 5.0), numsamples=numsamples, rng=1234
        )
        self.assertEqual(hist.numsamples, numsamples)

    def test_axes_property(self):
        # The last axis is the internal bootstrap-sample axis, hence [:-1].
        axes = (bh.axis.Regular(100, -5.0, 5.0),)
        hist = BootstrapHistogram(*axes, rng=1234)
        self.assertEqual(hist.axes[:-1], axes)

    def test_view_property(self):
        numsamples = 10
        nbins = 5
        hist = BootstrapHistogram(
            bh.axis.Regular(nbins, -5.0, 5.0), numsamples=numsamples, rng=1234
        )
        view = hist.view()
        self.assertArrayEqual(view, np.zeros(shape=(nbins, numsamples)))

    def test_equality(self):
        # Same seed and same data => identical bootstrap weights => equal.
        hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=123)
        hist2 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=123)
        data = np.random.normal(size=1000)
        hist1.fill(data)
        hist2.fill(data)
        self.assertEqual(hist1, hist2)

    def test_inequality(self):
        # No seeds given: independent RNG state should produce different samples.
        hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0))
        hist2 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0))
        data = np.random.normal(size=1000)
        hist1.fill(data)
        hist2.fill(data)
        self.assertNotEqual(hist1, hist2)

    def test_add(self):
        hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
        hist2 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
        hist1.fill(np.random.normal(size=1000))
        hist2.fill(np.random.normal(size=1000))
        a1 = hist1.view()
        a2 = hist2.view()
        hist3 = hist1 + hist2
        self.assertArrayEqual(hist3.view(), a1 + a2)

    def test_multiply_by_scalar(self):
        hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
        hist1.fill(np.random.normal(size=1000))
        scale = 2.0
        a1 = hist1.view() * scale
        hist3 = hist1 * scale
        self.assertArrayEqual(hist3.view(), a1)

    def test_divide_by_scalar(self):
        hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
        hist1.fill(np.random.normal(size=1000))
        scale = 2.0
        a1 = hist1.view() / scale
        hist3 = hist1 / scale
        self.assertArrayEqual(hist3.view(), a1)

    def test_pickle(self):
        # Round-trip through pickle must preserve equality.
        hist1 = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
        hist1.fill(np.random.normal(size=1000))
        hist2 = pickle.loads(pickle.dumps(hist1))
        self.assertEqual(hist1, hist2)

    def test_nominal(self):
        # The nominal histogram should match an unweighted numpy histogram.
        hist = BootstrapHistogram(bh.axis.Regular(100, -5.0, 5.0), rng=1234)
        data = np.random.normal(size=1000)
        hist.fill(data)
        arr, _ = np.histogram(data, bins=hist.axes[0].edges)
        self.assertArrayEqual(hist.nominal.view(), arr)

    def test_mean(self):
        size = 100000
        hist = BootstrapHistogram(
            bh.axis.Regular(100, 0.0, 1.0), numsamples=100, rng=1234
        )
        data = np.random.uniform(size=size)
        hist.fill(data)
        nbins = len(hist.axes[0])
        self.assertArrayAlmostEqual(
            hist.mean(), size / nbins, delta=5.0 * np.sqrt(size / nbins)
        )
        return

    def test_std(self):
        numsamples = 100
        hist = BootstrapHistogram(
            bh.axis.Regular(100, 0.0, 1.0), numsamples=numsamples, rng=1234
        )
        size = 100000
        data = np.random.uniform(size=size)
        hist.fill(data)
        nbins = len(hist.axes[0])
        self.assertArrayAlmostEqual(
            hist.std(),
            np.sqrt(size / nbins),
            delta=5.0
            * _standard_error_std(size=numsamples, sigma=np.sqrt(size / nbins)),
        )
        return
| 33.647059 | 82 | 0.594882 |
fdc3a9bd3808cc9d6c833eef78d6caf5e7a7ab93 | 1,511 | py | Python | episode-6/django/src/projwatson/watsonlanguage/watsonutils/baseservice.py | chughts/python-primer-companion-code | 3a147616183932d52714373b68054c212a040dc9 | [
"Apache-2.0"
] | 18 | 2016-03-30T14:55:28.000Z | 2019-01-01T12:41:27.000Z | episode-6/django/src/projwatson/watsonlanguage/watsonutils/baseservice.py | chughts/python-primer-companion-code | 3a147616183932d52714373b68054c212a040dc9 | [
"Apache-2.0"
] | 5 | 2016-02-22T20:12:33.000Z | 2018-11-19T15:33:46.000Z | episode-6/django/src/projwatson/watsonlanguage/watsonutils/baseservice.py | chughts/python-primer-companion-code | 3a147616183932d52714373b68054c212a040dc9 | [
"Apache-2.0"
] | 21 | 2016-02-22T19:22:59.000Z | 2020-12-02T14:46:36.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import json
from .creds import CredentialStore
from .vcap import get_vcap_settings
import logging
logger = logging.getLogger(__name__)
class BaseService(object):
    """Base class for Watson service wrappers.

    Resolves credentials for ``serviceName`` from Bluemix VCAP_SERVICES first,
    falling back to the local credential store, and exposes them via getters.
    Placeholder values ("<username>" etc.) remain when nothing is found.
    """

    def __init__(self, serviceName):
        super(BaseService, self).__init__()
        # Placeholder values used when no credentials can be resolved.
        self.username = "<username>"
        self.password = "<password>"
        self.apikey = "<apikey>"
        creds = get_vcap_settings(serviceName)
        if not creds:
            credStore = CredentialStore()
            creds = credStore.getCreds(serviceName)
        if creds:
            # NOTE(review): str() turns a missing key (None) into the literal
            # string "None"; preserved for backward compatibility — confirm
            # whether callers rely on it.
            self.username = str(creds.get('username', None))
            self.password = str(creds.get('password', None))
            self.apikey = str(creds.get('apikey', None))
        else:
            # logger.warn is deprecated -> logger.warning; lazy %-args avoid
            # formatting when the warning level is disabled.
            logger.warning("No credentials found for service %s", serviceName)

    def getUser(self):
        """Return the resolved username."""
        return self.username

    def getPassword(self):
        """Return the resolved password."""
        return self.password

    def getAPIKey(self):
        """Return the resolved API key."""
        return self.apikey
| 29.627451 | 74 | 0.704169 |
a6fc091d614a11e2d28bf5fef379bc72f02eeb6a | 6,862 | py | Python | original/chartbot_original.py | JCLemme/chartbot | cdb79d294a1e79469efea6248c62c8e9d2de1442 | [
"MIT"
] | null | null | null | original/chartbot_original.py | JCLemme/chartbot | cdb79d294a1e79469efea6248c62c8e9d2de1442 | [
"MIT"
] | 1 | 2019-03-07T07:47:43.000Z | 2019-03-07T07:47:43.000Z | original/chartbot_original.py | JCLemme/chartbot | cdb79d294a1e79469efea6248c62c8e9d2de1442 | [
"MIT"
] | null | null | null | import sys
import datetime
import config
import billboard
import inflect
import spotipy
import spotipy.oauth2 as oauth2
import spotipy.util as util
from fuzzywuzzy import fuzz
def compareData(str1, str2):
    """Return 1 if the shorter string is a substring of the longer, else 0.

    When the lengths are equal, str1 is tested for membership in str2
    (matching the original branch order). Returns int flags (0/1) because
    callers treat the result as a truthy/falsy match indicator.
    """
    if len(str1) > len(str2):
        shorter, longer = str2, str1
    else:
        shorter, longer = str1, str2
    return 1 if shorter in longer else 0
def findSong(spotify, trackInfo, guess):
    """Search Spotify for a track and return its id, or 0 if no match.

    trackInfo is [title, artist, query string]. With guess=True matching is
    fuzzy (fuzzywuzzy ratio > 60 on both fields); otherwise it requires a
    substring match on both title and artist via compareData().
    """
    results = spotify.search(q=trackInfo[2], limit=10, type='track')
    for item in results['tracks']['items']:
        songFound = item['name']
        artistFound = item['artists'][0]['name']
        print(" found \"%s\" by %s" % (songFound, artistFound))
        if guess:
            songScore = fuzz.ratio(trackInfo[0].upper(), songFound.upper())
            artistScore = fuzz.ratio(trackInfo[1].upper(), artistFound.upper())
            if songScore > 60 and artistScore > 60:
                print(" Using above (fuzzy score %d, %d)" % (songScore, artistScore))
                return item['id']
        elif (compareData(trackInfo[0].upper(), songFound.upper())
              and compareData(trackInfo[1].upper(), artistFound.upper())):
            print(" Using above")
            return item['id']
    return 0
# All the charts we're gonna support:
# [display name, billboard.py chart id, short label used in playlist names]
chartsData = []
chartsData.append(['Hot 100', 'hot-100', 'Hot 100'])
chartsData.append(['Billboard 200', 'billboard-200', 'Bill 200'])
chartsData.append(['Mainstream Top 40', 'pop-songs', 'Pop'])
chartsData.append(['Adult Contemporary', 'adult-contemporary', 'Adult Cntmp'])
chartsData.append(['Adult Top 40', 'adult-pop-songs', 'Adult Pop'])
chartsData.append(['Hot Country Songs', 'country-songs', 'Country'])
chartsData.append(['Hot Rock Songs', 'rock-songs', 'Rock'])
chartsData.append(['Alternative Songs', 'alternative-songs', 'Alt'])
chartsData.append(['Triple-A', 'triple-a', 'Tri-A'])
chartsData.append(['Mainstream Rock', 'hot-mainstream-rock-tracks', 'Main Rock'])
chartsData.append(['Hot R&B/Hip-Hop Songs', 'r-b-hip-hop-songs', 'Hip-Hop'])
chartsData.append(['Hot R&B Songs', 'r-and-b-songs', 'R&B'])
chartsData.append(['Hot Rap Songs', 'rap-song', 'Rap'])
chartsData.append(['Adult R&B', 'hot-adult-r-and-b-airplay', 'Adult R&B'])
chartsData.append(['Hot Dance/Electronic Songs', 'dance-electronic-songs', 'EDM'])
# ASCII-art "ChartBot" banner.
print(" _____ _ _ ____ _ ")
print(" / ____|| | | | | _ \ | | ")
print(" | | | |__ __ _ _ __ | |_ | |_) | ___ | |_ ")
print(" | | | '_ \ / _` || '__|| __|| _ < / _ \ | __|")
print(" | |____ | | | || (_| || | | |_ | |_) || (_) || |_ ")
print(" \_____||_| |_| \__,_||_| \__||____/ \___/ \__|")
print(" ")
# NOTE(review): raw_input() is Python 2 only; the parenthesized print calls
# also work there. Confirm the intended interpreter before modernizing.
srcUser = raw_input("Enter your Spotify username: ")
print("Now logging you in. Follow the instructions below...")
# OAuth token with the scopes needed to create and fill a private playlist.
spotToken = util.prompt_for_user_token(srcUser, 'playlist-modify-private playlist-read-private',
                           client_id=config.client_id,
                           client_secret=config.client_secret,
                           redirect_uri='https://example.com/callback/')
print("")
print("Available charts:")
for l in range(0, len(chartsData)):
    print(str(l) + ": " + chartsData[l][0])
# Let the user pick a chart by index; empty input defaults to Hot 100.
srcChart = ""
selection = 0;
while(srcChart == ""):
    inputSelection = raw_input("Selection [Hot 100]: ")
    if(inputSelection == ""):
        srcChart = chartsData[0][1]
    else:
        selection = int(inputSelection)
        if(selection >= 0 and selection < len(chartsData)):
            srcChart = chartsData[selection][1]
print("")
# Chart week defaults to today (YYYY-MM-DD, the format billboard.py expects).
srcDate = datetime.datetime.now().strftime("%Y-%m-%d")
inputDate = raw_input("Enter the latest week you want chart info for [This week]: ")
if(inputDate != ""): srcDate = inputDate
srcRange = 1
# NOTE(review): input() (not raw_input) evaluates the typed text under
# Python 2 -- apparently relied on here to get an int for the week count.
inputRange = input("Enter the previous weeks you want to include [1]: ")
if(inputRange != ""): srcRange = inputRange
srcPlaylist = "ChartBot: " + chartsData[selection][2] + " for " + srcDate
inputPlaylist = raw_input("Enter the name for the new playlist [" + srcPlaylist + "]: ")
if(inputPlaylist != ""): srcPlaylist = inputPlaylist
# Get master chart data for the latest week, then walk back srcRange-1 weeks,
# appending only tracks not already collected (matched on title + artist).
print("")
print("Fetching chart data...")
chart = billboard.ChartData(srcChart, srcDate)
tracksToFind = []
for c in chart:
    tracksToFind.append(c)
for w in range(0, srcRange-1):
    chart = billboard.ChartData(srcChart, chart.previousDate)
    for c in chart:
        # O god forgive me -- O(n^2) de-duplication by linear scan.
        found = 0
        for ck in tracksToFind:
            if(c.title == ck.title and c.artist == ck.artist):
                found = 1
        if(found == 0):
            tracksToFind.append(c)
        found = 0
print('Tracks to be added:')
for t in tracksToFind:
    # Python 2 trailing comma: print the indent without a newline.
    print(" "),
    print(t)
print('Total: %d tracks' % len(tracksToFind))
# Login to Spotify with the token obtained above.
print(' ')
spotify = spotipy.Spotify(auth=spotToken)
tracksToAdd = []
tracksRejected = []
# Loop through each track and get the Spotify ID. For each track, try an
# exact (substring) match first across the query variants, then retry all
# variants with fuzzy matching before giving up.
for chartTrack in tracksToFind:
    songSearch = chartTrack.title
    songSearch = songSearch.replace('&', 'and')
    artistSearch = chartTrack.artist
    print('looking for track \"%s\" by %s' % (songSearch, artistSearch))
    # Query variants: title + first word of artist, and (if present)
    # title + first two "words" of the artist name.
    searchCombos = []
    searchCombos.append([songSearch, artistSearch, songSearch + ' ' + artistSearch.split(' ', 1)[0]])
    if(len(artistSearch.split(' ', 1)) > 1):
        searchCombos.append([songSearch, artistSearch, songSearch + ' ' + artistSearch.split(' ', 1)[0] + ' ' + artistSearch.split(' ', 1)[1]])
    found = False
    for combo in searchCombos:
        foundID = findSong(spotify, combo, False)
        if(foundID != 0):
            tracksToAdd.append(foundID)
            found = True
            break;
    if(found == False):
        for combo in searchCombos:
            foundID = findSong(spotify, combo, True)
            if(foundID != 0):
                tracksToAdd.append(foundID)
                found = True
                break;
    if(found == False):
        tracksRejected.append(chartTrack)
print(" ")
print('Found ' + str(len(tracksToAdd)) + ' of ' + str(len(tracksToFind)) + ' tracks.')
print('Not found:')
for t in tracksRejected:
    print(t)
print(' ')
# Add tracks to the playlist (created private under the user's account).
newPlaylist = spotify.user_playlist_create(srcUser, name=srcPlaylist, public=False)
spotify.user_playlist_add_tracks(srcUser, newPlaylist['id'], tracksToAdd)
print('Made new playlist \"%s\"' % srcPlaylist)
7472ed2fb8a864d30c104797835724d0383c10da | 17,359 | py | Python | dvc/remote/gdrive.py | dchichkov/dvc | 907853b98598094caef4d0c45c4f0f54573af6e4 | [
"Apache-2.0"
] | null | null | null | dvc/remote/gdrive.py | dchichkov/dvc | 907853b98598094caef4d0c45c4f0f54573af6e4 | [
"Apache-2.0"
] | null | null | null | dvc/remote/gdrive.py | dchichkov/dvc | 907853b98598094caef4d0c45c4f0f54573af6e4 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
import os
import posixpath
import logging
import re
import threading
from urllib.parse import urlparse
from funcy import retry, wrap_with, wrap_prop, cached_property
from funcy.py3 import cat
from dvc.progress import Tqdm
from dvc.scheme import Schemes
from dvc.path_info import CloudURLInfo
from dvc.remote.base import BaseRemote
from dvc.exceptions import DvcException
from dvc.utils import tmp_fname, format_link
logger = logging.getLogger(__name__)
FOLDER_MIME_TYPE = "application/vnd.google-apps.folder"
class GDrivePathNotFound(DvcException):
    """Raised when a Google Drive path cannot be resolved to an item id."""
    def __init__(self, path_info):
        super().__init__("Google Drive path '{}' not found.".format(path_info))
class GDriveAccessTokenRefreshError(DvcException):
    """Raised when the OAuth access token could not be refreshed."""

    def __init__(self):
        # Message reworded from the ungrammatical "refreshment is failed".
        super().__init__("Google Drive access token refresh failed.")
class GDriveMissedCredentialKeyError(DvcException):
    """Raised when the stored user-credentials file lacks an expected key."""
    def __init__(self, path):
        super().__init__(
            "Google Drive user credentials file '{}' "
            "misses value for key.".format(path)
        )
def _extract(exc, field):
    """Pull *field* from the first error entry of an ApiRequestError.

    Returns "" when the error payload carries no "errors" list or the
    entry lacks the requested field.
    """
    from pydrive2.files import ApiRequestError

    assert isinstance(exc, ApiRequestError)

    # https://cloud.google.com/storage/docs/json_api/v1/status-codes#errorformat
    if "errors" not in exc.error:
        return ""
    return exc.error["errors"][0].get(field, "")
def _gdrive_retry(func):
    """Decorator: retry *func* on transient Google Drive API failures."""
    def should_retry(exc):
        # Retry only pydrive2 API errors: any 5xx, or a 403 whose reason is
        # one of the documented rate-limit codes.
        from pydrive2.files import ApiRequestError
        if not isinstance(exc, ApiRequestError):
            return False
        error_code = exc.error.get("code", 0)
        result = False
        if 500 <= error_code < 600:
            result = True
        if error_code == 403:
            result = _extract(exc, "reason") in [
                "userRateLimitExceeded",
                "rateLimitExceeded",
            ]
        if result:
            logger.debug("Retrying GDrive API call, error: {}.".format(exc))
        return result
    # 16 tries, start at 0.5s, multiply by golden ratio, cap at 20s
    return retry(
        16,
        timeout=lambda a: min(0.5 * 1.618 ** a, 20),
        filter_errors=should_retry,
    )(func)
class GDriveURLInfo(CloudURLInfo):
    """Cloud URL with a case-preserved host and a normalized path."""
    def __init__(self, url):
        super().__init__(url)
        # GDrive URL host part is case sensitive,
        # we are restoring it here.
        p = urlparse(url)
        self.host = p.netloc
        assert self.netloc == self.host
        # Normalize path. Important since we have a cache (path to ID)
        # and don't want to deal with different variations of path in it.
        self._spath = re.sub("/{2,}", "/", self._spath.rstrip("/"))
class GDriveRemote(BaseRemote):
    """DVC remote backed by Google Drive, accessed through pydrive2."""
    scheme = Schemes.GDRIVE
    path_cls = GDriveURLInfo
    # Optional dependency required for this remote to work.
    REQUIRES = {"pydrive2": "pydrive2"}
    DEFAULT_VERIFY = True
    # Always prefer traverse for GDrive since API usage quotas are a concern.
    TRAVERSE_WEIGHT_MULTIPLIER = 1
    TRAVERSE_PREFIX_LEN = 2
    # Env var that may carry the user-credentials JSON payload inline.
    GDRIVE_CREDENTIALS_DATA = "GDRIVE_CREDENTIALS_DATA"
    DEFAULT_USER_CREDENTIALS_FILE = "gdrive-user-credentials.json"
    # Default OAuth client used when the user does not configure their own.
    DEFAULT_GDRIVE_CLIENT_ID = "710796635688-iivsgbgsb6uv1fap6635dhvuei09o66c.apps.googleusercontent.com" # noqa: E501
    DEFAULT_GDRIVE_CLIENT_SECRET = "a1Fz59uTpVNeG_VGuSKDLJXv"
    def __init__(self, repo, config):
        """Read gdrive_* options from *config* and validate them."""
        super().__init__(repo, config)
        self.path_info = self.path_cls(config["url"])
        if not self.path_info.bucket:
            raise DvcException(
                "Empty Google Drive URL '{}'. Learn more at "
                "{}.".format(
                    config["url"],
                    format_link("https://man.dvc.org/remote/add"),
                )
            )
        self._bucket = self.path_info.bucket
        self._trash_only = config.get("gdrive_trash_only")
        self._use_service_account = config.get("gdrive_use_service_account")
        self._service_account_email = config.get(
            "gdrive_service_account_email"
        )
        self._service_account_user_email = config.get(
            "gdrive_service_account_user_email"
        )
        self._service_account_p12_file_path = config.get(
            "gdrive_service_account_p12_file_path"
        )
        self._client_id = config.get("gdrive_client_id")
        self._client_secret = config.get("gdrive_client_secret")
        self._validate_config()
        # If the credentials payload comes from the env var, write it to a
        # throwaway temp file; otherwise use the configured/default path.
        self._gdrive_user_credentials_path = (
            tmp_fname(os.path.join(self.repo.tmp_dir, ""))
            if os.getenv(GDriveRemote.GDRIVE_CREDENTIALS_DATA)
            else config.get(
                "gdrive_user_credentials_file",
                os.path.join(
                    self.repo.tmp_dir, self.DEFAULT_USER_CREDENTIALS_FILE
                ),
            )
        )
    def _validate_config(self):
        """Reject inconsistent auth configuration with actionable errors."""
        # Validate Service Account configuration
        if self._use_service_account and (
            not self._service_account_email
            or not self._service_account_p12_file_path
        ):
            raise DvcException(
                "To use service account please specify {}, {} and "
                "{} in DVC config. Learn more at "
                "{}.".format(
                    "gdrive_service_account_email",
                    "gdrive_service_account_p12_file_path",
                    "gdrive_service_account_user_email (optional)",
                    format_link("https://man.dvc.org/remote/modify"),
                )
            )
        # Validate OAuth 2.0 Client ID configuration
        if not self._use_service_account:
            if bool(self._client_id) != bool(self._client_secret):
                raise DvcException(
                    "Please specify Google Drive's client id and secret in "
                    "DVC config or omit both to use defaults. Learn more at "
                    "{}.".format(
                        format_link("https://man.dvc.org/remote/modify")
                    )
                )
    @wrap_prop(threading.RLock())
    @cached_property
    def _drive(self):
        """Authenticate and return a GoogleDrive client (cached, locked)."""
        from pydrive2.auth import RefreshError
        from pydrive2.auth import GoogleAuth
        from pydrive2.drive import GoogleDrive
        # Materialize env-provided credentials into the temp file chosen
        # in __init__; removed again in the finally block below.
        if os.getenv(GDriveRemote.GDRIVE_CREDENTIALS_DATA):
            with open(
                self._gdrive_user_credentials_path, "w"
            ) as credentials_file:
                credentials_file.write(
                    os.getenv(GDriveRemote.GDRIVE_CREDENTIALS_DATA)
                )
        GoogleAuth.DEFAULT_SETTINGS["client_config_backend"] = "settings"
        if self._use_service_account:
            GoogleAuth.DEFAULT_SETTINGS["service_config"] = {
                "client_service_email": self._service_account_email,
                "client_user_email": self._service_account_user_email,
                "client_pkcs12_file_path": self._service_account_p12_file_path,
            }
        else:
            GoogleAuth.DEFAULT_SETTINGS["client_config"] = {
                "client_id": self._client_id or self.DEFAULT_GDRIVE_CLIENT_ID,
                "client_secret": self._client_secret
                or self.DEFAULT_GDRIVE_CLIENT_SECRET,
                "auth_uri": "https://accounts.google.com/o/oauth2/auth",
                "token_uri": "https://oauth2.googleapis.com/token",
                "revoke_uri": "https://oauth2.googleapis.com/revoke",
                "redirect_uri": "",
            }
        GoogleAuth.DEFAULT_SETTINGS["save_credentials"] = True
        GoogleAuth.DEFAULT_SETTINGS["save_credentials_backend"] = "file"
        GoogleAuth.DEFAULT_SETTINGS[
            "save_credentials_file"
        ] = self._gdrive_user_credentials_path
        GoogleAuth.DEFAULT_SETTINGS["get_refresh_token"] = True
        GoogleAuth.DEFAULT_SETTINGS["oauth_scope"] = [
            "https://www.googleapis.com/auth/drive",
            "https://www.googleapis.com/auth/drive.appdata",
        ]
        # Pass non existent settings path to force DEFAULT_SETTINGS loading
        gauth = GoogleAuth(settings_file="")
        try:
            if self._use_service_account:
                gauth.ServiceAuth()
            else:
                gauth.CommandLineAuth()
        except RefreshError as exc:
            raise GDriveAccessTokenRefreshError from exc
        except KeyError as exc:
            raise GDriveMissedCredentialKeyError(
                self._gdrive_user_credentials_path
            ) from exc
        # Handle pydrive2.auth.AuthenticationError and other auth failures
        except Exception as exc:
            raise DvcException("Google Drive authentication failed") from exc
        finally:
            if os.getenv(GDriveRemote.GDRIVE_CREDENTIALS_DATA):
                os.remove(self._gdrive_user_credentials_path)
        return GoogleDrive(gauth)
    @wrap_prop(threading.RLock())
    @cached_property
    def _ids_cache(self):
        """Build the path<->item-id cache seeded with the remote root."""
        cache = {
            "dirs": defaultdict(list),  # path -> [item ids]
            "ids": {},  # item id -> path
            "root_id": self._get_item_id(self.path_info, use_cache=False),
        }
        self._cache_path_id(self.path_info.path, cache["root_id"], cache)
        # Pre-populate with the direct (non-trashed) children of the root.
        for item in self._gdrive_list(
            "'{}' in parents and trashed=false".format(cache["root_id"])
        ):
            item_path = (self.path_info / item["title"]).path
            self._cache_path_id(item_path, item["id"], cache)
        return cache
    def _cache_path_id(self, path, item_id, cache=None):
        """Record a path/item-id pair in *cache* (default: shared cache)."""
        cache = cache or self._ids_cache
        cache["dirs"][path].append(item_id)
        cache["ids"][item_id] = path
    @cached_property
    def _list_params(self):
        """Extra ListFile params; switches corpora for shared drives."""
        params = {"corpora": "default"}
        if self._bucket != "root" and self._bucket != "appDataFolder":
            drive_id = self._gdrive_shared_drive_id(self._bucket)
            if drive_id:
                params["driveId"] = drive_id
                params["corpora"] = "drive"
        return params
    @_gdrive_retry
    def _gdrive_shared_drive_id(self, item_id):
        """Return the shared-drive id of *item_id*, or None."""
        param = {"id": item_id}
        # it does not create a file on the remote
        item = self._drive.CreateFile(param)
        # ID of the shared drive the item resides in.
        # Only populated for items in shared drives.
        item.FetchMetadata("driveId")
        return item.get("driveId", None)
    @_gdrive_retry
    def _gdrive_upload_file(
        self,
        parent_id,
        title,
        no_progress_bar=False,
        from_file="",
        progress_name="",
    ):
        """Upload *from_file* as *title* under *parent_id*, with progress."""
        item = self._drive.CreateFile(
            {"title": title, "parents": [{"id": parent_id}]}
        )
        with open(from_file, "rb") as fobj:
            total = os.path.getsize(from_file)
            with Tqdm.wrapattr(
                fobj,
                "read",
                desc=progress_name,
                total=total,
                disable=no_progress_bar,
            ) as wrapped:
                # PyDrive doesn't like content property setting for empty files
                # https://github.com/gsuitedevs/PyDrive/issues/121
                if total:
                    item.content = wrapped
                item.Upload()
        return item
    @_gdrive_retry
    def _gdrive_download_file(
        self, item_id, to_file, progress_desc, no_progress_bar
    ):
        """Download *item_id* into *to_file*, showing a size-aware bar."""
        param = {"id": item_id}
        # it does not create a file on the remote
        gdrive_file = self._drive.CreateFile(param)
        bar_format = (
            "Downloading {desc:{ncols_desc}.{ncols_desc}}... "
            + Tqdm.format_sizeof(int(gdrive_file["fileSize"]), "B", 1024)
        )
        with Tqdm(
            bar_format=bar_format, desc=progress_desc, disable=no_progress_bar
        ):
            gdrive_file.GetContentFile(to_file)
    @_gdrive_retry
    def _gdrive_delete_file(self, item_id):
        """Trash or permanently delete *item_id* per gdrive_trash_only."""
        from pydrive2.files import ApiRequestError
        param = {"id": item_id}
        # it does not create a file on the remote
        item = self._drive.CreateFile(param)
        try:
            item.Trash() if self._trash_only else item.Delete()
        except ApiRequestError as exc:
            http_error_code = exc.error.get("code", 0)
            # On a shared drive a 403/file.permissions failure means the
            # user's access level is insufficient -- explain which level
            # the requested operation needs.
            if (
                http_error_code == 403
                and self._list_params["corpora"] == "drive"
                and _extract(exc, "location") == "file.permissions"
            ):
                raise DvcException(
                    "Insufficient permissions to {}. You should have {} "
                    "access level for the used shared drive. More details "
                    "at {}.".format(
                        "move the file into Trash"
                        if self._trash_only
                        else "permanently delete the file",
                        "Manager or Content Manager"
                        if self._trash_only
                        else "Manager",
                        "https://support.google.com/a/answer/7337554",
                    )
                ) from exc
            raise
    def _gdrive_list(self, query):
        """Run a Drive list *query*, lazily yielding items across pages."""
        param = {"q": query, "maxResults": 1000}
        param.update(self._list_params)
        file_list = self._drive.ListFile(param)
        # Isolate and decorate fetching of remote drive items in pages.
        get_list = _gdrive_retry(lambda: next(file_list, None))
        # Fetch pages until None is received, lazily flatten the thing.
        return cat(iter(get_list, None))
    @_gdrive_retry
    def _gdrive_create_dir(self, parent_id, title):
        """Create folder *title* under *parent_id* and return the item."""
        parent = {"id": parent_id}
        item = self._drive.CreateFile(
            {"title": title, "parents": [parent], "mimeType": FOLDER_MIME_TYPE}
        )
        item.Upload()
        return item
    @wrap_with(threading.RLock())
    def _create_dir(self, parent_id, title, remote_path):
        """Create (or reuse a cached) directory and return its item id."""
        cached = self._ids_cache["dirs"].get(remote_path)
        if cached:
            return cached[0]
        item = self._gdrive_create_dir(parent_id, title)
        # Only first-level directories are tracked in the cache.
        if parent_id == self._ids_cache["root_id"]:
            self._cache_path_id(remote_path, item["id"])
        return item["id"]
    def _get_remote_item_ids(self, parent_ids, title):
        """Query Drive for items named *title* under any of *parent_ids*."""
        if not parent_ids:
            return None
        query = "trashed=false and ({})".format(
            " or ".join(
                "'{}' in parents".format(parent_id) for parent_id in parent_ids
            )
        )
        query += " and title='{}'".format(title.replace("'", "\\'"))
        # GDrive list API is case insensitive, we need to compare
        # all results and pick the ones with the right title
        return [
            item["id"]
            for item in self._gdrive_list(query)
            if item["title"] == title
        ]
    def _get_cached_item_ids(self, path, use_cache):
        """Resolve *path* from the local cache only (root maps to bucket)."""
        if not path:
            return [self._bucket]
        if use_cache:
            return self._ids_cache["dirs"].get(path, [])
        return []
    def _path_to_item_ids(self, path, create, use_cache):
        """Recursively resolve *path* to item ids, optionally creating dirs."""
        item_ids = self._get_cached_item_ids(path, use_cache)
        if item_ids:
            return item_ids
        parent_path, title = posixpath.split(path)
        parent_ids = self._path_to_item_ids(parent_path, create, use_cache)
        item_ids = self._get_remote_item_ids(parent_ids, title)
        if item_ids:
            return item_ids
        return (
            [self._create_dir(min(parent_ids), title, path)] if create else []
        )
    def _get_item_id(self, path_info, create=False, use_cache=True):
        """Return a single item id for *path_info* or raise if not found."""
        assert path_info.bucket == self._bucket
        item_ids = self._path_to_item_ids(path_info.path, create, use_cache)
        if item_ids:
            # min() picks a deterministic id when duplicates exist.
            return min(item_ids)
        raise GDrivePathNotFound(path_info)
    def exists(self, path_info):
        """Return True when *path_info* resolves to an existing item."""
        try:
            self._get_item_id(path_info)
        except GDrivePathNotFound:
            return False
        else:
            return True
    def _upload(self, from_file, to_info, name=None, no_progress_bar=False):
        """Upload a local file, creating intermediate directories."""
        dirname = to_info.parent
        assert dirname
        parent_id = self._get_item_id(dirname, True)
        self._gdrive_upload_file(
            parent_id, to_info.name, no_progress_bar, from_file, name
        )
    def _download(self, from_info, to_file, name=None, no_progress_bar=False):
        """Download the remote item at *from_info* into *to_file*."""
        item_id = self._get_item_id(from_info)
        self._gdrive_download_file(item_id, to_file, name, no_progress_bar)
    def list_cache_paths(self, prefix=None, progress_callback=None):
        """Yield cache paths, optionally restricted to a 2-char *prefix*."""
        if not self._ids_cache["ids"]:
            return
        if prefix:
            dir_ids = self._ids_cache["dirs"].get(prefix[:2])
            if not dir_ids:
                return
        else:
            dir_ids = self._ids_cache["ids"]
        parents_query = " or ".join(
            "'{}' in parents".format(dir_id) for dir_id in dir_ids
        )
        query = "({}) and trashed=false".format(parents_query)
        for item in self._gdrive_list(query):
            if progress_callback:
                progress_callback()
            parent_id = item["parents"][0]["id"]
            yield posixpath.join(
                self._ids_cache["ids"][parent_id], item["title"]
            )
    def remove(self, path_info):
        """Remove (trash or delete) the item at *path_info*."""
        item_id = self._get_item_id(path_info)
        self._gdrive_delete_file(item_id)
| 35.068687 | 119 | 0.597212 |
ac651cd203c99f5f40e6d837d43c472c29aee026 | 870 | py | Python | ml_project/src/entities/train_pipeline_parameters.py | made-ml-in-prod-2021/mrtimmy89 | d1c6d69aae63e396d1ee6b4f5e9238a8b04a44f2 | [
"MIT"
] | null | null | null | ml_project/src/entities/train_pipeline_parameters.py | made-ml-in-prod-2021/mrtimmy89 | d1c6d69aae63e396d1ee6b4f5e9238a8b04a44f2 | [
"MIT"
] | 4 | 2021-05-09T15:56:09.000Z | 2021-06-06T07:33:40.000Z | ml_project/src/entities/train_pipeline_parameters.py | made-ml-in-prod-2021/mrtimmy89 | d1c6d69aae63e396d1ee6b4f5e9238a8b04a44f2 | [
"MIT"
] | null | null | null | """
Create dataclass for storing train parameters
"""
from dataclasses import dataclass
from marshmallow_dataclass import class_schema
import yaml
from src.entities.split_parameters import SplittingParams
from src.entities.training_parameters import TrainingParams
@dataclass()
class TrainingPipelineParams:
    """Top-level configuration for one training pipeline run."""
    # Path to the input dataset file.
    input_data_path: str
    # Name of the target column in the dataset.
    target_name: str
    # Train/validation split configuration.
    splitting_params: SplittingParams
    # Model training configuration.
    train_params: TrainingParams
    # Destination path for the serialized model.
    dump_model: str
TrainingPipelineParamsSchema = class_schema(TrainingPipelineParams)
def read_training_pipeline_params(path: str) -> TrainingPipelineParams:
    """Load and validate training pipeline parameters from a YAML file.

    :param path: path to the YAML configuration file
    :return: a validated TrainingPipelineParams instance
    :raises marshmallow.ValidationError: if the config does not match the schema
    """
    schema = TrainingPipelineParamsSchema()
    # YAML is defined as UTF-8; be explicit rather than relying on the
    # platform default encoding.
    with open(path, "r", encoding="utf-8") as input_stream:
        return schema.load(yaml.safe_load(input_stream))
e6ad88ef180d8eba69f72bbaaa19757edf66bacd | 1,462 | py | Python | stochSMB.py | ehultee/stoch-SMB | 324f8fdfa0e19bd66f5e904cf07c31d7af71cde7 | [
"MIT"
] | 3 | 2021-05-12T19:15:22.000Z | 2021-06-23T00:04:03.000Z | stochSMB.py | ehultee/stoch-SMB | 324f8fdfa0e19bd66f5e904cf07c31d7af71cde7 | [
"MIT"
] | 1 | 2021-08-06T14:24:51.000Z | 2021-08-06T14:24:51.000Z | stochSMB.py | ehultee/stoch-SMB | 324f8fdfa0e19bd66f5e904cf07c31d7af71cde7 | [
"MIT"
] | 1 | 2021-08-03T20:21:02.000Z | 2021-08-03T20:21:02.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 17 18:14:37 2021
Edited Thu Nov 4 2021: rename and make this the main resource file for the rpo
@author: lizz
"""
import numpy as np
from scipy import optimize
def segments_fit(X, Y, maxcount):
xmin = X.min()
xmax = X.max()
n = len(X)
AIC_ = float('inf')
BIC_ = float('inf')
r_ = None
for count in range(1, maxcount+1):
seg = np.full(count - 1, (xmax - xmin) / count)
px_init = np.r_[np.r_[xmin, seg].cumsum(), xmax]
py_init = np.array([Y[np.abs(X - x) < (xmax - xmin) * 0.1].mean() for x in px_init])
def func(p):
seg = p[:count - 1]
py = p[count - 1:]
px = np.r_[np.r_[xmin, seg].cumsum(), xmax]
return px, py
def err(p): # This is RSS / n
px, py = func(p)
Y2 = np.interp(X, px, py)
return np.mean((Y - Y2)**2)
r = optimize.minimize(err, x0=np.r_[seg, py_init], method='Nelder-Mead')
# Compute AIC/ BIC.
AIC = n * np.log10(err(r.x)) + 4 * count
BIC = n * np.log10(err(r.x)) + 2 * count * np.log(n)
if((BIC < BIC_) & (AIC < AIC_)): # Continue adding complexity.
r_ = r
AIC_ = AIC
BIC_ = BIC
else: # Stop.
count = count - 1
break
return func(r_.x) ## Return the last (n-1) | 27.074074 | 92 | 0.491108 |
d75a285783b9493a02c8f5cda11843d9e02afec4 | 1,857 | py | Python | src/marketClassify.py | ChampionNi/money-tree | 64f6894f46f8df011e9a875d9c866958969fce83 | [
"Apache-2.0"
] | null | null | null | src/marketClassify.py | ChampionNi/money-tree | 64f6894f46f8df011e9a875d9c866958969fce83 | [
"Apache-2.0"
] | null | null | null | src/marketClassify.py | ChampionNi/money-tree | 64f6894f46f8df011e9a875d9c866958969fce83 | [
"Apache-2.0"
] | null | null | null | #-*-coding=utf-8-*-
__author__ = 'ni'
import pandas as pd
import os
import numpy as np
import tushare as ts
global global_rate
global_rate = 10000
#pd.set_option('display.max_rows',None)
class StatisticsMarketValue():
    """Select stocks whose market cap falls in a range and export to Excel.

    Quote data comes from tushare (cached locally in today_all.csv); ST /
    delisted names are written to a separate spreadsheet.
    """

    def __init__(self):
        print("StatisticsMarketValue")

    def start_statistics(self, min, max):
        """Export stocks with mktcap in [min, max] to two .xlsx files.

        Bounds may be given in either order; they are normalized first.
        (Parameter names shadow builtins but are kept for caller
        compatibility.)
        """
        # Tuple swap instead of the original temp-variable dance.
        if min > max:
            min, max = max, min
        csvfile = "today_all.csv"
        if not os.path.exists(csvfile):
            print("get data from tushare")
            # Columns: code, name, pct change, price, open, high, low, prev
            # close, volume, turnover rate, amount, P/E, P/B, total market
            # cap, float market cap.
            df = ts.get_today_all()
            df.to_csv(csvfile)
        else:
            df = pd.read_csv(csvfile)
        result = df.loc[(df["mktcap"] >= min) & (df["mktcap"] <= max)].sort_values(["mktcap"], ascending=False)
        # Flag ST (special treatment) and delisted ('退') names.
        # NOTE(review): result.name is attribute access to the 'name' column;
        # it would break if pandas ever treated it as the DataFrame name.
        filterFrame = result.name.str.contains('ST') | result.name.str.contains('退')
        exclefile = str(min/global_rate) + '-' + str(max/global_rate) + '.xlsx'
        print('filename:' + exclefile)
        result[~filterFrame].to_excel(exclefile)
        exclefile = str(min/global_rate) + '-' + str(max/global_rate) + '-ST.xlsx'
        print('filename:' + exclefile)
        result[filterFrame].to_excel(exclefile)
        print(result)
def main():
    """Run the market-cap exports for the same fixed set of ranges."""
    obj = StatisticsMarketValue()
    # Bounds in units of global_rate; some pairs are intentionally given
    # high-first and are normalized inside start_statistics().
    bounds = [
        (0, 10),
        (10, 30),
        (30, 50),
        (50, 100),
        (300, 100),
        (300, 500),
        (1000, 500),
        (1000, 5000),
        (100000, 5000),
    ]
    for lo, hi in bounds:
        obj.start_statistics(lo * global_rate, hi * global_rate)
main() | 35.037736 | 111 | 0.661282 |
061c5d43929c7dd2176a9d2cded13f1f9589b995 | 1,465 | py | Python | lib/spyder.py | hack123321/Instagram | 46f9765f7e45ccfe298b9e415efc2ea60f18c60f | [
"MIT"
] | null | null | null | lib/spyder.py | hack123321/Instagram | 46f9765f7e45ccfe298b9e415efc2ea60f18c60f | [
"MIT"
] | null | null | null | lib/spyder.py | hack123321/Instagram | 46f9765f7e45ccfe298b9e415efc2ea60f18c60f | [
"MIT"
] | null | null | null | # Date: 05/05/2018
# Author: Pure-L0G1C
# Description: Browser Manager
from requests import Session
from .const import site_details
from .scraper import Scraper, Queue
class Spyder(object):
    """Requests-session factory that rotates through scraped proxies."""
    def __init__(self):
        # Currently selected proxy mapping for requests ({'http': ..., 'https': ...}).
        self.proxy = None
        # Cleared externally to stop proxy_manager's loop.
        self.isAlive = True
        # Raw dict ({'ip': ..., 'port': ...}) of the current proxy.
        self.proxy_info = None
        self.proxies = Queue()
        self.scraper = Scraper()
    def proxy_manager(self):
        """Refill the proxy queue whenever it runs empty (blocking loop)."""
        while self.isAlive:
            # Busy-wait while proxies remain.
            # NOTE(review): qsize is used without parentheses here and in
            # renew_proxy -- presumably a property on the custom Queue from
            # .scraper; verify, since stdlib Queue.qsize is a method.
            while all([self.isAlive, self.proxies.qsize]):
                pass
            if not self.isAlive:
                break
            self.proxies = self.scraper.scrape(ssl_proxies=True)
            [self.proxies.put(proxy) for proxy in self.scraper.scrape(new_proxies=True).queue if self.isAlive]
    @property
    def br(self):
        """Return a new requests Session wired to the current proxy."""
        session = Session()
        session.proxies.update(self.proxy)
        session.headers.update(site_details['header'])
        return session
    def renew_proxy(self, n=10):
        """Pick the next proxy; retry up to *n* times to avoid repeats."""
        _proxy = self.proxies.get()
        addr = 'http://{}:{}'.format(_proxy['ip'], _proxy['port'])
        proxy = { 'http': addr, 'https': addr }
        if self.proxy:
            # Same proxy as before and retries remain: recurse for another.
            if all([self.proxy == proxy, self.proxies.qsize, n]):
                self.renew_proxy(n-1)
        self.proxy_info = _proxy
        self.proxy = proxy
    def ip_addr(self):
        """Return the IP portion of the current proxy, if one is set."""
        if self.proxy:
return self.proxy['http'].split(':')[1][2:] | 26.160714 | 111 | 0.558362 |
2d1bc4005ce612b201b4c642b5ed415b286d5ef6 | 6,175 | py | Python | Lab_Week_08_-_Temporal_Difference_Learning/Solutions/env.py | eBe02/COMP0037-21_22 | c0872548ff4b653e3f786734666838813db2149a | [
"Apache-2.0"
] | null | null | null | Lab_Week_08_-_Temporal_Difference_Learning/Solutions/env.py | eBe02/COMP0037-21_22 | c0872548ff4b653e3f786734666838813db2149a | [
"Apache-2.0"
] | null | null | null | Lab_Week_08_-_Temporal_Difference_Learning/Solutions/env.py | eBe02/COMP0037-21_22 | c0872548ff4b653e3f786734666838813db2149a | [
"Apache-2.0"
] | null | null | null | from td.Environment import BaseEnvironment
class CliffWalkEnvironment(BaseEnvironment):
# Initialization function which is called once when an environment object is created.
# In this function, the grid dimensions and special locations (start and goal locations and the cliff locations)
# are stored for easy use later.
def env_init(self, env_info={}):
"""
Setup for the environment called when the experiment first starts.
Note:
Initialize a tuple with the reward, first state, boolean
indicating if it's terminal.
"""
# Note, we can setup the following variables later, in env_start() as it is equivalent.
# Code is left here to adhere to the note above, but these variables are initialized once more
# in env_start() [See the env_start() function below.]
reward = None
state = None # See Aside
termination = None
self.reward_state_term = (reward, state, termination)
# AN ASIDE: Observation is a general term used in the RL-Glue files that can be interachangeably
# used with the term "state" for our purposes and for this assignment in particular.
# A difference arises in the use of the terms when we have what is called Partial Observability where
# the environment may return states that may not fully represent all the information needed to
# predict values or make decisions (i.e., the environment is non-Markovian.)
# Set the default height to 4 and width to 12 (as in the diagram given above)
self.grid_h = env_info.get("grid_height", 4)
self.grid_w = env_info.get("grid_width", 12)
# Now, we can define a frame of reference. Let positive x be towards the direction down and
# positive y be towards the direction right (following the row-major NumPy convention.)
# Then, keeping with the usual convention that arrays are 0-indexed, max x is then grid_h - 1
# and max y is then grid_w - 1. So, we have:
# Starting location of agent is the bottom-left corner, (max x, min y).
self.start_loc = (self.grid_h - 1, 0)
# Goal location is the bottom-right corner. (max x, max y).
self.goal_loc = (self.grid_h - 1, self.grid_w - 1)
# The cliff will contain all the cells between the start_loc and goal_loc.
self.cliff = [(self.grid_h - 1, i) for i in range(1, (self.grid_w - 1))]
def env_start(self):
"""The first method called when the episode starts, called before the
agent starts.
Returns:
The first state from the environment.
"""
reward = 0
# agent_loc will hold the current location of the agent
self.agent_loc = self.start_loc
# state is the one dimensional state representation of the agent location.
state = self.state(self.agent_loc)
termination = False
self.reward_state_term = (reward, state, termination)
return self.reward_state_term[1]
# Work Required: Yes. Fill in the code for action UP and implement the logic for reward and termination.
# Lines: ~7.
def env_step(self, action):
"""A step taken by the environment.
Args:
action: The action taken by the agent
Returns:
(float, state, Boolean): a tuple of the reward, state,
and boolean indicating if it's terminal.
"""
if action == 0: # UP (Task 1)
### START CODE HERE ###
# Hint: Look at the code given for the other actions and think about the logic in them.
possible_next_loc = (self.agent_loc[0] - 1, self.agent_loc[1])
if possible_next_loc[0] >= 0: # Within Bounds?
self.agent_loc = possible_next_loc
else:
pass
### END CODE HERE ###
elif action == 1: # LEFT
possible_next_loc = (self.agent_loc[0], self.agent_loc[1] - 1)
if possible_next_loc[1] >= 0: # Within Bounds?
self.agent_loc = possible_next_loc
else:
pass # Stay.
elif action == 2: # DOWN
possible_next_loc = (self.agent_loc[0] + 1, self.agent_loc[1])
if possible_next_loc[0] < self.grid_h: # Within Bounds?
self.agent_loc = possible_next_loc
else:
pass # Stay.
elif action == 3: # RIGHT
possible_next_loc = (self.agent_loc[0], self.agent_loc[1] + 1)
if possible_next_loc[1] < self.grid_w: # Within Bounds?
self.agent_loc = possible_next_loc
else:
pass # Stay.
else:
raise Exception(str(action) + " not in recognized actions [0: Up, 1: Left, 2: Down, 3: Right]!")
reward = -1
terminal = False
### START CODE HERE ###
# Hint: Consider the initialization of reward and terminal variables above. Then, note the
# conditional statements and comments given below and carefully ensure to set the variables reward
# and terminal correctly for each case.
if self.agent_loc == self.goal_loc: # Reached Goal!
terminal = True
pass
elif self.agent_loc in self.cliff: # Fell into the cliff!
reward = -100
self.agent_loc = (self.grid_h - 1, 0)
terminal = False
else:
pass
### END CODE HERE ###
self.reward_state_term = (reward, self.state(self.agent_loc), terminal)
return self.reward_state_term
def env_cleanup(self):
    """Reset the agent back to its starting location once the episode ends."""
    self.agent_loc = self.start_loc
# helper method
def state(self, loc):
    """Map a 2-D grid coordinate to a single flat state index.

    Uses row-major order: index = row * grid_width + column.
    """
    row, col = loc
    return row * self.grid_w + col
### END CODE HERE ### | 44.746377 | 116 | 0.608259 |
655a1519866446f39b503617fad196e2eeaae2b4 | 1,142 | py | Python | setup.py | msabramo/hmac_cli | 32a2e2fd86e2b96c6b21415b14257b834b3bce2e | [
"MIT"
] | null | null | null | setup.py | msabramo/hmac_cli | 32a2e2fd86e2b96c6b21415b14257b834b3bce2e | [
"MIT"
] | null | null | null | setup.py | msabramo/hmac_cli | 32a2e2fd86e2b96c6b21415b14257b834b3bce2e | [
"MIT"
] | 2 | 2015-04-12T16:31:22.000Z | 2020-04-10T19:40:29.000Z | import os
import os

from setuptools import setup

this_dir = os.path.dirname(__file__)

# Read the README with a context manager so the file handle is closed
# promptly instead of leaking until garbage collection.
with open(os.path.join(this_dir, 'README.rst')) as readme:
    long_description = "\n" + readme.read()

setup(
    name='hmac_cli',
    version='0.0.0',
    description='Simple CLI for encrypting data with a private key, using HMAC',
    long_description=long_description,
    keywords='hmac',
    author='Marc Abramowitz',
    author_email='msabramo@gmail.com',
    url='https://github.com/msabramo/hmac_cli',
    py_modules=['hmac_cli'],
    zip_safe=False,
    install_requires=['click'],
    entry_points="""\
[console_scripts]
hmac = hmac_cli:cli
""",
    license='MIT',
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Testing',
        'Natural Language :: English',
        'Intended Audience :: Developers',
    ],
)
| 30.864865 | 80 | 0.619965 |
bd8e680a2701eff05dce387edc19d42c858d9d60 | 5,243 | py | Python | pyvideoai/metrics/AP.py | kiyoon/PyVideoAI | c4d3ba7a69723aeae7da48245989ae11cbdb1f8b | [
"MIT"
] | 22 | 2021-06-01T07:40:01.000Z | 2022-03-14T07:09:01.000Z | pyvideoai/metrics/AP.py | kiyoon/PyVideoAI | c4d3ba7a69723aeae7da48245989ae11cbdb1f8b | [
"MIT"
] | null | null | null | pyvideoai/metrics/AP.py | kiyoon/PyVideoAI | c4d3ba7a69723aeae7da48245989ae11cbdb1f8b | [
"MIT"
] | null | null | null | # @achalddave's AP implementations
from __future__ import division
import numpy as np
def compute_average_precision(groundtruth, predictions, false_negatives=0):
    """
    Computes average precision for a binary problem. This is based off of the
    PASCAL VOC implementation.

    Args:
        groundtruth (array-like): Binary vector indicating whether each sample
            is positive or negative.
        predictions (array-like): Contains scores for each sample.
        false_negatives (int or None): In some tasks, such as object
            detection, not all groundtruth will have a corresponding prediction
            (i.e., it is not retrieved at _any_ score threshold). For these
            cases, use false_negatives to indicate the number of groundtruth
            instances that were not retrieved.

    Returns:
        Average precision.

    Raises:
        ValueError: If predictions or groundtruth are not 1-dimensional.
    """
    predictions = np.asarray(predictions).squeeze()
    groundtruth = np.asarray(groundtruth, dtype=float).squeeze()

    # squeeze() turns single-element inputs into 0-d arrays; restore 1-d.
    if predictions.ndim == 0:
        predictions = predictions.reshape(-1)
    if groundtruth.ndim == 0:
        groundtruth = groundtruth.reshape(-1)

    if predictions.ndim != 1:
        raise ValueError(f'Predictions vector should be 1 dimensional, not '
                         f'{predictions.ndim}. (For multiple labels, use '
                         f'`compute_multiple_aps`.)')
    if groundtruth.ndim != 1:
        raise ValueError(f'Groundtruth vector should be 1 dimensional, not '
                         f'{groundtruth.ndim}. (For multiple labels, use '
                         f'`compute_multiple_aps`.)')

    # Rank samples by decreasing prediction score.
    sorted_indices = np.argsort(predictions)[::-1]
    predictions = predictions[sorted_indices]
    groundtruth = groundtruth[sorted_indices]
    # The false positives are all the negative groundtruth instances, since we
    # assume all instances were 'retrieved'. Ideally, these will be low scoring
    # and therefore in the end of the vector.
    false_positives = 1 - groundtruth

    tp = np.cumsum(groundtruth)      # tp[i] = # of positive examples up to i
    fp = np.cumsum(false_positives)  # fp[i] = # of false positives up to i
    num_positives = tp[-1] + false_negatives

    precisions = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    recalls = tp / num_positives

    # Append end points of the precision recall curve.
    precisions = np.concatenate(([0.], precisions))
    recalls = np.concatenate(([0.], recalls))

    # Find points where prediction score changes.
    prediction_changes = set(
        np.where(predictions[1:] != predictions[:-1])[0] + 1)

    num_examples = predictions.shape[0]

    # Recall and scores always "change" at the first and last prediction.
    c = prediction_changes | set([0, num_examples])
    # BUGFIX: `dtype=np.int` was deprecated in NumPy 1.20 and removed in
    # NumPy 1.24; the builtin `int` is the documented replacement.
    c = np.array(sorted(c), dtype=int)

    precisions = precisions[c[1:]]
    # Set precisions[i] = max(precisions[j] for j >= i)
    # This is because (for j > i), recall[j] >= recall[i], so we can always use
    # a lower threshold to get the higher recall and higher precision at j.
    precisions = np.maximum.accumulate(precisions[::-1])[::-1]

    ap = np.sum((recalls[c[1:]] - recalls[c[:-1]]) * precisions)

    return ap
def compute_multiple_aps(groundtruth, predictions, false_negatives=None):
    """Convenience function to compute APs for multiple labels.

    Args:
        groundtruth (np.array): Shape (num_samples, num_labels)
        predictions (np.array): Shape (num_samples, num_labels)
        false_negatives (list or None): In some tasks, such as object
            detection, not all groundtruth will have a corresponding prediction
            (i.e., it is not retrieved at _any_ score threshold). For these
            cases, use false_negatives to indicate the number of groundtruth
            instances which were not retrieved for each category.

    Returns:
        aps_per_label (np.array, shape (num_labels,)): Contains APs for each
            label. NOTE: If a label does not have positive samples in the
            groundtruth, the AP is set to -1.

    Raises:
        ValueError: If predictions or groundtruth are not 2-dimensional.
    """
    predictions = np.asarray(predictions)
    groundtruth = np.asarray(groundtruth)
    if predictions.ndim != 2:
        raise ValueError('Predictions should be 2-dimensional,'
                         ' but has shape %s' % (predictions.shape, ))
    if groundtruth.ndim != 2:
        # BUGFIX: this error message previously reported predictions.shape
        # instead of the offending groundtruth.shape.
        raise ValueError('Groundtruth should be 2-dimensional,'
                         ' but has shape %s' % (groundtruth.shape, ))

    num_labels = groundtruth.shape[1]
    aps = np.zeros(groundtruth.shape[1])
    if false_negatives is None:
        false_negatives = [0] * num_labels
    for i in range(num_labels):
        if not groundtruth[:, i].any():
            # No positive samples for this label: AP is undefined, mark -1.
            aps[i] = -1
        else:
            aps[i] = compute_average_precision(groundtruth[:, i],
                                               predictions[:, i],
                                               false_negatives[i])
    return aps
# CATER implementation
def mAP_from_AP(AP):
    """Mean AP over labels, ignoring undefined (-1) entries."""
    valid_aps = [ap for ap in AP if ap >= 0]
    return np.mean(valid_aps)
def mAP(labels, preds):
    """Mean average precision over all labels that have positive groundtruth."""
    return mAP_from_AP(compute_multiple_aps(labels, preds))
| 38.837037 | 79 | 0.639519 |
36421622c7d601b06a879015436c24ae647bd6a6 | 556 | py | Python | fdlb_backend/supervisor_neural_network/migrations/0002_auto_20180321_1917.py | flying-pi/forDLbook | bb29a906234191e2f76d59e1bc3c47427057f895 | [
"MIT"
] | 1 | 2018-01-27T16:15:15.000Z | 2018-01-27T16:15:15.000Z | fdlb_backend/supervisor_neural_network/migrations/0002_auto_20180321_1917.py | flying-pi/forDLbook | bb29a906234191e2f76d59e1bc3c47427057f895 | [
"MIT"
] | null | null | null | fdlb_backend/supervisor_neural_network/migrations/0002_auto_20180321_1917.py | flying-pi/forDLbook | bb29a906234191e2f76d59e1bc3c47427057f895 | [
"MIT"
] | null | null | null | # Generated by Django 2.0 on 2018-03-21 19:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adjust field defaults on WeightModel.

    Sets ``accuracy`` to default to -1 and ``name`` to default to 'unknown'.
    Auto-generated by Django (2018-03-21); applies on top of 0001_initial.
    """

    dependencies = [
        ('supervisor_neural_network', '0001_initial'),
    ]

    operations = [
        # -1 marks a model whose accuracy has not been evaluated yet
        # (presumably -- confirm against application code).
        migrations.AlterField(
            model_name='weightmodel',
            name='accuracy',
            field=models.FloatField(default=-1),
        ),
        migrations.AlterField(
            model_name='weightmodel',
            name='name',
            field=models.TextField(default='unknown'),
        ),
    ]
| 23.166667 | 54 | 0.582734 |
0ec73e36339e5abfb3515e107eb01deff2014942 | 7,304 | py | Python | src/szlc_to_pads.py | Jiangshan00001/easyeda_to_pads | ce83d8faffd1af40d28242045e1da62a27e8f089 | [
"Apache-2.0"
] | 5 | 2021-07-08T08:13:24.000Z | 2022-01-15T21:33:48.000Z | src/szlc_to_pads.py | Jiangshan00001/easyeda_to_pads | ce83d8faffd1af40d28242045e1da62a27e8f089 | [
"Apache-2.0"
] | 1 | 2021-07-23T06:14:37.000Z | 2021-10-06T04:01:39.000Z | src/szlc_to_pads.py | Jiangshan00001/easyeda_to_pads | ce83d8faffd1af40d28242045e1da62a27e8f089 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
__author__ = "songjiangshan"
__copyright__ = "Copyright (C) 2021 songjiangshan \n All Rights Reserved."
__license__ = ""
__version__ = "1.0"
import sys
import time
import codecs
import requests # 导入requests包
import json
from pads_ascii import PadsAscii
from easyeda import EasyEda
from szlc_read import get_comp_uuid_list, get_one_decl
from easy_to_pads import easy_to_pads
import datetime
from line_profiler import LineProfiler
def lc_get_comp_decl(comp_uuid):
    """Fetch one component's package detail and schematic head from LCEDA.

    Parameters
    ----------
    comp_uuid : str
        UUID of the component to fetch.

    Returns
    -------
    tuple
        (packageDetail dict, dataStr 'head' dict) as returned by the API.
    """
    # Improvement: dropped a previously-assigned but never-used search URL.
    url = ('https://lceda.cn/api/components/' + comp_uuid
           + '?version=6.4.20.2&uuid=' + comp_uuid + '&datastrid=')
    # Network I/O; no timeout/retry handling here (matches original code).
    strhtml = requests.get(url)
    result = json.loads(strhtml.text)['result']
    return result['packageDetail'], result['dataStr']['head']
def lc_search(user_id, keyword):
    """Search LCEDA components and return the first 'lcsc' hit.

    :param user_id: LCEDA user id passed to the search API.
    :param keyword: search keyword.
    :return: (uuid, title) of the first result, or (None, None) when the
        API reports a failure.
    """
    search_url = 'https://lceda.cn/api/components/search'
    payload = {
        'type': 3,
        'doctype[]': 2,
        'uid': user_id,
        'returnListStyle': 'classifyarr',
        'wd': keyword,
        'version': '6.4.20.2',
    }
    reply = json.loads(requests.post(search_url, payload).text)
    if reply['success'] is not True:
        print('some error:', reply['message'])
        return None, None
    first_hit = reply['result']['lists']['lcsc'][0]
    return first_hit['uuid'], first_hit['title']
def etopads(ddetail_json: dict, partdetail_json: dict, a: PadsAscii):
    """Convert one EasyEDA package (plus optional part info) into PADS.

    :param ddetail_json: EasyEDA package-detail JSON.
    :param partdetail_json: schematic 'head' JSON, or None for decal-only.
    :param a: PadsAscii accumulator the converted data is appended to.
    :return: (PadsAscii, [easyeda_seconds, pads_seconds]) timing pair.
    """
    t_start = time.time()
    easy = EasyEda()
    easy.parse_decl_json(ddetail_json)
    # Normalise the footprint geometry before conversion.
    easy.org_to_zero()
    easy.y_mirror()
    easy.hole_to_pad()
    easy.pin_renumber()
    easy.pin_resort()
    t_parsed = time.time()

    # Kept for parity with the original code: raises KeyError when the
    # package carries no declared name.
    package_decl_name = easy.pDetail['decl_name']

    if partdetail_json is None:
        part_name = None
        part_time = 0
    else:
        part_name = partdetail_json['c_para']['name']
        part_time = partdetail_json.get('utime')
        if part_time is None or part_time == '':
            # Missing update time: fall back to "now".
            part_time = time.time()
    part_time = int(part_time)

    a = easy_to_pads(easy, part_name, part_time, a)
    t_done = time.time()
    return a, [t_parsed - t_start, t_done - t_parsed]
def save_to_file(stri, file_name):
    """Serialize ``stri`` as pretty-printed JSON into ``file_name``.

    Parameters
    ----------
    stri : object
        Any JSON-serializable object.
    file_name : str
        Destination path; the file is created or truncated.
    """
    # Use a context manager so the handle is closed even if dumping fails
    # (the original leaked the handle on json.dumps() errors).
    with open(file_name, 'w+') as f:
        f.write(json.dumps(stri, indent=4))
def pull_one_comp():
    """Fetch a few known footprints from LCEDA and dump them to out.d/out.p."""
    a = PadsAscii()
    user_id = '0819f05c4eef4c71ace90d822a990e87'
    keywords = ['SMA-TH_SMA-KWE903', 'ANT-SMD_KH-IPEX-K501-29', 'MICRO-SIM-SMD_SIM-002-A6',
                'LCC-LGA-58_L17.7-W15.8-P1.1-TL-BC260Y-CN']
    for kw in keywords:
        puuid, ptitle = lc_search(user_id, kw)
        ddetail, partdetail = lc_get_comp_decl(puuid)
        # BUGFIX: etopads() returns (PadsAscii, timing_list); the old code
        # assigned the whole tuple to `a`, which broke a.set_format() below.
        # Unpack the tuple like every other caller in this module does.
        a, _times = etopads(ddetail, partdetail, a)

    # Write decal and part-type dumps; context managers close the files.
    with open('out.d', 'w+') as f:
        a.set_format('pcb_decals')
        f.write(a.dump())
    with open('out.p', 'w+') as f:
        a.set_format('part_types')
        f.write(a.dump())
from szlc_read import get_decl_list
def szlc_to_pads_decl_list(decl_title_list):
    """Export PADS decals for the given list of footprint (decal) titles.

    Writes the accumulated decals to 'out.d' and part types to 'out.p'
    in the current directory.

    :param decl_title_list: list of footprint names to convert.
    """
    a = PadsAscii()
    decl_list = get_decl_list(decl_title_list)
    # Improvement: removed unused t1/t2 timing locals from the loop.
    for cnt, decl in enumerate(decl_list):
        # decl[3] holds the EasyEDA package-detail JSON; no part info here.
        a, _times = etopads(decl[3], None, a)
        print('\r', cnt, end='')

    # Write decal and part-type dumps; context managers close the files.
    with open('out.d', 'w+') as f:
        a.set_format('pcb_decals')
        f.write(a.dump())
    with open('out.p', 'w+') as f:
        a.set_format('part_types')
        f.write(a.dump())
def szlc_to_pads_2k():
    """Convert components from the local DB to PADS, writing out.d/out.p.

    Most of the runtime is spent looking up decl JSON by uuid; an index
    would be needed to speed that up.
    """
    a = PadsAscii()
    comp_list = get_comp_uuid_list()
    t_read = t_easy = t_pads = 0
    cnt = 0
    for comp in comp_list:
        t0 = time.time()
        decl_data = get_one_decl(comp[2])
        t1 = time.time()
        a, stage_times = etopads(decl_data[1], comp[3]['head'], a)
        # Accumulate per-stage timings: DB read, EasyEDA parse, PADS dump.
        t_read += t1 - t0
        t_easy += stage_times[0]
        t_pads += stage_times[1]
        print('\r', cnt, t_read, t_easy, t_pads, end='')
        if cnt % 2000 == 0:
            print('\r', t1 - t0, stage_times, cnt, len(comp_list), end='')
        # NOTE(review): debug leftover -- stops after the second component;
        # kept as-is to preserve existing behavior.
        if cnt > 0:
            break
        cnt += 1

    with open('out.d', 'w+') as f:
        a.set_format('pcb_decals')
        f.write(a.dump())
    with open('out.p', 'w+') as f:
        a.set_format('part_types')
        f.write(a.dump())
from szlc_read import get_comp_tags
def comp_save_by_tags():
    """Export components grouped by their tag (category).

    For every selected tag, all matching components are converted to PADS
    and written to './lc_pads/<sanitized tag>.d' (decals) and
    './lc_pads/<sanitized tag>.p' (part types), encoded as GBK.
    """
    # Export each category's data to its own file.
    # (Original note: some value inside record 7637 is problematic.)
    comp_list = get_comp_tags()
    tag_list = list(set([i[4] for i in comp_list]))
    tag_list.sort()
    print(tag_list)
    # Earlier runs processed the tag list in slices; only the final slice
    # remains active.
    #tag_list = tag_list[0:10]
    #tag_list = tag_list[10:20]
    #tag_list = tag_list[20:30]
    #tag_list = tag_list[30:50]
    #tag_list = tag_list[50:70]
    #tag_list = tag_list[70:90]
    #tag_list = tag_list[90:110]
    #tag_list = tag_list[110:150]
    #tag_list = tag_list[150:200]
    #tag_list = tag_list[200:300]
    tag_list = tag_list[300:]
    # One PadsAscii accumulator per tag.
    tags_pads = {}
    cnt = 0
    for i in comp_list:
        curr_tag = i[4]
        comp_uuid = i[1]
        decl_uuid = i[2]
        sch = i[3]
        if curr_tag not in tag_list:
            continue
        print('\r', i[0], end='')
        if curr_tag not in tags_pads:
            tags_pads[curr_tag] = PadsAscii()
        decl_data = get_one_decl(decl_uuid)
        tags_pads[curr_tag], time_list = etopads(decl_data[1], sch['head'], tags_pads[curr_tag])
        cnt += 1
    print('to pads ready.')
    cnt = 0
    for tag_title in tags_pads:
        # Sanitize the tag into a GBK-encodable, filesystem-safe name.
        # NOTE(review): the '\\uffXX' patterns are literal backslash-u text,
        # not the unicode characters themselves -- confirm that is intended.
        tag_title_file_name = tag_title.encode('gbk', 'ignore').decode('gbk', 'ignore').replace('[', '').replace(']', '').replace('"', '').replace('/', '_').replace(
            ' ', '_').replace(',', '_').replace('\\uff0c', '_').replace('\\uff08', '').replace('\\uff09', '').replace('\\u4f5c', '').replace('\\u5e9f', '')
        f = open('./lc_pads/'+tag_title_file_name + '.d', 'wb+')
        tags_pads[tag_title].set_format('pcb_decals')
        f.write(tags_pads[tag_title].dump().encode('gbk', 'ignore'))
        f.close()
        f = open('./lc_pads/'+tag_title_file_name + '.p', 'wb+')
        tags_pads[tag_title].set_format('part_types')
        f.write(tags_pads[tag_title].dump().encode('gbk', 'ignore'))
        f.close()
        print('\r', cnt, end='')
        cnt += 1
if __name__ == '__main__':
    # Active entry point: export components grouped by tag, then exit.
    comp_save_by_tags()
    sys.exit(0)

    # Everything below is unreachable (dead after sys.exit above); kept as
    # alternative profiling / debugging entry points.
    lp = LineProfiler()
    lp_wrapper = lp(comp_save_by_tags)
    lp_wrapper()
    lp.print_stats()
    sys.exit(0)

    # szlc_to_pads_decl_list(['SMA-SMD_BWSMA-KE-P001', 'IND-SMD_L3.6-W2.9', 'SOT-363_L2.0-W1.3-P0.65-LS2.1-TL', 'SOT-23-3_L2.9-W1.3-P1.90-LS2.4-BR'])
    szlc_to_pads_decl_list(['CAP-SMD_L7.3-W4.3-R-RD'])
    sys.exit(0)

    lp = LineProfiler()
    lp.add_function(get_comp_uuid_list)  # add additional function to profile
    lp.add_function(get_one_decl)
    lp_wrapper = lp(szlc_to_pads_2k)
    lp_wrapper()
    lp.print_stats()
| 27.051852 | 165 | 0.601999 |
6ea419b85b75f1759926bbad0e00667fdd2f1ee7 | 91 | py | Python | micadoparser/__init__.py | micado-scale/micado-parser | 5feadb18ca3c83f6ffca261e28b98bdf08c80125 | [
"Apache-2.0"
] | null | null | null | micadoparser/__init__.py | micado-scale/micado-parser | 5feadb18ca3c83f6ffca261e28b98bdf08c80125 | [
"Apache-2.0"
] | 2 | 2022-01-25T12:45:02.000Z | 2022-01-25T12:54:04.000Z | micadoparser/__init__.py | micado-scale/micado-parser | 5feadb18ca3c83f6ffca261e28b98bdf08c80125 | [
"Apache-2.0"
] | null | null | null | from micadoparser.parser import set_template
from micadoparser.exceptions import MultiError | 45.5 | 46 | 0.901099 |
8baa1fb48322f2ea77493d73c5c6ecdf34ab330f | 144,193 | py | Python | datalad/support/gitrepo.py | soichih/datalad | 797dde3ab7497be170e2c4ea8824f33a4b38e5d8 | [
"MIT"
] | null | null | null | datalad/support/gitrepo.py | soichih/datalad | 797dde3ab7497be170e2c4ea8824f33a4b38e5d8 | [
"MIT"
] | null | null | null | datalad/support/gitrepo.py | soichih/datalad | 797dde3ab7497be170e2c4ea8824f33a4b38e5d8 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Internal low-level interface to Git repositories
"""
import logging
import os
import os.path as op
import posixpath
import re
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from functools import wraps
from itertools import chain
from os import linesep
from os.path import (
commonprefix,
curdir,
dirname,
exists,
isabs,
)
from os.path import join as opj
from os.path import (
pardir,
relpath,
sep,
)
import datalad.utils as ut
from datalad import ssh_manager
from datalad.cmd import (
BatchedCommand,
GitWitlessRunner,
NoCapture,
StdOutErrCapture,
WitlessProtocol,
)
from datalad.config import (
parse_gitconfig_dump,
write_config_section,
)
from datalad.consts import (
ILLEGAL_CHARS_WIN,
RESERVED_NAMES_WIN,
)
from datalad.core.local.repo import repo_from_path
from datalad.dataset.gitrepo import GitRepo as CoreGitRepo
from datalad.dataset.gitrepo import (
_get_dot_git,
path_based_str_repr,
)
from datalad.log import log_progress
from datalad.support.due import (
Doi,
due,
)
from datalad.utils import (
Path,
PurePosixPath,
ensure_dir,
ensure_list,
ensure_unicode,
generate_file_chunks,
getpwd,
is_interactive,
on_windows,
optional_args,
path_is_subpath,
posix_relpath,
)
from .exceptions import (
CapturedException,
CommandError,
FileNotInRepositoryError,
InvalidGitReferenceError,
InvalidGitRepositoryError,
NoSuchPathError,
)
# imports from same module:
from .external_versions import external_versions
from .network import (
RI,
PathRI,
is_ssh,
)
from .path import get_parent_paths
# shortcuts
_curdirsep = curdir + sep
_pardirsep = pardir + sep
lgr = logging.getLogger('datalad.gitrepo')
# outside the repo base classes only used in ConfigManager
def to_options(split_single_char_options=True, **kwargs):
    """Transform keyword arguments into a list of cmdline options

    Imported from GitPython.

    Original copyright:
    Copyright (C) 2008, 2009 Michael Trier and contributors

    Original license:
    BSD 3-Clause "New" or "Revised" License

    Parameters
    ----------
    split_single_char_options: bool
      If True, short options render as two items ('-n', 'val') instead of
      a single fused item ('-nval').
    kwargs:
      Option name/value pairs; underscores in names become dashes for long
      options. True renders a bare flag, False/None are dropped, and
      list/tuple values repeat the option once per element.

    Returns
    -------
    list
    """
    def _render_one(name, value):
        # Short (single-character) options.
        if len(name) == 1:
            if value is True:
                return ["-%s" % name]
            # NB: `in` tests by equality, so 0 (== False) is dropped here,
            # matching the historic GitPython behavior for short options.
            if value in (False, None):
                return []
            if split_single_char_options:
                return ["-%s" % name, "%s" % value]
            return ["-%s%s" % (name, value)]
        # Long options: underscores map to dashes; identity checks here, so
        # 0 is *not* dropped for long options (again matching the original).
        opt = name.replace('_', '-')
        if value is True:
            return ["--%s" % opt]
        if value is False or value is None:
            return []
        return ["--%s=%s" % (opt, value)]

    rendered = []
    # Deterministic output: options are emitted in sorted-key order.
    for key, val in sorted(kwargs.items(), key=lambda kv: kv[0]):
        values = val if isinstance(val, (list, tuple)) else [val]
        for item in values:
            rendered.extend(_render_one(key, item))
    return rendered
def _normalize_path(base_dir, path):
    """Check and normalize a path against a repository base directory.

    Paths are converted into relative paths with respect to `base_dir`.
    Input that starts with './' or '../' is interpreted relative to the
    process working directory; any other relative input is assumed to be
    relative to `base_dir` already and is returned untouched.

    Parameters
    ----------
    base_dir: str
        directory to serve as base to normalized, relative paths
    path: str
        path to be normalized

    Returns
    -------
    str:
      path, that is a relative path with respect to `base_dir`

    Raises
    ------
    FileNotInRepositoryError
      if the resolved path lies outside `base_dir`
    """
    if not path:
        return path

    candidate = Path(path)
    # absolute() in addition to resolve() to reliably obtain an absolute
    # path even for non-existing base_dirs on Windows.
    resolved_base = str(Path(base_dir).resolve().absolute())

    # Note: deliberately no normpath() here -- it could break paths that
    # contain symlinks, and relative input must not be resolved against a
    # potentially wrong CWD.
    if candidate.is_absolute():
        # The path itself may be a symlink (e.g. pointing into the annex),
        # so resolve only its parent directory, staying consistent with the
        # resolved base above.
        normalized = str(candidate.parent.resolve() / candidate.name)
    elif path.startswith(_curdirsep) or path.startswith(_pardirsep):
        # Explicitly CWD-relative input: anchor it at the current directory.
        normalized = str(Path(getpwd()).resolve() / candidate)
    else:
        # Plain relative path: treated as already relative to base_dir.
        return path

    if commonprefix([normalized, resolved_base]) != resolved_base:
        raise FileNotInRepositoryError(msg="Path outside repository: %s"
                                           % resolved_base,
                                       filename=normalized)

    return relpath(normalized, start=resolved_base)
@optional_args
def normalize_path(func):
    """Decorator to provide unified path conversion for a single file

    Unlike normalize_paths, intended to be used for functions dealing with a
    single filename at a time

    Note
    ----
    This is intended to be used within the repository classes and therefore
    returns a class method!

    The decorated function is expected to take a path at
    first positional argument (after 'self'). Additionally the class `func`
    is a member of, is expected to have an attribute 'path'.
    """

    @wraps(func)
    def _wrap_normalize_path(self, file_, *args, **kwargs):
        # translate the single path argument to be relative to the repo root
        return func(self, _normalize_path(self.path, file_), *args, **kwargs)

    return _wrap_normalize_path
@optional_args
def normalize_paths(func, match_return_type=True, map_filenames_back=False,
                    serialize=False):
    """Decorator to provide unified path conversions.

    Note
    ----
    This is intended to be used within the repository classes and therefore
    returns a class method!

    The decorated function is expected to take a path or a list of paths at
    first positional argument (after 'self'). Additionally the class `func`
    is a member of, is expected to have an attribute 'path'.

    Accepts either a list of paths or a single path in a str. Passes a list
    to decorated function either way, but would return based on the value of
    match_return_type and possibly input argument.

    If a call to the wrapped function includes normalize_path and it is False
    no normalization happens for that function call (used for calls to wrapped
    functions within wrapped functions, while possible CWD is within a
    repository)

    Parameters
    ----------
    match_return_type : bool, optional
      If True, and a single string was passed in, it would return the first
      element of the output (after verifying that it is a list of length 1).
      It makes easier to work with single files input.
    map_filenames_back : bool, optional
      If True and returned value is a dictionary, it assumes to carry entries
      one per file, and then filenames are mapped back to as provided from the
      normalized (from the root of the repo) paths
    serialize : bool, optional
      Loop through files giving only a single one to the function one at a time.
      This allows to simplify implementation and interface to annex commands
      which do not take multiple args in the same call (e.g. checkpresentkey)
    """

    @wraps(func)
    def _wrap_normalize_paths(self, files, *args, **kwargs):

        # a caller may explicitly opt out of normalization for this call
        normalize = _normalize_path if kwargs.pop('normalize_paths', True) \
            else lambda rpath, filepath: filepath

        if files:
            if isinstance(files, str) or not files:
                # a single path; remember this for return-type matching below
                files_new = [normalize(self.path, files)]
                single_file = True
            elif isinstance(files, list):
                files_new = [normalize(self.path, path) for path in files]
                single_file = False
            else:
                raise ValueError("_files_decorator: Don't know how to handle "
                                 "instance of %s." % type(files))
        else:
            # no files given at all
            single_file = None
            files_new = []

        if map_filenames_back:
            def remap_filenames(out):
                """Helper to map files back to non-normalized paths"""
                if isinstance(out, dict):
                    assert(len(out) == len(files_new))
                    files_ = [files] if single_file else files
                    mapped = out.__class__()
                    for fin, fout in zip(files_, files_new):
                        mapped[fin] = out[fout]
                    return mapped
                else:
                    return out
        else:
            remap_filenames = lambda x: x

        if serialize:  # and not single_file:
            # call the wrapped function once per file
            result = [
                func(self, f, *args, **kwargs)
                for f in files_new
            ]
        else:
            result = func(self, files_new, *args, **kwargs)

        if single_file is None:
            # no files were provided, nothing we can do really
            return result
        elif (result is None) or not match_return_type or not single_file:
            # If function doesn't return anything or no denormalization
            # was requested or it was not a single file
            return remap_filenames(result)
        elif single_file:
            if len(result) != 1:
                # Magic doesn't apply
                return remap_filenames(result)
            elif isinstance(result, (list, tuple)):
                return result[0]
            elif isinstance(result, dict) and tuple(result)[0] == files_new[0]:
                # assume that returned dictionary has files as keys.
                return tuple(result.values())[0]
            else:
                # no magic can apply
                return remap_filenames(result)
        else:
            return RuntimeError("should have not got here... check logic")

    return _wrap_normalize_paths
if "2.24.0" <= external_versions["cmd:git"] < "2.25.0":
    # An unintentional change in Git 2.24.0 led to `ls-files -o` traversing
    # into untracked submodules when multiple pathspecs are given, returning
    # repositories that are deeper than the first level. This helper filters
    # these deeper levels out so that save_() doesn't fail trying to add them.
    #
    # This regression fixed with upstream's 072a231016 (2019-12-10).
    def _prune_deeper_repos(repos):
        # keep only repos that are not nested under an earlier (shorter)
        # path; sorting guarantees parents come before their children
        firstlevel_repos = []
        prev = None
        for repo in sorted(repos):
            if not (prev and str(repo).startswith(prev)):
                prev = str(repo)
                firstlevel_repos.append(repo)
        return firstlevel_repos
else:
    def _prune_deeper_repos(repos):
        # nothing to prune on unaffected Git versions
        return repos
class GitProgress(WitlessProtocol):
    """Reduced variant of GitPython's RemoteProgress class

    Original copyright:
    Copyright (C) 2008, 2009 Michael Trier and contributors

    Original license:
    BSD 3-Clause "New" or "Revised" License
    """
    # inform super-class to capture stderr
    proc_err = True

    # one bit flag per operation Git may report progress for;
    # BEGIN/END additionally mark the first/last report of an operation
    _num_op_codes = 10
    BEGIN, END, COUNTING, COMPRESSING, WRITING, RECEIVING, RESOLVING, FINDING_SOURCES, CHECKING_OUT, ENUMERATING = \
        [1 << x for x in range(_num_op_codes)]
    STAGE_MASK = BEGIN | END
    OP_MASK = ~STAGE_MASK

    DONE_TOKEN = 'done.'
    TOKEN_SEPARATOR = ', '

    # maps an op code to the (label, unit) used for its progress bar
    _known_ops = {
        COUNTING: ("Counting", "Objects"),
        ENUMERATING: ("Enumerating", "Objects"),
        COMPRESSING: ("Compressing", "Objects"),
        WRITING: ("Writing", "Objects"),
        RECEIVING: ("Receiving", "Objects"),
        RESOLVING: ("Resolving", "Deltas"),
        FINDING_SOURCES: ("Finding", "Sources"),
        CHECKING_OUT: ("Check out", "Things"),
    }

    # NOTE(review): __init__ also assigns self._unprocessed, which is not
    # listed here; this only matters if no base class provides a __dict__
    # -- confirm against WitlessProtocol.
    __slots__ = ('_seen_ops', '_pbars', '_encoding')

    # 'remote: Counting objects: 4' style (absolute count) ...
    re_op_absolute = re.compile(r"(remote: )?([\w\s]+):\s+()(\d+)()(.*)")
    # ... and 'Compressing objects: 50% (1/2)' style (relative progress)
    re_op_relative = re.compile(r"(remote: )?([\w\s]+):\s+(\d+)% \((\d+)/(\d+)\)(.*)")

    def __init__(self, *args):
        super().__init__(*args)
        # buffer holding an incomplete trailing line from the last chunk
        self._unprocessed = None

    def connection_made(self, transport):
        super().connection_made(transport)
        # op codes seen so far; the first report of each opens a progress bar
        self._seen_ops = []
        # ids of progress bars currently open
        self._pbars = set()

    def process_exited(self):
        # take down any progress bars that were not closed orderly
        for pbar_id in self._pbars:
            log_progress(
                lgr.info,
                pbar_id,
                'Finished',
            )
        super().process_exited()

    def pipe_data_received(self, fd, byts):
        # progress reports only come from stderr
        if fd != 2:
            # let the base class decide what to do with it
            super().pipe_data_received(fd, byts)
            return
        for line in byts.splitlines(keepends=True):
            # put any unprocessed content back in front
            line = self._unprocessed + line if self._unprocessed else line
            self._unprocessed = None
            if not self._parse_progress_line(line):
                # anything that doesn't look like a progress report
                # is retained and returned
                # in case of partial progress lines, this can lead to
                # leakage of progress info into the output, but
                # it is better to enable better (maybe more expensive)
                # subsequent filtering than hiding lines with
                # unknown, potentially important info
                lgr.debug('Non-progress stderr: %s', line)
                if line.endswith((b'\r', b'\n')):
                    # complete non-progress line, pass on
                    super().pipe_data_received(fd, line)
                else:
                    # an incomplete line, maybe the next batch completes
                    # it to become a recognizable progress report
                    self._unprocessed = line

    def _parse_progress_line(self, line):
        """Process a single line

        Parameters
        ----------
        line : bytes

        Returns
        -------
        bool
          Flag whether the line was recognized as a Git progress report.
        """
        # handle
        # Counting objects: 4, done.
        # Compressing objects: 50% (1/2)
        # Compressing objects: 100% (2/2)
        # Compressing objects: 100% (2/2), done.
        line = line.decode(self.encoding) if isinstance(line, bytes) else line
        if line.startswith(('warning:', 'error:', 'fatal:')):
            return False

        # find escape characters and cut them away - regex will not work with
        # them as they are non-ascii. As git might expect a tty, it will send them
        last_valid_index = None
        for i, c in enumerate(reversed(line)):
            if ord(c) < 32:
                # its a slice index
                last_valid_index = -i - 1
            # END character was non-ascii
        # END for each character in line
        if last_valid_index is not None:
            line = line[:last_valid_index]
        # END cut away invalid part
        line = line.rstrip()

        cur_count, max_count = None, None
        match = self.re_op_relative.match(line)
        if match is None:
            match = self.re_op_absolute.match(line)

        if not match:
            return False
        # END could not get match

        op_code = 0
        _remote, op_name, _percent, cur_count, max_count, message = match.groups()

        # get operation id
        if op_name == "Counting objects":
            op_code |= self.COUNTING
        elif op_name == "Compressing objects":
            op_code |= self.COMPRESSING
        elif op_name == "Writing objects":
            op_code |= self.WRITING
        elif op_name == 'Receiving objects':
            op_code |= self.RECEIVING
        elif op_name == 'Resolving deltas':
            op_code |= self.RESOLVING
        elif op_name == 'Finding sources':
            op_code |= self.FINDING_SOURCES
        elif op_name == 'Checking out files':
            op_code |= self.CHECKING_OUT
        elif op_name == 'Enumerating objects':
            op_code |= self.ENUMERATING
        else:
            # Note: On windows it can happen that partial lines are sent
            # Hence we get something like "CompreReceiving objects", which is
            # a blend of "Compressing objects" and "Receiving objects".
            # This can't really be prevented.
            lgr.debug(
                'Output line matched a progress report of an unknown type: %s',
                line)
            # TODO investigate if there is any chance that we might swallow
            # important info -- until them do not flag this line
            # as progress
            return False
        # END handle op code

        # one progress bar per (instance, operation) pair
        pbar_id = 'gitprogress-{}-{}'.format(id(self), op_code)

        op_props = self._known_ops[op_code]

        # figure out stage
        if op_code not in self._seen_ops:
            self._seen_ops.append(op_code)
            op_code |= self.BEGIN
            log_progress(
                lgr.info,
                pbar_id,
                'Start {} {}'.format(
                    op_props[0].lower(),
                    op_props[1].lower(),
                ),
                label=op_props[0],
                unit=' {}'.format(op_props[1]),
                total=float(max_count) if max_count else None,
            )
            self._pbars.add(pbar_id)
        # END begin opcode

        if message is None:
            message = ''
        # END message handling

        done_progress = False
        message = message.strip()
        if message.endswith(self.DONE_TOKEN):
            op_code |= self.END
            message = message[:-len(self.DONE_TOKEN)]
            done_progress = True
        # END end message handling
        message = message.strip(self.TOKEN_SEPARATOR)

        if cur_count and max_count:
            log_progress(
                lgr.info,
                pbar_id,
                line,
                update=float(cur_count),
                noninteractive_level=logging.DEBUG,
            )

        if done_progress:
            log_progress(
                lgr.info,
                pbar_id,
                'Finished {} {}'.format(
                    op_props[0].lower(),
                    op_props[1].lower(),
                ),
                noninteractive_level=logging.DEBUG,
            )
            self._pbars.discard(pbar_id)
        return True
class StdOutCaptureWithGitProgress(GitProgress):
    """GitProgress variant that additionally captures the process's stdout."""
    # inform the protocol base class to capture stdout as well
    proc_out = True
class FetchInfo(dict):
    """dict carrying the outcome of a fetch operation for a single head

    Reduced variant of GitPython's RemoteProgress class

    Original copyright:
        Copyright (C) 2008, 2009 Michael Trier and contributors
    Original license:
        BSD 3-Clause "New" or "Revised" License
    """
    NEW_TAG, NEW_HEAD, HEAD_UPTODATE, TAG_UPDATE, REJECTED, FORCED_UPDATE, \
        FAST_FORWARD, ERROR = [1 << x for x in range(8)]
    _re_fetch_result = re.compile(r'^\s*(.) (\[?[\w\s\.$@]+\]?)\s+(.+) [-> ]+ ([^\s]+)( \(.*\)?$)?')
    # leading status character of a fetch line -> flag value
    _flag_map = {
        '!': ERROR,
        '+': FORCED_UPDATE,
        '*': 0,
        '=': HEAD_UPTODATE,
        ' ': FAST_FORWARD,
        '-': TAG_UPDATE,
    }
    # flag value -> human-readable operation label
    _operation_map = {
        NEW_TAG: 'new-tag',
        NEW_HEAD: 'new-branch',
        HEAD_UPTODATE: 'uptodate',
        TAG_UPDATE: 'tag-update',
        REJECTED: 'rejected',
        FORCED_UPDATE: 'forced-update',
        FAST_FORWARD: 'fast-forward',
        ERROR: 'error',
    }
    @classmethod
    def _from_line(cls, line):
        """Parse information from the given line as returned by git-fetch -v
        and return a new FetchInfo object representing this information.
        """
        match = cls._re_fetch_result.match(line)
        if match is None:
            raise ValueError("Failed to parse line: %r" % line)
        ctrl, operation, local_ref, remote_ref, note = match.groups()
        # translate the leading status character into a flag value
        try:
            flags = cls._flag_map[ctrl]
        except KeyError:
            raise ValueError(
                "Control character %r unknown as parsed from line %r"
                % (ctrl, line))
        # scan the operation field for additional qualifiers
        for token, flag in (('rejected', cls.REJECTED),
                            ('new tag', cls.NEW_TAG),
                            ('tag update', cls.TAG_UPDATE),
                            ('new branch', cls.NEW_HEAD)):
            if token in operation:
                flags |= flag
        # a commit range ("old..new"/"old...new") in the operation field
        # carries the pre-fetch commit; makes no sense for symbolic refs,
        # but is parsed anyway
        old_commit = None
        if '..' in operation:
            sep = '..' if ctrl == ' ' else '...'
            old_commit = operation.split(sep)[0]
        return cls(
            ref=remote_ref.strip(),
            local_ref=local_ref.strip(),
            # convert the flag int into a list of operation labels
            operations=[label
                        for flag, label in cls._operation_map.items()
                        if flags & flag],
            note=note,
            old_commit=old_commit,
        )
class PushInfo(dict):
    """dict that carries results of a push operation of a single head

    Reduced variant of GitPython's RemoteProgress class

    Original copyright:
        Copyright (C) 2008, 2009 Michael Trier and contributors
    Original license:
        BSD 3-Clause "New" or "Revised" License
    """
    NEW_TAG, NEW_HEAD, NO_MATCH, REJECTED, REMOTE_REJECTED, REMOTE_FAILURE, DELETED, \
        FORCED_UPDATE, FAST_FORWARD, UP_TO_DATE, ERROR = [1 << x for x in range(11)]
    # leading control character of a push status line -> flag value
    _flag_map = {'X': NO_MATCH,
                 '-': DELETED,
                 '*': 0,
                 '+': FORCED_UPDATE,
                 ' ': FAST_FORWARD,
                 '=': UP_TO_DATE,
                 '!': ERROR}
    # flag value -> human-readable operation label
    _operation_map = {
        NEW_TAG: 'new-tag',
        NEW_HEAD: 'new-branch',
        NO_MATCH: 'no-match',
        REJECTED: 'rejected',
        REMOTE_REJECTED: 'remote-rejected',
        REMOTE_FAILURE: 'remote-failure',
        DELETED: 'deleted',
        FORCED_UPDATE: 'forced-update',
        FAST_FORWARD: 'fast-forward',
        UP_TO_DATE: 'uptodate',
        ERROR: 'error',
    }
    @classmethod
    def _from_line(cls, line):
        """Create a new PushInfo instance as parsed from line which is expected to be like
        refs/heads/master:refs/heads/master 05d2687..1d0568e as bytes"""
        # a push status line is "<flag>\t<from>:<to>\t<summary>".
        # Use maxsplit=2 (the original used 3), so a summary that itself
        # contains a TAB does not make the 3-name unpacking raise ValueError.
        control_character, from_to, summary = line.split('\t', 2)
        flags = 0
        # control character handling
        try:
            flags |= cls._flag_map[control_character]
        except KeyError:
            raise ValueError("Control character %r unknown as parsed from line %r" % (control_character, line))
        # END handle control character
        # from_to handling
        from_ref_string, to_ref_string = from_to.split(':')
        # commit handling, could be message or commit info
        old_commit = None
        if summary.startswith('['):
            if "[rejected]" in summary:
                flags |= cls.REJECTED
            elif "[remote rejected]" in summary:
                flags |= cls.REMOTE_REJECTED
            elif "[remote failure]" in summary:
                flags |= cls.REMOTE_FAILURE
            elif "[no match]" in summary:
                flags |= cls.ERROR
            elif "[new tag]" in summary:
                flags |= cls.NEW_TAG
            elif "[new branch]" in summary:
                flags |= cls.NEW_HEAD
            # up-to-date encoded in control character
        else:
            # fast-forward or forced update - was encoded in control character,
            # but we parse the old and new commit
            split_token = "..."
            if control_character == " ":
                split_token = ".."
            old_sha, _new_sha = summary.split(' ')[0].split(split_token)
            # have to use constructor here as the sha usually is abbreviated
            old_commit = old_sha
        # END message handling
        return cls(
            from_ref=from_ref_string.strip(),
            to_ref=to_ref_string.strip(),
            # convert flag int into a list of operation labels
            operations=[
                cls._operation_map[o]
                for o in cls._operation_map.keys()
                if flags & o
            ],
            note=summary.strip(),
            old_commit=old_commit,
        )
@path_based_str_repr
class GitRepo(CoreGitRepo):
    """Representation of a git repository
    """
    # We must check git config to have name and email set, but
    # should do it once
    _config_checked = False
    # minimum version of the system git that is required; enforced on first
    # instantiation via _check_git_version()
    GIT_MIN_VERSION = "2.19.1"
    # class-wide cache of the detected system git version; populated by
    # _check_git_version()
    git_version = None
    @classmethod
    def _check_git_version(cls):
        """Verify the system git meets GIT_MIN_VERSION and cache its version.

        NOTE(review): presumably external_versions.check() raises when the
        requirement is not met -- confirm against datalad.support.external_versions.
        """
        external_versions.check("cmd:git", min_version=cls.GIT_MIN_VERSION)
        cls.git_version = external_versions['cmd:git']
    # This is the least common denominator to claim that a user
    # used DataLad.
    # Citing JOSS publication https://joss.theoj.org/papers/10.21105/joss.03262
    @due.dcite(Doi("10.21105/joss.03262"),
               # override path since there is no need ATM for such details
               path="datalad",
               description="DataLad - Data management and distribution platform")
    def __init__(self, path, runner=None, create=True,
                 git_opts=None, repo=None, fake_dates=False,
                 create_sanity_checks=True,
                 **kwargs):
        """Creates representation of git repository at `path`.

        Can also be used to create a git repository at `path`.

        Parameters
        ----------
        path: str
          path to the git repository; In case it's not an absolute path,
          it's relative to PWD
        runner: optional
          ignored (not referenced by this implementation); retained for
          backward-compatible call signatures
        create: bool, optional
          if true, creates a git repository at `path` if there is none. Also
          creates `path`, if it doesn't exist.
          If set to false, an exception is raised in case `path` doesn't exist
          or doesn't contain a git repository.
        git_opts: dict, optional
          options for the git-init call (merged with `kwargs`); the special
          key '_from_cmdline_' may carry a list of pre-rendered cmdline options
        repo: git.Repo, optional
          This argument is ignored.
        fake_dates: bool, optional
          if true, the repository is configured to use fake (deterministic)
          dates (see `configure_fake_dates`)
        create_sanity_checks: bool, optional
          Whether to perform sanity checks during initialization (when
          `create=True` and target path is not a valid repo already), such as
          that new repository is not created in the directory where git already
          tracks some files.
        kwargs:
          keyword arguments serving as additional options to the git-init
          command. Therefore, it makes sense only if called with `create`.
          Generally, this way of passing options to the git executable is
          (or will be) used a lot in this class. It's a transformation of
          python-style keyword arguments (or a `dict`) to command line arguments,
          provided by GitPython.
          A single character keyword will be prefixed by '-', multiple characters
          by '--'. An underscore in the keyword becomes a dash. The value of the
          keyword argument is used as the value for the corresponding command
          line argument. Assigning a boolean creates a flag.
          Examples:
          no_commit=True => --no-commit
          C='/my/path' => -C /my/path
        """
        # this will set up .pathobj and .dot_git
        super().__init__(path)
        if self.git_version is None:
            self._check_git_version()
        # BEGIN Repo validity test
        # We want to fail early for tests, that would be performed a lot. In
        # particular this is about GitRepo.is_valid_repo. We would use the
        # latter to decide whether or not to call GitRepo() only for __init__ to
        # then test the same things again. If we fail early we can save the
        # additional test from outer scope.
        self.path = path
        # Note, that the following three path objects are used often and
        # therefore are stored for performance. Path object creation comes with
        # a cost. Most notably, this is used for validity checking of the
        # repository.
        _valid_repo = self.is_valid_git()
        do_create = False
        if create and not _valid_repo:
            if repo is not None:
                # `repo` passed with `create`, which doesn't make sense
                raise TypeError("argument 'repo' must not be used with 'create'")
            do_create = True
        else:
            # Note: We used to call gitpy.Repo(path) here, which potentially
            # raised NoSuchPathError or InvalidGitRepositoryError. This is
            # used by callers of GitRepo.__init__() to detect whether we have a
            # valid repo at `path`. Now, with switching to lazy loading property
            # `repo`, we detect those cases without instantiating a
            # gitpy.Repo().
            if not exists(path):
                raise NoSuchPathError(path)
            if not _valid_repo:
                raise InvalidGitRepositoryError(path)
        # END Repo validity test
        # So that we "share" control paths with git/git-annex
        if ssh_manager:
            ssh_manager.ensure_initialized()
        # note: we may also want to distinguish between a path to the worktree
        # and the actual repository
        if git_opts is None:
            git_opts = {}
        if kwargs:
            # plain keyword arguments take precedence over `git_opts` entries
            git_opts.update(kwargs)
        self._cfg = None
        if do_create:  # we figured it out earlier
            # '_from_cmdline_' carries pre-rendered options; everything else
            # is converted via to_options()
            from_cmdline = git_opts.pop('_from_cmdline_', [])
            self.init(
                sanity_checks=create_sanity_checks,
                init_options=from_cmdline + to_options(**git_opts),
            )
        # with DryRunProtocol path might still not exist
        if exists(self.path):
            self.inode = os.stat(self.path).st_ino
        else:
            self.inode = None
        if fake_dates:
            self.configure_fake_dates()
@property
def bare(self):
"""Returns a bool indicating whether the repository is bare
Importantly, this is not reporting the configuration value
of 'core.bare', in order to be usable at a stage where a
Repo instance is not yet equipped with a ConfigManager.
Instead, it is testing whether the repository path and its
"dot_git" are identical. The value of 'core.bare' can be query
from the ConfigManager in a fully initialized instance.
"""
return self.pathobj == self.dot_git
    @classmethod
    def clone(cls, url, path, *args, clone_options=None, **kwargs):
        """Clone url into path

        Provides workarounds for known issues (e.g.
        https://github.com/datalad/datalad/issues/785)

        Parameters
        ----------
        url : str
        path : str
        clone_options : dict or list
          Arbitrary options that will be passed on to the underlying call to
          `git-clone`. This may be a list of plain options or key-value pairs
          that will be converted to a list of plain options with `to_options`.
        expect_fail : bool
          Whether expect that command might fail, so error should be logged then
          at DEBUG level instead of ERROR
        kwargs:
          Passed to the Repo class constructor.

        Returns
        -------
        GitRepo
          Instance representing the repository at `path` after cloning.
        """
        if 'repo' in kwargs:
            raise TypeError("argument 'repo' conflicts with cloning")
            # TODO: what about 'create'?
        expect_fail = kwargs.pop('expect_fail', False)
        # fail early on non-empty target:
        from os import listdir
        if exists(path) and listdir(path):
            raise ValueError(
                "destination path '%s' already exists and is not an "
                "empty directory." % path)
        else:
            # protect against cloning into existing and obviously dangling
            # instance for that location
            try:
                del cls._unique_instances[path]
            except KeyError:
                # didn't exist - all fine
                pass
        # Massage URL
        url_ri = RI(url) if not isinstance(url, RI) else url
        if not on_windows:
            # if we are on windows, the local path of a URL
            # would not end up being a proper local path and cloning
            # would fail. Don't try to be smart and just pass the
            # URL along unmodified
            # try to get a local path from `url`:
            try:
                url = url_ri.localpath
                url_ri = RI(url)
            except ValueError:
                pass
        if is_ssh(url_ri):
            # warm up the SSH connection cache before the actual clone
            ssh_manager.get_connection(url).open()
        else:
            if isinstance(url_ri, PathRI):
                # expand user, because execution not going through a shell
                # doesn't work well otherwise
                new_url = os.path.expanduser(url)
                if url != new_url:
                    lgr.info("Expanded source path to %s from %s", new_url, url)
                    url = new_url
        cmd = cls._git_cmd_prefix + ['clone', '--progress']
        if clone_options:
            if isinstance(clone_options, Mapping):
                clone_options = to_options(**clone_options)
            cmd.extend(clone_options)
        cmd.extend([url, path])
        fix_annex = None
        ntries = 5  # 3 is not enough for robust workaround
        for trial in range(ntries):
            try:
                lgr.debug("Git clone from {0} to {1}".format(url, path))
                res = GitWitlessRunner().run(cmd, protocol=GitProgress)
                # fish out non-critical warnings by git-clone
                # (empty repo clone, etc.), all other content is logged
                # by the progress helper to 'debug'
                for errline in res['stderr'].splitlines():
                    if errline.startswith('warning:'):
                        lgr.warning(errline[8:].strip())
                lgr.debug("Git clone completed")
                break
            except CommandError as e:
                # log here but let caller decide what to do
                ce = CapturedException(e)
                str_e = str(e)
                # see https://github.com/datalad/datalad/issues/785
                if re.search("Request for .*aborted.*Unable to find", str_e,
                             re.DOTALL) \
                        and trial < ntries - 1:
                    lgr.info(
                        "Hit a known issue with Git (see GH#785). Trial #%d, "
                        "retrying",
                        trial)
                    continue
                #(lgr.debug if expect_fail else lgr.error)(e_str)
                if "Clone succeeded, but checkout failed." in str_e:
                    # remember the failure for a git-annex fsck attempt below
                    fix_annex = ce
                    break
                raise
        # get ourselves a repository instance
        gr = cls(path, *args, **kwargs)
        if fix_annex:
            # cheap check whether we deal with an AnnexRepo - we can't check the class of `gr` itself, since we then
            # would need to import our own subclass
            if hasattr(gr, 'is_valid_annex'):
                lgr.warning("Experienced issues while cloning. "
                            "Trying to fix it, using git-annex-fsck.")
                if not gr.is_initialized():
                    gr._init()
                gr.fsck()
            else:
                lgr.warning("Experienced issues while cloning: %s", fix_annex)
        return gr
# Note: __del__ shouldn't be needed anymore as we switched to
# `weakref.finalize`.
# https://docs.python.org/3/library/weakref.html#comparing-finalizers-with-del-methods
#
# Keeping both methods and this comment around as a reminder to not
# use __del__, if we figure there's a need for cleanup in the future.
#
# def __del__(self):
# # unbind possibly bound ConfigManager, to prevent all kinds of weird
# # stalls etc
# self._cfg = None
    def is_valid_git(self):
        """Returns whether the underlying repository appears to be still valid

        Note, that this almost identical to the classmethod is_valid_repo().
        However, if we are testing an existing instance, we can save Path object
        creations. Since this testing is done a lot, this is relevant. Creation
        of the Path objects in is_valid_repo() takes nearly half the time of the
        entire function.

        Also note, that this method is bound to an instance but still
        class-dependent, meaning that a subclass cannot simply overwrite it.
        This is particularly important for the call from within __init__(),
        which in turn is called by the subclasses' __init__. Using an overwrite
        would lead to the wrong thing being called.

        Returns
        -------
        bool
        """
        # delegate to the instance-level validity check of the core repo class
        return self.is_valid()
    @classmethod
    def is_valid_repo(cls, path):
        """Returns if a given path points to a git repository

        Parameters
        ----------
        path : str or Path
          path to test

        Returns
        -------
        bool
        """
        return cls.is_valid(path)
@staticmethod
def get_git_dir(repo):
"""figure out a repo's gitdir
'.git' might be a directory, a symlink or a file
Note
----
This method is likely to get deprecated, please use GitRepo.dot_git instead!
That one's not static, but it's cheaper and you should avoid
not having an instance of a repo you're working on anyway.
Note, that the property in opposition to this method returns an absolute path.
Parameters
----------
repo: path or Repo instance
currently expected to be the repos base dir
Returns
-------
str
relative path to the repo's git dir; So, default would be ".git"
"""
if isinstance(repo, GitRepo):
return str(repo.dot_git)
pathobj = Path(repo)
dot_git = _get_dot_git(pathobj, ok_missing=False)
try:
dot_git = dot_git.relative_to(pathobj)
except ValueError:
# is not a subpath, return as is
lgr.debug("Path %r is not subpath of %r", dot_git, pathobj)
return str(dot_git)
    @property
    def config(self):
        """ConfigManager of this repository (alias of `.cfg`).

        Kept as a proxy for backward-compatibility with callers that use
        the historical `.config` attribute name.
        """
        # just proxy the core repo APIs property for backward-compatibility
        return self.cfg
def is_with_annex(self):
"""Report if GitRepo (assumed) has (remotes with) a git-annex branch
"""
return any(
b['refname:strip=2'] == 'git-annex' or b['refname:strip=2'].endswith('/git-annex')
for b in self.for_each_ref_(fields='refname:strip=2', pattern=['refs/heads', 'refs/remotes'])
)
    @classmethod
    def get_toppath(cls, path, follow_up=True, git_options=None):
        """Return top-level of a repository given the path.

        Parameters
        -----------
        path : str
          path to start the query from
        follow_up : bool
          If path has symlinks -- they get resolved by git. If follow_up is
          True, we will follow original path up until we hit the same resolved
          path. If no such path found, resolved one would be returned.
        git_options: list of str
          options to be passed to the git rev-parse call

        Return None if no parent directory contains a git repository.
        """
        cmd = ['git']
        if git_options:
            cmd.extend(git_options)
        cmd += ["rev-parse", "--show-toplevel"]
        try:
            out = GitWitlessRunner(cwd=path).run(
                cmd, protocol=StdOutErrCapture)
            toppath = out['stdout'].rstrip('\n\r')
        except CommandError:
            # rev-parse failed -> not (in) a git repository
            return None
        except OSError:
            # e.g. `path` does not exist; retry from its parent directory
            # NOTE(review): if this recursion yields None, str(Path(None))
            # below raises TypeError -- confirm whether that is intended
            toppath = GitRepo.get_toppath(dirname(path), follow_up=follow_up,
                                          git_options=git_options)
        # normalize the report, because, e.g. on windows it can come out
        # with improper directory separators (C:/Users/datalad)
        toppath = str(Path(toppath))
        if follow_up:
            # walk the original (unresolved) path upwards until its resolved
            # form matches what git reported, to report symlinked locations
            # as given by the caller
            path_ = path
            path_prev = ""
            while path_ and path_ != path_prev:  # on top /.. = /
                if str(Path(path_).resolve()) == toppath:
                    toppath = path_
                    break
                path_prev = path_
                path_ = dirname(path_)
        return toppath
@normalize_paths
def add(self, files, git=True, git_options=None, update=False):
"""Adds file(s) to the repository.
Parameters
----------
files: list
list of paths to add
git: bool
somewhat ugly construction to be compatible with AnnexRepo.add();
has to be always true.
update: bool
--update option for git-add. From git's manpage:
Update the index just where it already has an entry matching
<pathspec>. This removes as well as modifies index entries to match
the working tree, but adds no new files.
If no <pathspec> is given when --update option is used, all tracked
files in the entire working tree are updated (old versions of Git
used to limit the update to the current directory and its
subdirectories).
Returns
-------
list
Of status dicts.
"""
# under all circumstances call this class' add_ (otherwise
# AnnexRepo.add would go into a loop
return list(GitRepo.add_(self, files, git=git, git_options=git_options,
update=update))
    def add_(self, files, git=True, git_options=None, update=False):
        """Like `add`, but returns a generator

        Parameters
        ----------
        files: list
          list of paths to add
        git: bool
          must be True; a warning is issued and the value forced otherwise,
          for compatibility with AnnexRepo.add()
        git_options: list of str, optional
          options for the git-add subcommand (not the git executable)
        update: bool
          --update option for git-add (see `add`)

        Yields
        ------
        dict
          one status dict per reported file
        """
        # TODO: git_options is used as options for the git-add here,
        # instead of options to the git executable => rename for consistency
        if not git:
            lgr.warning(
                'GitRepo.add() called with git=%s, this should not happen',
                git)
            git = True
        # there is no other way then to collect all files into a list
        # at this point, because we need to pass them at once to a single
        # `git add` call
        files = [_normalize_path(self.path, f) for f in ensure_list(files) if f]
        if not (files or git_options or update):
            # wondering why just a warning? in cmdline this is also not an error
            lgr.warning("add was called with empty file list and no options.")
            return
        try:
            # without --verbose git 2.9.3 add does not return anything
            add_out = self._call_git(
                # Set annex.gitaddtoannex to prevent storing files in
                # annex with a v6+ annex repo.
                ['-c', 'annex.gitaddtoannex=false', 'add'] +
                ensure_list(git_options) +
                to_options(update=update) + ['--verbose'],
                files=files,
                read_only=False,
            )
            # get all the entries
            for o in self._process_git_get_output(*add_out):
                yield o
            # Note: as opposed to git cmdline, force is True by default in
            # gitpython, which would lead to add things, that are
            # ignored or excluded otherwise
            # 2. Note: There is an issue with globbing (like adding '.'),
            # which apparently doesn't care for 'force' and therefore
            # adds '.git/...'. May be it's expanded at the wrong
            # point in time or sth. like that.
            # For now, use direct call to git add.
            #self.cmd_call_wrapper(self.repo.index.add, files, write=True,
            # force=False)
            # TODO: May be make use of 'fprogress'-option to indicate
            # progress
            # But then, we don't have it for git-annex add, anyway.
            #
            # TODO: Is write=True a reasonable way to do it?
            # May be should not write until success of operation is
            # confirmed?
            # What's best in case of a list of files?
        except OSError as e:
            lgr.error("add: %s", e)
            raise
        # Make sure return value from GitRepo is consistent with AnnexRepo
        # currently simulating similar return value, assuming success
        # for all files:
        # TODO: Make return values consistent across both *Repo classes!
        return
@staticmethod
def _process_git_get_output(stdout, stderr=None):
"""Given both outputs (stderr is ignored atm) of git add - process it
Primarily to centralize handling in both indirect annex and direct
modes when ran through proxy
"""
return [{u'file': f, u'success': True}
for f in re.findall("'(.*)'[\n$]", ensure_unicode(stdout))]
@normalize_paths(match_return_type=False)
def remove(self, files, recursive=False, **kwargs):
"""Remove files.
Calls git-rm.
Parameters
----------
files: list of str
list of paths to remove
recursive: False
whether to allow recursive removal from subdirectories
kwargs:
see `__init__`
Returns
-------
[str]
list of successfully removed files.
"""
if recursive:
kwargs['r'] = True
# output per removed file is expected to be "rm 'PATH'":
return [
line.strip()[4:-1]
for line in self.call_git_items_(
['rm'] + to_options(**kwargs), files=files)
]
    def precommit(self):
        """Perform pre-commit maintenance tasks

        Currently a no-op. Kept as a hook because it is invoked before every
        commit (see `commit`); historically it cleaned up GitPython state.
        """
        # we used to clean up GitPython here
        pass
@staticmethod
def _get_prefixed_commit_msg(msg):
DATALAD_PREFIX = "[DATALAD]"
return DATALAD_PREFIX if not msg else "%s %s" % (DATALAD_PREFIX, msg)
    def configure_fake_dates(self):
        """Configure repository to use fake dates.

        Sets 'datalad.fake-dates' to "true" via the ConfigManager.
        NOTE(review): the effective config scope is whatever config.set()
        defaults to -- confirm against the ConfigManager implementation.
        """
        lgr.debug("Enabling fake dates")
        self.config.set("datalad.fake-dates", "true")
    @property
    def fake_dates_enabled(self):
        """Is the repository configured to use fake dates?
        """
        # this turned into a private property of the CoreGitRepo;
        # proxied here for backward-compatibility
        return self._fake_dates_enabled
    def add_fake_dates(self, env):
        """Backward-compatibility shim for add_fake_dates_to_env().

        Parameters
        ----------
        env
          environment mapping (or None) to augment; passed through to
          CoreGitRepo.add_fake_dates_to_env()
        """
        # was renamed in CoreGitRepo
        return self.add_fake_dates_to_env(env)
    def commit(self, msg=None, options=None, _datalad_msg=False, careless=True,
               files=None, date=None, index_file=None):
        """Commit changes to git.

        Parameters
        ----------
        msg: str, optional
          commit-message
        options: list of str, optional
          cmdline options for git-commit
        _datalad_msg: bool, optional
          To signal that commit is automated commit by datalad, so
          it would carry the [DATALAD] prefix
        careless: bool, optional
          if False, raise when there's nothing actually committed;
          if True, don't care
        files: list of str, optional
          path(s) to commit
        date: str, optional
          Date in one of the formats git understands
        index_file: str, optional
          An alternative index to use
        """
        self.precommit()
        # assemble commandline
        cmd = self._git_cmd_prefix + ['commit']
        options = ensure_list(options)
        if date:
            options += ["--date", date]
        orig_msg = msg
        if not msg:
            if '--amend' in options:
                if '--no-edit' not in options:
                    # don't overwrite old commit message with our default
                    # message by default, but re-use old one. In other words:
                    # Make --no-edit the default:
                    options += ["--no-edit"]
            else:
                msg = 'Recorded changes'
                _datalad_msg = True
        if _datalad_msg:
            msg = self._get_prefixed_commit_msg(msg)
        if msg:
            options += ["-m", msg]
        cmd.extend(options)
        # set up env for commit
        env = self.add_fake_dates(None) \
            if self.fake_dates_enabled else os.environ.copy()
        if index_file:
            env['GIT_INDEX_FILE'] = index_file
        lgr.debug("Committing via direct call of git: %s", cmd)
        # too many files may exceed command-line length limits; process in
        # chunks (a single empty chunk means "no path constraint")
        file_chunks = generate_file_chunks(files, cmd) if files else [[]]
        # store pre-commit state to be able to check if anything was committed
        prev_sha = self.get_hexsha()
        try:
            for i, chunk in enumerate(file_chunks):
                cur_cmd = cmd.copy()
                # if this is an explicit dry-run, there is no point in
                # amending, because no commit was ever made
                # otherwise, amend the first commit, and prevent
                # leaving multiple commits behind
                if i > 0 and '--dry-run' not in cmd:
                    if '--amend' not in cmd:
                        cur_cmd.append('--amend')
                    if '--no-edit' not in cmd:
                        cur_cmd.append('--no-edit')
                cur_cmd += ['--'] + chunk
                self._git_runner.run(
                    cur_cmd,
                    protocol=StdOutErrCapture,
                    stdin=None,
                    env=env,
                )
        except CommandError as e:
            # real errors first
            if "did not match any file(s) known to git" in e.stderr:
                raise FileNotInRepositoryError(
                    cmd=e.cmd,
                    msg="File(s) unknown to git",
                    code=e.code,
                    filename=linesep.join([
                        l for l in e.stderr.splitlines()
                        if l.startswith("error: pathspec")
                    ])
                )
            # behavior choices now
            elif not careless:
                # not willing to compromise at all
                raise
            elif 'nothing to commit' in e.stdout:
                lgr.debug("nothing to commit in %s. Ignored.", self)
            elif 'no changes added to commit' in e.stdout or \
                    'nothing added to commit' in e.stdout:
                lgr.debug("no changes added to commit in %s. Ignored.", self)
            else:
                raise
        if orig_msg \
                or '--dry-run' in cmd \
                or prev_sha == self.get_hexsha() \
                or ('--amend' in cmd and '--no-edit' in cmd) \
                or (not is_interactive()) \
                or self.config.obtain('datalad.save.no-message') != 'interactive':
            # we had a message given, or nothing was committed, or prev. commit
            # was amended, or we are not connected to a terminal, or no
            # interactive message input is desired:
            # we can go home
            return
        # handle interactive message entry by running another `git-commit`
        self._git_runner.run(
            cmd + ['--amend', '--edit'],
            protocol=NoCapture,
            stdin=None,
            env=env,
        )
# TODO usage is only in the tests, consider making a test helper and
# remove from GitRepo API
def get_indexed_files(self):
"""Get a list of files in git's index
Returns
-------
list
list of paths rooting in git's base dir
"""
return [
str(r.relative_to(self.pathobj))
for r in self.get_content_info(
paths=None, ref=None, untracked='no')
]
    def format_commit(self, fmt, commitish=None):
        """Return `git show` output for `commitish`.

        Parameters
        ----------
        fmt : str
          A format string accepted by `git show`.
        commitish: str, optional
          Any commit identifier (defaults to "HEAD").

        Returns
        -------
        str or, if there are not commits yet, None.

        Raises
        ------
        ValueError
          If `commitish` is unknown to the repository.
        """
        # use git-log and not git-show due to faster performance with
        # complex commits (e.g. octopus merges)
        # https://github.com/datalad/datalad/issues/4801
        cmd = ['log', '-1', '-z', '--format=' + fmt]
        if commitish is not None:
            cmd.append(commitish + "^{commit}")
        # make sure Git takes our argument as a revision
        cmd.append('--')
        try:
            stdout = self.call_git(
                cmd, expect_stderr=True, expect_fail=True,
                read_only=True)
        except CommandError as e:
            if 'bad revision' in e.stderr:
                raise ValueError("Unknown commit identifier: %s" % commitish)
            elif 'does not have any commits yet' in e.stderr:
                return None
            else:
                raise e
        # This trailing null is coming from the -z above, which avoids the
        # newline that Git would append to the output. We could drop -z and
        # strip the newline directly, but then we'd have to worry about
        # compatibility across platforms.
        return stdout.rsplit("\0", 1)[0]
def get_hexsha(self, commitish=None, short=False):
"""Return a hexsha for a given commitish.
Parameters
----------
commitish : str, optional
Any identifier that refers to a commit (defaults to "HEAD").
short : bool, optional
Return the abbreviated form of the hexsha.
Returns
-------
str or, if no commitish was given and there are no commits yet, None.
Raises
------
ValueError
If a commitish was given, but no corresponding commit could be
determined.
"""
# use --quiet because the 'Needed a single revision' error message
# that is the result of running this in a repo with no commits
# isn't useful to report
cmd = ['rev-parse', '--quiet', '--verify', '{}^{{commit}}'.format(
commitish if commitish else 'HEAD')
]
if short:
cmd.append('--short')
try:
return self.call_git_oneline(cmd, read_only=True)
except CommandError as e:
if commitish is None:
return None
raise ValueError("Unknown commit identifier: %s" % commitish)
    @normalize_paths(match_return_type=False)
    def get_last_commit_hexsha(self, files):
        """Return the hash of the last commit the modified any of the given
        paths

        Parameters
        ----------
        files : list of str
          paths to consider

        Returns
        -------
        str or None
          None if no commit touched the paths, or the repository has no
          commits yet.
        """
        try:
            commit = self.call_git(
                ['rev-list', '-n1', 'HEAD'],
                files=files,
                expect_fail=True,
                read_only=True,
            )
            commit = commit.strip()
            # empty output means no commit matched the path constraint
            return commit if commit else None
        except CommandError:
            if self.get_hexsha() is None:
                # unborn branch, don't freak out
                return None
            raise
def get_revisions(self, revrange=None, fmt="%H", options=None):
"""Return list of revisions in `revrange`.
Parameters
----------
revrange : str or list of str or None, optional
Revisions or revision ranges to walk. If None, revision defaults to
HEAD unless a revision-modifying option like `--all` or
`--branches` is included in `options`.
fmt : string, optional
Format accepted by `--format` option of `git log`. This should not
contain new lines because the output is split on new lines.
options : list of str, optional
Options to pass to `git log`. This should not include `--format`.
Returns
-------
List of revisions (str), formatted according to `fmt`.
"""
if revrange is None:
revrange = []
elif isinstance(revrange, str):
revrange = [revrange]
cmd = ["log", "--format={}".format(fmt)]
cmd.extend((options or []) + revrange + ["--"])
try:
stdout = self.call_git(cmd, expect_fail=True, read_only=True)
except CommandError as e:
if "does not have any commits" in e.stderr:
return []
raise
return stdout.splitlines()
    def commit_exists(self, commitish):
        """Does `commitish` exist in the repo?

        Parameters
        ----------
        commitish : str
          A commit or an object that can be dereferenced to one.

        Returns
        -------
        bool
        """
        # Note: The peeling operator "^{commit}" is required so that rev-parse
        # doesn't succeed if passed a full hexsha that is valid but doesn't
        # exist.
        return self.call_git_success(
            ["rev-parse", "--verify", commitish + "^{commit}"],
            read_only=True,
        )
    def get_merge_base(self, commitishes):
        """Get a merge base hexsha

        Parameters
        ----------
        commitishes: str or list of str
          List of commitishes (branches, hexshas, etc) to determine the merge
          base of. If a single value provided, returns merge_base with the
          current branch.

        Returns
        -------
        str or None
          If no merge-base for given commits, or specified treeish doesn't
          exist, None returned
        """
        if isinstance(commitishes, str):
            commitishes = [commitishes]
        if not commitishes:
            raise ValueError("Provide at least a single value")
        elif len(commitishes) == 1:
            # with a single commitish compare against the active branch
            commitishes = commitishes + [self.get_active_branch()]
        try:
            base = self.call_git_oneline(['merge-base'] + commitishes,
                                         read_only=True)
        except CommandError as exc:
            # exit code 1 with no output: unrelated histories, no merge base
            if exc.code == 1 and not (exc.stdout or exc.stderr):
                # No merge base was found (unrelated commits).
                return None
            if "fatal: Not a valid object name" in exc.stderr:
                return None
            raise
        return base
    def is_ancestor(self, reva, revb):
        """Is `reva` an ancestor of `revb`?

        Parameters
        ----------
        reva, revb : str
          Revisions.

        Returns
        -------
        bool
        """
        # rely on the exit status of `git merge-base --is-ancestor`
        return self.call_git_success(
            ["merge-base", "--is-ancestor", reva, revb],
            read_only=True)
def get_commit_date(self, branch=None, date='authored'):
"""Get the date stamp of the last commit (in a branch or head otherwise)
Parameters
----------
date: {'authored', 'committed'}
Which date to return. "authored" will be the date shown by "git show"
and the one possibly specified via --date to `git commit`
Returns
-------
int or None
None if no commit
"""
if date == 'committed':
format = '%ct'
elif date == 'authored':
format = '%at'
else:
raise ValueError('unknow date type: {}'.format(date))
d = self.format_commit(format, commitish=branch)
return int(d) if d else None
    def get_active_branch(self):
        """Get the name of the active branch

        Returns
        -------
        str or None
          Returns None if there is no active branch, i.e. detached HEAD,
          and the branch name otherwise.
        """
        try:
            out = self.call_git(["symbolic-ref", "HEAD"], expect_fail=True,
                                read_only=True)
        except CommandError as e:
            if 'HEAD is not a symbolic ref' in e.stderr:
                lgr.debug("detached HEAD in {0}".format(self))
                return None
            else:
                raise e
        return out.strip()[11:]  # strip refs/heads/ (11 == len('refs/heads/'))
    def get_corresponding_branch(self, branch=None):
        """Always returns None, a plain GitRepo has no managed branches

        The `branch` argument is accepted for signature compatibility;
        presumably subclasses with managed branches override this -- confirm
        against AnnexRepo.
        """
        return None
def get_branches(self):
"""Get all branches of the repo.
Returns
-------
[str]
Names of all branches of this repository.
"""
return [
b['refname:strip=2']
for b in self.for_each_ref_(fields='refname:strip=2', pattern='refs/heads')
]
def get_remote_branches(self):
"""Get all branches of all remotes of the repo.
Returns
-----------
[str]
Names of all remote branches.
"""
# TODO: Reconsider melting with get_branches()
# TODO: treat entries like this: origin/HEAD -> origin/master'
# currently this is done in collection
return [
b['refname:strip=2']
for b in self.for_each_ref_(fields='refname:strip=2', pattern='refs/remotes')
]
def get_remotes(self, with_urls_only=False):
"""Get known remotes of the repository
Parameters
----------
with_urls_only : bool, optional
return only remotes which have urls
Returns
-------
remotes : list of str
List of names of the remotes
"""
from datalad.utils import unique
self.config.reload()
remotes = unique([x[7:] for x in self.config.sections()
if x.startswith("remote.")])
if with_urls_only:
remotes = [
r for r in remotes
if self.config.get('remote.%s.url' % r)
]
return remotes
# TODO this is practically unused outside the tests, consider turning
# into a test helper and trim from the API
def get_files(self, branch=None):
"""Get a list of files in git.
Lists the files in the (remote) branch.
Parameters
----------
branch: str
Name of the branch to query. Default: active branch.
Returns
-------
[str]
list of files.
"""
return [
str(p.relative_to(self.pathobj))
for p in self.get_content_info(
paths=None, ref=branch, untracked='no')
]
def add_remote(self, name, url, options=None):
"""Register remote pointing to a url
"""
cmd = ['remote', 'add']
if options:
cmd += options
cmd += [name, url]
# for historical reasons this method returns stdout and
# stderr, keeping that for now
result = self._call_git(cmd)
self.config.reload()
return result
def remove_remote(self, name):
"""Remove existing remote
"""
# TODO: testing and error handling!
from .exceptions import RemoteNotAvailableError
try:
self.call_git(['remote', 'remove', name])
except CommandError as e:
if 'No such remote' in e.stderr:
raise RemoteNotAvailableError(name,
cmd="git remote remove",
msg="No such remote",
stdout=e.stdout,
stderr=e.stderr)
else:
raise e
# config.reload necessary, because the associated remote config
# will vanish
self.config.reload()
return
def _maybe_open_ssh_connection(self, remote, prefer_push=True):
"""Open connection if `remote` has an SSH URL.
Doing so enables SSH caching, preventing datalad-sshrun subprocesses
from opening (and then closing) their own.
Parameters
----------
remote : str
prefer_push : bool, optional
Use `remote.<remote>.pushurl` if there is one, falling back to
`remote.<remote>.url`.
"""
if remote:
url = None
if prefer_push:
url = self.get_remote_url(remote, push=True)
url = url or self.get_remote_url(remote)
if url and is_ssh(url):
ssh_manager.get_connection(url).open()
def update_remote(self, name=None, verbose=False):
"""
"""
options = ["-v"] if verbose else []
self._maybe_open_ssh_connection(name)
name = [name] if name else []
self.call_git(
['remote'] + name + ['update'] + options,
expect_stderr=True
)
def fetch(self, remote=None, refspec=None, all_=False, git_options=None,
**kwargs):
"""Fetches changes from a remote (or all remotes).
Parameters
----------
remote : str, optional
name of the remote to fetch from. If no remote is given and
`all_` is not set, the tracking branch is fetched.
refspec : str or list, optional
refspec(s) to fetch.
all_ : bool, optional
fetch all remotes (and all of their branches).
Fails if `remote` was given.
git_options : list, optional
Additional command line options for git-fetch.
kwargs :
Deprecated. GitPython-style keyword argument for git-fetch.
Will be appended to any git_options.
"""
git_options = ensure_list(git_options)
if kwargs:
git_options.extend(to_options(**kwargs))
return list(
self.fetch_(
remote=remote,
refspec=refspec,
all_=all_,
git_options=git_options,
)
)
    def fetch_(self, remote=None, refspec=None, all_=False, git_options=None):
        """Like `fetch`, but returns a generator

        Yields
        ------
        dict
          One info record per ref reported by `git fetch` (parsed from
          stderr via `FetchInfo`).
        """
        # delegate to the shared fetch/push machinery; for fetch, per-ref
        # info comes on stderr and no 'remote' field is added to records
        yield from self._fetch_push_helper(
            base_cmd=self._git_cmd_prefix + ['fetch', '--verbose', '--progress'],
            action='fetch',
            urlvars=('remote.{}.url', 'remote.{}.url'),
            protocol=GitProgress,
            info_cls=FetchInfo,
            info_from='stderr',
            add_remote=False,
            remote=remote,
            refspec=refspec,
            all_=all_,
            git_options=git_options)
    def push(self, remote=None, refspec=None, all_remotes=False,
             all_=False, git_options=None, **kwargs):
        """Push changes to a remote (or all remotes).

        If remote and refspec are specified, and remote has
        `remote.{remote}.datalad-push-default-first` configuration variable
        set (e.g. by `create-sibling-github`), we will first push the first
        refspec separately to possibly ensure that the first refspec is chosen
        by remote as the "default branch".
        See https://github.com/datalad/datalad/issues/4997

        Upon successful push if this variable was set in the local git config,
        we unset it, so subsequent pushes would proceed normally.

        Parameters
        ----------
        remote : str, optional
          name of the remote to push to. If no remote is given and
          `all_` is not set, the tracking branch is pushed.
        refspec : str or list, optional
          refspec(s) to push.
        all_ : bool, optional
          push to all remotes. Fails if `remote` was given.
        git_options : list, optional
          Additional command line options for git-push.
        kwargs :
          Deprecated. GitPython-style keyword argument for git-push.
          Will be appended to any git_options.
        """
        git_options = ensure_list(git_options)
        # legacy GitPython-style kwargs are translated to CLI options
        if kwargs:
            git_options.extend(to_options(**kwargs))
        if all_remotes:
            # be nice to the elderly
            all_ = True
        # by default push everything in one go
        push_refspecs = [refspec]
        cfg = self.config  # shortcut
        cfg_push_var = "remote.{}.datalad-push-default-first".format(remote)
        if remote and refspec and cfg.obtain(cfg_push_var, default=False, valtype=bool):
            refspec = ensure_list(refspec)
            lgr.debug("As indicated by %s pushing first refspec %s separately first",
                      cfg_push_var, refspec[0])
            # split into two pushes: first refspec alone, then the rest,
            # so the remote picks the first one as its default branch
            push_refspecs = [[refspec[0]], refspec[1:]]
        push_res = []
        for refspecs in push_refspecs:
            push_res.extend(
                self.push_(
                    remote=remote,
                    refspec=refspecs,
                    all_=all_,
                    git_options=git_options,
                )
            )
        # note: above push_ should raise exception if errors out
        # only unset the one-shot config after a real (non-dry-run) push
        if '--dry-run' not in git_options \
                and cfg.get_from_source('local', cfg_push_var) is not None:
            lgr.debug("Removing %s variable from local git config after successful push", cfg_push_var)
            cfg.unset(cfg_push_var, 'local')
        return push_res
    def push_(self, remote=None, refspec=None, all_=False, git_options=None):
        """Like `push`, but returns a generator

        Yields
        ------
        dict
          One info record per ref reported by `git push` (parsed from
          stdout via `PushInfo`), with a 'remote' field added.
        """
        # delegate to the shared fetch/push machinery; for push, per-ref
        # info comes from --porcelain output on stdout
        yield from self._fetch_push_helper(
            base_cmd=self._git_cmd_prefix + ['push', '--progress', '--porcelain'],
            action='push',
            urlvars=('remote.{}.pushurl', 'remote.{}.url'),
            protocol=StdOutCaptureWithGitProgress,
            info_cls=PushInfo,
            info_from='stdout',
            add_remote=True,
            remote=remote,
            refspec=refspec,
            all_=all_,
            git_options=git_options)
    def _fetch_push_helper(
            self,
            base_cmd,  # arg list
            action,  # label fetch|push
            urlvars,  # variables to query for URLs
            protocol,  # processor for output
            info_cls,  # Push|FetchInfo
            info_from,  # stdout, stderr
            add_remote,  # whether to add a 'remote' field to the info dict
            remote=None, refspec=None, all_=False, git_options=None):
        """Shared implementation of fetch_() and push_().

        Resolves which remotes to contact (explicit remote, all remotes, or
        the tracking branch), runs the git command per remote, and yields
        one parsed info record per reported ref.
        """
        git_options = ensure_list(git_options)
        cmd = base_cmd + git_options
        if remote is None:
            if refspec:
                # conflicts with using tracking branch or push all remotes
                # For now: Just fail.
                # TODO: May be check whether it fits to tracking branch
                raise ValueError(
                    "refspec specified without a remote. ({})".format(refspec))
            if all_:
                remotes_to_process = self.get_remotes(with_urls_only=True)
            else:
                # No explicit remote to fetch.
                # => get tracking branch:
                tb_remote, refspec = self.get_tracking_branch()
                if tb_remote is not None:
                    remotes_to_process = [tb_remote]
                else:
                    # No remote, no tracking branch
                    # => fail
                    raise ValueError(
                        "Neither a remote is specified to {} "
                        "from nor a tracking branch is set up.".format(action))
        else:
            if all_:
                raise ValueError(
                    "Option 'all_' conflicts with specified remote "
                    "'{}'.".format(remote))
            remotes_to_process = [remote]
        if refspec:
            # prep for appending to cmd
            refspec = ensure_list(refspec)
        # no need for progress report, when there is just one remote
        log_remote_progress = len(remotes_to_process) > 1
        if log_remote_progress:
            pbar_id = '{}remotes-{}'.format(action, id(self))
            log_progress(
                lgr.info,
                pbar_id,
                'Start %sing remotes for %s', action, self,
                total=len(remotes_to_process),
                label=action.capitalize(),
                unit=' Remotes',
            )
        try:
            for remote in remotes_to_process:
                r_cmd = cmd + [remote]
                if refspec:
                    r_cmd += refspec
                if log_remote_progress:
                    log_progress(
                        lgr.info,
                        pbar_id,
                        '{}ing remote %s'.format(action.capitalize()),
                        remote,
                        update=1,
                        increment=True,
                    )
                # best effort to enable SSH connection caching
                url = self.config.get(
                    # make two attempts to get a URL
                    urlvars[0].format(remote),
                    self.config.get(
                        urlvars[1].format(remote),
                        None)
                )
                if url and is_ssh(url):
                    ssh_manager.get_connection(url).open()
                try:
                    out = self._git_runner.run(
                        r_cmd,
                        protocol=protocol,
                    )
                    output = out[info_from] or ''
                except CommandError as e:
                    output = None
                    # intercept some errors that we express as an error report
                    # in the info dicts
                    if re.match(
                            '.*^error: failed to (push|fetch) some refs',
                            e.stderr,
                            re.DOTALL | re.MULTILINE):
                        output = getattr(e, info_from)
                        hints = ' '.join([l[6:] for l in e.stderr.splitlines()
                                          if l.startswith('hint: ')])
                        if output is None:
                            output = ''
                    if not output:
                        raise
                for line in output.splitlines():
                    try:
                        # push info doesn't identify a remote, add it here
                        pi = info_cls._from_line(line)
                        if add_remote:
                            pi['remote'] = remote
                        # There were errors, but Git provided hints
                        # NOTE(review): `hints` is only bound in the
                        # CommandError branch above; if a record reports
                        # 'error' without a prior exception this would raise
                        # NameError (caught by the broad except below) —
                        # confirm whether that path is reachable
                        if 'error' in pi['operations']:
                            pi['hints'] = hints or None
                        yield pi
                    except Exception:
                        # it is not progress and no push info
                        # don't hide it completely
                        lgr.debug('git-%s reported: %s', action, line)
        finally:
            if log_remote_progress:
                log_progress(
                    lgr.info,
                    pbar_id,
                    'Finished %sing remotes for %s', action, self,
                )
def get_remote_url(self, name, push=False):
"""Get the url of a remote.
Reads the configuration of remote `name` and returns its url or None,
if there is no url configured.
Parameters
----------
name: str
name of the remote
push: bool
if True, get the pushurl instead of the fetch url.
"""
var = 'remote.{0}.{1}'.format(name, 'pushurl' if push else 'url')
return self.config.get(var, None)
def set_remote_url(self, name, url, push=False):
"""Set the URL a remote is pointing to
Sets the URL of the remote `name`. Requires the remote to already exist.
Parameters
----------
name: str
name of the remote
url: str
push: bool
if True, set the push URL, otherwise the fetch URL
"""
var = 'remote.{0}.{1}'.format(name, 'pushurl' if push else 'url')
self.config.set(var, url, scope='local', reload=True)
def get_branch_commits_(self, branch=None, limit=None, stop=None):
"""Return commit hexshas for a branch
Parameters
----------
branch: str, optional
If not provided, assumes current branch
limit: None | 'left-only', optional
Limit which commits to report. If None -- all commits (merged or not),
if 'left-only' -- only the commits from the left side of the tree upon
merges
stop: str, optional
hexsha of the commit at which stop reporting (matched one is not
reported either)
Yields
------
str
"""
cmd = ['rev-list']
if limit == 'left-only':
cmd.append('--left-only')
if not branch:
branch = self.get_active_branch()
cmd.append(branch)
# and trailing -- marker to make sure that Git never confused the branch
# with a potentially existing directory of the same name
cmd.append('--')
for r in self.call_git_items_(cmd):
if stop and stop == r:
return
yield r
def checkout(self, name, options=None):
"""
"""
# TODO: May be check for the need of -b options herein?
cmd = ['checkout']
if options:
cmd += options
cmd += [str(name)]
self.call_git(cmd, expect_stderr=True)
# checkout can change committed config, or create branch config
self.config.reload()
# TODO: Before implementing annex merge, find usages and check for a needed
# change to call super().merge
def merge(self, name, options=None, msg=None, allow_unrelated=False, **kwargs):
if options is None:
options = []
if msg:
options = options + ["-m", msg]
options += ['--allow-unrelated-histories']
self.call_git(
['merge'] + options + [name],
**kwargs
)
    def remove_branch(self, branch):
        """Force-delete the local branch `branch` (`git branch -D`)."""
        self.call_git(['branch', '-D', branch])
    def cherry_pick(self, commit):
        """Cherry pick `commit` to the current branch.

        Parameters
        ----------
        commit : str
          A single commit.

        Raises
        ------
        CommandError
          If the underlying `git cherry-pick` call fails (e.g. conflicts).
        """
        self.call_git(["cherry-pick", commit])
    @property
    def dirty(self):
        """Is the repository dirty?

        Note: This provides a quick answer when you simply want to know if
        there are any untracked changes or modifications in this repository or
        its submodules. For finer-grained control and more detailed reporting,
        use status() instead.
        """
        # fast path: plain `git status --porcelain` with explicit overrides
        # so user configuration cannot hide untracked files or submodules
        stdout = self.call_git(
            ["status", "--porcelain",
             # Ensure the result isn't influenced by status.showUntrackedFiles.
             "--untracked-files=normal",
             # Ensure the result isn't influenced by diff.ignoreSubmodules.
             "--ignore-submodules=none"])
        if bool(stdout.strip()):
            # The quick `git status`-based check can give a different answer
            # than `datalad status` for submodules on an adjusted branch.
            # Double-check with the (slower) diffstatus machinery before
            # declaring the repo dirty.
            st = self.diffstatus(fr="HEAD" if self.get_hexsha() else None,
                                 to=None, untracked="normal")
            return any(r.get("state") != "clean" for r in st.values())
        return False
@property
def untracked_files(self):
"""Legacy interface, do not use! Use the status() method instead.
Despite its name, it also reports on untracked datasets, and
yields their names with trailing path separators.
"""
return [
'{}{}'.format(
str(p.relative_to(self.pathobj)),
os.sep if props['type'] != 'file' else ''
)
for p, props in self.status(
untracked='all', eval_submodule_state='no').items()
if props.get('state', None) == 'untracked'
]
def gc(self, allow_background=False, auto=False):
"""Perform house keeping (garbage collection, repacking)"""
cmd_options = []
if not allow_background:
cmd_options += ['-c', 'gc.autodetach=0']
cmd_options += ['gc', '--aggressive']
if auto:
cmd_options += ['--auto']
self.call_git(cmd_options)
    def _parse_gitmodules(self):
        """Parse .gitmodules into a mapping of absolute module path -> props.

        Returns
        -------
        dict
          Keys are absolute paths (under `self.pathobj`) of submodule mounts;
          values are dictionaries with 'gitmodule_name' plus one
          'gitmodule_<var>' entry per .gitmodules variable (except 'path').
          Empty dict if no .gitmodules file exists.
        """
        # TODO read .gitconfig from Git blob?
        gitmodules = self.pathobj / '.gitmodules'
        if not gitmodules.exists():
            return {}
        # pull out file content
        out = self.call_git(
            ['config', '-z', '-l', '--file', '.gitmodules'],
            read_only=True)
        # abuse our config parser
        # disable multi-value report, because we could not deal with them
        # anyways, and they should not appear in a normal .gitmodules file
        # but could easily appear when duplicates are included. In this case,
        # we better not crash
        db, _ = parse_gitconfig_dump(out, cwd=self.path, multi_value=False)
        mods = {}
        for k, v in db.items():
            if not k.startswith('submodule.'):
                # we don't know what this is
                lgr.warning("Skip unrecognized .gitmodule specification: %s=%s", k, v)
                continue
            k_l = k.split('.')
            # module name is everything after 'submodule.' that is not the variable
            # name
            mod_name = '.'.join(k_l[1:-1])
            mod = mods.get(mod_name, {})
            # variable name is the last 'dot-free' segment in the key
            mod[k_l[-1]] = v
            mods[mod_name] = mod
        out = {}
        # bring into traditional shape
        for name, props in mods.items():
            if 'path' not in props:
                lgr.warning("Failed to get '%s.path', skipping this submodule", name)
                continue
            # prefix remaining variables, drop 'path' (it becomes the key)
            modprops = {'gitmodule_{}'.format(k): v
                        for k, v in props.items()
                        if not (k.startswith('__') or k == 'path')}
            modpath = self.pathobj / PurePosixPath(props['path'])
            modprops['gitmodule_name'] = name
            out[modpath] = modprops
        return out
    def get_submodules_(self, paths=None):
        """Yield submodules in this repository.

        Parameters
        ----------
        paths : list(pathlib.PurePath), optional
          Restrict submodules to those under `paths`.

        Returns
        -------
        A generator that yields a dictionary with information for each
        submodule.
        """
        # no .gitmodules means no registered submodules
        if not (self.pathobj / ".gitmodules").exists():
            return
        modinfo = self._parse_gitmodules()
        for path, props in self.get_content_info(
                paths=paths,
                ref=None,
                untracked='no').items():
            if props.get('type', None) != 'dataset':
                # make sure this method never talks about non-dataset
                # content
                continue
            props["path"] = path
            # enrich the content record with .gitmodules metadata, if any
            props.update(modinfo.get(path, {}))
            yield props
    def get_submodules(self, sorted_=True, paths=None):
        """Return list of submodules.

        Parameters
        ----------
        sorted_ : bool, optional
          Sort submodules by path name.
        paths : list(pathlib.PurePath), optional
          Restrict submodules to those under `paths`.

        Returns
        -------
        list(dict)
          Dictionaries as returned by `get_submodules_()`.
        """
        xs = self.get_submodules_(paths=paths)
        if sorted_:
            xs = sorted(xs, key=lambda x: x["path"])
        return list(xs)
    def update_ref(self, ref, value, oldvalue=None, symbolic=False):
        """Update the object name stored in a ref "safely".

        Just a shim for `git update-ref` call if not symbolic, and
        `git symbolic-ref` if symbolic

        Parameters
        ----------
        ref : str
          Reference, such as `ref/heads/BRANCHNAME` or HEAD.
        value : str
          Value to update to, e.g. hexsha of a commit when updating for a
          branch ref, or branch ref if updating HEAD
        oldvalue: str, optional
          Value to update from. Safeguard to be verified by git. This is only
          valid if `symbolic` is not True.
        symbolic : bool
          To instruct if ref is symbolic, e.g. should be used in case of
          ref=HEAD

        Raises
        ------
        ValueError
          If both `oldvalue` and `symbolic` are given.
        """
        if symbolic:
            if oldvalue:
                raise ValueError("oldvalue and symbolic must not be given both")
            cmd = ['symbolic-ref', ref, value]
        else:
            cmd = ['update-ref', ref, value] + ([oldvalue] if oldvalue else [])
        self.call_git(cmd)
def tag(self, tag, message=None, commit=None, options=None):
"""Tag a commit
Parameters
----------
tag : str
Custom tag label. Must be a valid tag name.
message : str, optional
If provided, adds ['-m', <message>] to the list of `git tag`
arguments.
commit : str, optional
If provided, will be appended as last argument to the `git tag` call,
and can be used to identify the commit that shall be tagged, if
not HEAD.
options : list, optional
Additional command options, inserted prior a potential `commit`
argument.
"""
# TODO: call in save.py complains about extensive logging. When does it
# happen in what way? Figure out, whether to just silence it or raise or
# whatever else.
args = ['tag']
if message:
args += ['-m', message]
if options is not None:
args.extend(options)
args.append(tag)
if commit:
args.append(commit)
self.call_git(args)
def get_tags(self, output=None):
"""Get list of tags
Parameters
----------
output : str, optional
If given, limit the return value to a list of values matching that
particular key of the tag properties.
Returns
-------
list
Each item is a dictionary with information on a tag. At present
this includes 'hexsha', and 'name', where the latter is the string
label of the tag, and the former the hexsha of the object the tag
is attached to. The list is sorted by the creator date (committer
date for lightweight tags and tagger date for annotated tags), with
the most recent commit being the last element.
"""
tags = [
dict(
name=t['refname:strip=2'],
hexsha=t['object'] if t['object'] else t['objectname'],
)
for t in self.for_each_ref_(
fields=['refname:strip=2', 'objectname', 'object'],
pattern='refs/tags',
sort='creatordate')
]
if output:
return [t[output] for t in tags]
else:
return tags
def describe(self, commitish=None, **kwargs):
""" Quick and dirty implementation to call git-describe
Parameters
----------
kwargs:
transformed to cmdline options for git-describe;
see __init__ for description of the transformation
"""
# TODO: be more precise what failure to expect when and raise actual
# errors
cmd = ['describe'] + to_options(**kwargs)
if commitish is not None:
cmd.append(commitish)
try:
describe = self.call_git(cmd, expect_fail=True)
return describe.strip()
# TODO: WTF "catch everything"?
except:
return None
def get_tracking_branch(self, branch=None, remote_only=False):
"""Get the tracking branch for `branch` if there is any.
Parameters
----------
branch: str
local branch to look up. If none is given, active branch is used.
remote_only : bool
Don't return a value if the upstream remote is set to "." (meaning
this repository).
Returns
-------
tuple
(remote or None, refspec or None) of the tracking branch
"""
if branch is None:
branch = self.get_corresponding_branch() or self.get_active_branch()
if branch is None:
return None, None
track_remote = self.config.get('branch.{0}.remote'.format(branch), None)
if remote_only and track_remote == ".":
return None, None
track_branch = self.config.get('branch.{0}.merge'.format(branch), None)
return track_remote, track_branch
@property
def count_objects(self):
"""return dictionary with count, size(in KiB) information of git objects
"""
count_cmd = ['count-objects', '-v']
count_str = self.call_git(count_cmd)
count = {key: int(value)
for key, value in [item.split(': ')
for item in count_str.split('\n')
if len(item.split(': ')) == 2]}
return count
    def get_git_attributes(self):
        """Query gitattributes which apply to top level directory

        It is a thin compatibility/shortcut wrapper around more versatile
        get_gitattributes which operates on a list of paths and returns
        a dictionary per each path

        Returns
        -------
        dict:
          a dictionary with attribute name and value items relevant for the
          top ('.') directory of the repository, and thus most likely the
          default ones (if not overwritten with more rules) for all files within
          repo.
        """
        # query only '.' and unwrap its single result record
        return self.get_gitattributes('.')['.']
    def get_gitattributes(self, path, index_only=False):
        """Query gitattributes for one or more paths

        Parameters
        ----------
        path: path or list
          Path(s) to query. Paths may be relative or absolute.
        index_only: bool
          Flag whether to consider only gitattribute setting that are reflected
          in the repository index, not just in the work tree content.

        Returns
        -------
        dict:
          Each key is a queried path (always relative to the repository root),
          each value is a dictionary with attribute
          name and value items. Attribute values are either True or False,
          for set and unset attributes, or are the literal attribute value.
        """
        path = ensure_list(path)
        cmd = ["check-attr", "-z", "--all"]
        if index_only:
            cmd.append('--cached')
        # make sure we have one entry for each query path to
        # simplify work with the result
        attributes = {p: {} for p in path}
        attr = []
        # -z output is a flat NUL-separated stream of (path, name, value)
        # triples; accumulate three items at a time
        for item in self.call_git_items_(cmd, files=path, sep='\0',
                                         read_only=True):
            attr.append(item)
            if len(attr) < 3:
                continue
            # we have a full record
            p, name, value = attr
            attrs = attributes[p]
            # 'set'/'unset' are git's markers for boolean attributes
            attrs[name] = \
                True if value == 'set' else False if value == 'unset' else value
            # done, reset item
            attr = []
        # normalize any absolute keys to repo-relative ones
        return {relpath(k, self.path) if isabs(k) else k: v
                for k, v in attributes.items()}
    def set_gitattributes(self, attrs, attrfile='.gitattributes', mode='a'):
        """Set gitattributes

        By default appends additional lines to `attrfile`. Note, that later
        lines in `attrfile` overrule earlier ones, which may or may not be
        what you want. Set `mode` to 'w' to replace the entire file by
        what you provided in `attrs`.

        Parameters
        ----------
        attrs : list
          Each item is a 2-tuple, where the first element is a path pattern,
          and the second element is a dictionary with attribute key/value
          pairs. The attribute dictionary must use the same semantics as those
          returned by `get_gitattributes()`. Path patterns can use absolute paths,
          in which case they will be normalized relative to the directory
          that contains the target .gitattributes file (see `attrfile`).
        attrfile: path
          Path relative to the repository root of the .gitattributes file the
          attributes shall be set in.
        mode: str
          'a' to append .gitattributes, 'w' to replace it
        """
        git_attributes_file = op.join(self.path, attrfile)
        attrdir = op.dirname(git_attributes_file)
        if not op.exists(attrdir):
            os.makedirs(attrdir)
        # '+' grants read access too, needed for the newline check below
        with open(git_attributes_file, mode + '+') as f:
            # for append, fix existing files that do not end with \n
            if mode == 'a' and f.tell():
                # inspect the tail of the file without rereading all of it
                f.seek(max(0, f.tell() - len(os.linesep)))
                if not f.read().endswith('\n'):
                    f.write('\n')
            for pattern, attr in sorted(attrs, key=lambda x: x[0]):
                # normalize the pattern relative to the target .gitattributes file
                npath = _normalize_path(
                    op.join(self.path, op.dirname(attrfile)), pattern)
                # paths in gitattributes always have to be POSIX
                npath = Path(npath).as_posix()
                attrline = u''
                if npath.count(' '):
                    # quote patterns with spaces
                    attrline += u'"{}"'.format(npath.replace('"', '\\"'))
                else:
                    attrline += npath
                for a in sorted(attr):
                    val = attr[a]
                    # True -> 'name', False -> '-name', else 'name=value'
                    if val is True:
                        attrline += ' {}'.format(a)
                    elif val is False:
                        attrline += ' -{}'.format(a)
                    else:
                        attrline += ' {}={}'.format(a, val)
                f.write('{}\n'.format(attrline))
    def get_content_info(self, paths=None, ref=None, untracked='all'):
        """Get identifier and type information from repository content.

        This is simplified front-end for `git ls-files/tree`.

        Both commands differ in their behavior when queried about subdataset
        paths. ls-files will not report anything, ls-tree will report on the
        subdataset record. This function uniformly follows the behavior of
        ls-tree (report on the respective subdataset mount).

        Parameters
        ----------
        paths : list(pathlib.PurePath) or None
          Specific paths, relative to the resolved repository root, to query
          info for. Paths must be normed to match the reporting done by Git,
          i.e. no parent dir components (ala "some/../this").
          If `None`, info is reported for all content.
        ref : gitref or None
          If given, content information is retrieved for this Git reference
          (via ls-tree), otherwise content information is produced for the
          present work tree (via ls-files). With a given reference, the
          reported content properties also contain a 'bytesize' record,
          stating the size of a file in bytes.
        untracked : {'no', 'normal', 'all'}
          If and how untracked content is reported when no `ref` was given:
          'no': no untracked files are reported; 'normal': untracked files
          and entire untracked directories are reported as such; 'all': report
          individual files even in fully untracked directories.

        Returns
        -------
        dict
          Each content item has an entry under a pathlib `Path` object instance
          pointing to its absolute path inside the repository (this path is
          guaranteed to be underneath `Repo.path`).
          Each value is a dictionary with properties:

          `type`
            Can be 'file', 'symlink', 'dataset', 'directory'
          `gitshasum`
            SHASUM of the item as tracked by Git, or None, if not
            tracked. This could be different from the SHASUM of the file
            in the worktree, if it was modified.

        Raises
        ------
        ValueError
          In case of an invalid Git reference (e.g. 'HEAD' in an empty
          repository)
        """
        lgr.debug('%s.get_content_info(...)', self)
        # TODO limit by file type to replace code in subdatasets command
        info = OrderedDict()
        if paths:
            # path matching will happen against what Git reports
            # and Git always reports POSIX paths
            # any incoming path has to be relative already, so we can simply
            # convert unconditionally
            # note: will be list-ified below
            paths = map(ut.PurePosixPath, paths)
        elif paths is not None:
            # an empty (but not None) path constraint matches nothing
            return info
        path_strs = list(map(str, paths)) if paths else None
        if path_strs and (not ref or external_versions["cmd:git"] >= "2.29.0"):
            # If a path points within a submodule, we need to map it to the
            # containing submodule before feeding it to ls-files or ls-tree.
            #
            # Before Git 2.29.0, ls-tree and ls-files differed in how they
            # reported paths within submodules: ls-files provided no output,
            # and ls-tree listed the submodule. Now they both return no output.
            submodules = [str(s["path"].relative_to(self.pathobj))
                          for s in self.get_submodules_()]
            path_strs = get_parent_paths(path_strs, submodules)
        # this will not work in direct mode, but everything else should be
        # just fine
        if not ref:
            # worktree query via ls-files
            # make sure no operations are pending before we figure things
            # out in the worktree
            self.precommit()
            # --exclude-standard will make sure to honor and standard way
            # git can be instructed to ignore content, and will prevent
            # crap from contaminating untracked file reports
            cmd = ['ls-files', '--stage', '-z']
            # untracked report mode, using labels from `git diff` option style
            if untracked == 'all':
                cmd += ['--exclude-standard', '-o']
            elif untracked == 'normal':
                cmd += ['--exclude-standard', '-o', '--directory', '--no-empty-directory']
            elif untracked == 'no':
                pass
            else:
                raise ValueError(
                    'unknown value for `untracked`: {}'.format(untracked))
            props_re = re.compile(
                r'(?P<type>[0-9]+) (?P<sha>.*) (.*)\t(?P<fname>.*)$')
        else:
            # ref query via ls-tree (-l adds the object size column)
            cmd = ['ls-tree', ref, '-z', '-r', '--full-tree', '-l']
            props_re = re.compile(
                r'(?P<type>[0-9]+) ([a-z]*) (?P<sha>[^ ]*) [\s]*(?P<size>[0-9-]+)\t(?P<fname>.*)$')
        lgr.debug('Query repo: %s', cmd)
        try:
            stdout = self.call_git(
                cmd,
                files=path_strs,
                expect_fail=True,
                read_only=True)
        except CommandError as exc:
            if "fatal: Not a valid object name" in exc.stderr:
                raise InvalidGitReferenceError(ref)
            raise
        lgr.debug('Done query repo: %s', cmd)
        # parse the NUL-separated records into `info`
        self._get_content_info_line_helper(
            ref,
            info,
            stdout.split('\0'),
            props_re)
        lgr.debug('Done %s.get_content_info(...)', self)
        return info
    def _get_content_info_line_helper(self, ref, info, lines, props_re):
        """Internal helper of get_content_info() to parse Git output

        Parameters
        ----------
        ref : str or None
          Reference the query was made against (None for worktree queries);
          only used to decide whether a 'bytesize' field is available.
        info : dict
          Mapping to fill in-place (absolute `Path` -> property dict).
        lines : list(str)
          NUL-split output records of ls-files/ls-tree.
        props_re : compiled regex
          Pattern extracting mode/sha(/size)/fname from a record.
        """
        # git mode bits -> our type labels
        mode_type_map = {
            '100644': 'file',
            '100755': 'file',
            '120000': 'symlink',
            '160000': 'dataset',
        }
        for line in lines:
            if not line:
                continue
            inf = {}
            props = props_re.match(line)
            if not props:
                # Kludge: Filter out paths starting with .git/ to work around
                # an `ls-files -o` bug that was fixed in Git 2.25.
                #
                # TODO: Drop this condition when GIT_MIN_VERSION is at least
                # 2.25.
                if line.startswith(".git/"):
                    lgr.debug("Filtering out .git/ file: %s", line)
                    continue
                # not known to Git, but Git always reports POSIX
                path = ut.PurePosixPath(line)
                inf['gitshasum'] = None
            else:
                # again Git reports always in POSIX
                path = ut.PurePosixPath(props.group('fname'))
            # revisit the file props after this path has not been rejected
            if props:
                inf['gitshasum'] = props.group('sha')
                inf['type'] = mode_type_map.get(
                    props.group('type'), props.group('type'))
                # size column only exists in ls-tree (ref) output
                if ref and inf['type'] == 'file':
                    inf['bytesize'] = int(props.group('size'))
            # join item path with repo path to get a universally useful
            # path representation with auto-conversion and tons of other
            # stuff
            path = self.pathobj.joinpath(path)
            if 'type' not in inf:
                # be nice and assign types for untracked content
                inf['type'] = 'symlink' if path.is_symlink() \
                    else 'directory' if path.is_dir() else 'file'
            info[path] = inf
def status(self, paths=None, untracked='all', eval_submodule_state='full'):
"""Simplified `git status` equivalent.
Parameters
----------
paths : list or None
If given, limits the query to the specified paths. To query all
paths specify `None`, not an empty list. If a query path points
into a subdataset, a report is made on the subdataset record
within the queried dataset only (no recursion).
untracked : {'no', 'normal', 'all'}
If and how untracked content is reported:
'no': no untracked files are reported; 'normal': untracked files
and entire untracked directories are reported as such; 'all': report
individual files even in fully untracked directories.
eval_submodule_state : {'full', 'commit', 'no'}
If 'full' (the default), the state of a submodule is evaluated by
considering all modifications, with the treatment of untracked files
determined by `untracked`. If 'commit', the modification check is
restricted to comparing the submodule's HEAD commit to the one
recorded in the superdataset. If 'no', the state of the subdataset is
not evaluated.
Returns
-------
dict
Each content item has an entry under a pathlib `Path` object instance
pointing to its absolute path inside the repository (this path is
guaranteed to be underneath `Repo.path`).
Each value is a dictionary with properties:
`type`
Can be 'file', 'symlink', 'dataset', 'directory'
`state`
Can be 'added', 'untracked', 'clean', 'deleted', 'modified'.
"""
lgr.debug('Query status of %r for %s paths',
self, len(paths) if paths else 'all')
return self.diffstatus(
fr='HEAD' if self.get_hexsha() else None,
to=None,
paths=paths,
untracked=untracked,
eval_submodule_state=eval_submodule_state)
def diff(self, fr, to, paths=None, untracked='all',
         eval_submodule_state='full'):
    """Like status(), but reports changes between two arbitrary revisions.

    Parameters
    ----------
    fr : str or None
      Revision specification (anything that Git understands). Passing
      `None` considers anything in the target state as new.
    to : str or None
      Revision specification (anything that Git understands), or None
      to compare to the state of the work tree.
    paths : list or None
      If given, limits the query to the specified paths. To query all
      paths specify `None`, not an empty list.
    untracked : {'no', 'normal', 'all'}
      If and how untracked content is reported when `to` is None:
      'no': no untracked files are reported; 'normal': untracked files
      and entire untracked directories are reported as such; 'all': report
      individual files even in fully untracked directories.
    eval_submodule_state : {'full', 'commit', 'no'}
      If 'full' (the default), the state of a submodule is evaluated by
      considering all modifications, with the treatment of untracked files
      determined by `untracked`. If 'commit', the modification check is
      restricted to comparing the submodule's HEAD commit to the one
      recorded in the superdataset. If 'no', the state of the subdataset
      is not evaluated.

    Returns
    -------
    dict
      Same record layout as :meth:`status`, but 'clean' items are
      filtered out -- only actual changes are reported.
    """
    full_status = self.diffstatus(
        fr=fr, to=to, paths=paths,
        untracked=untracked,
        eval_submodule_state=eval_submodule_state)
    # diff() only reports actual changes, so drop everything 'clean'
    return {
        path: props
        for path, props in full_status.items()
        if props.get('state', None) != 'clean'
    }
def diffstatus(self, fr, to, paths=None, untracked='all',
               eval_submodule_state='full', _cache=None):
    """Like diff(), but reports the status of 'clean' content too.

    It supports an additional submodule evaluation state 'global'.
    If given, it will return a single 'modified'
    (vs. 'clean') state label for the entire repository, as soon as
    it can.

    Parameters
    ----------
    fr : str or None
      Revision to compare from; `None` means "from nothing", so
      everything in the target state appears as 'added'.
    to : str or None
      Revision to compare to; `None` compares against the worktree.
    paths : list or None
      Optional path constraint, see `status()`.
    untracked : {'no', 'normal', 'all'}
      Untracked-content reporting mode; only relevant when `to` is None.
    eval_submodule_state : {'full', 'commit', 'no', 'global'}
      See `status()`; 'global' additionally enables the early-exit
      single-label mode described above.
    _cache : dict or None
      Internal result cache, shared across recursive calls into
      subdatasets to avoid repeated `git` invocations.
    """
    def _get_cache_key(label, paths, ref, untracked=None):
        # include the repo path so a cache shared across (sub)repos
        # cannot return results for the wrong repository
        return self.path, label, tuple(paths) if paths else None, \
            ref, untracked

    if _cache is None:
        _cache = {}

    if paths:
        # at this point we must normalize paths to the form that
        # Git would report them, to ease matching later on
        paths = map(ut.Path, paths)
        paths = [
            p.relative_to(self.pathobj) if p.is_absolute() else p
            for p in paths
        ]

    # TODO report more info from get_content_info() calls in return
    # value, those are cheap and possibly useful to a consumer
    # we need (at most) three calls to git
    if to is None:
        # everything we know about the worktree, including os.stat
        # for each file
        key = _get_cache_key('ci', paths, None, untracked)
        if key in _cache:
            to_state = _cache[key]
        else:
            to_state = self.get_content_info(
                paths=paths, ref=None, untracked=untracked)
            _cache[key] = to_state
        # we want Git to tell us what it considers modified and avoid
        # reimplementing logic ourselves
        key = _get_cache_key('mod', paths, None)
        if key in _cache:
            modified = _cache[key]
        else:
            # from Git 2.31.0 onwards ls-files has --deduplicate,
            # but for backward compatibility keep doing deduplication
            # here (via the set comprehension)
            modified = set(
                self.pathobj.joinpath(ut.PurePosixPath(p))
                for p in self.call_git_items_(
                    # we must also look for deleted files, for the logic
                    # below to work. Only from Git 2.31.0 would they be
                    # included with `-m` alone
                    ['ls-files', '-z', '-m', '-d'],
                    # low-level code cannot handle pathobjs
                    files=[str(p) for p in paths] if paths else None,
                    sep='\0',
                    read_only=True)
                if p)
            _cache[key] = modified
    else:
        # comparing two recorded states: no worktree inspection needed
        key = _get_cache_key('ci', paths, to)
        if key in _cache:
            to_state = _cache[key]
        else:
            to_state = self.get_content_info(paths=paths, ref=to)
            _cache[key] = to_state
        # we do not need worktree modification detection in this case
        modified = None

    # origin state
    key = _get_cache_key('ci', paths, fr)
    if key in _cache:
        from_state = _cache[key]
    else:
        if fr:
            from_state = self.get_content_info(paths=paths, ref=fr)
        else:
            # no ref means from nothing
            from_state = {}
        _cache[key] = from_state

    status = OrderedDict()
    for f, to_state_r in to_state.items():
        props = self._diffstatus_get_state_props(
            f,
            from_state.get(f, None),
            to_state_r,
            # are we comparing against a recorded commit or the worktree
            to is not None,
            # if we have worktree modification info, report if
            # path is reported as modified in it
            modified and f in modified,
            eval_submodule_state)
        # potential early exit in "global" eval mode
        if eval_submodule_state == 'global' and \
                props.get('state', None) not in ('clean', None):
            # any modification means globally 'modified'
            return 'modified'
        status[f] = props

    # second pass: paths known in the origin state but absent from the
    # target state were properly deleted (and the deletion is staged)
    for f, from_state_r in from_state.items():
        if f not in to_state:
            # we knew this, but now it is gone and Git is not complaining
            # about it being missing -> properly deleted and deletion
            # stages
            status[f] = dict(
                state='deleted',
                type=from_state_r['type'],
                # report the shasum to distinguish from a plainly vanished
                # file
                gitshasum=from_state_r['gitshasum'],
            )
            if eval_submodule_state == 'global':
                return 'modified'

    if to is not None or eval_submodule_state == 'no':
        # if we have `to` we are specifically comparing against
        # a recorded state, and this function only attempts
        # to label the state of a subdataset, not investigate
        # specifically what the changes in subdatasets are
        # this is done by a high-level command like rev-diff
        # so the comparison within this repo and the present
        # `state` label are all we need, and they are done already
        if eval_submodule_state == 'global':
            return 'clean'
        else:
            return status

    # loop over all subdatasets and look for additional modifications
    for f, st in status.items():
        f = str(f)
        if 'state' in st or not st['type'] == 'dataset':
            # no business here
            continue
        if not GitRepo.is_valid_repo(f):
            # submodule is not present, no chance for a conflict
            st['state'] = 'clean'
            continue
        # we have to recurse into the dataset and get its status
        subrepo = repo_from_path(f)
        # get the HEAD commit, or the one of the corresponding branch
        # only that one counts re super-sub relationship
        # save() syncs the corresponding branch each time
        subrepo_commit = subrepo.get_hexsha(subrepo.get_corresponding_branch())
        st['gitshasum'] = subrepo_commit
        # subdataset records must be labeled clean up to this point
        # test if current commit in subdataset deviates from what is
        # recorded in the dataset
        st['state'] = 'modified' \
            if st['prev_gitshasum'] != subrepo_commit \
            else 'clean'
        if eval_submodule_state == 'global' and st['state'] == 'modified':
            return 'modified'
        if eval_submodule_state == 'commit':
            continue
        # the recorded commit did not change, so we need to make
        # a more expensive traversal
        st['state'] = subrepo.diffstatus(
            # we can use 'HEAD' because we know that the commit
            # did not change. using 'HEAD' will facilitate
            # caching the result
            fr='HEAD',
            to=None,
            paths=None,
            untracked=untracked,
            eval_submodule_state='global',
            _cache=_cache) if st['state'] == 'clean' else 'modified'
        if eval_submodule_state == 'global' and st['state'] == 'modified':
            return 'modified'

    if eval_submodule_state == 'global':
        return 'clean'
    else:
        return status
def _diffstatus_get_state_props(self, f, from_state, to_state,
against_commit,
modified_in_worktree,
eval_submodule_state):
"""Helper to determine diff properties for a single path
Parameters
----------
f : Path
from_state : dict
to_state : dict
against_commit : bool
Flag whether `to_state` reflects a commit or the worktree.
modified_in_worktree : bool
Flag whether a worktree modification is reported. This is ignored
when `against_commit` is True.
eval_submodule_state : {'commit', 'no', ...}
"""
if against_commit:
# we can ignore any worktree modification reported when
# comparing against a commit
modified_in_worktree = False
props = {}
if 'type' in to_state:
props['type'] = to_state['type']
to_sha = to_state['gitshasum']
from_sha = from_state['gitshasum'] if from_state else None
# determine the state of `f` from from_state and to_state records, if
# it can be determined conclusively from it. If not, it will
# stay None for now
state = None
if not from_state:
# this is new, or rather not known to the previous state
state = 'added' if to_sha else 'untracked'
elif to_sha == from_sha and not modified_in_worktree:
# something that is seemingly unmodified, based on the info
# gathered so far
if to_state['type'] == 'dataset':
if against_commit or eval_submodule_state == 'commit':
# we compare against a recorded state, just based on
# the shas we can be confident, otherwise the state
# of a subdataset isn't fully known yet, because
# `modified_in_worktree` will only reflect changes
# in the commit of a subdataset without looking into
# it for uncommitted changes. Such tests are done
# later and based on further conditionals for
# performance reasons
state = 'clean'
else:
# no change in git record, and no change on disk
# at this point we know that the reported object ids
# for this file are identical in the to and from
# records. If to is None, we're comparing to the
# working tree and a deleted file will still have an
# identical id, so we need to check whether the file is
# gone before declaring it clean. This working tree
# check is irrelevant and wrong if to is a ref.
state = 'clean' \
if against_commit or (f.exists() or f.is_symlink()) \
else 'deleted'
else:
# change in git record, or on disk
# for subdatasets leave the 'modified' judgement to the caller
# for supporting corner cases, such as adjusted branch
# which require inspection of a subdataset
# TODO we could have a new file that is already staged
# but had subsequent modifications done to it that are
# unstaged. Such file would presently show up as 'added'
# ATM I think this is OK, but worth stating...
state = ('modified'
if against_commit or to_state['type'] != 'dataset'
else None
) if f.exists() or f.is_symlink() else 'deleted'
# TODO record before and after state for diff-like use
# cases
if state in ('clean', 'added', 'modified', None):
# assign present gitsha to any record
# state==None can only happen for subdatasets that
# already existed, so also assign a sha for them
props['gitshasum'] = to_sha
if 'bytesize' in to_state:
# if we got this cheap, report it
props['bytesize'] = to_state['bytesize']
elif state == 'clean' and 'bytesize' in from_state:
# no change, we can take this old size info
props['bytesize'] = from_state['bytesize']
if state in ('clean', 'modified', 'deleted', None):
# assign previous gitsha to any record
# state==None can only happen for subdatasets that
# already existed, so also assign a sha for them
props['prev_gitshasum'] = from_sha
if state:
# only report a state if we could determine any
# outside code tests for existence of the property
# and not (always) for the value
props['state'] = state
return props
def _save_pre(self, paths, _status, **kwargs):
# helper to get an actionable status report
if paths is not None and not paths and not _status:
return
if _status is None:
if 'untracked' not in kwargs:
kwargs['untracked'] = 'normal'
status = self.status(
paths=paths,
**{k: kwargs[k] for k in kwargs
if k in ('untracked', 'eval_submodule_state')})
else:
# we want to be able to add items down the line
# make sure to detach from prev. owner
status = _status.copy()
return status
def get_staged_paths(self):
    """Return a list of repository path(s) with staged changes.

    This is a rather fast call, as it will not depend on what is going on
    in the worktree.
    """
    cmd = ['diff', '--name-only', '--staged']
    try:
        return list(self.call_git_items_(cmd, expect_stderr=True))
    except CommandError as e:
        # cannot query (e.g. no commit yet): report nothing staged
        lgr.debug(CapturedException(e))
        return []
def _save_post(self, message, files, partial_commit, amend=False,
               allow_empty=False):
    """Helper to commit the changes reported in a status.

    Parameters
    ----------
    message : str or None
      Commit message.
    files : iterable of Path
      Paths to commit (only used for a partial commit).
    partial_commit : bool
      Whether to commit only `files` rather than everything staged.
    amend : bool
      Amend the previous commit instead of creating a new one.
    allow_empty : bool
      Permit an empty commit.
    """
    # TODO remove pathobj stringification when commit() can handle it
    if partial_commit:
        to_commit = [str(f.relative_to(self.pathobj)) for f in files]
    else:
        to_commit = None
    # a partial commit with nothing to commit is a no-op, unless an
    # empty commit is explicitly allowed or we amend with a message
    if partial_commit and not to_commit and not allow_empty \
            and not (amend and message):
        return
    # we directly call GitRepo.commit() to avoid a whole slew
    # of direct-mode safeguards and workarounds in the AnnexRepo
    # implementation (which also run an additional dry-run commit)
    GitRepo.commit(
        self,
        files=to_commit,
        msg=message,
        options=to_options(amend=amend, allow_empty=allow_empty),
        # do not raise on empty commit: it could be that the `add`
        # in this save-cycle has already brought back a 'modified'
        # file into a clean state
        careless=True,
    )
def save(self, message=None, paths=None, _status=None, **kwargs):
    """Save dataset content.

    Thin convenience wrapper around :meth:`save_` that exhausts the
    generator and returns all result records as a list.

    Parameters
    ----------
    message : str or None
      A message to accompany the changeset in the log. If None,
      a default message is used.
    paths : list or None
      Any content with path matching any of the paths given in this
      list will be saved. Matching will be performed against the
      dataset status (GitRepo.status()), or a custom status provided
      via `_status`. If no paths are provided, ALL non-clean paths
      present in the repo status or `_status` will be saved.
    _status : dict or None
      If None, Repo.status() will be queried for the given `ds`. If
      a dict is given, its content will be used as a constraint.
      For example, to save only modified content, but no untracked
      content, set `paths` to None and provide a `_status` that has
      no entries for untracked content.
    **kwargs :
      Additional arguments that are passed to underlying Repo methods.
      Supported:

      - git : bool (passed to Repo.add())
      - eval_submodule_state : {'full', 'commit', 'no'}
        passed to Repo.status()
      - untracked : {'no', 'normal', 'all'} - passed to Repo.status()
      - amend : bool (passed to GitRepo.commit)
    """
    return list(self.save_(
        message=message, paths=paths, _status=_status, **kwargs))
def save_(self, message=None, paths=None, _status=None, **kwargs):
    """Like `save()` but working as a generator.

    Yields DataLad-style result records (dicts) for every deletion,
    addition, and submodule registration performed. The concluding
    commit itself does not yield a record yet (see TODO at the end).
    """
    from datalad.interface.results import get_status_dict

    status = self._save_pre(paths, _status, **kwargs) or {}
    amend = kwargs.get('amend', False)

    # Sort status into status by state with explicit list of states
    # (excluding clean we do not care about) we expect to be present
    # and which we know of (unless None), and modified_or_untracked hybrid
    # since it is used below
    status_state = {
        k: {}
        for k in (None,  # not cared of explicitly here
                  'added',  # not cared of explicitly here
                  # 'clean' # not even wanted since nothing to do about those
                  'deleted',
                  'modified',
                  'untracked',
                  'modified_or_untracked',  # hybrid group created here
                  )}
    for f, props in status.items():
        state = props.get('state', None)
        if state == 'clean':
            # we don't care about clean
            continue
        status_state[state][f] = props
        # The hybrid one to retain the same order as in original status
        if state in ('modified', 'untracked'):
            status_state['modified_or_untracked'][f] = props
    del status  # to ensure it is no longer used

    # TODO: check on those None's -- may be those are also "nothing to worry about"
    # and we could just return?
    if not any(status_state.values()) and not (message and amend):
        # all clean, nothing to do
        lgr.debug('Nothing to save in %r, exiting early', self)
        return

    # three things are to be done:
    # - remove (deleted if not already staged)
    # - add (modified/untracked)
    # - commit (with all paths that have been touched, to bypass
    #   potential pre-staged bits)
    need_partial_commit = True if self.get_staged_paths() else False

    # remove first, because removal of a subds would cause a
    # modification of .gitmodules to be added to the todo list
    to_remove = [
        # TODO remove pathobj stringification when delete() can
        # handle it
        str(f.relative_to(self.pathobj))
        for f, props in status_state['deleted'].items()
        # staged deletions have a gitshasum reported for them
        # those should not be processed as git rm will error
        # due to them being properly gone already
        if not props.get('gitshasum', None)]
    vanished_subds = any(
        props.get('type', None) == 'dataset'
        for props in status_state['deleted'].values())
    if to_remove:
        for r in self.remove(
                to_remove,
                # we would always see individual files
                recursive=False):
            # TODO normalize result
            yield get_status_dict(
                action='delete',
                refds=self.pathobj,
                # TODO make remove() report the type
                # for now it claims to report on files only
                type='file',
                path=(self.pathobj / ut.PurePosixPath(r)),
                # make remove() report on failures too
                status='ok',
                logger=lgr)

    # TODO this additional query should not be, based on status as given
    # if anyhow possible, however, when paths are given, status may
    # not contain all required information. In case of path=None AND
    # _status=None, we should be able to avoid this, because
    # status should have the full info already
    # looks for contained repositories
    submodule_change = False
    untracked_dirs = [
        f.relative_to(self.pathobj)
        for f, props in status_state['untracked'].items()
        if props.get('type', None) == 'directory']
    to_add_submodules = []
    if untracked_dirs:
        to_add_submodules = [
            sm for sm, sm_props in
            self.get_content_info(
                untracked_dirs,
                ref=None,
                # request exhaustive list, so that everything that is
                # still reported as a directory must be its own repository
                untracked='all').items()
            if sm_props.get('type', None) == 'directory']
        to_add_submodules = _prune_deeper_repos(to_add_submodules)
        if to_add_submodules:
            for r in self._save_add_submodules(to_add_submodules):
                if r.get('status', None) == 'ok':
                    submodule_change = True
                yield r
    to_stage_submodules = {
        f: props
        for f, props in status_state['modified_or_untracked'].items()
        if props.get('type', None) == 'dataset'}
    if to_stage_submodules:
        lgr.debug(
            '%i submodule path(s) to stage in %r %s',
            len(to_stage_submodules), self,
            to_stage_submodules
            if len(to_stage_submodules) < 10 else '')
        for r in self._save_add_submodules(to_stage_submodules):
            if r.get('status', None) == 'ok':
                submodule_change = True
            yield r

    if submodule_change or vanished_subds:
        # the config has changed too
        self.config.reload()
        # need to include .gitmodules in what needs saving
        f = self.pathobj.joinpath('.gitmodules')
        status_state['modified_or_untracked'][f] = \
            status_state['modified'][f] = \
            dict(type='file', state='modified')
        if hasattr(self, 'uuid') and not kwargs.get('git', False):
            # we cannot simply hook into the coming add-call
            # as this would go to annex, so make a dedicated git-add
            # call to ensure .gitmodules is not annexed
            # in any normal DataLad dataset .gitattributes will
            # prevent this, but in a plain repo it won't
            # https://github.com/datalad/datalad/issues/3306
            for r in GitRepo._save_add(
                    self,
                    {op.join(self.path, '.gitmodules'): None}):
                yield r
    to_add = {
        # TODO remove pathobj stringification when add() can
        # handle it
        str(f.relative_to(self.pathobj)): props
        for f, props in status_state['modified_or_untracked'].items()
        if not (f in to_add_submodules or f in to_stage_submodules)}
    if to_add:
        compat_config = \
            self.config.obtain("datalad.save.windows-compat-warning")
        to_add, problems = self._check_for_win_compat(to_add, compat_config)
        lgr.debug(
            '%i path(s) to add to %s %s',
            len(to_add), self, to_add if len(to_add) < 10 else '')

        if to_add:
            yield from self._save_add(
                to_add,
                git_opts=None,
                **{k: kwargs[k] for k in kwargs
                   if k in (('git',) if hasattr(self, 'uuid')
                            else tuple())})

        if problems:
            from datalad.interface.results import get_status_dict
            # NOTE(review): the trailing comma below makes `msg` a
            # 1-tuple -- presumably intentional for result-record
            # message handling; confirm before changing
            msg = \
                'Incompatible name for Windows systems; disable with ' \
                'datalad.save.windows-compat-warning.',
            for path in problems:
                yield get_status_dict(
                    action='save',
                    refds=self.pathobj,
                    type='file',
                    path=(self.pathobj / ut.PurePosixPath(path)),
                    status='impossible',
                    message=msg,
                    logger=lgr)

    # https://github.com/datalad/datalad/issues/6558
    # file could have become a directory. Unfortunately git
    # would then mistakenly refuse to commit if that old path is also
    # given to commit, so we better filter it out
    if status_state['deleted'] and status_state['added']:
        # check if any "deleted" is a directory now. Then for those
        # there should be some other path under that directory in 'added'
        for f in [_ for _ in status_state['deleted'] if _.is_dir()]:
            # this could potentially be expensive if lots of files become
            # directories, but it is unlikely to happen often
            # Note: PurePath.is_relative_to was added in 3.9 and seems slowish
            # path_is_subpath faster, also if comparing to "in f.parents"
            f_str = str(f)
            if any(path_is_subpath(str(f2), f_str) for f2 in status_state['added']):
                status_state['deleted'].pop(f)  # do not bother giving it to commit below in _save_post

    # Note, that allow_empty is always ok when we amend. Required when we
    # amend an empty commit while the amendment is empty, too (though
    # possibly different message). If an empty commit was okay before, it's
    # okay now.
    status_state.pop('modified_or_untracked')  # pop the hybrid state
    self._save_post(message, chain(*status_state.values()), need_partial_commit, amend=amend,
                    allow_empty=amend)
    # TODO yield result for commit, prev helper checked hexsha pre
    # and post...
def _check_for_win_compat(self, files, config):
    """Check file names for illegal characters or reserved names on Windows.

    In the case that a non-Windows-compatible file is detected, warn
    users about potential interoperability issues (or error, depending
    on the configuration).

    Parameters
    ----------
    files
      list of files to add
    config
      value of self.config.obtain("datalad.save.windows-compat-warning"),
      used to choose the behavior. "none" performs no check, "warning"
      warns in case of incompatibilities, and "error" produces an error
      result in case of incompatibilities.

    Returns
    -------
    tuple(files, problems)
      `files` possibly pruned of problematic entries (in "error" mode),
      and a deduplicated list of problematic paths, or None.
    """
    if config == 'none':
        # checking disabled by configuration
        return files, None

    from collections import defaultdict
    problems = defaultdict(list)
    for fname in files:
        # check every component of the path for incompatibilities
        for part in Path(fname).parts:
            hits = []
            if Path(part).stem.upper() in RESERVED_NAMES_WIN:
                hits.append('Elements using a reserved filename:')
            if re.search(ILLEGAL_CHARS_WIN, part):
                hits.append('Elements with illegal characters:')
            if part.endswith('.'):
                hits.append('Elements ending with a dot:')
            if part.endswith(' '):
                hits.append('Elements ending with a space:')
            for label in hits:
                problems[label].append(part)
                problems['paths'].append(fname)
    if not problems:
        return files, None

    msg = \
        "Some elements of your dataset are not compatible with " \
        "Windows systems. Disable this check by changing " \
        "datalad.save.windows-compat-warning or consider renaming " \
        "the following elements: "
    for label, parts in problems.items():
        # use the key as an explanation, and report filenames only once
        if label != 'paths':
            msg += f"\n{label} {[*{*parts}]}"
    if config == 'warning':
        lgr.warning(msg)
        return files, None
    elif config == 'error':
        # take the problematic files out of the candidate set
        bad_paths = [*{*problems['paths']}]
        for p in bad_paths:
            files.pop(p)
        return files, [*{*problems['paths']}]
def _save_add(self, files, git_opts=None):
    """Simple helper to add files in save().

    Yields one result record per reported `git add` entry.
    """
    from datalad.interface.results import get_status_dict
    try:
        # Set annex.largefiles to prevent storing files in
        # annex with a v6+ annex repo.
        # without --verbose git 2.9.3 add does not return anything
        add_out = self._call_git(
            ['-c', 'annex.largefiles=nothing', 'add']
            + ensure_list(git_opts) + ['--verbose'],
            files=list(files.keys()),
        )
        # turn every reported entry into a result record
        for entry in self._process_git_get_output(*add_out):
            if 'file' in entry:
                entry_path = self.pathobj / ut.PurePosixPath(entry['file'])
            else:
                entry_path = None
            messages = entry.get('error-messages')
            yield get_status_dict(
                action=entry.get('command', 'add'),
                refds=self.pathobj,
                type='file',
                path=entry_path,
                status='ok' if entry.get('success', None) else 'error',
                key=entry.get('key', None),
                # while there is no git-annex underneath here, we
                # tend to fake its behavior, so we can also support
                # this type of messaging
                message='\n'.join(messages) if messages is not None else None,
                logger=lgr)
    except OSError as e:
        lgr.error("add: %s", e)
        raise
def _save_add_submodules(self, paths):
    """Add new submodules, or update records of existing ones.

    This method does not use `git submodule add`, but aims to be more
    efficient by limiting the scope to mere in-place registration of
    multiple already present repositories.

    Parameters
    ----------
    paths : list(Path)
      Absolute paths of the submodule candidates. May also be a dict
      mapping paths to status-record dicts (see the `info` assembly
      below).

    Yields
    ------
    dict
      Result records
    """
    from datalad.interface.results import get_status_dict

    # first gather info from all datasets in read-only fashion, and then
    # update index, .gitmodules and .git/config at once
    info = []
    for path in paths:
        # POSIX-style path relative to this repo, as Git wants it
        rpath = str(path.relative_to(self.pathobj).as_posix())
        subm = repo_from_path(path)
        # if there is a corresponding branch, we want to record its state.
        # we rely on the corresponding branch being synced already.
        # `save` should do that each time it runs.
        subm_commit = subm.get_hexsha(subm.get_corresponding_branch())
        if not subm_commit:
            # a submodule without a single commit cannot be registered
            yield get_status_dict(
                action='add_submodule',
                ds=self,
                path=path,
                status='error',
                message=('cannot add subdataset %s with no commits', subm),
                logger=lgr)
            continue
        # make an attempt to configure a submodule source URL based on the
        # discovered remote configuration
        remote, branch = subm.get_tracking_branch()
        url = subm.get_remote_url(remote) if remote else None
        if url is None:
            # fall back on a relative path URL
            url = './{}'.format(rpath)
        subm_id = subm.config.get('datalad.dataset.id', None)
        info.append(
            dict(
                # if we have additional information on this path, pass it on.
                # if not, treat it as an untracked directory
                paths[path] if isinstance(paths, dict)
                else dict(type='directory', state='untracked'),
                path=path, rpath=rpath, commit=subm_commit, id=subm_id,
                url=url))

    # bypass any convenience or safe-manipulator for speed reasons
    # use case: saving many new subdatasets in a single run
    with (self.pathobj / '.gitmodules').open('a') as gmf, \
         (self.pathobj / '.git' / 'config').open('a') as gcf:
        for i in info:
            # we update the subproject commit unconditionally
            # (160000 is the gitlink mode for submodules)
            self.call_git([
                'update-index', '--add', '--replace', '--cacheinfo', '160000',
                i['commit'], i['rpath']
            ])
            # only write the .gitmodules/.config changes when this is not yet
            # a subdataset
            # TODO: we could update the URL, and branch info at this point,
            # even for previously registered subdatasets
            if i['type'] != 'dataset':
                gmprops = dict(path=i['rpath'], url=i['url'])
                if i['id']:
                    gmprops['datalad-id'] = i['id']
                write_config_section(
                    gmf, 'submodule', i['rpath'], gmprops)
                write_config_section(
                    gcf, 'submodule', i['rpath'], dict(active='true', url=i['url']))
            # This mirrors the result structure yielded for
            # to_stage_submodules below.
            yield get_status_dict(
                action='add',
                refds=self.pathobj,
                # should become type='dataset'
                # https://github.com/datalad/datalad/pull/4793#discussion_r464515331
                type='file',
                key=None,
                path=i['path'],
                status='ok',
                logger=lgr)
# used in in the get command and GitRepo.add_submodule(), the
# latter is not used outside the tests
def _fixup_submodule_dotgit_setup(ds, relativepath):
    """Implementation of our current layout of .git in a subdataset.

    Each subdataset/module has its own .git directory where a standalone
    repository would have it. No gitdir files, no symlinks.
    """
    from os import (
        listdir,
        rename,
        rmdir,
    )

    # move .git to superrepo's .git/modules, remove .git, create .git-file
    subds_path = opj(ds.path, relativepath)
    subds_dotgit = opj(subds_path, ".git")
    repo = GitRepo(subds_path, create=False)
    if repo.dot_git.parent == repo.pathobj:
        # .git already lives inside the submodule -- nothing to do
        return

    # first we want to remove any conflicting worktree setup
    # done by git to find the checkout at the mountpoint of the
    # submodule; if we keep that, any git command will fail
    # after we move .git
    # Ben: Shouldn't we re-setup a possible worktree afterwards?
    repo.config.unset('core.worktree', scope='local')
    # what we have here is some kind of reference (gitdir file or
    # symlink); remove it so the real content can take its place
    os.remove(subds_dotgit)
    # absolute location of the actual git directory
    src_dotgit = str(repo.dot_git)

    # move the content of .git over, entry by entry
    ensure_dir(subds_dotgit)
    for entry in listdir(src_dotgit):
        rename(opj(src_dotgit, entry), opj(subds_dotgit, entry))
    assert not listdir(src_dotgit)
    rmdir(src_dotgit)
# try retro-fitting GitRepo with deprecated functionality
# must be done last in this file
try:
    from datalad_deprecated.gitrepo import DeprecatedGitRepoMethods
    for symbol in dir(DeprecatedGitRepoMethods):
        if symbol.startswith('__'):
            # ignore Python internals
            continue
        if hasattr(GitRepo, symbol):
            # do not override existing symbols
            lgr.debug(
                'Not retro-fitted GitRepo with deprecated %s, '
                'name-space conflict', symbol)
            continue
        # assign deprecated symbol to GitRepo
        setattr(GitRepo, symbol, getattr(DeprecatedGitRepoMethods, symbol))
        lgr.debug('Retro-fitted GitRepo with deprecated %s', symbol)
except ImportError as e:
    # fix: the captured exception was previously assigned but never used;
    # include it in the log record to aid diagnosis
    ce = CapturedException(e)
    lgr.debug(
        'Not retro-fitting GitRepo with deprecated symbols, '
        'datalad-deprecated package not found: %s', ce)
| 38.543972 | 116 | 0.55683 |
3fe2b4508dcedca262e3bdf54a7f9fd2c3c5580f | 39,043 | py | Python | QGraphViz/QGraphViz.py | Phrongorre/QGraphViz | 41a06e79d97699036b3e09b17055b9d7d9d4f8fa | [
"MIT"
] | null | null | null | QGraphViz/QGraphViz.py | Phrongorre/QGraphViz | 41a06e79d97699036b3e09b17055b9d7d9d4f8fa | [
"MIT"
] | null | null | null | QGraphViz/QGraphViz.py | Phrongorre/QGraphViz | 41a06e79d97699036b3e09b17055b9d7d9d4f8fa | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Author: Saifeddine ALOUI
Description:
Main Class to QGraphViz tool
"""
from PyQt5.QtWidgets import QApplication, QWidget, QScrollArea, QSizePolicy
from PyQt5.QtGui import QPainter, QPen, QBrush, QColor, QPainterPath, QImage, QLinearGradient
from PyQt5.QtCore import Qt, QRect, QRectF
import os
import sys
import enum
import datetime
from QGraphViz.DotParser import DotParser, Node, Edge, Graph, GraphType
import math
class QGraphVizManipulationMode(enum.Enum):
    """Mouse-interaction modes supported by the QGraphViz widget.

    The active mode determines how mouse gestures on the canvas are
    interpreted (member names describe the intended gesture; exact
    handling is implemented in the widget's mouse-event methods).
    """
    Static=0             # view-only, no interactive manipulation
    Nodes_Move_Mode=1    # nodes can be repositioned
    Edges_Connect_Mode=2 # gestures create edges between nodes
    Node_remove_Mode=3   # gestures remove nodes
    Edge_remove_Mode=4   # gestures remove edges
    Subgraph_remove_Mode=5  # gestures remove subgraphs
class QGraphViz_Core(QWidget):
"""
Main graphviz widget to draw and interact with graphs
"""
def __init__(
    self,
    parent=None,
    engine=None,
    auto_freeze=False,
    show_subgraphs = True,
    manipulation_mode=QGraphVizManipulationMode.Nodes_Move_Mode,
    # Callbacks
    new_edge_beingAdded_callback=None, # A callback called when a new connection is being added (should return True or False to accept or not the edge, as well as return the edge parameters)
    new_edge_created_callback=None, # A callback called when a new connection is created between two nodes using the GUI
    node_selected_callback=None, # A callback called when a node is clicked
    edge_selected_callback=None, # A callback called when an edge is clicked
    node_invoked_callback=None, # A callback called when a node is double clicked
    edge_invoked_callback=None, # A callback called when an edge is double clicked
    node_removed_callback=None, # A callback called when a node is removed
    edge_removed_callback=None, # A callback called when an edge is removed
    # Custom options
    min_cursor_edge_dist=3,
    hilight_Nodes=False,
    hilight_Edges=False
):
    """
    QGraphViz widget constructor.

    :param parent: A QWidget parent of the QGraphViz widget
    :param engine: The graph processing engine (for example a Dot engine)
    :param auto_freeze: If True, node positions are persisted automatically
        (see `freeze`/`unfreeze`)
    :param show_subgraphs: Tells whether to show the content of subgraphs or not
    :param manipulation_mode: Sets the current graph manipulation mode
        (a QGraphVizManipulationMode value)
    :param new_edge_beingAdded_callback: A callback issued when a new edge is
        being added. It should return a boolean to accept or refuse the edge.
    :param new_edge_created_callback: A callback issued when a new edge is added.
    :param node_selected_callback: A callback issued when a node is selected.
    :param edge_selected_callback: A callback issued when an edge is selected.
    :param node_invoked_callback: A callback issued when a node is double clicked.
    :param edge_invoked_callback: A callback issued when an edge is double clicked.
    :param node_removed_callback: A callback issued when a node is removed.
    :param edge_removed_callback: A callback issued when an edge is removed.
    :param min_cursor_edge_dist: Minimal distance between the cursor and an
        edge for the edge to count as hovered.
    :param hilight_Nodes: If True, a node is highlighted when the mouse
        hovers over it.
    :param hilight_Edges: If True, an edge is highlighted when the mouse
        hovers over it.
    """
    QWidget.__init__(self,parent)
    self.setSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)
    # Set core stuff
    self.parser = DotParser()
    self.engine=engine
    # Set autofreeze status
    self.auto_freeze = auto_freeze
    # Prepare lists of rendered items
    self.qnodes=[]
    self.qedges=[]
    # Nodes manipulation state (current selection/hover and mouse state)
    self.manipulation_mode = manipulation_mode
    self.selected_Node = None
    self.hovered_Node = None
    self.hovered_Edge = None
    self.hovered_Edge_id = None
    self.current_pos = [0,0]
    self.mouse_down=False
    self.min_cursor_edge_dist=min_cursor_edge_dist
    self.show_subgraphs=show_subgraphs
    # Set callbacks
    self.new_edge_beingAdded_callback = new_edge_beingAdded_callback
    self.new_edge_created_callback = new_edge_created_callback
    self.node_selected_callback = node_selected_callback
    self.edge_selected_callback = edge_selected_callback
    self.node_invoked_callback = node_invoked_callback
    self.edge_invoked_callback = edge_invoked_callback
    self.node_removed_callback=node_removed_callback
    self.edge_removed_callback=edge_removed_callback
    self.hilight_Nodes=hilight_Nodes
    self.hilight_Edges=hilight_Edges
    # widget paints its own background and receives hover events
    self.setAutoFillBackground(True)
    self.setAttribute(Qt.WA_StyledBackground, True)
    self.setMouseTracking(True)
# =================== Exposed methods =======================
    def build(self):
        """Run the layout engine and grow the widget to fit the laid-out graph."""
        self.engine.build()
        self.updateSize()
def freeze(self):
"""
freezes the graph and saves the current nodes positions
to the node parameters. When loading from JSON, the previous
position will be reloaded
"""
for node in self.engine.graph.nodes:
node.kwargs["pos"]=node.pos
node.kwargs["size"]=node.size
def unfreeze(self):
"""
This removes the effect of the freeze function
If called, the nodes position can be recomputed in the future
"""
for node in self.engine.graph.nodes:
if("pos" in node.kwargs):
del node.kwargs["pos"]
del node.kwargs["size"]
    def new(self, engine):
        """
        Creates a new engine
        :param engine: An engine object (for example a Dot engine)
        """
        # Replacing the engine also replaces the current graph it owns.
        self.engine=engine
def addNode(self, graph, node_name, **kwargs):
"""
Adds a node to a graph or subgraph
"""
node = Node(node_name, graph, **kwargs)
graph.nodes.append(node)
return node
def addEdge(self, source, dest, kwargs):
"""
Connects two nodes from the same subgraph or
from two different subgraphs
If source and dest nodes belong to the same
Subgraph, the connection added to the subgraph
if the connection is between different subgraph notes
the connection is added to the main subgraph
"""
edge = Edge(source, dest)
edge.kwargs=kwargs
if(source.parent_graph == dest.parent_graph):
source.parent_graph.edges.append(edge)
else:
self.engine.graph.edges.append(edge)
return edge
def addSubgraph(self, parent_graph, subgraph_name, subgraph_type= GraphType.SimpleGraph, **kwargs):
subgraph = Graph(subgraph_name,subgraph_type, parent_graph, **kwargs)
subgraph.name = subgraph_name
subgraph.parent_graph = parent_graph
parent_graph.nodes.append(subgraph)
return subgraph
def removeNode(self, node):
graph = node.parent_graph
if(node in graph.nodes):
idx = graph.nodes.index(node)
node = graph.nodes[idx]
if(self.node_removed_callback is not None):
self.node_removed_callback(node)
for edge in node.in_edges:
del edge.source.out_edges[edge.source.out_edges.index(edge)]
if edge.source.parent_graph == edge.dest.parent_graph:
del edge.source.parent_graph.edges[edge.source.parent_graph.edges.index(edge)]
else:
del self.engine.graph.edges[self.engine.graph.edges.index(edge)]
for edge in node.out_edges:
del edge.source.out_edges[edge.source.out_edges.index(edge)]
if edge.source.parent_graph == edge.dest.parent_graph:
del edge.source.parent_graph.edges[edge.source.parent_graph.edges.index(edge)]
else:
del self.engine.graph.edges[self.engine.graph.edges.index(edge)]
del graph.nodes[idx]
self.repaint()
    def removeSubgraph(self, subgraph):
        """Remove ``subgraph`` from its parent graph (if present) and repaint.

        Fires ``node_removed_callback`` before the removal, mirroring
        removeNode().
        NOTE(review): this searches ``graph.subgraphs`` while addSubgraph()
        appends children to ``parent_graph.nodes`` — confirm the Graph class
        mirrors children into a ``subgraphs`` list, otherwise removal may
        never match.
        """
        graph = subgraph.parent_graph
        if(subgraph in graph.subgraphs):
            idx = graph.subgraphs.index(subgraph)
            subgraph = graph.subgraphs[idx]
            if(self.node_removed_callback is not None):
                self.node_removed_callback(subgraph)
            del graph.subgraphs[idx]
            self.repaint()
def removeEdge(self, edge):
if(edge in self.engine.graph.edges):
source = edge.source
dest = edge.dest
if(self.edge_removed_callback is not None):
self.edge_removed_callback(edge)
idx = source.out_edges.index(edge)
del source.out_edges[idx]
idx = dest.in_edges.index(edge)
del dest.in_edges[idx]
if edge.source.parent_graph == edge.dest.parent_graph:
del edge.source.parent_graph.edges[edge.source.parent_graph.edges.index(edge)]
else:
del self.engine.graph.edges[self.engine.graph.edges.index(edge)]
self.repaint()
    def load_file(self, filename):
        """Parse a DOT file, rebuild the layout and repaint.

        The path is remembered in ``engine.current_path`` (later used to
        resolve relative image paths at paint time).
        """
        self.engine.graph = self.parser.parseFile(filename)
        self.engine.current_path = filename
        self.build()
        self.update()
    def loadAJson(self, filename):
        """Load a graph previously saved with :meth:`saveAsJson`, rebuild and
        repaint. The path is remembered in ``engine.current_path``."""
        self.engine.graph = self.parser.fromJSON(filename)
        self.engine.current_path = filename
        self.build()
        self.update()
    def save(self, filename):
        """Save the current graph in DOT format to ``filename``."""
        #BUGFIX : unhilight node before saving
        # (otherwise the temporarily widened hover pen width would be saved).
        # NOTE(review): assumes hovered_Node_Back_width was recorded when
        # hovered_Node became non-None (done in mouseMoveEvent).
        if(self.hovered_Node is not None):
            self.hovered_Node.kwargs["width"] = self.hovered_Node_Back_width
            self.hovered_Node = None
        self.parser.save(filename, self.engine.graph)
        self.engine.current_path=filename
    def saveAsJson(self, filename):
        """Serialize the current graph (including any frozen positions) to
        JSON and remember the path in ``engine.current_path``."""
        self.parser.toJSON(filename, self.engine.graph)
        self.engine.current_path=filename
# ================== Helper methods ==================
def findSubNode(self, graph, x, y):
for node in graph.nodes:
gpos=node.global_pos
if(
type(node)==Graph and
gpos[0]-node.size[0]/2<x and gpos[0]+node.size[0]/2>x and
gpos[1]-node.size[1]/2<y and gpos[1]+node.size[1]/2>y
):
return node
return None
def isNodeHovered(self, n, x, y):
gpos=n.global_pos
if(
gpos[0]-n.size[0]/2<x and gpos[0]+n.size[0]/2>x and
gpos[1]-n.size[1]/2<y and gpos[1]+n.size[1]/2>y
):
return True
else:
return False
def isEdgeHovered(self, graph, i, e, x, y):
nb_next=0
for j in range(i, len(graph.edges)):
if(graph.edges[j].source==e.source and graph.edges[j].dest==e.dest):
nb_next+=1
offset=[0,0]
if(nb_next%2==1):
offset[0]=20*(nb_next/2)
else:
offset[0]=-20*(nb_next/2)
sx=e.source.pos[0] if e.source.pos[0]< e.dest.pos[0] else e.dest.pos[0]
sy=e.source.pos[1] if e.source.pos[1]< e.dest.pos[1] else e.dest.pos[1]
ex=e.source.pos[0] if e.source.pos[0]> e.dest.pos[0] else e.dest.pos[0]
ey=e.source.pos[1] if e.source.pos[1]> e.dest.pos[1] else e.dest.pos[1]
sx += +offset[0]
ex += +offset[0]
if(x>sx-self.min_cursor_edge_dist and x<ex+self.min_cursor_edge_dist and
y>sy-self.min_cursor_edge_dist and y<ey+self.min_cursor_edge_dist):
x2 = x-sx
y2 = y-sy
dx = (ex-sx)
dy = (ey-sy)
if(dx == 0):
if(abs(x2)<self.min_cursor_edge_dist):
return True
elif(dy == 0):
if(abs(y2)<self.min_cursor_edge_dist):
return True
else:
a = -dy/dx
if(abs(a*x2+y2)/math.sqrt(a**2)<self.min_cursor_edge_dist):
return True
return False
def findNode(self, graph, x, y):
for n in graph.nodes:
if(self.isNodeHovered(n, x, y)):
return n
return None
def findEdge(self, graph, x, y):
for i,e in enumerate(graph.edges):
if(self.isEdgeHovered(graph, i, e, x, y)):
return e,i
return None,0
    def getRect_Size(self):
        """Return the graph's bounding rectangle as ``(x, y, w, h)``."""
        return self.engine.graph.getRect()
# ================== Mouse events section ===========================
def mouseDoubleClickEvent(self, event):
x = event.x()
y = event.y()
n = self.findNode(self.engine.graph, x,y)
if n is not None:
if(self.node_invoked_callback is not None):
self.node_invoked_callback(n)
else:
e,_ = self.findEdge(self.engine.graph, x, y)
if e is not None:
if(self.edge_invoked_callback is not None):
self.edge_invoked_callback(e)
QWidget.mouseDoubleClickEvent(self, event)
self.leaveEvent()
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
x = event.x()
y = event.y()
self.current_pos = [x,y]
self.mouse_down=True
n = self.findNode(self.engine.graph, x, y)
self.selected_Node = n
if(n is None):
n = self.findSubNode(self.engine.graph, x,y)
self.selected_Node = n
QWidget.mousePressEvent(self, event)
def leaveEvent(self, event=None):
"""
Used to reset some parameters when the mouse leaves the QWidget
"""
self.selected_Node=None
self.mouse_down=False
if(self.hovered_Node is not None):
self.hovered_Node.kwargs["width"] = self.hovered_Node_Back_width
self.hovered_Node = None
if(self.hovered_Edge is not None):
self.hovered_Edge.kwargs["width"] = self.hovered_Edge_Back_width
self.hovered_Edge = None
self.update()
if(event!=None):
event.accept()
    def mouseMoveEvent(self, event):
        """Track the cursor: drag the selected node in move mode, otherwise
        manage hover highlighting of nodes and edges.

        Highlighting temporarily widens the hovered item's pen width; the
        previous width is remembered in ``hovered_Node_Back_width`` /
        ``hovered_Edge_Back_width`` and restored on un-hover (or in
        leaveEvent()).
        """
        if self.selected_Node is not None and self.mouse_down:
            x = event.x()
            y = event.y()
            if(self.manipulation_mode==QGraphVizManipulationMode.Nodes_Move_Mode):
                # Dragging: translate the node by the cursor delta since the
                # last move event.
                self.selected_Node.pos[0] += x-self.current_pos[0]
                self.selected_Node.pos[1] += y-self.current_pos[1]
                self.current_pos = [x,y]
                self.repaint()
        else:
            x = event.x()
            y = event.y()
            if(self.hilight_Nodes):
                if(self.hovered_Node is None):
                    # Entering a node: remember its pen width, then widen it.
                    self.hovered_Node = self.findNode(self.engine.graph, x, y)
                    if(self.hovered_Node is not None):
                        if "width" in list(self.hovered_Node.kwargs.keys()):
                            self.hovered_Node_Back_width=self.hovered_Node.kwargs["width"]
                        else:
                            self.hovered_Node_Back_width=1
                        self.hovered_Node.kwargs["width"] = self.hovered_Node_Back_width+3
                        self.update()
                else:
                    # Leaving the node: restore the saved pen width.
                    if not(self.isNodeHovered(self.hovered_Node, x, y)):
                        self.hovered_Node.kwargs["width"] = self.hovered_Node_Back_width
                        self.hovered_Node = None
                        self.update()
            if(self.hilight_Edges):
                if(self.hovered_Edge is None):
                    # Entering an edge: same width trick as for nodes.
                    self.hovered_Edge, self.hovered_Edge_id = self.findEdge(self.engine.graph, x, y)
                    if(self.hovered_Edge is not None):
                        if "width" in list(self.hovered_Edge.kwargs.keys()):
                            self.hovered_Edge_Back_width=self.hovered_Edge.kwargs["width"]
                        else:
                            self.hovered_Edge_Back_width=1
                        self.hovered_Edge.kwargs["width"] = self.hovered_Edge_Back_width+3
                        self.update()
                else:
                    # Leaving the edge: restore the saved pen width.
                    if not(self.isEdgeHovered(self.engine.graph, self.hovered_Edge_id, self.hovered_Edge, x, y)):
                        self.hovered_Edge.kwargs["width"] = self.hovered_Edge_Back_width
                        self.hovered_Edge = None
                        self.update()
        QWidget.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
x = event.x()
y = event.y()
n = self.findNode(self.engine.graph, x, y)
if n is None:
s = self.findSubNode(self.engine.graph, x,y)
if n is None:
e, _ = self.findEdge(self.engine.graph, x,y)
else:
e = None
# Manipulating nodes
if(self.manipulation_mode==QGraphVizManipulationMode.Nodes_Move_Mode):
if self.selected_Node is not None and self.mouse_down:
selected_Node = self.selected_Node
s = self.findSubNode(self.engine.graph, x,y)
if(s is not None and s!=selected_Node):
if(type(selected_Node)==Node):
del selected_Node.parent_graph.nodes[selected_Node.parent_graph.nodes.index(selected_Node)]
s.nodes.append(selected_Node)
selected_Node.parent_graph = s
if(type(selected_Node)==Graph):
del selected_Node.parent_graph.nodes[selected_Node.parent_graph.nodes.index(selected_Node)]
s.nodes.append(selected_Node)
selected_Node.parent_graph = s
if(self.auto_freeze):
self.freeze()
if(self.auto_freeze):
self.freeze()
self.build()
self.repaint()
# Connecting edges
if(self.manipulation_mode==QGraphVizManipulationMode.Edges_Connect_Mode):
if self.selected_Node is not None and self.mouse_down:
selected_Node = self.selected_Node
d = n if n is not None else s
if(d!=selected_Node and d is not None):
add_the_edge=True
if(self.new_edge_beingAdded_callback is not None):
add_the_edge, kwargs=self.new_edge_beingAdded_callback(selected_Node, d)
else:
kwargs={}
if add_the_edge:
edge = self.addEdge(selected_Node, d, kwargs)
if(add_the_edge):
if(self.new_edge_created_callback is not None):
self.new_edge_created_callback(edge)
self.build()
self.selected_Node=None
# Removing node
elif(self.manipulation_mode==QGraphVizManipulationMode.Node_remove_Mode):
if(n is not None):
self.removeNode(n)
self.build()
self.repaint()
#Removing edge
elif(self.manipulation_mode==QGraphVizManipulationMode.Edge_remove_Mode):
if(e is not None):
self.removeEdge(e)
self.build()
self.repaint()
# Remiving Subgraph
elif(self.manipulation_mode==QGraphVizManipulationMode.Subgraph_remove_Mode):
if(s is not None):
self.removeSubgraph(s)
self.build()
self.repaint()
# Inform application
if(n is not None):
if(self.node_selected_callback is not None):
self.node_selected_callback(n)
if( e is not None):
if(self.edge_selected_callback is not None):
self.edge_selected_callback(e)
self.updateSize()
QWidget.mouseReleaseEvent(self, event)
self.mouse_down=False
self.repaint()
# ============= Painting section ===============
    def paintSubgraph(self, subgraph, painter, pen, brush):
        """Draw a subgraph as a rectangle (optionally gradient-filled) with
        its label centered along the top edge.

        Pen color/width and the fill come from the subgraph's kwargs
        (``color``, ``fillcolor``, ``width``); ``fillcolor`` may be a
        colon-separated list of colors, producing a horizontal gradient.
        """
        if("color" in subgraph.kwargs.keys()):
            pen.setColor(QColor(subgraph.kwargs["color"]))
        else:
            pen.setColor(QColor("black"))
        if("fillcolor" in subgraph.kwargs.keys()):
            if(":" in subgraph.kwargs["fillcolor"]):
                # "c1:c2:..." -> evenly spaced horizontal gradient stops.
                gradient=QLinearGradient(subgraph.pos[0]-subgraph.size[0]/2, subgraph.pos[1], subgraph.pos[0]+subgraph.size[0]/2, subgraph.pos[1])
                c=subgraph.kwargs["fillcolor"].split(":")
                for i, col in enumerate(c):
                    stop = i/(len(c)-1)
                    gradient.setColorAt(stop, QColor(col))
                brush = QBrush(gradient)
            else:
                brush=QBrush(QColor(subgraph.kwargs["fillcolor"]))
        else:
            brush=QBrush(QColor("white"))
        if("width" in subgraph.kwargs.keys()):
            pen.setWidth(int(subgraph.kwargs["width"]))
        else:
            pen.setWidth(1)
        painter.setPen(pen)
        painter.setBrush(brush)
        gpos = subgraph.global_pos
        # The rectangle is centered on the subgraph's global position.
        painter.drawRect(
            gpos[0]-subgraph.size[0]/2,
            gpos[1]-subgraph.size[1]/2,
            subgraph.size[0], subgraph.size[1])
        if("label" in subgraph.kwargs.keys()):
            painter.drawText(
                gpos[0]-subgraph.size[0]/2,
                gpos[1]-subgraph.size[1]/2,
                subgraph.size[0], subgraph.size[1],
                Qt.AlignCenter|Qt.AlignTop,subgraph.kwargs["label"])
def paintGraph(self, graph, painter):
brush = QBrush(Qt.SolidPattern)
pen=QPen()
brush.setColor(Qt.white)
for i,edge in enumerate(graph.edges):
if("color" in edge.kwargs.keys()):
pen.setColor(QColor(edge.kwargs["color"]))
else:
pen.setColor(QColor("black"))
if("width" in edge.kwargs.keys()):
pen.setWidth(int(edge.kwargs["width"]))
else:
pen.setWidth(1)
painter.setPen(pen)
painter.setBrush(brush)
if(edge.source.parent_graph !=graph and not self.show_subgraphs):
gspos = edge.source.parent_graph.global_pos
else:
gspos = edge.source.global_pos
if(edge.dest.parent_graph !=graph and not self.show_subgraphs):
gspos = edge.dest.parent_graph.global_pos
else:
gdpos = edge.dest.global_pos
nb_next=0
for j in range(i, len(graph.edges)):
if(graph.edges[j].source==edge.source and graph.edges[j].dest==edge.dest):
nb_next+=1
offset=[0,0]
if(nb_next%2==1):
offset[0]=20*(nb_next/2)
else:
offset[0]=-20*(nb_next/2)
path = QPainterPath()
path.moveTo(gspos[0],gspos[1])
path.cubicTo(gspos[0],gspos[1],offset[0]+(gspos[0]+gdpos[0])/2,(gspos[1]+gdpos[1])/2,gdpos[0],gdpos[1])
painter.strokePath(path, pen)
"""
painter.drawLine(gspos[0],gspos[1],
gdpos[0],
gdpos[1])
"""
if(self.show_subgraphs):
for node in graph.nodes:
if type(node)==Graph:
subgraph = node
self.paintSubgraph(subgraph, painter, pen, brush)
# TODO : add more painting parameters
for node in graph.nodes:
if type(node)!=Graph:
if("color" in node.kwargs.keys()):
pen.setColor(QColor(node.kwargs["color"]))
else:
pen.setColor(QColor("black"))
if("fillcolor" in node.kwargs.keys()):
if(":" in node.kwargs["fillcolor"]):
gradient=QLinearGradient(node.pos[0]-node.size[0]/2, node.pos[1], node.pos[0]+node.size[0]/2, node.pos[1])
c=node.kwargs["fillcolor"].split(":")
for i, col in enumerate(c):
stop = i/(len(c)-1)
gradient.setColorAt(stop, QColor(col))
brush = QBrush(gradient)
else:
brush=QBrush(QColor(node.kwargs["fillcolor"]))
else:
brush=QBrush(QColor("white"))
if("width" in node.kwargs.keys()):
pen.setWidth(int(node.kwargs["width"]))
else:
pen.setWidth(1)
gpos = node.global_pos
painter.setPen(pen)
painter.setBrush(brush)
if("shape" in node.kwargs.keys()):
if(node.kwargs["shape"]=="box"):
painter.drawRect(
gpos[0]-node.size[0]/2,
gpos[1]-node.size[1]/2,
node.size[0], node.size[1])
elif(node.kwargs["shape"]=="circle"):
painter.drawEllipse(
gpos[0]-node.size[0]/2,
gpos[1]-node.size[1]/2,
node.size[0], node.size[1])
elif(node.kwargs["shape"]=="triangle"):
rect = QRect(gpos[0]-node.size[0]/2, gpos[1]-2*node.size[1]/3, node.size[0], node.size[1])
path = QPainterPath()
path.moveTo(rect.left() + (rect.width() / 2), rect.top())
path.lineTo(rect.bottomLeft())
path.lineTo(rect.bottomRight())
path.lineTo(rect.left() + (rect.width() / 2), rect.top())
painter.fillPath(path, brush)
painter.drawPath(path)
elif(node.kwargs["shape"]=="polygon"):
rect = QRect(gpos[0]-node.size[0]/2, gpos[1]-node.size[1]/2, node.size[0], node.size[1])
path = QPainterPath()
path.moveTo(rect.left() + (rect.width() / 4), rect.top())
path.lineTo(rect.left() + 3*rect.width()/4, rect.top())
path.lineTo(rect.left() + rect.width(), rect.top() + rect.height()/2)
path.lineTo(rect.left() + 3*rect.width()/4, rect.top() + rect.height())
path.lineTo(rect.left() + rect.width()/4, rect.top() + rect.height())
path.lineTo(rect.left(), rect.top() + rect.height()/2)
path.lineTo(rect.left() + (rect.width() / 4), rect.top())
painter.fillPath(path, brush)
painter.drawPath(path)
elif(node.kwargs["shape"]=="diamond"):
rect = QRect(gpos[0]-node.size[0]/2, gpos[1]-node.size[1]/2, node.size[0], node.size[1])
path = QPainterPath()
path.moveTo(rect.left() + (rect.width() / 2), rect.top())
path.lineTo(rect.left() + rect.width(), rect.top() + rect.height()/2)
path.lineTo(rect.left() + rect.width()/2, rect.top() + rect.height())
path.lineTo(rect.left(), rect.top() + rect.height()/2)
path.lineTo(rect.left() + (rect.width() / 2), rect.top())
painter.fillPath(path, brush)
painter.drawPath(path)
else: # assuming this is an image
# this parameter can be either direct image path
# or a relative path (relative to the file path)
# It can contain the format path,width,height
# or simple path in which case the image file size will be used
image = None
width = 0
height = 0
if("," in node.kwargs["shape"]): # if there is a , in the shape, the first part is the path, then width, then height
img_params = node.kwargs["shape"].split(",")
if len(img_params)==3:# img:width:height
img_path = img_params[0]
width = int(img_params[1])
height = int(img_params[2])
img_path2 = os.path.join(os.path.dirname(self.engine.current_path),img_path)
if(os.path.isfile(img_path)):
image = QImage(img_path)
elif(os.path.isfile(img_path2)):
image = QImage(img_path2)
else:
img_path = node.kwargs["shape"]
img_path2 = os.path.join(os.path.dirname(self.engine.current_path),img_path)
if(os.path.isfile(img_path)):
image = QImage(img_path)
width = image.size().width()
height = image.size().height()
elif(os.path.isfile(img_path2)):
image = QImage(img_path2)
width = image.size().width()
height = image.size().height()
if width==0:
width=100
if height==0:
height=100
if image is not None:
node.size[0] = width if width>node.size[0] else node.size[0]
node.size[1] = height if height>node.size[1] else node.size[1]
painter.drawImage(
QRect(
gpos[0]-node.size[0]/2,
gpos[1]-node.size[1]/2,
node.size[0],
node.size[1]),
image)
else:
painter.drawEllipse(
gpos[0]-node.size[0]/2,
gpos[1]-node.size[1]/2,
node.size[0], node.size[1])
if("label" in node.kwargs.keys()):
txt = node.kwargs["label"].split("\n")
width = 0
height = 0
for t in txt:
if(t==""):
t="A"
rect = self.engine.fm.boundingRect(t)
width=rect.width() if rect.width()>width else width
height+=rect.height()
width+=self.engine.margins[0]
height+self.engine.margins[1]
painter.drawText(
gpos[0]-width/2,
gpos[1]-height/2,
width, height,
Qt.AlignCenter|Qt.AlignTop,node.kwargs["label"])
else:
if(self.show_subgraphs):
self.paintGraph(subgraph, painter)
else:
subgraph = node
self.paintSubgraph(subgraph, painter, pen, brush)
def paintEvent(self, event):
painter = QPainter(self)
painter.setFont(self.engine.font)
self.paintGraph(self.engine.graph,painter)
if( self.manipulation_mode==QGraphVizManipulationMode.Edges_Connect_Mode and
self.mouse_down and
self.selected_Node is not None):
bkp = painter.pen()
pen=QPen(Qt.DashLine)
painter.setPen(pen)
painter.drawLine(self.selected_Node.pos[0], self.selected_Node.pos[1],
self.current_pos[0],self.current_pos[1])
painter.setPen(bkp)
painter.end()
def updateSize(self):
x,y,w,h = self.getRect_Size()
w=x+w
h=y+h
self.setMinimumWidth(w)
self.setMinimumHeight(h)
if(self.parent is not None):
if(self.minimumWidth()<self.parent().width()):
self.setMinimumWidth(self.parent().width())
if(self.minimumHeight()<self.parent().height()):
self.setMinimumHeight(self.parent().height())
class QGraphViz(QScrollArea):
    """Scrollable GraphViz widget.

    Hosts a :class:`QGraphViz_Core` (which does the layout, painting and
    mouse interaction) inside a QScrollArea and forwards the public API to
    it, so large graphs can be scrolled.
    """
    def __init__(
                    self,
                    parent=None,
                    engine=None,
                    auto_freeze=False,
                    show_subgraphs = True,
                    manipulation_mode=QGraphVizManipulationMode.Nodes_Move_Mode,
                    # Callbacks
                    new_edge_beingAdded_callback=None, # A callback called when a new connection is being added (should return True or False to accept or not the edge, as well as return the edge parameters)
                    new_edge_created_callback=None, # A callbakc called when a new connection is created between two nodes using the GUI
                    node_selected_callback=None, # A callback called when a node is clicked
                    edge_selected_callback=None, # A callback called when an edge is clicked
                    node_invoked_callback=None, # A callback called when a node is double clicked
                    edge_invoked_callback=None, # A callback called when an edge is double clicked
                    node_removed_callback=None, # A callback called when a node is removed
                    edge_removed_callback=None, # A callback called when an edge is removed
                    # Custom options
                    min_cursor_edge_dist=3,
                    hilight_Nodes=False,
                    hilight_Edges=False
                ):
        QScrollArea.__init__(self, parent)
        # The core widget does all the work; every argument is forwarded.
        self.core = QGraphViz_Core(
            parent=parent,
            engine=engine,
            auto_freeze=auto_freeze,
            show_subgraphs = show_subgraphs,
            manipulation_mode=manipulation_mode,
            # Callbacks
            new_edge_beingAdded_callback=new_edge_beingAdded_callback, # A callback called when a new connection is being added (should return True or False to accept or not the edge, as well as return the edge parameters)
            new_edge_created_callback=new_edge_created_callback, # A callbakc called when a new connection is created between two nodes using the GUI
            node_selected_callback=node_selected_callback, # A callback called when a node is clicked
            edge_selected_callback=edge_selected_callback, # A callback called when an edge is clicked
            node_invoked_callback=node_invoked_callback, # A callback called when a node is double clicked
            edge_invoked_callback=edge_invoked_callback, # A callback called when an edge is double clicked
            node_removed_callback=node_removed_callback, # A callback called when a node is removed
            edge_removed_callback=edge_removed_callback, # A callback called when an edge is removed
            # Custom options
            min_cursor_edge_dist=min_cursor_edge_dist,
            hilight_Nodes=hilight_Nodes,
            hilight_Edges=hilight_Edges
        )
        self.setWidgetResizable(True)
        self.setWidget(self.core)
    # ======== core forward properties ============
    @property
    def engine(self):
        """The layout engine of the wrapped core widget."""
        return self.core.engine
    @engine.setter
    def engine(self, engine):
        self.core.engine=engine
    @property
    def manipulation_mode(self):
        """Current QGraphVizManipulationMode of the wrapped core widget."""
        return self.core.manipulation_mode
    @manipulation_mode.setter
    def manipulation_mode(self, manipulation_mode):
        self.core.manipulation_mode = manipulation_mode
    # ======== core forward functions ============
    # =================== Exposed methods =======================
    def build(self):
        """Run the layout engine and resize the core to fit the graph."""
        self.core.build()
    def freeze(self):
        """
        freezes the graph and saves the current nodes positions
        to the node parameters. When loading from JSON, the previous
        position will be reloaded
        """
        self.core.freeze()
    def unfreeze(self):
        """
        This removes the effect of the freeze function
        If called, the nodes position can be recomputed in the future
        """
        self.core.unfreeze()
    def new(self, engine):
        """
        Creates a new engine
        :param engine: An engine object (for example a Dot engine)
        """
        self.core.new(engine)
    def addNode(self, graph, node_name, **kwargs):
        """
        Adds a node to a graph or subgraph
        """
        return self.core.addNode(graph, node_name, **kwargs)
    def addEdge(self, source, dest, kwargs):
        """
        Connects two nodes from the same subgraph or
        from two different subgraphs
        If source and dest nodes belong to the same
        Subgraph, the connection added to the subgraph
        if the connection is between different subgraph notes
        the connection is added to the main subgraph
        """
        return self.core.addEdge(source, dest, kwargs)
    def addSubgraph(self, parent_graph, subgraph_name, subgraph_type= GraphType.SimpleGraph, **kwargs):
        """Create and return a subgraph under ``parent_graph``."""
        return self.core.addSubgraph(parent_graph, subgraph_name, subgraph_type= subgraph_type, **kwargs)
    def removeNode(self, node):
        """Remove a node and its incident edges from the graph."""
        self.core.removeNode(node)
    def removeSubgraph(self, subgraph):
        """Remove a subgraph from its parent graph."""
        self.core.removeSubgraph(subgraph)
    def removeEdge(self, edge):
        """Remove an edge from the graph."""
        self.core.removeEdge(edge)
    def load_file(self, filename):
        """Load a DOT file into the core widget."""
        self.core.load_file(filename)
    def loadAJson(self, filename):
        """Load a JSON graph (saved with saveAsJson) into the core widget."""
        self.core.loadAJson(filename)
    def save(self, filename):
        """Save the current graph in DOT format."""
        self.core.save(filename)
    def saveAsJson(self, filename):
        """Save the current graph (with frozen positions) as JSON."""
        self.core.saveAsJson(filename)
    # ===== events
    def resizeEvent(self, event):
        """Keep the core widget at least as large as the scroll area."""
        # NOTE(review): QScrollArea.resizeEvent is not chained here — confirm
        # this is intentional (scroll bars still track via minimum sizes).
        self.core.updateSize()
        if(self.core.minimumWidth()<self.width()):
            self.core.setMinimumWidth(self.width())
        if(self.core.minimumHeight()<self.height()):
            self.core.setMinimumHeight(self.height())
| 41.35911 | 230 | 0.539354 |
f64cce0ffd1ce5f1e3baddf79293be4290d79f09 | 1,492 | py | Python | mmcls/models/necks/gap.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | 1,190 | 2020-07-10T01:16:01.000Z | 2022-03-31T09:48:38.000Z | mmcls/models/necks/gap.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | 702 | 2020-07-13T13:31:33.000Z | 2022-03-31T06:48:04.000Z | mmcls/models/necks/gap.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | 502 | 2020-07-10T02:40:55.000Z | 2022-03-31T02:07:09.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import NECKS
@NECKS.register_module()
class GlobalAveragePooling(nn.Module):
    """Global Average Pooling neck.

    Pools every feature map down to one value per channel, then flattens the
    result to ``(N, C)``. ``view`` (rather than ``squeeze``) removes the
    spatial axes so that a batch dimension of size 1 survives.

    Args:
        dim (int): Dimensionality of the pooled samples, one of {1, 2, 3}.
            Default: 2.
    """
    def __init__(self, dim=2):
        super(GlobalAveragePooling, self).__init__()
        assert dim in [1, 2, 3], 'GlobalAveragePooling dim only support ' \
            f'{1, 2, 3}, get {dim} instead.'
        # Select the adaptive pooling layer matching the sample rank.
        if dim == 1:
            self.gap = nn.AdaptiveAvgPool1d(1)
        elif dim == 2:
            self.gap = nn.AdaptiveAvgPool2d((1, 1))
        else:
            self.gap = nn.AdaptiveAvgPool3d((1, 1, 1))
    def init_weights(self):
        # No learnable parameters: nothing to initialise.
        pass
    def forward(self, inputs):
        """Pool and flatten a tensor, or each tensor of a multi-scale tuple."""
        if isinstance(inputs, tuple):
            return tuple(self.gap(t).view(t.size(0), -1) for t in inputs)
        if isinstance(inputs, torch.Tensor):
            return self.gap(inputs).view(inputs.size(0), -1)
        raise TypeError('neck inputs should be tuple or torch.tensor')
| 32.434783 | 78 | 0.597185 |
88039dc084cb4b2ee33dc4dd6a2c4262f5360fd9 | 5,525 | py | Python | flask_website/database.py | AutoAllenWu/flask-website | 70c1ea729a6b81deca4726d505dc019d7139f0bc | [
"BSD-3-Clause"
] | null | null | null | flask_website/database.py | AutoAllenWu/flask-website | 70c1ea729a6b81deca4726d505dc019d7139f0bc | [
"BSD-3-Clause"
] | null | null | null | flask_website/database.py | AutoAllenWu/flask-website | 70c1ea729a6b81deca4726d505dc019d7139f0bc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, String, DateTime, \
ForeignKey, event
from sqlalchemy.orm import scoped_session, sessionmaker, backref, relation
from sqlalchemy.ext.declarative import declarative_base
from werkzeug import cached_property, http_date
from flask import url_for, Markup
from flask_website import app, search
# Database engine plus a thread-local session registry: ``scoped_session``
# hands every thread its own Session. ``convert_unicode`` is a legacy
# SQLAlchemy/Python-2 option kept from the original code.
engine = create_engine(app.config['DATABASE_URI'],
                       convert_unicode=True,
                       **app.config['DATABASE_CONNECT_OPTIONS'])
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
def init_db():
    """Create all tables declared on the ``Model`` base (idempotent)."""
    Model.metadata.create_all(bind=engine)
# Declarative base for all ORM models; ``Model.query`` lets every model run
# queries through the scoped session (e.g. ``User.query.get(1)``).
Model = declarative_base(name='Model')
Model.query = db_session.query_property()
class User(Model):
    """A registered user, identified by an OpenID URL."""
    __tablename__ = 'users'
    id = Column('user_id', Integer, primary_key=True)
    openid = Column('openid', String(200))
    name = Column(String(200))
    def __init__(self, name, openid):
        self.name = name
        self.openid = openid
    def to_json(self):
        """Serialize the public fields for API responses."""
        return dict(name=self.name, is_admin=self.is_admin)
    @property
    def is_admin(self):
        """True when this user's OpenID is listed in the ADMINS config."""
        return self.openid in app.config['ADMINS']
    def __eq__(self, other):
        # Equal when both are the same model type with the same primary key.
        return type(self) is type(other) and self.id == other.id
    def __ne__(self, other):
        # Python 2 does not derive != from ==, so spell it out explicitly.
        return not self.__eq__(other)
class Category(Model):
    """A snippet category; its URL slug is derived from the name."""
    __tablename__ = 'categories'
    id = Column('category_id', Integer, primary_key=True)
    name = Column(String(50))
    slug = Column(String(50))
    def __init__(self, name):
        self.name = name
        # e.g. "Utility Scripts" -> "utility-scripts"
        self.slug = '-'.join(name.split()).lower()
    def to_json(self):
        """Serialize name, slug and snippet count for API responses."""
        return dict(name=self.name, slug=self.slug, count=self.count)
    @cached_property
    def count(self):
        # Cached per instance: number of snippets filed in this category.
        return self.snippets.count()
    @property
    def url(self):
        """URL of the category listing page."""
        return url_for('snippets.category', slug=self.slug)
class Snippet(Model, search.Indexable):
    """A code snippet posted by a user and filed under a category.

    Also participates in the site's full-text search via
    ``search.Indexable``.
    """
    __tablename__ = 'snippets'
    id = Column('snippet_id', Integer, primary_key=True)
    author_id = Column(Integer, ForeignKey('users.user_id'))
    category_id = Column(Integer, ForeignKey('categories.category_id'))
    title = Column(String(200))
    body = Column(String)
    pub_date = Column(DateTime)
    author = relation(User, backref=backref('snippets', lazy='dynamic'))
    category = relation(Category, backref=backref('snippets', lazy='dynamic'))
    search_document_kind = 'snippet'
    def __init__(self, author, title, body, category):
        self.author = author
        self.title = title
        self.body = body
        self.category = category
        self.pub_date = datetime.utcnow()
    def to_json(self):
        """Serialize the snippet (rendered body, comments, author) for the API."""
        # NOTE: ``unicode`` — this module is Python 2 code.
        return dict(id=self.id, title=self.title,
                    body=unicode(self.rendered_body),
                    pub_date=http_date(self.pub_date),
                    comments=[c.to_json() for c in self.comments],
                    author=self.author.to_json(),
                    category=self.category.slug)
    def get_search_document(self):
        """Build the document dict handed to the search indexer."""
        return dict(
            id=unicode(self.id),
            title=self.title,
            keywords=[self.category.name],
            content=self.body
        )
    @classmethod
    def describe_search_result(cls, result):
        """Return highlighted markup for a search hit, or None when the
        snippet no longer exists or nothing could be highlighted."""
        obj = cls.query.get(int(result['id']))
        if obj is not None:
            text = obj.rendered_body.striptags()
            return Markup(result.highlights('content', text=text)) or None
    @property
    def url(self):
        """URL of the snippet detail page."""
        return url_for('snippets.show', id=self.id)
    @property
    def rendered_body(self):
        """Body rendered from Creole wiki markup to HTML."""
        # Imported lazily to avoid a circular import with flask_website.utils.
        from flask_website.utils import format_creole
        return format_creole(self.body)
class Comment(Model):
    """A user comment attached to a snippet."""
    __tablename__ = 'comments'
    id = Column('comment_id', Integer, primary_key=True)
    snippet_id = Column(Integer, ForeignKey('snippets.snippet_id'))
    author_id = Column(Integer, ForeignKey('users.user_id'))
    title = Column(String(200))
    text = Column(String)
    pub_date = Column(DateTime)
    snippet = relation(Snippet, backref=backref('comments', lazy=True))
    author = relation(User, backref=backref('comments', lazy='dynamic'))
    def __init__(self, snippet, author, title, text):
        self.snippet = snippet
        self.author = author
        self.title = title
        self.text = text
        self.pub_date = datetime.utcnow()
    def to_json(self):
        """Serialize the comment (with rendered text) for API responses."""
        # NOTE: ``unicode`` — this module is Python 2 code.
        return dict(author=self.author.to_json(),
                    title=self.title,
                    pub_date=http_date(self.pub_date),
                    text=unicode(self.rendered_text))
    @property
    def rendered_text(self):
        """Comment text rendered from Creole wiki markup to HTML."""
        # Imported lazily to avoid a circular import with flask_website.utils.
        from flask_website.utils import format_creole
        return format_creole(self.text)
class OpenIDAssociation(Model):
    """Server association record for the OpenID login flow.

    NOTE(review): fields mirror python-openid association attributes —
    presumably backing an SQLAlchemy OpenID store; confirm against the store
    implementation elsewhere in the project.
    """
    __tablename__ = 'openid_associations'
    id = Column('association_id', Integer, primary_key=True)
    server_url = Column(String(1024))
    handle = Column(String(255))
    secret = Column(String(255))
    issued = Column(Integer)
    lifetime = Column(Integer)
    assoc_type = Column(String(64))
class OpenIDUserNonce(Model):
    """Nonce record used to prevent replay of OpenID responses.

    NOTE(review): fields mirror python-openid nonce attributes — confirm
    against the store implementation elsewhere in the project.
    """
    __tablename__ = 'openid_user_nonces'
    id = Column('user_nonce_id', Integer, primary_key=True)
    server_url = Column(String(1024))
    timestamp = Column(Integer)
    salt = Column(String(40))
# Keep the full-text search index in sync with every flushed model change.
event.listen(db_session, 'after_flush', search.update_model_based_indexes)
| 30.865922 | 78 | 0.646335 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.