text
stringlengths 2
999k
|
|---|
#!/usr/bin/env python3
# Responder.py
# ~~~~~~~~~~~~
# This file is tasked with compiling a response given a list
# of call items (i.e. a list of results of DataPulls.getInfo)
def xstr(x):
    """Return str(x), mapping None to the empty string."""
    if x is None:
        return ''
    return str(x)
def xget(source, prop):
    """Safely look up *prop* in *source* and return it as a string.

    * dict source: ``prop`` is a key; missing keys yield ``''``.
    * list source: ``prop`` is an index; out-of-range (including negative)
      indexes yield ``''``.
    * any other source (e.g. None): yields ``''``.
    """
    # isinstance instead of `type(...) ==` so dict/list subclasses work too.
    if isinstance(source, dict):
        if prop in source:
            return xstr(source[prop])
    elif isinstance(source, list):
        if 0 <= prop < len(source):
            return xstr(source[prop])
    return ''
def xlist(L):
    """Coerce *L* to a list of strings, dropping None entries.

    None becomes []; a non-list value is wrapped in a one-element list.
    """
    if L is None:
        return []
    if not isinstance(L, list):
        L = [L]
    return [xstr(x) for x in L if x is not None]
# respondPokemon - function returns the response body
# of pokemon type objects (as returned by DataPulls.getInfo)
def respondPokemon(call_item):
    """Build the markdown response body for a pokemon info object.

    Fixes: the original indexed ``call_item['name']`` / ``['abilities']`` /
    ``['types']`` etc. directly, which raises KeyError on partial data even
    though the rest of the function defensively uses xget/xlist; it also
    dead-checked ``hidden_ability is not None`` although xget never returns
    None. All lookups now go through .get/xget/xlist.
    """
    basename = xget(call_item, 'basename')
    names = call_item.get('name') or {}
    en_name = xget(names, 'english')
    jp_name = xget(names, 'kana')
    rj_name = xget(names, 'japanese')
    # Missing dex_no yields '' -> zfill gives '000' rather than crashing.
    fdex_no = xget(call_item, 'dex_no').zfill(3)
    dex_entry = ' '.join(xlist(call_item.get('dex_entry')))
    pokeclass = xget(call_item, 'classification')
    types = xlist(call_item.get('types'))
    all_abilities = call_item.get('abilities') or []
    abilities = xlist(all_abilities[:2])
    # Third slot, when present, is the hidden ability.
    hidden_ability = xget(all_abilities, 2)
    base_stats = xlist(call_item.get('base_stats'))
    evolutions = xlist(call_item.get('evolutions'))
    response = f'**#{fdex_no} {en_name}** (Japanese {jp_name} {rj_name})'
    response += "\n\n"
    response += pokeclass + ' | Types: ' + (', '.join(types))
    if any(evolutions):
        response += ' | Evolutions: ' + (' '.join(evolutions))
    response += "\n\n"
    response += '_' + dex_entry + '_'
    response += "\n\n"
    response += '/'.join(base_stats)
    if any(abilities):
        response += ' | Abilities: '
        response += ' / '.join(abilities)
        if hidden_ability:  # xget returns '' (never None) when absent
            response += ' / HA: ' + hidden_ability
    response += "\n\n"
    response += '[Bulbapedia](http://bulbapedia.bulbagarden.net/wiki/'+basename+') | '
    response += '[Serebii](http://www.serebii.net/pokedex-sm/'+fdex_no+'.shtml) | '
    response += '[Smogon](http://www.smogon.com/dex/sm/pokemon/'+basename+'/) | '
    response += '[Pokemon.com](http://www.pokemon.com/us/pokedex/'+basename+')'
    response += "\n\n"
    return response
# respondAbility - function returns the response body
# of ability type objects (as returned by DataPulls.getInfo)
def respondAbility(call_item):
    """Return the markdown response body for an ability info object."""
    name = xget(call_item, "name")
    basename = xget(call_item, "term").replace(' ', '')
    generation = xget(call_item, "introduced")
    description = xget(call_item, "description")
    # Assemble the three paragraphs, then join with blank lines.
    header = '**' + name + '** ' + '(Introduced: gen ' + generation + ')'
    links = (
        '[Bulbapedia](http://bulbapedia.bulbagarden.net/wiki/' + name + ') | '
        '[Serebii](https://www.serebii.net/abilitydex/' + basename + '.shtml) | '
        '[Smogon](http://www.smogon.com/dex/sm/abilities/' + name + '/)'
    )
    return "\n\n".join([header, description, links]) + "\n\n"
# respondMove - function returns the response body
# of move type objects (as returned by DataPulls.getInfo)
def respondMove(call_item):
    """Return the markdown response body for a move info object.

    Fix: the Smogon link pointed at ``/dex/sm/abilities/`` (copy-paste from
    respondAbility); moves live under ``/dex/sm/moves/``.
    """
    name = xget(call_item, "name")
    basename = xget(call_item, "term").replace(' ', '')
    move_type = xget(call_item, "typing")
    pp = xget(call_item, "pp")
    category = xget(call_item, "category")
    # Moves in the "varies" category have no fixed base power.
    power = "varies" if category == "varies" else (xget(call_item, "power") or "-")
    accuracy = xget(call_item, "accuracy") or "-"
    description = xget(call_item, "description")
    response = '**' + name + '** '
    response += '(Category: ' + category + ')'
    response += "\n\n"
    response += move_type + ' | PP: ' + pp + ' | Power: ' + power + " | Accuracy: " + accuracy
    response += "\n\n"
    response += description
    response += "\n\n"
    response += '[Bulbapedia](http://bulbapedia.bulbagarden.net/wiki/'+name+') | '
    response += '[Serebii](https://www.serebii.net/attackdex-sm/'+basename+'.shtml) | '
    response += '[Smogon](http://www.smogon.com/dex/sm/moves/'+name+'/)'
    response += "\n\n"
    return response
# respondItem - function returns the response body
# of item type objects (as returned by DataPulls.getInfo)
def respondItem(call_item):
    """Return the markdown response body for an item info object.

    Fix: the Serebii link pointed at ``attackdex-sm`` (copy-paste from
    respondMove); items live under ``/itemdex/``.
    NOTE(review): serebii itemdex slugs are lowercase; 'term' is assumed to
    be lowercase already -- verify against DataPulls.
    """
    name = xget(call_item, "name")
    basename = xget(call_item, "term").replace(' ', '')
    description = xget(call_item, "description")
    response = '**' + name + '** '
    response += "\n\n"
    response += description
    response += "\n\n"
    response += '[Bulbapedia](http://bulbapedia.bulbagarden.net/wiki/'+name+') | '
    response += '[Serebii](https://www.serebii.net/itemdex/'+basename+'.shtml) | '
    response += '[Smogon](http://www.smogon.com/dex/sm/items/'+name+'/)'
    response += "\n\n"
    return response
# getResponse - this function should return the response body
# for the given call item (as returned by DataPulls.getInfo)
def getResponse(item, is_last=False):
    """Return the full response body for one call item.

    Dispatches on ``item.type`` to the matching respond* function; unknown
    types produce an empty body. Appends a ``---`` separator unless this is
    the last item.
    """
    response_types = {
        "pokemon": respondPokemon,
        "ability": respondAbility,
        "move": respondMove,
        "item": respondItem,
    }
    # Look the handler up *before* calling it: the old `except KeyError`
    # around the call silently swallowed KeyErrors raised inside responders.
    responder = response_types.get(item.type)
    response = responder(item.get()) if responder is not None else ""
    if not is_last:
        response += '---'
    response += "\n\n"
    return response
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from collections import defaultdict
import pyqrcode
from paginate_sqlalchemy import SqlalchemyOrmPage as SQLAlchemyORMPage
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPSeeOther
from pyramid.response import Response
from pyramid.view import view_config, view_defaults
from sqlalchemy import func
from sqlalchemy.orm import joinedload
from sqlalchemy.orm.exc import NoResultFound
import warehouse.utils.otp as otp
from warehouse.accounts.interfaces import IPasswordBreachedService, IUserService
from warehouse.accounts.models import Email, User
from warehouse.accounts.views import logout
from warehouse.email import (
send_account_deletion_email,
send_added_as_collaborator_email,
send_collaborator_added_email,
send_email_verification_email,
send_password_change_email,
send_primary_email_change_email,
)
from warehouse.manage.forms import (
AddEmailForm,
ChangePasswordForm,
ChangeRoleForm,
CreateRoleForm,
DeleteTOTPForm,
ProvisionTOTPForm,
SaveAccountForm,
)
from warehouse.packaging.models import File, JournalEntry, Project, Release, Role
from warehouse.utils.paginate import paginate_url_factory
from warehouse.utils.project import confirm_project, destroy_docs, remove_project
def user_projects(request):
    """Return the requesting user's owned projects.

    Returns a dict with:
      * ``projects_owned`` -- projects where the user holds an "Owner" role.
      * ``projects_sole_owned`` -- the subset whose *only* Owner is the user.
    """
    # Projects on which the requesting user has an "Owner" role.
    projects_owned = (
        request.db.query(Project.id)
        .join(Role.project)
        .filter(Role.role_name == "Owner", Role.user == request.user)
        .subquery()
    )
    # Of those, the projects with exactly one Owner role overall.
    with_sole_owner = (
        request.db.query(Role.project_id)
        .join(projects_owned)
        .filter(Role.role_name == "Owner")
        .group_by(Role.project_id)
        .having(func.count(Role.project_id) == 1)
        .subquery()
    )
    return {
        "projects_owned": (
            request.db.query(Project)
            .join(projects_owned, Project.id == projects_owned.c.id)
            .order_by(Project.name)
            .all()
        ),
        "projects_sole_owned": (
            request.db.query(Project).join(with_sole_owner).order_by(Project.name).all()
        ),
    }
@view_defaults(
    route_name="manage.account",
    renderer="manage/account.html",
    uses_session=True,
    require_csrf=True,
    require_methods=False,
    permission="manage:user",
)
class ManageAccountViews:
    """Views for the logged-in user's account page (profile, emails,
    password, account deletion).

    Every handler re-renders the same template, so each returns the
    ``default_response`` context dict, overriding one form entry with the
    bound instance when validation errors need to be displayed.
    """
    def __init__(self, request):
        self.request = request
        self.user_service = request.find_service(IUserService, context=None)
        self.breach_service = request.find_service(
            IPasswordBreachedService, context=None
        )
    @property
    def active_projects(self):
        # Projects for which this user is the *only* owner; these block
        # account deletion (see delete_account below).
        return user_projects(request=self.request)["projects_sole_owned"]
    @property
    def default_response(self):
        """Fresh (unbound) forms plus the data the template always needs."""
        return {
            "save_account_form": SaveAccountForm(name=self.request.user.name),
            "add_email_form": AddEmailForm(user_service=self.user_service),
            "change_password_form": ChangePasswordForm(
                user_service=self.user_service, breach_service=self.breach_service
            ),
            "active_projects": self.active_projects,
        }
    @view_config(request_method="GET")
    def manage_account(self):
        """Plain GET: render the page with empty forms."""
        return self.default_response
    @view_config(request_method="POST", request_param=SaveAccountForm.__params__)
    def save_account(self):
        """Update the user's account details from the save-account form."""
        form = SaveAccountForm(self.request.POST)
        if form.validate():
            self.user_service.update_user(self.request.user.id, **form.data)
            self.request.session.flash("Account details updated", queue="success")
        # Return the bound form so any validation errors are displayed.
        return {**self.default_response, "save_account_form": form}
    @view_config(request_method="POST", request_param=AddEmailForm.__params__)
    def add_email(self):
        """Attach a new (unverified) address and send a verification link."""
        form = AddEmailForm(self.request.POST, user_service=self.user_service)
        if form.validate():
            email = self.user_service.add_email(self.request.user.id, form.email.data)
            send_email_verification_email(self.request, (self.request.user, email))
            self.request.session.flash(
                f"Email {email.email} added - check your email for "
                + "a verification link",
                queue="success",
            )
            return self.default_response
        return {**self.default_response, "add_email_form": form}
    @view_config(request_method="POST", request_param=["delete_email_id"])
    def delete_email(self):
        """Remove a non-primary email address belonging to this user."""
        try:
            email = (
                self.request.db.query(Email)
                .filter(
                    Email.id == self.request.POST["delete_email_id"],
                    Email.user_id == self.request.user.id,
                )
                .one()
            )
        except NoResultFound:
            self.request.session.flash("Email address not found", queue="error")
            return self.default_response
        if email.primary:
            # A new primary must be chosen first via change_primary_email.
            self.request.session.flash(
                "Cannot remove primary email address", queue="error"
            )
        else:
            self.request.user.emails.remove(email)
            self.request.session.flash(
                f"Email address {email.email} removed", queue="success"
            )
        return self.default_response
    @view_config(request_method="POST", request_param=["primary_email_id"])
    def change_primary_email(self):
        """Promote one of the user's *verified* addresses to primary."""
        previous_primary_email = self.request.user.primary_email
        try:
            new_primary_email = (
                self.request.db.query(Email)
                .filter(
                    Email.user_id == self.request.user.id,
                    Email.id == self.request.POST["primary_email_id"],
                    Email.verified.is_(True),
                )
                .one()
            )
        except NoResultFound:
            self.request.session.flash("Email address not found", queue="error")
            return self.default_response
        # Demote every current primary before promoting the new one.
        self.request.db.query(Email).filter(
            Email.user_id == self.request.user.id, Email.primary.is_(True)
        ).update(values={"primary": False})
        new_primary_email.primary = True
        self.request.session.flash(
            f"Email address {new_primary_email.email} set as primary", queue="success"
        )
        if previous_primary_email is not None:
            # Notify the old address so the change is visible to the owner.
            send_primary_email_change_email(
                self.request, (self.request.user, previous_primary_email)
            )
        return self.default_response
    @view_config(request_method="POST", request_param=["reverify_email_id"])
    def reverify_email(self):
        """Resend the verification link for an unverified address."""
        try:
            email = (
                self.request.db.query(Email)
                .filter(
                    Email.id == self.request.POST["reverify_email_id"],
                    Email.user_id == self.request.user.id,
                )
                .one()
            )
        except NoResultFound:
            self.request.session.flash("Email address not found", queue="error")
            return self.default_response
        if email.verified:
            self.request.session.flash("Email is already verified", queue="error")
        else:
            send_email_verification_email(self.request, (self.request.user, email))
            self.request.session.flash(
                f"Verification email for {email.email} resent", queue="success"
            )
        return self.default_response
    @view_config(request_method="POST", request_param=ChangePasswordForm.__params__)
    def change_password(self):
        """Validate and set a new password, then email a notification."""
        form = ChangePasswordForm(
            **self.request.POST,
            username=self.request.user.username,
            full_name=self.request.user.name,
            email=self.request.user.email,
            user_service=self.user_service,
            breach_service=self.breach_service,
            check_password_metrics_tags=["method:new_password"],
        )
        if form.validate():
            self.user_service.update_user(
                self.request.user.id, password=form.new_password.data
            )
            send_password_change_email(self.request, self.request.user)
            self.request.session.flash("Password updated", queue="success")
        return {**self.default_response, "change_password_form": form}
    @view_config(request_method="POST", request_param=["confirm_username"])
    def delete_account(self):
        """Delete the account after a typed-username confirmation.

        Refuses when the confirmation is missing/wrong, or when the user is
        the sole owner of any project (those would be orphaned).
        """
        username = self.request.params.get("confirm_username")
        if not username:
            self.request.session.flash("Confirm the request", queue="error")
            return self.default_response
        if username != self.request.user.username:
            self.request.session.flash(
                f"Could not delete account - {username!r} is not the same as "
                f"{self.request.user.username!r}",
                queue="error",
            )
            return self.default_response
        if self.active_projects:
            self.request.session.flash(
                "Cannot delete account with active project ownerships", queue="error"
            )
            return self.default_response
        # Update all journals to point to `deleted-user` instead
        deleted_user = (
            self.request.db.query(User).filter(User.username == "deleted-user").one()
        )
        journals = (
            self.request.db.query(JournalEntry)
            .options(joinedload("submitted_by"))
            .filter(JournalEntry.submitted_by == self.request.user)
            .all()
        )
        for journal in journals:
            journal.submitted_by = deleted_user
        # Send a notification email
        send_account_deletion_email(self.request, self.request.user)
        # Actually delete the user
        self.request.db.delete(self.request.user)
        return logout(self.request)
@view_defaults(
    route_name="manage.account.totp-provision",
    renderer="manage/account/totp-provision.html",
    uses_session=True,
    require_csrf=True,
    require_methods=False,
    permission="manage:user",
    http_cache=0,
)
class ProvisionTOTPViews:
    """Views for provisioning/removing a TOTP second factor.

    While provisioning is in progress the candidate secret lives in the
    session; it is only persisted onto the user record once the user proves
    possession by submitting a valid code (validate_totp_provision).
    """
    def __init__(self, request):
        self.request = request
        self.user_service = request.find_service(IUserService, context=None)
    @property
    def default_response(self):
        # Build the form and otpauth provisioning URI from the session's
        # candidate secret (not the stored one).
        totp_secret = self.request.session.get_totp_secret()
        return {
            "provision_totp_form": ProvisionTOTPForm(totp_secret=totp_secret),
            "provision_totp_uri": otp.generate_totp_provisioning_uri(
                totp_secret,
                self.request.user.username,
                issuer_name=self.request.registry.settings["site.name"],
            ),
        }
    @view_config(route_name="manage.account.totp-provision.image", request_method="GET")
    def generate_totp_qr(self):
        """Serve the provisioning URI as an SVG QR code.

        Returns 403 once a TOTP secret is already stored on the account.
        """
        totp_secret = self.user_service.get_totp_secret(self.request.user.id)
        if totp_secret:
            return Response(status=403)
        totp_qr = pyqrcode.create(self.default_response["provision_totp_uri"])
        qr_buffer = io.BytesIO()
        totp_qr.svg(qr_buffer, scale=5)
        return Response(content_type="image/svg+xml", body=qr_buffer.getvalue())
    @view_config(request_method="GET")
    def totp_provision(self):
        """Render the provisioning page unless TOTP is already set up."""
        totp_secret = self.user_service.get_totp_secret(self.request.user.id)
        if totp_secret:
            self.request.session.flash("TOTP already provisioned.", queue="error")
            return HTTPSeeOther(self.request.route_path("manage.account"))
        return self.default_response
    @view_config(request_method="POST", request_param=ProvisionTOTPForm.__params__)
    def validate_totp_provision(self):
        """Persist the session secret once the submitted code validates."""
        totp_secret = self.user_service.get_totp_secret(self.request.user.id)
        if totp_secret:
            self.request.session.flash("TOTP already provisioned.", queue="error")
            return HTTPSeeOther(self.request.route_path("manage.account"))
        form = ProvisionTOTPForm(
            **self.request.POST, totp_secret=self.request.session.get_totp_secret()
        )
        if form.validate():
            self.user_service.update_user(
                self.request.user.id, totp_secret=self.request.session.get_totp_secret()
            )
            # Drop the candidate secret now that it is stored on the user.
            self.request.session.clear_totp_secret()
            self.request.session.flash(
                "TOTP application successfully provisioned.", queue="success"
            )
            return HTTPSeeOther(self.request.route_path("manage.account"))
        return {**self.default_response, "provision_totp_form": form}
    @view_config(request_method="POST", request_param=DeleteTOTPForm.__params__)
    def delete_totp(self):
        """Remove the stored TOTP secret after the user re-authenticates."""
        totp_secret = self.user_service.get_totp_secret(self.request.user.id)
        if not totp_secret:
            self.request.session.flash("No TOTP application to delete.", queue="error")
            return HTTPSeeOther(self.request.route_path("manage.account"))
        form = DeleteTOTPForm(
            **self.request.POST,
            username=self.request.user.username,
            user_service=self.user_service,
        )
        if form.validate():
            self.user_service.update_user(self.request.user.id, totp_secret=None)
            self.request.session.flash("TOTP application deleted.", queue="success")
        else:
            self.request.session.flash("Invalid credentials.", queue="error")
        return HTTPSeeOther(self.request.route_path("manage.account"))
@view_config(
    route_name="manage.projects",
    renderer="manage/projects.html",
    uses_session=True,
    permission="manage:user",
)
def manage_projects(request):
    """Render the user's projects, most recently released first."""

    def _key(project):
        # Sort key: latest release date, falling back to project creation.
        releases = project.releases
        return releases[0].created if releases else project.created

    ownership = user_projects(request)
    return {
        "projects": sorted(request.user.projects, key=_key, reverse=True),
        "projects_owned": {p.name for p in ownership["projects_owned"]},
        "projects_sole_owned": {p.name for p in ownership["projects_sole_owned"]},
    }
@view_config(
    route_name="manage.project.settings",
    context=Project,
    renderer="manage/settings.html",
    uses_session=True,
    permission="manage:project",
)
def manage_project_settings(project, request):
    """Render the project settings page; the template only needs the project."""
    return dict(project=project)
@view_config(
    route_name="manage.project.delete_project",
    context=Project,
    uses_session=True,
    require_methods=["POST"],
    permission="manage:project",
)
def delete_project(project, request):
    """Permanently delete *project* after a typed-name confirmation.

    NOTE(review): ``confirm_project`` is assumed to abort the request (via
    ``fail_route``) when confirmation fails -- verify against its definition.
    """
    confirm_project(project, request, fail_route="manage.project.settings")
    remove_project(project, request)
    return HTTPSeeOther(request.route_path("manage.projects"))
@view_config(
    route_name="manage.project.destroy_docs",
    context=Project,
    uses_session=True,
    require_methods=["POST"],
    permission="manage:project",
)
def destroy_project_docs(project, request):
    """Delete the project's hosted documentation after name confirmation.

    NOTE(review): ``confirm_project`` is assumed to abort the request (via
    ``fail_route``) when confirmation fails -- verify against its definition.
    """
    confirm_project(project, request, fail_route="manage.project.documentation")
    destroy_docs(project, request)
    return HTTPSeeOther(
        request.route_path(
            "manage.project.documentation", project_name=project.normalized_name
        )
    )
@view_config(
    route_name="manage.project.releases",
    context=Project,
    renderer="manage/releases.html",
    uses_session=True,
    permission="manage:project",
)
def manage_project_releases(project, request):
    """Render the releases listing; the template reads releases off the project."""
    return dict(project=project)
@view_defaults(
    route_name="manage.project.release",
    context=Release,
    renderer="manage/release.html",
    uses_session=True,
    require_csrf=True,
    require_methods=False,
    permission="manage:project",
)
class ManageProjectRelease:
    """Views for one release: display it, delete it, or delete one file."""
    def __init__(self, release, request):
        self.release = release
        self.request = request
    @view_config(request_method="GET")
    def manage_project_release(self):
        """Render the release page with its project and file list."""
        return {
            "project": self.release.project,
            "release": self.release,
            "files": self.release.files.all(),
        }
    @view_config(request_method="POST", request_param=["confirm_version"])
    def delete_project_release(self):
        """Delete the entire release after a typed-version confirmation."""
        version = self.request.POST.get("confirm_version")
        if not version:
            self.request.session.flash("Confirm the request", queue="error")
            return HTTPSeeOther(
                self.request.route_path(
                    "manage.project.release",
                    project_name=self.release.project.name,
                    version=self.release.version,
                )
            )
        if version != self.release.version:
            self.request.session.flash(
                "Could not delete release - "
                + f"{version!r} is not the same as {self.release.version!r}",
                queue="error",
            )
            return HTTPSeeOther(
                self.request.route_path(
                    "manage.project.release",
                    project_name=self.release.project.name,
                    version=self.release.version,
                )
            )
        # Record the removal in the journal before deleting the row.
        self.request.db.add(
            JournalEntry(
                name=self.release.project.name,
                action="remove release",
                version=self.release.version,
                submitted_by=self.request.user,
                submitted_from=self.request.remote_addr,
            )
        )
        self.request.db.delete(self.release)
        self.request.session.flash(
            f"Deleted release {self.release.version!r}", queue="success"
        )
        return HTTPSeeOther(
            self.request.route_path(
                "manage.project.releases", project_name=self.release.project.name
            )
        )
    @view_config(
        request_method="POST", request_param=["confirm_project_name", "file_id"]
    )
    def delete_project_release_file(self):
        """Delete one file of the release after a project-name confirmation."""
        def _error(message):
            # Flash the error and bounce back to the release page.
            self.request.session.flash(message, queue="error")
            return HTTPSeeOther(
                self.request.route_path(
                    "manage.project.release",
                    project_name=self.release.project.name,
                    version=self.release.version,
                )
            )
        project_name = self.request.POST.get("confirm_project_name")
        if not project_name:
            return _error("Confirm the request")
        try:
            release_file = (
                self.request.db.query(File)
                .filter(
                    File.release == self.release,
                    File.id == self.request.POST.get("file_id"),
                )
                .one()
            )
        except NoResultFound:
            return _error("Could not find file")
        if project_name != self.release.project.name:
            return _error(
                "Could not delete file - " + f"{project_name!r} is not the same as "
                f"{self.release.project.name!r}"
            )
        # Journal the removal before deleting the file row.
        self.request.db.add(
            JournalEntry(
                name=self.release.project.name,
                action=f"remove file {release_file.filename}",
                version=self.release.version,
                submitted_by=self.request.user,
                submitted_from=self.request.remote_addr,
            )
        )
        self.request.db.delete(release_file)
        self.request.session.flash(
            f"Deleted file {release_file.filename!r}", queue="success"
        )
        return HTTPSeeOther(
            self.request.route_path(
                "manage.project.release",
                project_name=self.release.project.name,
                version=self.release.version,
            )
        )
@view_config(
    route_name="manage.project.roles",
    context=Project,
    renderer="manage/roles.html",
    uses_session=True,
    require_methods=False,
    permission="manage:project",
)
def manage_project_roles(project, request, _form_class=CreateRoleForm):
    """List the project's collaborators and handle the add-collaborator form.

    On a valid POST, creates the role, journals it, and emails both the
    existing owners and the newly added collaborator.
    """
    user_service = request.find_service(IUserService, context=None)
    form = _form_class(request.POST, user_service=user_service)
    if request.method == "POST" and form.validate():
        username = form.username.data
        role_name = form.role_name.data
        userid = user_service.find_userid(username)
        user = user_service.get_user(userid)
        # Reject duplicates: the user already holds this exact role.
        if request.db.query(
            request.db.query(Role)
            .filter(
                Role.user == user, Role.project == project, Role.role_name == role_name
            )
            .exists()
        ).scalar():
            request.session.flash(
                f"User '{username}' already has {role_name} role for project",
                queue="error",
            )
        elif user.primary_email is None or not user.primary_email.verified:
            request.session.flash(
                f"User '{username}' does not have a verified primary email "
                f"address and cannot be added as a {role_name} for project.",
                queue="error",
            )
        else:
            request.db.add(
                Role(user=user, project=project, role_name=form.role_name.data)
            )
            request.db.add(
                JournalEntry(
                    name=project.name,
                    action=f"add {role_name} {username}",
                    submitted_by=request.user,
                    submitted_from=request.remote_addr,
                )
            )
            owner_roles = (
                request.db.query(Role)
                .join(Role.user)
                .filter(Role.role_name == "Owner", Role.project == project)
            )
            owner_users = {owner.user for owner in owner_roles}
            # Don't send to the owner that added the new role
            owner_users.discard(request.user)
            # Don't send owners email to new user if they are now an owner
            owner_users.discard(user)
            send_collaborator_added_email(
                request,
                owner_users,
                user=user,
                submitter=request.user,
                project_name=project.name,
                role=form.role_name.data,
            )
            send_added_as_collaborator_email(
                request,
                user,
                submitter=request.user,
                project_name=project.name,
                role=form.role_name.data,
            )
            request.session.flash(
                f"Added collaborator '{form.username.data}'", queue="success"
            )
        # Reset to a fresh, unbound form after handling the submission.
        form = _form_class(user_service=user_service)
    roles = request.db.query(Role).join(User).filter(Role.project == project).all()
    # TODO: The following lines are a hack to handle multiple roles for a
    # single user and should be removed when fixing GH-2745
    roles_by_user = defaultdict(list)
    for role in roles:
        roles_by_user[role.user.username].append(role)
    return {"project": project, "roles_by_user": roles_by_user, "form": form}
@view_config(
    route_name="manage.project.change_role",
    context=Project,
    uses_session=True,
    require_methods=["POST"],
    permission="manage:project",
)
def change_project_role(project, request, _form_class=ChangeRoleForm):
    """Change a collaborator's role on the project, journaling each change.

    Always redirects back to the roles page; an Owner is never allowed to
    remove their own Owner role.
    """
    # TODO: This view was modified to handle deleting multiple roles for a
    # single user and should be updated when fixing GH-2745
    form = _form_class(request.POST)
    if form.validate():
        role_ids = request.POST.getall("role_id")
        if len(role_ids) > 1:
            # This user has more than one role, so just delete all the ones
            # that aren't what we want.
            #
            # TODO: This branch should be removed when fixing GH-2745.
            roles = (
                request.db.query(Role)
                .join(User)
                .filter(
                    Role.id.in_(role_ids),
                    Role.project == project,
                    Role.role_name != form.role_name.data,
                )
                .all()
            )
            removing_self = any(
                role.role_name == "Owner" and role.user == request.user
                for role in roles
            )
            if removing_self:
                request.session.flash("Cannot remove yourself as Owner", queue="error")
            else:
                for role in roles:
                    request.db.delete(role)
                    request.db.add(
                        JournalEntry(
                            name=project.name,
                            action=f"remove {role.role_name} {role.user.username}",
                            submitted_by=request.user,
                            submitted_from=request.remote_addr,
                        )
                    )
                request.session.flash("Changed role", queue="success")
        else:
            # This user only has one role, so get it and change the type.
            try:
                role = (
                    request.db.query(Role)
                    .join(User)
                    .filter(
                        Role.id == request.POST.get("role_id"), Role.project == project
                    )
                    .one()
                )
                if role.role_name == "Owner" and role.user == request.user:
                    request.session.flash(
                        "Cannot remove yourself as Owner", queue="error"
                    )
                else:
                    request.db.add(
                        JournalEntry(
                            name=project.name,
                            action="change {} {} to {}".format(
                                role.role_name, role.user.username, form.role_name.data
                            ),
                            submitted_by=request.user,
                            submitted_from=request.remote_addr,
                        )
                    )
                    role.role_name = form.role_name.data
                    request.session.flash("Changed role", queue="success")
            except NoResultFound:
                request.session.flash("Could not find role", queue="error")
    return HTTPSeeOther(
        request.route_path("manage.project.roles", project_name=project.name)
    )
@view_config(
    route_name="manage.project.delete_role",
    context=Project,
    uses_session=True,
    require_methods=["POST"],
    permission="manage:project",
)
def delete_project_role(project, request):
    """Remove collaborator role(s) from the project, journaling each removal.

    Refuses to let an Owner remove themselves; always redirects back to the
    roles page.
    """
    # TODO: This view was modified to handle deleting multiple roles for a
    # single user and should be updated when fixing GH-2745
    roles = (
        request.db.query(Role)
        .join(User)
        .filter(Role.id.in_(request.POST.getall("role_id")), Role.project == project)
        .all()
    )
    removing_self = any(
        role.role_name == "Owner" and role.user == request.user for role in roles
    )
    if not roles:
        request.session.flash("Could not find role", queue="error")
    elif removing_self:
        request.session.flash("Cannot remove yourself as Owner", queue="error")
    else:
        for role in roles:
            request.db.delete(role)
            request.db.add(
                JournalEntry(
                    name=project.name,
                    action=f"remove {role.role_name} {role.user.username}",
                    submitted_by=request.user,
                    submitted_from=request.remote_addr,
                )
            )
        request.session.flash("Removed role", queue="success")
    return HTTPSeeOther(
        request.route_path("manage.project.roles", project_name=project.name)
    )
@view_config(
    route_name="manage.project.history",
    context=Project,
    renderer="manage/history.html",
    uses_session=True,
    permission="manage:project",
)
def manage_project_history(project, request):
    """Render the project's journal (audit log), 25 entries per page.

    Raises HTTPBadRequest for a non-integer ``page`` parameter and
    HTTPNotFound for a page beyond the last one.
    """
    try:
        page_num = int(request.params.get("page", 1))
    except ValueError:
        raise HTTPBadRequest("'page' must be an integer.")
    journals_query = (
        request.db.query(JournalEntry)
        .options(joinedload("submitted_by"))
        .filter(JournalEntry.name == project.name)
        .order_by(JournalEntry.submitted_date.desc(), JournalEntry.id.desc())
    )
    journals = SQLAlchemyORMPage(
        journals_query,
        page=page_num,
        items_per_page=25,
        url_maker=paginate_url_factory(request),
    )
    # Out-of-range pages 404, but page 1 of an empty journal is allowed
    # (page_count is 0 / falsy then).
    if journals.page_count and page_num > journals.page_count:
        raise HTTPNotFound
    return {"project": project, "journals": journals}
@view_config(
    route_name="manage.project.documentation",
    context=Project,
    renderer="manage/documentation.html",
    uses_session=True,
    permission="manage:project",
)
def manage_project_documentation(project, request):
    """Render the documentation management page for *project*."""
    return dict(project=project)
|
from __future__ import unicode_literals
import warnings
from django.db import models
from cms.models import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from filer.fields.folder import FilerFolderField
from .conf import settings
from cmsplugin_filer_utils import FilerPluginManager
class FilerFolder(CMSPlugin):
    """
    Plugin for storing any type of Folder.
    Default template displays files stored inside this folder.
    """
    STYLE_CHOICES = settings.CMSPLUGIN_FILER_FOLDER_STYLE_CHOICES
    DEFAULT_STYLE = settings.CMSPLUGIN_FILER_FOLDER_DEFAULT_STYLE
    # Optional display title; get_display_name falls back to the folder name.
    title = models.CharField(_("title"), max_length=255, null=True, blank=True)
    # SET_NULL: deleting the filer folder keeps the plugin, with folder=None.
    folder = FilerFolderField(null=True, on_delete=models.SET_NULL)
    style = models.CharField(
        _('Style'), choices=STYLE_CHOICES, default=DEFAULT_STYLE, max_length=50)
    # Explicit parent link so related_name stays unique per app/model.
    cmsplugin_ptr = models.OneToOneField(
        to=CMSPlugin,
        related_name='%(app_label)s_%(class)s',
        parent_link=True,
    )
    # select_related on folder avoids an extra query per plugin instance.
    objects = FilerPluginManager(select_related=('folder',))
    @property
    def view_option(self):
        # Deprecated alias kept for backwards compatibility; use .style.
        warnings.warn("view_option on cmsplugin_filer_folder.FilderFolder is deprecated. Use .style instead.",
                      DeprecationWarning)
        return self.style
    def __str__(self):
        return self.get_display_name()
    def get_display_name(self):
        """Best human-readable label: title, then folder name, then a placeholder."""
        if self.title:
            return self.title
        elif self.folder_id and self.folder.name:
            return self.folder.name
        return "<empty>"
    search_fields = ('title',)
|
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import queue
from typing import List, Callable, Optional, Tuple, Dict
import numpy as np
import torch
from torch import nn
from copy import deepcopy, copy
from functools import partial
from torch import optim
from nncf.config.config import NNCFConfig
from nncf.torch.utils import get_filters_num
class EvolutionOptimizer:
"""
Class for optimizing ranking coefficients for the model with evolution algorithm (agent).
The evolution algorithm works as follows:
1. For the first population_size steps it generates and returns random actions (generated with some prior
information). For every action, it gets a reward (some measure whether this action is good or not). During these
generations all action - reward pairs saving to the population.
2. During remaining (generations - population_size) generations it predict action by next scheme:
- Choosing random num_samples actions from population
- Choosing the best one from sampled and mutate it
- Return the resulting action
During this generation's action - reward pairs saving by updating oldest actions in population.
After all generations, the best action (with the best reward value) is returned.
"""
    def __init__(self, initial_filter_norms: Dict, hparams: Dict, random_seed: int):
        """
        :param initial_filter_norms: Initial filter norms needed to get the mean and std of filter norms in each
        layer.
        :param hparams: hyperparams of the Optimizer, can contain population_size, num_generations, num_samples,
        mutate_percent, sigma_scale
        :param random_seed: random seed, that should be set during action generation for reproducibility
        """
        self.random_seed = random_seed
        # Optimizer hyper-params
        self.population_size = hparams.get('population_size', 64)
        self.num_generations = hparams.get('num_generations', 400)
        self.num_samples = hparams.get('num_samples', 16)
        self.mutate_percent = hparams.get('mutate_percent', 0.1)
        self.scale_sigma = hparams.get('sigma_scale', 1)
        # Best reward seen so far; -inf so the first episode always wins.
        self.max_reward = -np.inf
        self.mean_rewards = []
        # FIFO of population slot indexes, bounded by the population size.
        self.indexes_queue = queue.Queue(self.population_size)
        self.oldest_index = None
        self.population_rewards = np.zeros(self.population_size)
        self.population = [None for i in range(self.population_size)]
        self.best_action = None
        self.last_action = None
        self.num_layers = len(initial_filter_norms)
        self.layer_keys = np.array(list(initial_filter_norms.keys()))
        # Per-layer mean/std of the initial filter norms, used to scale the
        # random shifts generated for new actions.
        self.initial_norms_stats = {}
        for key in self.layer_keys:
            layer_norms = initial_filter_norms[key].cpu().detach().numpy()
            self.initial_norms_stats[key] = {'mean': np.mean(layer_norms), 'std': np.std(layer_norms)}
        # Per-episode bookkeeping, filled in by the environment/agent loop.
        self.cur_state = None
        self.cur_reward = None
        self.cur_episode = None
        self.cur_info = None
    def get_best_action(self):
        """Return the action with the highest reward observed so far (None before any episode finished)."""
        return self.best_action
def _save_episode_info(self, reward: float) -> None:
"""
Saving episode information: action-reward pairs and updating best_action/reward variables if needed.
:param reward: reward for the current episode
"""
# Update best action and reward if needed
if reward > self.max_reward:
self.best_action = self.last_action
self.max_reward = reward
if self.cur_episode < self.population_size:
self.indexes_queue.put(self.cur_episode)
self.population[self.cur_episode] = self.last_action
self.population_rewards[self.cur_episode] = reward
else:
self.indexes_queue.put(self.oldest_index)
self.population[self.oldest_index] = self.last_action
self.population_rewards[self.oldest_index] = reward
    def _predict_action(self) -> Dict:
        """
        Predict action for the current episode. Works as described above.
        :return: new generated action, a dict mapping layer key -> (scale, shift)
        """
        # NOTE(review): reseeding with the same seed on every call makes the random
        # draws identical across calls (only the branch taken varies with the
        # episode number) — confirm this is the intended reproducibility behaviour.
        np.random.seed(self.random_seed)
        episode_num = self.cur_episode
        action = {}
        if episode_num < self.population_size - 1:
            # During first population_size generations, generates random actions
            for key in self.layer_keys:
                scale = np.exp(np.random.normal(0, self.scale_sigma))
                shift = np.random.normal(0, self.initial_norms_stats[key]['std'])
                action[key] = (scale, shift)
        elif episode_num == self.population_size - 1:
            # Adding identity action to population
            for key in self.layer_keys:
                action[key] = (1, 0)
        else:
            # Mutation strength decays linearly with the episode number
            step_size = 1 - (float(episode_num) / (self.num_generations * 1.25))
            self.mean_rewards.append(np.mean(self.population_rewards))
            # 1. Sampling num_samples actions from population and choosing the best one
            sampled_idxs = np.random.choice(len(self.population_rewards), self.num_samples)
            sampled_rewards = self.population_rewards[sampled_idxs]
            best_action = self.population[sampled_idxs[np.argmax(sampled_rewards)]]
            # 2. Mutate best action
            mutate_num = int(self.mutate_percent * self.num_layers)
            # Sampled with replacement; membership test below handles duplicates
            mutate_idxs = np.random.choice(self.layer_keys, mutate_num)
            for key in self.layer_keys:
                scale, shift = 1, 0
                if key in mutate_idxs:
                    scale = np.exp(np.random.normal(0, self.scale_sigma * step_size))
                    shift = np.random.normal(0, self.initial_norms_stats[key]['std'])
                action[key] = (scale * best_action[key][0], shift + best_action[key][1])
            # Reserve the oldest population slot for the result of this episode
            self.oldest_index = self.indexes_queue.get()
        return action
def ask(self, episode_num: int) -> Dict:
"""
Predict and returns action for the last told episode information: state, reward, episode_num and info
:return: predicted action
"""
self.cur_episode = episode_num
action = self._predict_action()
self.last_action = action
return action
def tell(self, state: torch.Tensor, reward: float, end_of_episode: bool, episode_num: int, info: List) -> None:
"""
Getting info about episode step and save it every end of episode
"""
# Saving state, reward and info from the current step
self.cur_state = state
self.cur_reward = reward
self.cur_episode = episode_num
self.cur_info = info
if end_of_episode:
self._save_episode_info(reward)
class LeGREvolutionEnv:
    """
    Environment class for optimizing the accuracy of the pruned model with different ranking coefficients.
    During 'step' the environment prunes with the received action, calculates the current reward and useful
    info and returns them. During 'reset' it resets the Pruner and the environment params changed during
    iteration.

    NOTE: `reset` must be called before the first `step`, because `step` relies on
    attributes (`full_flops`, `rest`, `last_act`) that `reset` initializes.
    """
    def __init__(self, filter_pruner: 'LeGRPruner', model: nn.Module, train_loader: torch.utils.data.DataLoader,
                 val_loader: torch.utils.data.DataLoader, train_fn: Callable,
                 train_optimizer: Optional[torch.optim.Optimizer], val_fn: Callable, config: NNCFConfig,
                 train_steps: int, pruning_max: float):
        """
        :param filter_pruner: LeGRPruner, should have an interface for pruning model and resetting pruner.
        :param model: target model for which ranking coefficients are trained
        :param train_loader: data loader for training the model
        :param val_loader: data loader for validating the model
        :param train_fn: callable for training the model
        :param train_optimizer: optional, optimizer for training the model
        :param val_fn: callable for validation of the model, returns acc, loss
        :param config: NNCF config for model compression
        :param train_steps: number of training steps to evaluate action (ranking coefficients set)
        :param pruning_max: pruning level for the model
        """
        # Reward is the negative validation loss (rather than accuracy)
        self.loss_as_reward = True
        self.prune_target = pruning_max
        self.steps = train_steps
        # Train/test params
        self.train_loader, self.val_loader = train_loader, val_loader
        self.train_fn = train_fn
        self.train_optimizer = train_optimizer
        if self.train_optimizer is None:
            # Default optimizer when the user did not provide a custom optimizer
            self.train_optimizer = partial(optim.SGD, lr=1e-2, momentum=0.9, weight_decay=5e-4, nesterov=True)
        self.validate_fn = val_fn
        self.config = config
        self.filter_pruner = filter_pruner
        self.model = model
    def reset(self) -> Tuple[torch.Tensor, List]:
        """
        Resetting pruner params (all changes in the model made by training) and environment params changed
        during the step.
        :return: tuple with state and info: full flops in the model and number of flops left in the model
        """
        self.filter_pruner.reset()
        self.model.eval()
        self.full_flops = self.filter_pruner.get_full_flops_number_in_model()
        self.rest = self.full_flops
        self.last_act = None
        return torch.zeros(1), [self.full_flops, self.rest]
    def _train_steps(self, steps: int) -> None:
        """
        Training model with train_fn for the received number of steps.
        :param steps: number of model training steps
        """
        optimizer = self.train_optimizer(self.model.parameters())
        self.train_fn(self.train_loader, self.model, optimizer, self.filter_pruner, steps)
    def _get_reward(self) -> Tuple[float, float, float]:
        """
        Validating model with validate_fn and return result in format: (acc, loss)
        """
        return self.validate_fn(self.model, self.val_loader)
    def step(self, action: Dict) -> Tuple[torch.Tensor, float, bool, List]:
        """
        1. Getting action (ranking coefficients)
        2. Making step with this action - prune model with ranking coefficients
        3. Getting a reward for this action - train model for some steps and validate it
        4. Returning new state (for current settings state is default and not used), reward,
        whether the episode is over or not (for current settings an episode is over after every step) and
        additional info (full flops in model and flops left in the model)
        :param action: ranking coefficients
        """
        self.last_act = action
        new_state = torch.zeros(1)
        # NOTE(review): LeGRPruner.prune is annotated '-> None', so `reduced` may
        # be None here — confirm the pruner actually returns the remaining flops.
        reduced = self.filter_pruner.prune(self.prune_target, action)
        self._train_steps(self.steps)
        acc, loss = self._get_reward()
        if self.loss_as_reward:
            reward = -loss
        else:
            reward = acc
        # Fixed: return a real bool, matching the declared return type (1 == True,
        # so callers are unaffected).
        done = True
        info = [self.full_flops, reduced]
        return new_state, reward, done, info
class LeGRPruner:
    """
    Thin wrapper around a filter-pruning controller with a simplified interface:
    prune the model with a given set of ranking coefficients, and roll back all
    changes the environment made to the model.
    """
    def __init__(self, filter_pruner_ctrl: 'FilterPruningController', model: nn.Module):
        self.filter_pruner = filter_pruner_ctrl
        self.scheduler = copy(self.filter_pruner.scheduler)
        self.model = model
        self.model_params_copy = None
        self._save_model_weights()
        # Importance of every pruned group's filters before any training/pruning.
        self.init_filter_norms = {}
        for node in self.filter_pruner.pruned_module_groups_info.get_all_nodes():
            self.init_filter_norms[node.node_name] = self.filter_pruner.filter_importance(node.module.weight)
    def loss(self) -> float:
        """
        :return: loss value reported by the wrapped pruning controller
        """
        return self.filter_pruner.loss()
    def _save_model_weights(self) -> None:
        """
        Snapshot every model parameter so later changes can be rolled back.
        """
        self.model_params_copy = deepcopy(self.model.state_dict())
    def _restore_model_weights(self):
        """
        Roll the model back to the snapshot taken at construction time.
        """
        self.model.load_state_dict(self.model_params_copy)
    def _reset_masks(self) -> None:
        """
        Re-enable every filter by installing all-ones pruning masks.
        """
        for group in self.filter_pruner.pruned_module_groups_info.get_all_nodes():
            device = group.module.weight.device
            ones_mask = torch.ones(get_filters_num(group.module)).to(device)
            self.filter_pruner.set_mask(group, ones_mask)
    def reset(self) -> None:
        """
        Undo everything the environment did to the model: restore the original
        weights, reset the pruning masks and re-copy the scheduler.
        """
        self._restore_model_weights()
        self._reset_masks()
        self.scheduler = copy(self.filter_pruner.scheduler)
    def get_full_flops_number_in_model(self) -> float:
        """
        :return: FLOPs count of the unpruned model
        """
        return self.filter_pruner.full_flops
    def prune(self, flops_pruning_target: float, ranking_coeffs: Dict) -> None:
        """
        Prune the target model down to the given flops target using the supplied
        ranking coefficients.
        :param flops_pruning_target: pruning target for the model pruning
        :param ranking_coeffs: ranking coefficients used for layer ranking during pruning
        """
        self.filter_pruner.ranking_coeffs = ranking_coeffs
        self.filter_pruner.set_pruning_level(flops_pruning_target)
|
# -*- coding: utf-8 -*-
""" ymir.schema.data
"""
from voluptuous import Required, Optional
from ymir.schema import validators
# Schema for AWS-specific service settings.
# NOTE(review): the bare `unicode` type implies this module targets Python 2 —
# confirm before porting to Python 3.
AWS_DATA = {
    Required("username"): unicode,
    Required("pem"): unicode,
    Optional("aws_region"): unicode,
    Optional("s3_buckets", default=[]): validators.list_of_strings,
    Optional("elastic_ips", default=[]): validators.list_of_strings,
    Optional("tags", default=[]): validators.list_of_strings,
    Optional("reservation_extras", default={}): dict,
    Required("security_groups", default=[]): validators._validate_sg_field,
    Required("key_name"): unicode,
}
# Schema for provisioning-related settings (setup/provision scripts, puppet).
PROVISION_DATA = {
    Required("setup_list", default=[]): validators._validate_sl_field,
    Required("provision_list", default=[]): validators._validate_pl_field,
    Optional("puppet_parser", default="future"): validators._validate_puppet_parser,
}
# Schema fields common to every ymir service description.
BASE_DATA = {
    Required("name"): unicode,
    Optional("port", default='22'): unicode,
    Required("service_name"): unicode,
    Required("service_description"): unicode,
    Required("instance_type"): unicode,
    Required("health_checks"): dict,
    Optional("logs", default=[]): validators.list_of_strings,
    Optional("ymir_debug", default=False): bool,
    Optional("ymir_build_puppet", default=False): bool,
    Optional("volumes", default=[]): dict,
    Optional("org_name", default="org"): unicode,
    Optional("app_name", default="app"): unicode,
    Optional("service_defaults", default={}): dict,
    Optional("env_name", default='env'): unicode,
}
# Schema for services that extend another service definition.
EXTENSION_DATA = {Required("extends"): validators._validate_extends_field}
# Vagrant services reuse the base fields plus provisioning plus a 'vagrant' section.
VAGRANT_DATA = BASE_DATA.copy()
VAGRANT_DATA.update(PROVISION_DATA)
VAGRANT_DATA.update(
    {Required("vagrant"): validators.nested_vagrant_validator})
|
"""
test_sgc_input_phaselocking.py
Test phase locking from an input sgc to a target cell type. Runs simulations
with AN input, and plots the results, including PSTH and phase histogram.
usage: test_sgc_input_phaselocking.py [-h]
[-c {bushy,tstellate,octopus,dstellate}]
[-S {tone,SAM,clicks}]
[-s {guineapig,rat,mouse}]
test sgc input phaselocking
optional arguments:
-h, --help show this help message and exit
-c {bushy,tstellate,octopus,dstellate}, --celltype {bushy,tstellate,octopus,dstellate}
cell type
-S {tone,SAM,clicks}, --stimulus {tone,SAM,clicks}
stimulus type
-s {guineapig,rat,mouse}, --species {guineapig,rat,mouse}
species
Note: Not all combinations of inputs are valid (not all cell types are
known for each species)
"""
import sys
import numpy as np
import argparse
from neuron import h
import pyqtgraph as pg
from cnmodel.protocols import Protocol
from cnmodel import cells
from cnmodel.util import sound
import cnmodel.util.pynrnutilities as PU
from cnmodel import data
class SGCInputTestPL(Protocol):
    """Protocol that drives a single postsynaptic cell with auditory-nerve (SGC)
    inputs and measures phase locking (vector strength) of both the AN input
    and the postsynaptic cell for tone, SAM-tone or click-train stimuli.
    """

    def set_cell(self, cell="bushy"):
        """Select the postsynaptic cell type used by the next run()."""
        self.cell = cell

    def run(
        self, args, temp=34.0, dt=0.025, seed=575982035, dB=None,
    ):
        """Build the cells and stimulus, connect the SGC inputs and run the simulation.

        :param args: parsed command-line arguments (species, stimulus, CF, fmod, dmod, dB)
        :param temp: simulation temperature (degrees C)
        :param dt: integration time step (ms)
        :param seed: base random seed; incremented per SGC input
        :param dB: sound level override; when None, args.dB is used
        """
        if self.cell == "bushy":
            postCell = cells.Bushy.create(species=args.species)
        elif self.cell == "tstellate":
            postCell = cells.TStellate.create(species=args.species)
        elif self.cell == "octopus":
            postCell = cells.Octopus.create(species=args.species)
        elif self.cell == "dstellate":
            postCell = cells.DStellate.create(species=args.species)
        else:
            raise ValueError(
                "cell %s is not yet implemented for phaselocking" % self.cell
            )
        self.post_cell = postCell
        self.species = args.species
        self.stimulus = args.stimulus
        self.run_duration = 1.0  # in seconds
        self.pip_duration = 0.8  # in seconds
        self.pip_start = [0.02]  # in seconds
        self.Fs = 100e3  # in Hz
        self.f0 = args.CF  # stimulus in Hz
        self.cf = args.CF  # SGCs in Hz
        self.fMod = args.fmod  # mod freq, Hz
        self.dMod = args.dmod  # % mod depth, percentage
        if dB is None:
            self.dbspl = args.dB
        else:
            self.dbspl = dB
        if self.stimulus == "SAM":
            self.stim = sound.SAMTone(
                rate=self.Fs,
                duration=self.run_duration,
                f0=self.f0,
                fmod=self.fMod,
                dmod=self.dMod,
                dbspl=self.dbspl,
                ramp_duration=2.5e-3,
                pip_duration=self.pip_duration,
                pip_start=self.pip_start,
            )
            # Phase locking is evaluated at the modulation frequency for SAM
            self.vs_freq = self.fMod
        if self.stimulus == "tone":
            # Tone runs always use a fixed 1 kHz carrier and CF
            self.f0 = 1000.0
            self.cf = 1000.0
            self.stim = sound.TonePip(
                rate=self.Fs,
                duration=self.run_duration,
                f0=self.f0,
                dbspl=self.dbspl,
                ramp_duration=2.5e-3,
                pip_duration=self.pip_duration,
                pip_start=self.pip_start,
            )
            self.vs_freq = self.f0
        if self.stimulus == "clicks":
            self.click_rate = 0.020  # msec
            self.stim = sound.ClickTrain(
                rate=self.Fs,
                duration=self.run_duration,
                dbspl=self.dbspl,
                click_starts=np.linspace(
                    0.01,
                    self.run_duration - 0.01,
                    int((self.run_duration) / self.click_rate),
                ),
                click_duration=100.0e-6,
                # click_interval=self.click_rate, nclicks=int((self.run_duration-0.01)/self.click_rate),
                ramp_duration=2.5e-3,
            )
        # Number of converging SGC inputs for this cell type and species
        n_sgc = data.get(
            "convergence", species=self.species, post_type=postCell.celltype, pre_type="sgc"
        )[0]
        self.n_sgc = int(np.round(n_sgc))
        self.pre_cells = []
        self.synapses = []
        j = 0
        for k in range(self.n_sgc):
            # Each SGC input gets its own seed so spike trains are independent
            seed = seed + k
            preCell = cells.DummySGC(cf=self.cf, sr=2)
            synapse = preCell.connect(postCell)
            for i in range(synapse.terminal.n_rzones):
                # Record transmitter release at every release zone
                self["xmtr%03d" % j] = synapse.terminal.relsite._ref_XMTR[i]
                j = j + 1
            # Disable synaptic depression for these runs
            synapse.terminal.relsite.Dep_Flag = False
            preCell.set_sound_stim(self.stim, seed=seed)
            self.pre_cells.append(preCell)
            self.synapses.append(synapse)
        self["vm"] = postCell.soma(0.5)._ref_v
        # self['prevm'] = preCell.soma(0.5)._ref_v
        self["t"] = h._ref_t
        postCell.cell_initialize()
        h.tstop = 1e3 * self.run_duration  # duration of a run
        h.celsius = temp
        h.dt = dt
        self.custom_init()
        h.run()

    def window_spikes(self, spiketrain):
        """Return the spikes (ms) falling in the steady-state analysis window,
        i.e. from 25% into the pip to its end (window bounds are in seconds)."""
        phasewin = [
            self.pip_start[0] + 0.25 * self.pip_duration,
            self.pip_start[0] + self.pip_duration,
        ]
        spkin = spiketrain[np.where(spiketrain > phasewin[0] * 1e3)]
        spikesinwin = spkin[np.where(spkin <= phasewin[1] * 1e3)]
        return spikesinwin

    def compute_vs(self):
        """Compute and print vector strength for the AN input and the postsynaptic
        cell, using the windowed spike trains and the stimulus-appropriate
        analysis frequency."""
        self.post_spikes = PU.findspikes(self["t"], self["vm"], -30.0)
        self.post_spikes_win = self.window_spikes(self.post_spikes)
        self.an_spikes_win = self.window_spikes(self.pre_cells[0]._spiketrain)  # just sample one...
        # set freq for VS calculation
        if self.stimulus == "tone":
            f0 = self.f0
            print(
                "Tone: f0=%.3f at %3.1f dbSPL, cell CF=%.3f"
                % (self.f0, self.dbspl, self.cf)
            )
        if self.stimulus == "SAM":
            f0 = self.fMod
            print(
                (
                    "SAM Tone: f0=%.3f at %3.1f dbSPL, fMod=%3.1f dMod=%5.2f, cell CF=%.3f"
                    % (self.f0, self.dbspl, self.fMod, self.dMod, self.cf)
                )
            )
        if self.stimulus == "clicks":
            f0 = 1.0 / self.click_rate
            print(
                "Clicks: interval %.3f at %3.1f dbSPL, cell CF=%.3f "
                % (self.click_rate, self.dbspl, self.cf)
            )
        # Spike times are in ms; vector_strength expects seconds.
        # (Removed the unused local 'andiff' that duplicated this conversion.)
        self.an_vs = PU.vector_strength(self.an_spikes_win*1e-3, f0)
        print(
            "AN Vector Strength at %.1f: %7.3f, d=%.2f (us) Rayleigh: %7.3f p = %.3e n = %d"
            % (f0, self.an_vs["r"], self.an_vs["d"] * 1e6, self.an_vs["R"], self.an_vs["p"], self.an_vs["n"])
        )
        self.post_cell_vs = PU.vector_strength(self.post_spikes_win*1e-3, f0)
        print(
            "%s Vector Strength: %7.3f, d=%.2f (us) Rayleigh: %7.3f p = %.3e n = %d"
            % (self.cell, self.post_cell_vs["r"], self.post_cell_vs["d"] * 1e6,
               self.post_cell_vs["R"], self.post_cell_vs["p"], self.post_cell_vs["n"])
        )

    def show(self):
        """Plot the stimulus, spike rasters, membrane voltage, transmitter traces
        and the AN/postsynaptic phase histograms."""
        self.compute_vs()
        self.win = pg.GraphicsWindow()
        p1 = self.win.addPlot(title="stim", row=0, col=0)
        p1.plot(self.stim.time * 1000, self.stim.sound)
        # NOTE(review): linking a plot's x-axis to itself is a no-op; this was
        # probably meant to link to another plot — confirm intent.
        p1.setXLink(p1)
        p2 = self.win.addPlot(title="AN spikes", row=1, col=0)
        vt = pg.VTickGroup(self.pre_cells[0]._spiketrain)
        p2.addItem(vt)
        p2.setXLink(p1)
        p3 = self.win.addPlot(title="%s Spikes" % self.cell, row=2, col=0)
        bspktick = pg.VTickGroup(self.post_spikes)
        p3.addItem(bspktick)
        p3.setXLink(p1)
        p4 = self.win.addPlot(title="%s Vm" % self.cell, row=3, col=0)
        p4.plot(self["t"], self["vm"])
        p4.setXLink(p1)
        p5 = self.win.addPlot(title="xmtr", row=0, col=1)
        j = 0
        for k in range(self.n_sgc):
            synapse = self.synapses[k]
            for i in range(synapse.terminal.n_rzones):
                p5.plot(self["t"], self["xmtr%03d" % j], pen=(i, 15))
                j = j + 1
        p5.setXLink(p1)
        p6 = self.win.addPlot(title="AN phase", row=1, col=1)
        print("\nCell type: %s" % self.cell)
        print("Stimulus: ")
        (hist, binedges) = np.histogram(self.an_vs["ph"])
        p6.plot(
            binedges, hist, stepMode=True, fillBrush=(100, 100, 255, 150), fillLevel=0
        )
        p6.setXRange(0.0, 2 * np.pi)
        p7 = self.win.addPlot(title="%s phase" % self.cell, row=2, col=1)
        (hist, binedges) = np.histogram(self.post_cell_vs["ph"])
        p7.plot(
            binedges, hist, stepMode=True, fillBrush=(255, 100, 100, 150), fillLevel=0
        )
        p7.setXRange(0.0, 2 * np.pi)
        p7.setXLink(p6)
        self.win.show()
def main():
    """Parse command-line arguments and run the phase-locking protocol.

    Without --RI a single simulation is run and plotted; with --RI a
    rate-intensity series is run over 0-70 dB SPL and the AN/postsynaptic
    vector strengths are printed for each level.
    """
    parser = argparse.ArgumentParser(description="test sgc input phaselocking")
    parser.add_argument(
        "-c",
        "--celltype",
        type=str,
        choices=["bushy", "tstellate", "octopus", "dstellate"],
        default="bushy",
        help="cell type",
    )
    parser.add_argument(
        "--species",
        type=str,
        choices=["guineapig", "mouse", "rat"],
        default="mouse",
        help="Species",
    )
    parser.add_argument(
        "-S",
        "--stimulus",
        type=str,
        choices=["tone", "SAM", "clicks",],
        default="tone",
        help="stimulus type",
    )
    parser.add_argument(
        "--dB",
        "--dBSPL",
        type=float,
        default=60.,
        help="Sound pressure level, SPL",
    )
    parser.add_argument(
        "--dmod",
        type=float,
        default=100.,
        help="Modulation depth for SAM (percent)",
    )
    parser.add_argument(
        "--fmod",
        type=float,
        default=200.0,
        help="Modulation Frequency for SAM (Hz)",
    )
    parser.add_argument(
        "--CF",
        type=float,
        default=16000.,
        help="Carrier Frequency for SAM (Hz)",
    )
    parser.add_argument(
        "--RI",
        action="store_true",
        default=False,
        dest="RI",
        help="Run Rate-intensity with these parameters",
    )
    args = parser.parse_args()
    prot = SGCInputTestPL()
    prot.set_cell(args.celltype)
    if not args.RI:
        # Single run at the requested level, with plots
        prot.run(args)
        prot.show()
    else:
        # Rate-intensity series: sweep the sound level, collect vector strengths
        an_vs = []
        post_vs = []
        dbrange = np.linspace(0, 70, 15)
        for db in dbrange:
            prot.run(args, dB=db)
            prot.compute_vs()
            an_vs.append(prot.an_vs["r"])
            post_vs.append(prot.post_cell_vs["r"])
        print(f" {'dB':3s} {'vsan':6s} {'vsbu':6s}")
        for i, db in enumerate(dbrange):
            print(f" {int(db):3d} {an_vs[i]:5.2f} {post_vs[i]:5.2f}")
    # 'sys' is already imported at module level; the redundant in-function
    # import was removed. Only start the Qt event loop for non-interactive runs.
    if sys.flags.interactive == 0:
        pg.QtGui.QApplication.exec_()


if __name__ == "__main__":
    main()
|
# Grade processing program, written with functions.
#   total   - getTotal()
#   average - getAverage()
#   grade   - getGrade()
print('-- 성적 처리 프로그램 v2 --')
name = input('이름을 입력하세요')
kor = int(input('국어점수를 입력하세요'))
eng = int(input('영어점수를 입력하세요'))
mat = int(input('수학점수를 입력하세요'))


def getTotal(k=None, e=None, m=None):
    """Return the total score. Each score defaults to the globally read
    kor/eng/mat value, so getTotal() keeps its original behaviour while the
    function can now also be used with explicit scores."""
    if k is None:
        k = kor
    if e is None:
        e = eng
    if m is None:
        m = mat
    return k + e + m


def getAverage(k=None, e=None, m=None):
    """Return the average of the three scores (total divided by 3)."""
    return getTotal(k, e, m) / 3


def getGrade(k=None, e=None, m=None):
    """Return the letter grade for the average score: 수/우/미/양/가 for
    >=90/>=80/>=70/>=60/below 60 respectively."""
    avg = getAverage(k, e, m)
    if avg >= 90:
        return '수'
    if avg >= 80:
        return '우'
    if avg >= 70:
        return '미'
    if avg >= 60:
        return '양'
    return '가'


fmt = '%s %d %d %d %d %.1f %s'
print(fmt % (name, kor, eng, mat, getTotal(), getAverage(), getGrade()))
|
"""
This module acts as a replacement to some of Python's `shutil` module where the blocking operations are done in a
gevent-friendly manner.
"""
from __future__ import absolute_import
import shutil as _shutil
from .deferred import create_threadpool_executed_func
CONSTS = ['Error']

# Blocking shutil operations that must run on a thread pool under gevent.
DEFERRED_FUNCTIONS = [
    'copyfile',
    'copyfileobj',
    'copymode',
    'copystat',
    'copy',
    'copy2',
    'copytree',
    'move',
    'rmtree',
]

# Helpers that do no blocking I/O and can be re-exported unchanged.
PASSTHROUGH_FUNCTIONS = [
    'ignore_patterns',
]

__all__ = CONSTS + DEFERRED_FUNCTIONS + PASSTHROUGH_FUNCTIONS

module = globals()

# Wrap each blocking operation so it executes on the thread pool.
for name in DEFERRED_FUNCTIONS:
    if hasattr(_shutil, name):
        module[name] = create_threadpool_executed_func(getattr(_shutil, name))

# Re-export constants and non-blocking helpers as-is.
for name in CONSTS + PASSTHROUGH_FUNCTIONS:
    if hasattr(_shutil, name):
        module[name] = getattr(_shutil, name)
|
from ... utils.utils import uppercase, strip_non_alphanumeric, convert_brew_number, lowercase
from .. regx import regex_inv
class LastBrew(str):
    """Validated brew identifier of the form 'LLNN NNNNN' (pydantic custom str type)."""

    @classmethod
    def __get_validators__(cls):
        # pydantic calls this to collect the validators for this custom type.
        yield cls.validate

    @classmethod
    def __modify_schema__(cls, field_schema):
        # Advertise the expected shape in the generated JSON schema.
        field_schema.update(
            pattern='^[A-Z0-9]{4,4} [0-9]{5,5}$',
            examples='Must Be: LLNN NNNNN',
        )

    @classmethod
    def validate(cls, v):
        """Normalize *v* (uppercase, strip non-alphanumerics, fix the brew number)
        and require a full match against the last-brews pattern."""
        if not isinstance(v, str):
            raise TypeError('string required')
        v = uppercase(v)
        v = strip_non_alphanumeric(v)
        v = convert_brew_number(v)
        m = regex_inv.inv_last_brews_regex.fullmatch(v)
        if not m:
            raise ValueError(
                'Must Be: LLNN NNNNN')
        return cls(f'{m.group()}')

    def __repr__(self):
        # Fixed copy-paste bug: this previously reported the unrelated name 'Role'.
        return f'{type(self).__name__}({super().__repr__()})'
class HopLot(str):
    """Validated hop lot identifier: lowercase alphanumeric (pydantic custom str type)."""

    @classmethod
    def __get_validators__(cls):
        # pydantic calls this to collect the validators for this custom type.
        yield cls.validate

    @classmethod
    def __modify_schema__(cls, field_schema):
        # NOTE(review): this pattern matches a single NON-alphanumeric character
        # at the end of the string, which contradicts the 'Alphanumeric only'
        # example and the validate() behaviour — confirm the intended pattern.
        field_schema.update(
            pattern='[^A-Za-z0-9]$',
            examples='Must Be: Alphanumeric only',
        )

    @classmethod
    def validate(cls, v):
        """Normalize *v* (lowercase, strip non-alphanumerics) and require a full
        match against the hop-lot pattern."""
        if not isinstance(v, str):
            raise TypeError('string required')
        v = lowercase(v)
        v = strip_non_alphanumeric(v)
        m = regex_inv.inv_hop_lot_regex.fullmatch(v)
        if not m:
            raise ValueError(
                'Must Be: Alphanumeric only')
        return cls(f'{m.group()}')

    def __repr__(self):
        # Fixed copy-paste bug: this previously reported the unrelated name 'Role'.
        return f'{type(self).__name__}({super().__repr__()})'
|
from pkgutil import extend_path

# Allow this package to be split across multiple directories on sys.path.
__path__ = extend_path(__path__, __name__)

# Public submodules of this package.
__all__ = ["msg", "codec"]
|
import requests
import ntpath
import os
from typing import List, Dict, Union
from ytmusicapi.helpers import *
from ytmusicapi.parsers.library import *
from ytmusicapi.parsers.albums import *
from ytmusicapi.parsers.uploads import *
class UploadsMixin:
    """Mixin with YouTube Music endpoints for the user's uploaded music library:
    listing uploaded songs/albums/artists, uploading a song file, and deleting
    uploaded entities. Relies on the host class providing `_check_auth`,
    `_send_request`, `headers` and `proxies`.
    """

    def get_library_upload_songs(self, limit: int = 25, order: str = None) -> List[Dict]:
        """
        Returns a list of uploaded songs
        :param limit: How many songs to return. Default: 25
        :param order: Order of songs to return. Allowed values: 'a_to_z', 'z_to_a', 'recently_added'. Default: Default order.
        :return: List of uploaded songs.
        Each item is in the following format::
            {
              "entityId": "t_po_CICr2crg7OWpchDpjPjrBA",
              "videoId": "Uise6RPKoek",
              "artists": [{
                'name': 'Coldplay',
                'id': 'FEmusic_library_privately_owned_artist_detaila_po_CICr2crg7OWpchIIY29sZHBsYXk',
              }],
              "title": "A Sky Full Of Stars",
              "album": "Ghost Stories",
              "likeStatus": "LIKE",
              "thumbnails": [...]
            }
        """
        self._check_auth()
        endpoint = 'browse'
        body = {"browseId": "FEmusic_library_privately_owned_tracks"}
        validate_order_parameter(order)
        if order is not None:
            body["params"] = prepare_order_params(order)
        response = self._send_request(endpoint, body)
        results = find_object_by_key(nav(response, SINGLE_COLUMN_TAB + SECTION_LIST),
                                     'itemSectionRenderer')
        results = nav(results, ITEM_SECTION)
        # No shelf means the user has no uploaded tracks.
        if 'musicShelfRenderer' not in results:
            return []
        else:
            results = results['musicShelfRenderer']
            songs = []
            # The first entry of 'contents' is skipped — presumably a header/control
            # row rather than a track; confirm against the raw API response.
            songs.extend(parse_uploaded_items(results['contents'][1:]))
            # Page through continuations until 'limit' songs have been collected.
            if 'continuations' in results:
                request_func = lambda additionalParams: self._send_request(
                    endpoint, body, additionalParams)
                songs.extend(
                    get_continuations(results, 'musicShelfContinuation', limit - len(songs),
                                      request_func, parse_uploaded_items))
        return songs

    def get_library_upload_albums(self, limit: int = 25, order: str = None) -> List[Dict]:
        """
        Gets the albums of uploaded songs in the user's library.
        :param limit: Number of albums to return. Default: 25
        :param order: Order of albums to return. Allowed values: 'a_to_z', 'z_to_a', 'recently_added'. Default: Default order.
        :return: List of albums as returned by :py:func:`get_library_albums`
        """
        self._check_auth()
        body = {'browseId': 'FEmusic_library_privately_owned_releases'}
        validate_order_parameter(order)
        if order is not None:
            body["params"] = prepare_order_params(order)
        endpoint = 'browse'
        response = self._send_request(endpoint, body)
        # Continuation requests reuse the same endpoint/body with extra params.
        return parse_library_albums(
            response,
            lambda additionalParams: self._send_request(endpoint, body, additionalParams), limit)

    def get_library_upload_artists(self, limit: int = 25, order: str = None) -> List[Dict]:
        """
        Gets the artists of uploaded songs in the user's library.
        :param limit: Number of artists to return. Default: 25
        :param order: Order of artists to return. Allowed values: 'a_to_z', 'z_to_a', 'recently_added'. Default: Default order.
        :return: List of artists as returned by :py:func:`get_library_artists`
        """
        self._check_auth()
        body = {'browseId': 'FEmusic_library_privately_owned_artists'}
        validate_order_parameter(order)
        if order is not None:
            body["params"] = prepare_order_params(order)
        endpoint = 'browse'
        response = self._send_request(endpoint, body)
        return parse_library_artists(
            response,
            lambda additionalParams: self._send_request(endpoint, body, additionalParams), limit)

    def get_library_upload_artist(self, browseId: str, limit: int = 25) -> List[Dict]:
        """
        Returns a list of uploaded tracks for the artist.
        :param browseId: Browse id of the upload artist, i.e. from :py:func:`get_library_upload_songs`
        :param limit: Number of songs to return (increments of 25).
        :return: List of uploaded songs.
        Example List::
            [
              {
                "entityId": "t_po_CICr2crg7OWpchDKwoakAQ",
                "videoId": "Dtffhy8WJgw",
                "title": "Hold Me (Original Mix)",
                "artists": [
                  {
                    "name": "Jakko",
                    "id": "FEmusic_library_privately_owned_artist_detaila_po_CICr2crg7OWpchIFamFra28"
                  }
                ],
                "album": null,
                "likeStatus": "LIKE",
                "thumbnails": [...]
              }
            ]
        """
        self._check_auth()
        body = {'browseId': browseId}
        endpoint = 'browse'
        response = self._send_request(endpoint, body)
        results = nav(response, SINGLE_COLUMN_TAB + SECTION_LIST_ITEM + MUSIC_SHELF)
        # Drop the leading entry when more than one item is present — presumably
        # a header row, as in get_library_upload_songs; confirm against the API.
        if len(results['contents']) > 1:
            results['contents'].pop(0)
        items = parse_uploaded_items(results['contents'])
        if 'continuations' in results:
            request_func = lambda additionalParams: self._send_request(
                endpoint, body, additionalParams)
            parse_func = lambda contents: parse_uploaded_items(contents)
            # NOTE(review): unlike get_library_upload_songs, 'limit' is not reduced
            # by the items already parsed — confirm whether this is intentional.
            items.extend(
                get_continuations(results, 'musicShelfContinuation', limit, request_func,
                                  parse_func))
        return items

    def get_library_upload_album(self, browseId: str) -> Dict:
        """
        Get information and tracks of an album associated with uploaded tracks
        :param browseId: Browse id of the upload album, i.e. from i.e. from :py:func:`get_library_upload_songs`
        :return: Dictionary with title, description, artist and tracks.
        Example album::
            {
              "title": "18 Months",
              "type": "Album",
              "thumbnails": [...],
              "trackCount": 7,
              "duration": "24 minutes",
              "audioPlaylistId": "MLPRb_po_55chars",
              "tracks": [
                {
                  "entityId": "t_po_22chars",
                  "videoId": "FVo-UZoPygI",
                  "title": "Feel So Close",
                  "duration": "4:15",
                  "artists": None,
                  "album": {
                    "name": "18 Months",
                    "id": "FEmusic_library_privately_owned_release_detailb_po_55chars"
                  },
                  "likeStatus": "INDIFFERENT",
                  "thumbnails": None
                },
        """
        self._check_auth()
        body = {'browseId': browseId}
        endpoint = 'browse'
        response = self._send_request(endpoint, body)
        album = parse_album_header(response)
        results = nav(response, SINGLE_COLUMN_TAB + SECTION_LIST_ITEM + MUSIC_SHELF)
        album['tracks'] = parse_uploaded_items(results['contents'])
        return album

    def upload_song(self, filepath: str) -> Union[str, requests.Response]:
        """
        Uploads a song to YouTube Music
        :param filepath: Path to the music file (mp3, m4a, wma, flac or ogg)
        :return: Status String or full response
        """
        self._check_auth()
        if not os.path.isfile(filepath):
            raise Exception("The provided file does not exist.")
        supported_filetypes = ["mp3", "m4a", "wma", "flac", "ogg"]
        if os.path.splitext(filepath)[1][1:] not in supported_filetypes:
            raise Exception(
                "The provided file type is not supported by YouTube Music. Supported file types are "
                + ', '.join(supported_filetypes))
        headers = self.headers.copy()
        upload_url = "https://upload.youtube.com/upload/usermusic/http?authuser=%s" % headers['x-goog-authuser']
        filesize = os.path.getsize(filepath)
        body = ("filename=" + ntpath.basename(filepath)).encode('utf-8')
        headers.pop('content-encoding', None)
        headers['content-type'] = 'application/x-www-form-urlencoded;charset=utf-8'
        # Step 1 of the resumable upload protocol: announce the upload and its size.
        headers['X-Goog-Upload-Command'] = 'start'
        headers['X-Goog-Upload-Header-Content-Length'] = str(filesize)
        headers['X-Goog-Upload-Protocol'] = 'resumable'
        response = requests.post(upload_url, data=body, headers=headers, proxies=self.proxies)
        # Step 2: send the whole file in one chunk to the URL returned in step 1.
        headers['X-Goog-Upload-Command'] = 'upload, finalize'
        headers['X-Goog-Upload-Offset'] = '0'
        upload_url = response.headers['X-Goog-Upload-URL']
        with open(filepath, 'rb') as file:
            response = requests.post(upload_url, data=file, headers=headers, proxies=self.proxies)
        if response.status_code == 200:
            return 'STATUS_SUCCEEDED'
        else:
            return response

    def delete_upload_entity(self, entityId: str) -> Union[str, Dict]:  # pragma: no cover
        """
        Deletes a previously uploaded song or album
        :param entityId: The entity id of the uploaded song or album,
            e.g. retrieved from :py:func:`get_library_upload_songs`
        :return: Status String or error
        """
        self._check_auth()
        endpoint = 'music/delete_privately_owned_entity'
        # The endpoint expects the bare id, without the browse-id prefix.
        if 'FEmusic_library_privately_owned_release_detail' in entityId:
            entityId = entityId.replace('FEmusic_library_privately_owned_release_detail', '')
        body = {"entityId": entityId}
        response = self._send_request(endpoint, body)
        if 'error' not in response:
            return 'STATUS_SUCCEEDED'
        else:
            return response['error']
|
################################################################################
#
# Copyright 2021-2022 Rocco Matano
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import argparse
from ctwin32 import (
ctypes,
user,
advapi,
HWND_BROADCAST,
WM_SETTINGCHANGE,
SMTO_NORMAL,
REG_SZ,
REG_EXPAND_SZ,
KEY_READ,
KEY_WRITE,
)
################################################################################
def env_var_root(system=False, access=KEY_READ):
    """Return the registry key under which environment variables live:
    HKCU for per-user variables, or the opened Session Manager key under
    HKLM for system-wide variables."""
    if not system:
        return advapi.HKCU
    session_mgr = r"SYSTEM\CurrentControlSet\Control\Session Manager"
    return advapi.RegOpenKeyEx(advapi.HKLM, session_mgr, access)
################################################################################
def env_var_key(root, access=KEY_READ):
    """Open and return the 'Environment' subkey of *root* with the given access rights."""
    return advapi.RegOpenKeyEx(root, "Environment", access)
################################################################################
def is_persistent_env_var(name, system=False):
    """Return True if *name* is stored in the registry as a non-empty
    REG_SZ/REG_EXPAND_SZ environment variable (user or system scope)."""
    with env_var_root(system) as root, env_var_key(root) as key:
        try:
            val, typ = advapi.RegQueryValueEx(key, name)
        except OSError:
            # Value does not exist (or cannot be read) -> not persistent.
            return False
        return typ in (REG_SZ, REG_EXPAND_SZ) and bool(val)
################################################################################
def broadcast_env_change():
    """Broadcast WM_SETTINGCHANGE("Environment") to all top-level windows so
    running programs can pick up the modified environment (0.5 s timeout)."""
    env_str = ctypes.create_unicode_buffer("Environment")
    user.SendMessageTimeout(
        HWND_BROADCAST,
        WM_SETTINGCHANGE,
        0,
        ctypes.addressof(env_str),
        SMTO_NORMAL,
        500,
    )
################################################################################
def persist_env_var(name, value, system=False, do_broadcast=False):
    """Persistently set (or delete) an environment variable in the registry.

    A falsy ``value`` (e.g. "") deletes the variable instead of setting
    it.  Pass ``do_broadcast=True`` to announce the change to running
    applications via WM_SETTINGCHANGE.
    """
    access = KEY_WRITE | KEY_READ
    with env_var_root(system, access) as root:
        with env_var_key(root, access) as key:
            if not value:
                advapi.RegDeleteValue(key, name)
            else:
                advapi.reg_set_str(key, name, value)
    if do_broadcast:
        broadcast_env_change()
################################################################################
def persist_user_env_block(nv_dict, system=False):
    # Persist a whole {name: value} mapping and broadcast only once at
    # the end, instead of once per variable.
    # NOTE(review): despite "user" in the name, `system` is forwarded, so
    # this can also write the system-wide block -- confirm intent.
    for n, v in nv_dict.items():
        persist_env_var(n, v, system, False)
    broadcast_env_change()
################################################################################
def get_env_block(system=False):
    """Return all persistent environment variables as a {name: value} dict.

    Only string-typed values (REG_SZ / REG_EXPAND_SZ) are included.
    """
    with env_var_root(system) as root:
        with env_var_key(root, KEY_READ) as key:
            return {
                name: value
                for name, value, typ in advapi.reg_enum_values(key)
                if typ in (REG_SZ, REG_EXPAND_SZ)
                }
################################################################################
def parse_args():
    """Build and evaluate the command line: [-s] [-v] name [value].

    Omitting ``value`` leaves it as "" which downstream means "delete
    the variable" (see persist_env_var).
    """
    ape = argparse.ArgumentParser(
        description="set environment variables persistently (like setx)"
        )
    ape.add_argument(
        "-s",
        "--system",
        action="store_true",
        help="set system variable (as opposed to user variable)"
        )
    ape.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="print final variables"
        )
    ape.add_argument("name", help="name of variable")
    ape.add_argument(
        "value",
        help="value of variable (omitting it will delete the variable)",
        nargs="?",
        default="",
        )
    return ape.parse_args()
################################################################################
def main():
    """Entry point: persist one variable, then optionally dump the block."""
    args = parse_args()
    # Always broadcast so running shells/Explorer pick up the change.
    persist_env_var(args.name, args.value, args.system, True)
    if args.verbose:
        print(f"variables for {'system' if args.system else 'user'}:")
        for name, value in get_env_block(args.system).items():
            print(f"  {name} = {value}")
################################################################################
# Run as a script: behaves like a persistent `setx`.
if __name__ == "__main__":
    main()
################################################################################
|
import cv2
import time
from modules import handtrackingmodule as htm
import math
import urllib
def main():
    """Track a hand in an HTTP camera stream and classify knuckle motion.

    Reads frames from an MJPEG stream (ESP32-CAM style URL), runs the
    hand tracker, samples landmark 5 (index-finger knuckle) roughly
    every 0.1 s into a sliding window of 15 points, and passes the net
    displacement to findAction().  Also overlays the current FPS.
    Loops forever; no quit key is handled.
    """
    cam_width, cam_height = 640, 480
    cap = cv2.VideoCapture("http://192.168.1.68:81/stream")
    # NOTE(review): set(3)/set(4) request a capture size, but many HTTP
    # streams ignore it -- confirm the stream really is 640x480.
    cap.set(3, cam_width)
    cap.set(4, cam_height)
    tracker = htm.HandTracker()
    pr_time = 0       # timestamp of previous frame, for FPS
    cur_time = 0      # timestamp of current frame
    previous_time = 0 # last time a knuckle sample was recorded
    previous_locations = []  # sliding window of [x, y] knuckle samples
    while True:
        # get the image from the video capture
        success, image = cap.read()
        # getting the image from the tracker
        image = tracker.find_hands(image, True)
        landmarks = tracker.get_landmarks(image)
        if len(landmarks) > 0:
            # detect hand movement to the left
            tracker.draw_landmark(image, 5)
            if (cur_time - previous_time) > 0.1:
                knuckles = landmarks[5]
                print(knuckles[1], knuckles[2])
                previous_locations.append([knuckles[1], knuckles[2]])
                print(previous_locations)
                # keep at most 15 samples (oldest dropped first)
                if len(previous_locations) > 15:
                    previous_locations.pop(0)
                # net displacement across the window: newest minus oldest
                delta_x = previous_locations[len(previous_locations) - 1][0] - previous_locations[0][0]
                delta_y = previous_locations[len(previous_locations) - 1][1] - previous_locations[0][1]
                findAction(previous_locations, cam_width, cam_height, delta_x, delta_y)
                # print("delta_x: ", delta_x, "delta_y: ", delta_y)
            # print(previous_time)
            previous_time = cur_time
        cur_time = time.time()
        # NOTE(review): divides by the frame interval; could raise
        # ZeroDivisionError if two reads land on the same clock tick.
        fps = 1 / (cur_time - pr_time)
        pr_time = cur_time
        # displaying the current fps
        cv2.putText(image, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.imshow("Image", image)
        cv2.waitKey(1)
def findAction(previous_locations_list, cam_w, cam_h, delta_x_, delta_y_):
    """Compute the net change in x and y coords to
    determine which overall direction the motion went in.
    Also, determine which quadrant this delta belongs to, thereby
    finding the associated motion.

    :param previous_locations_list: sliding window of [x, y] samples
        (kept for callers/debugging; only the deltas are used here).
    :param cam_w: camera frame width in pixels.
    :param cam_h: camera frame height in pixels.
    :param delta_x_: net horizontal displacement (newest - oldest).
    :param delta_y_: net vertical displacement (newest - oldest).
    :return: the matched action string, or "" when no rule matched
        (e.g. a purely vertical move where delta_x_ == 0).
    """
    # Angle of the motion vector; only meaningful when delta_x_ != 0
    # (all quadrant branches below also require delta_x_ != 0).
    theta_a = 0.0
    if delta_x_ != 0:
        theta_a = math.degrees(math.atan(delta_y_ / delta_x_))
    # Reference angle is the camera frame's diagonal; its sign follows delta_y_.
    if delta_y_ > 0 and delta_x_ != 0:
        theta_ref = math.degrees(math.atan(cam_h / cam_w))
    else:
        theta_ref = math.degrees(math.atan(-cam_h / cam_w))
    if delta_x_ != 0:
        print("theta_a: ", theta_a)
        print("theta_ref: ", theta_ref)
    action = ""
    # the action are flipped because the delta x and y are flipped on the camera
    # such that the user get the right coordinates sent to the processing below.
    # quadrant I of the trig circle
    if delta_y_ > 0 and delta_x_ > 0:
        print("Q1")
        if 0 < theta_a < theta_ref:
            action = "Play Next Song"
        elif 0 < theta_ref < theta_a:
            action = "Pause Song"
    # quadrant II of the trig circle
    if delta_y_ > 0 and delta_x_ < 0:
        print("Q2")
        if 0 > theta_a > theta_ref:
            action = "Play Previous Song"
        elif 0 > theta_ref > theta_a or theta_ref > 0 > theta_a:
            action = "Pause Song"
    # quadrant III of the trig circle
    if delta_y_ < 0 and delta_x_ < 0:
        print("Q3")
        if 0 > theta_a > theta_ref or (theta_a > 0 and theta_ref < 0):
            action = "Play Previous Song"
        elif 0 > theta_ref > theta_a:
            action = "Resume Song"
    # quadrant IV of the trig circle
    if delta_y_ < 0 and delta_x_ > 0:
        print("Q4")
        if 0 > theta_a > theta_ref:
            action = "Play Next Song"
        elif 0 > theta_ref > theta_a:
            action = "Resume Song"
    print("delta_x: ", delta_x_, "delta_y: ", delta_y_)
    print(action)
    # Bug fix: the classified action used to be discarded (the return was
    # commented out), so callers could never consume the result.
    return action
# Script entry point.  The commented `prev` lists below are captured
# landmark traces kept as offline test fixtures for findAction().
if __name__ == "__main__":
    # downward motion – pause
    # prev = [[367, 29], [387, 80], [400, 192], [203, 8], [177, 6], [206, 115], [207, 229], [198, 270], [188, 322], [196, 465], [131, 78], [135, 133], [120, 239], [100, 278], [364, 183], [362, 324], [373, 352], [382, 427], [446, 240], [447, 274], [451, 348]]
    # rightward motion – play next
    # prev = [[336, 301], [382, 257], [390, 250], [409, 233], [422, 224], [446, 212], [450, 215], [451, 214], [457, 210], [467, 212], [9, 214], [36, 242], [134, 262], [203, 265], [355, 277], [535, 270], [620, 272], [101, 176], [265, 241], [405, 253], [479, 260]]
    # upward motion – resume
    # prev = [[407, 421], [283, 255], [286, 180], [302, 39], [189, 55], [141, 252], [368, 396], [372, 276], [391, 195], [398, 116], [397, 76], [380, 3], [339, 423], [337, 361], [372, 93], [375, 32]]
    # leftward motion – play previous song
    # prev = [[444, 369], [444, 369], [444, 370], [446, 370], [445, 367], [447, 369], [448, 368], [447, 368], [446, 367], [506, 223], [419, 222], [354, 234], [305, 245], [209, 246], [135, 234], [565, 297], [510, 281], [437, 279], [343, 270], [200, 250], [16, 239]]
    # findAction(prev, 640, 480)
    main()
|
from flask import render_template, current_app
from flask_babel import _
from app.email import send_email
def send_password_reset_email(user):
    """Email `user` a password-reset message built from a signed token."""
    token = user.get_reset_password_token()
    subject = _('[Naked Wrestling Club] Reset Your Password')
    # Render both the plain-text and HTML variants of the message.
    text_body = render_template('email/reset_password.txt',
                                user=user, token=token)
    html_body = render_template('email/reset_password.html',
                                user=user, token=token)
    send_email(subject,
               sender=current_app.config['ADMINS'][0],
               recipients=[user.email],
               text_body=text_body,
               html_body=html_body)
|
from sklearn.model_selection import train_test_split
data_folder = "data_new/"
train_ratio = 0.8
test_ratio = 0.1
validation_ratio = 0.1

def _read_lines(path):
    # Context manager guarantees the handle is closed even on error
    # (the original script left the readers open until interpreter exit).
    with open(path, "r") as f:
        return f.read().splitlines()

def _write_lines(path, lines, sep="\n"):
    # Join with `sep` and append one trailing separator, matching the
    # exact output format of the original script; `with` closes promptly.
    with open(path, "w") as f:
        f.write(sep.join(lines))
        f.write(sep)

# Parallel corpora: one title / stanza / emotion arc per line.
titles = _read_lines(data_folder + "titles_all.txt")
stanzas = _read_lines(data_folder + "stanzas_all.txt")
emotion_arcs = _read_lines(data_folder + "emo_all.txt")

# First split off the training portion, then divide the remainder into
# dev and test according to test_ratio : validation_ratio.
(title_train_list, title_test_list,
 poem_train_list, poem_test_list,
 emotion_train_list, emotion_test_list) = train_test_split(
    titles, stanzas, emotion_arcs, test_size=1 - train_ratio)
(title_dev_list, title_test_list,
 poem_dev_list, poem_test_list,
 emotion_dev_list, emotion_test_list) = train_test_split(
    title_test_list, poem_test_list, emotion_test_list,
    test_size=test_ratio / (test_ratio + validation_ratio))

print("Split:")
print(len(title_train_list), len(title_test_list), len(title_dev_list))

# Titles (x1), poems (x4, <|endoftext|>-delimited) and emotion arcs.
# Output files are now opened only after the splits succeed, so a split
# failure no longer truncates previously written data.
_write_lines(data_folder + "train_x1.txt", title_train_list)
_write_lines(data_folder + "dev_x1.txt", title_dev_list)
_write_lines(data_folder + "test_x1.txt", title_test_list)
_write_lines(data_folder + "train_x4.txt", poem_train_list, " <|endoftext|>\n")
_write_lines(data_folder + "dev_x4.txt", poem_dev_list, " <|endoftext|>\n")
_write_lines(data_folder + "test_x4.txt", poem_test_list, " <|endoftext|>\n")
_write_lines(data_folder + "train_mapped.txt", emotion_train_list)
_write_lines(data_folder + "dev_mapped.txt", emotion_dev_list)
_write_lines(data_folder + "test_mapped.txt", emotion_test_list)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from astropy.tests.helper import pytest
from astropy.coordinates import SkyCoord, Angle
from regions import CircleSkyRegion
from ...datasets import gammapy_extra
from ...data import ObservationList
from ...image import SkyMask
from ...utils.testing import data_manager, requires_dependency, requires_data, run_cli
from ...spectrum.results import SpectrumFitResult
from ...spectrum import SpectrumExtraction
from ..spectrum import cli
@pytest.mark.xfail(reason="Command line tool broken")
@requires_dependency('sherpa')
@requires_dependency('matplotlib')
@requires_data('gammapy-extra')
def test_spectrum_cmd(tmpdir):
    """Run the full `spectrum all` CLI and compare the PowerLaw fit
    result against a reference YAML (marked xfail while broken)."""
    # FIXME
    os.chdir(str(tmpdir))
    configfile = gammapy_extra.filename(
        'test_datasets/spectrum/spectrum_analysis_example.yaml')
    args = ['all', configfile]
    run_cli(cli, args)
    sref = gammapy_extra.filename(
        'test_datasets/spectrum/total_spectrum_stats_reference.yaml')
    sres = 'total_spectrum_stats.yaml'
    fref = gammapy_extra.filename(
        'test_datasets/spectrum/fit_result_PowerLaw_reference.yaml')
    fres = 'fit_result_PowerLaw.yaml'
    # NOTE(review): sref/sres are currently unused because the
    # SpectrumStats comparison below is commented out.
    #actual = SpectrumStats.from_yaml(sres)
    #desired = SpectrumStats.from_yaml(sref)
    #assert str(actual.to_table(format='.3g')) == str(desired.to_table(format='.3g'))
    actual = SpectrumFitResult.from_yaml(fres)
    desired = SpectrumFitResult.from_yaml(fref)
    print('TEMPDIR', tmpdir)
    # Compare at 3 significant digits to tolerate numerical fit jitter.
    assert str(actual.to_table(format='.3g')) == str(desired.to_table(format='.3g'))
@requires_dependency('sherpa')
@requires_dependency('matplotlib')
@requires_data('gammapy-extra')
def test_spectrum(tmpdir, data_manager):
    """Minimal end-to-end spectrum extraction on two Crab observations."""
    # Minimal version executing all steps
    # This could go into a script accessible to the user and/or an example
    store = data_manager['hess-crab4-hd-hap-prod2']
    obs_id = [23523, 23592]
    obs = ObservationList([store.obs(_) for _ in obs_id])
    # Circular 0.3 deg on-region centred on the Crab nebula (ICRS).
    center = SkyCoord(83.63, 22.01, unit='deg', frame='icrs')
    radius = Angle('0.3 deg')
    on_region = CircleSkyRegion(center, radius)
    # Exclusion mask keeps known gamma-ray sources out of the
    # reflected-region background estimate.
    exclusion = SkyMask.read('$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')
    bkg_method = dict(method='reflected', exclusion=exclusion)
    extraction = SpectrumExtraction(target=on_region, obs=obs, background=bkg_method)
    extraction.run(outdir=tmpdir)
|
# coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@lightly.ai
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class SampleUpdateRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
      and the value is attribute type.
      attribute_map (dict): The key is attribute name
      and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type (drives to_dict()).
    swagger_types = {
        'file_name': 'str',
        'thumb_name': 'str',
        'exif': 'dict(str, object)',
        'meta_data': 'SampleMetaData',
        'custom_meta_data': 'CustomSampleMetaData'
    }
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'file_name': 'fileName',
        'thumb_name': 'thumbName',
        'exif': 'exif',
        'meta_data': 'metaData',
        'custom_meta_data': 'customMetaData'
    }
    def __init__(self, file_name=None, thumb_name=None, exif=None, meta_data=None, custom_meta_data=None, _configuration=None):  # noqa: E501
        """SampleUpdateRequest - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        # Backing fields for the properties below; None means "unset",
        # and unset attributes are simply never assigned.
        self._file_name = None
        self._thumb_name = None
        self._exif = None
        self._meta_data = None
        self._custom_meta_data = None
        self.discriminator = None
        if file_name is not None:
            self.file_name = file_name
        if thumb_name is not None:
            self.thumb_name = thumb_name
        if exif is not None:
            self.exif = exif
        if meta_data is not None:
            self.meta_data = meta_data
        if custom_meta_data is not None:
            self.custom_meta_data = custom_meta_data
    @property
    def file_name(self):
        """Gets the file_name of this SampleUpdateRequest.  # noqa: E501
        :return: The file_name of this SampleUpdateRequest.  # noqa: E501
        :rtype: str
        """
        return self._file_name
    @file_name.setter
    def file_name(self, file_name):
        """Sets the file_name of this SampleUpdateRequest.
        :param file_name: The file_name of this SampleUpdateRequest.  # noqa: E501
        :type: str
        """
        self._file_name = file_name
    @property
    def thumb_name(self):
        """Gets the thumb_name of this SampleUpdateRequest.  # noqa: E501
        :return: The thumb_name of this SampleUpdateRequest.  # noqa: E501
        :rtype: str
        """
        return self._thumb_name
    @thumb_name.setter
    def thumb_name(self, thumb_name):
        """Sets the thumb_name of this SampleUpdateRequest.
        :param thumb_name: The thumb_name of this SampleUpdateRequest.  # noqa: E501
        :type: str
        """
        self._thumb_name = thumb_name
    @property
    def exif(self):
        """Gets the exif of this SampleUpdateRequest.  # noqa: E501
        :return: The exif of this SampleUpdateRequest.  # noqa: E501
        :rtype: dict(str, object)
        """
        return self._exif
    @exif.setter
    def exif(self, exif):
        """Sets the exif of this SampleUpdateRequest.
        :param exif: The exif of this SampleUpdateRequest.  # noqa: E501
        :type: dict(str, object)
        """
        self._exif = exif
    @property
    def meta_data(self):
        """Gets the meta_data of this SampleUpdateRequest.  # noqa: E501
        :return: The meta_data of this SampleUpdateRequest.  # noqa: E501
        :rtype: SampleMetaData
        """
        return self._meta_data
    @meta_data.setter
    def meta_data(self, meta_data):
        """Sets the meta_data of this SampleUpdateRequest.
        :param meta_data: The meta_data of this SampleUpdateRequest.  # noqa: E501
        :type: SampleMetaData
        """
        self._meta_data = meta_data
    @property
    def custom_meta_data(self):
        """Gets the custom_meta_data of this SampleUpdateRequest.  # noqa: E501
        :return: The custom_meta_data of this SampleUpdateRequest.  # noqa: E501
        :rtype: CustomSampleMetaData
        """
        return self._custom_meta_data
    @custom_meta_data.setter
    def custom_meta_data(self, custom_meta_data):
        """Sets the custom_meta_data of this SampleUpdateRequest.
        :param custom_meta_data: The custom_meta_data of this SampleUpdateRequest.  # noqa: E501
        :type: CustomSampleMetaData
        """
        self._custom_meta_data = custom_meta_data
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(SampleUpdateRequest, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SampleUpdateRequest):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, SampleUpdateRequest):
            return True
        return self.to_dict() != other.to_dict()
|
import torch as t
from .quantizer import Quantizer
def grad_scale(x, scale):
    """Identity in the forward pass; multiplies the gradient by `scale`.

    Straight-through trick: the detached residual contributes the value
    difference without a gradient, so backprop flows only through the
    scaled branch.
    """
    scaled = x * scale
    return (x - scaled).detach() + scaled
def round_pass(x):
    """Round to nearest integer with a straight-through (identity) gradient."""
    detached_residual = (x.round() - x).detach()
    return x + detached_residual
class LsqQuan(Quantizer):
    """Learned Step-size Quantization (LSQ) quantizer.

    Maps a float tensor onto an integer grid [thd_neg, thd_pos] with a
    learnable step size ``s``, using straight-through gradients for the
    round/clamp and a scaled gradient for ``s``.
    """
    def __init__(self, bit, all_positive=False, symmetric=False, per_channel=True):
        super().__init__(bit)
        if all_positive:
            assert not symmetric, "Positive quantization cannot be symmetric"
            # unsigned activation is quantized to [0, 2^b-1]
            self.thd_neg = 0
            self.thd_pos = 2 ** bit - 1
        else:
            if symmetric:
                # signed weight/activation is quantized to [-2^(b-1)+1, 2^(b-1)-1]
                self.thd_neg = - 2 ** (bit - 1) + 1
                self.thd_pos = 2 ** (bit - 1) - 1
            else:
                # signed weight/activation is quantized to [-2^(b-1), 2^(b-1)-1]
                self.thd_neg = - 2 ** (bit - 1)
                self.thd_pos = 2 ** (bit - 1) - 1
        self.per_channel = per_channel
        # Learnable step size; 0.3 is only a placeholder until
        # init_from() replaces it with a data-driven estimate.
        self.s = t.nn.Parameter(t.ones(1) * 0.3)
    def init_from(self, x, *args, **kwargs):
        """Initialize the step size from data statistics (2*mean|x|/sqrt(Qp))."""
        if self.per_channel:
            # one step size per output channel (dim 0), averaged over the rest
            self.s = t.nn.Parameter(
                x.detach().abs().mean(dim=list(range(1, x.dim())), keepdim=True) * 2 / (self.thd_pos ** 0.5))
        else:
            self.s = t.nn.Parameter(x.detach().abs().mean() * 2 / (self.thd_pos ** 0.5))
    def forward(self, x):
        # Cleanup: the per_channel and per-tensor branches computed the
        # exact same expression, so the dead branch was removed.
        s_grad_scale = 1.0 / ((self.thd_pos * x.numel()) ** 0.5)
        s_scale = grad_scale(self.s, s_grad_scale)
        # Quantize: scale down, clamp to the integer range, round with a
        # straight-through gradient, then scale back up.
        x = x / s_scale
        x = t.clamp(x, self.thd_neg, self.thd_pos)
        x = round_pass(x)
        x = x * s_scale
        return x
|
import pytest
import tensorflow as tf
from libreco.algorithms import YouTubeRanking
from tests.utils_data import prepare_feat_data
from tests.utils_reco import recommend_in_former_consumed
@pytest.mark.parametrize("task", ["rating", "ranking"])
@pytest.mark.parametrize(
    "lr_decay, reg, num_neg, use_bn, dropout_rate, hidden_units, recent_num", [
        (False, None, 1, False, None, "128,64,32", 10),
        (True, 0.001, 3, True, 0.5, "1,1,1", 6)
    ]
)
def test_youtube_ranking(
    prepare_feat_data,
    task,
    lr_decay,
    reg,
    num_neg,
    use_bn,
    dropout_rate,
    hidden_units,
    recent_num
):
    """End-to-end smoke test of YouTubeRanking over hyper-parameter combos.

    For the "rating" task, fitting is expected to fail (the model is
    ranking-only); for "ranking" it trains, predicts, and recommends.
    """
    # Each parametrized run needs a clean TF1 graph.
    tf.compat.v1.reset_default_graph()
    pd_data, train_data, eval_data, data_info = prepare_feat_data
    if task == "ranking":
        # Fixed seeds keep negative sampling deterministic across runs.
        train_data.build_negative_samples(data_info, item_gen_mode="random",
                                          num_neg=1, seed=2022)
        eval_data.build_negative_samples(data_info, item_gen_mode="random",
                                         num_neg=1, seed=2222)
    metrics = (
        ["rmse", "mae", "r2"]
        if task == "rating"
        else ["roc_auc", "precision", "ndcg"]
    )
    model = YouTubeRanking(
        task=task,
        data_info=data_info,
        embed_size=16,
        n_epochs=2,
        lr=1e-4,
        lr_decay=lr_decay,
        reg=reg,
        batch_size=256,
        num_neg=num_neg,
        use_bn=use_bn,
        dropout_rate=dropout_rate,
        hidden_units=hidden_units,
        recent_num=recent_num,
        tf_sess_config=None
    )
    if task == "rating":
        # YouTubeRanking only supports ranking, so fitting must assert.
        with pytest.raises(AssertionError):
            model.fit(
                train_data,
                verbose=2,
                shuffle=True,
                eval_data=eval_data,
                metrics=metrics
            )
    else:
        model.fit(
            train_data,
            verbose=2,
            shuffle=True,
            eval_data=eval_data,
            metrics=metrics
        )
        pred = model.predict(user=1, item=2333)
        # prediction in range
        if task == "rating":
            assert 1 <= pred <= 5
        else:
            assert 0 <= pred <= 1
        # Unknown users/items should all fall back to the same default.
        cold_pred1 = model.predict(user="cold user1", item="cold item2")
        cold_pred2 = model.predict(user="cold user2", item="cold item2")
        assert cold_pred1 == cold_pred2
        assert len(model.predict_data_with_feats(pd_data[:5])) == 5
        # cold start strategy
        with pytest.raises(ValueError):
            model.recommend_user(user=-99999, n_rec=7, cold_start="sss")
        # different recommendation for different users
        reco_take_one = [i[0] for i in model.recommend_user(user=1, n_rec=7)]
        reco_take_two = [i[0] for i in model.recommend_user(user=2, n_rec=7)]
        assert len(reco_take_one) == len(reco_take_two) == 7
        # assert reco_take_one != reco_take_two
        # Recommendations must not repeat items a user already consumed.
        assert not recommend_in_former_consumed(data_info, reco_take_one, 1)
        assert not recommend_in_former_consumed(data_info, reco_take_two, 2)
        # Any unknown user id should get the same (popularity) fallback.
        cold_reco1 = model.recommend_user(user=-99999, n_rec=3)
        cold_reco2 = model.recommend_user(user=-1, n_rec=3)
        assert cold_reco1 == cold_reco2
|
### decision trees and tree search
### first version is just a binary tree
class binaryTree(object):
    """Node of a binary tree with child and parent links.

    Used both as a plain search tree and (in buildDTree) as a decision
    tree whose node values are lists of chosen items.
    """
    def __init__(self, value):
        self.value = value        # payload stored at this node
        self.leftBranch = None    # left child (binaryTree or None)
        self.rightBranch = None   # right child (binaryTree or None)
        self.parent = None        # parent link, used by TracePath
    def setLeftBranch(self, node):
        self.leftBranch = node
    def setRightBranch(self, node):
        self.rightBranch = node
    def setParent(self, parent):
        self.parent = parent
    def getValue(self):
        return self.value
    def getLeftBranch(self):
        return self.leftBranch
    def getRightBranch(self):
        return self.rightBranch
    def getParent(self):
        return self.parent
    def __str__(self):
        # Bug fix: __str__ must return a str; the nodes in this script
        # hold ints, and returning the raw value raised TypeError under
        # str(node).
        return str(self.value)
def DFSBinary(root, fcn):
    """Depth-first search for a node satisfying goal test `fcn`.

    The list is used as a stack (push/pop at index 0); the right child
    is pushed first so the left subtree is explored before the right.
    Prints every node visited; returns True on the first hit, else False.
    """
    queue = [root]
    while len(queue) > 0:
        print 'at node ' + str(queue[0].getValue())
        if fcn(queue[0]):
            return True
        else:
            temp = queue.pop(0)
            if temp.getRightBranch():
                queue.insert(0, temp.getRightBranch())
            if temp.getLeftBranch():
                queue.insert(0, temp.getLeftBranch())
    return False
def BFSBinary(root, fcn):
    """Breadth-first search for a node satisfying goal test `fcn`.

    Same contract as DFSBinary but children are appended (FIFO queue),
    so nodes are visited level by level.
    """
    queue = [root]
    while len(queue) > 0:
        print 'at node ' + str(queue[0].getValue())
        if fcn(queue[0]):
            return True
        else:
            temp = queue.pop(0)
            if temp.getLeftBranch():
                queue.append(temp.getLeftBranch())
            if temp.getRightBranch():
                queue.append(temp.getRightBranch())
    return False
def DFSBinaryOrdered(root, fcn, ltFcn):
    """DFS that descends only one branch per node, chosen by `ltFcn`.

    When ltFcn(node) is true the left child is taken, otherwise the
    right -- a binary-search style walk for value-ordered trees.
    """
    queue = [root]
    while len(queue) > 0:
        if fcn(queue[0]):
            return True
        elif ltFcn(queue[0]):
            temp = queue.pop(0)
            if temp.getLeftBranch():
                queue.insert(0, temp.getLeftBranch())
        else:
            temp = queue.pop(0)
            if temp.getRightBranch():
                queue.insert(0, temp.getRightBranch())
    return False
# Build the sample tree (parent links included):
#
#           5
#         /   \
#        2     8
#       / \   /
#      1   4 6
#         /   \
#        3     7    (7 is the *right* child of 6)
n5 = binaryTree(5)
n2 = binaryTree(2)
n1 = binaryTree(1)
n4 = binaryTree(4)
n8 = binaryTree(8)
n6 = binaryTree(6)
n7 = binaryTree(7)
n3 = binaryTree(3)
n5.setLeftBranch(n2)
n2.setParent(n5)
n5.setRightBranch(n8)
n8.setParent(n5)
n2.setLeftBranch(n1)
n1.setParent(n2)
n2.setRightBranch(n4)
n4.setParent(n2)
n8.setLeftBranch(n6)
n6.setParent(n8)
n6.setRightBranch(n7)
n7.setParent(n6)
n4.setLeftBranch(n3)
n3.setParent(n4)
def find6(node):
    # goal test: node holds the value 6
    return node.getValue() == 6
def find10(node):
    # goal test: value 10 (not present in the sample tree)
    return node.getValue() == 10
def find2(node):
    # goal test: value 2
    return node.getValue() == 2
def lt6(node):
    # NOTE(review): despite the name, this tests *greater than* 6; used
    # as the ltFcn of DFSBinaryOrdered it steers the search left when
    # the node value exceeds 6 -- confirm the inversion is intended.
    return node.getValue() > 6
# test examples
# Exercise both traversals looking for the node holding 6.
print 'DFS'
DFSBinary(n5, find6)
print ''
print 'BFS'
BFSBinary(n5, find6)
## if we wanted to return the path that got to the goal, would need to modify
def DFSBinaryPath(root, fcn):
    """DFS variant that returns the goal-to-root path via TracePath.

    Returns the node list on success, or False when no node satisfies
    `fcn`.
    """
    queue = [root]
    while len(queue) > 0:
        if fcn(queue[0]):
            return TracePath(queue[0])
        else:
            temp = queue.pop(0)
            if temp.getRightBranch():
                queue.insert(0, temp.getRightBranch())
            if temp.getLeftBranch():
                queue.insert(0, temp.getLeftBranch())
    return False
def TracePath(node):
    """Return the list of nodes from `node` up to the root, inclusive."""
    # Iterative walk up the parent links (the original recursed).
    path = [node]
    while node.getParent():
        node = node.getParent()
        path.append(node)
    return path
print''
print 'DFS path'
# The path comes back goal-to-root: expect [6, 8, 5] for the sample tree.
pathTo6 = DFSBinaryPath(n5, find6)
print [e.getValue() for e in pathTo6]
## make a decision tree
## for efficiency should really generate on the fly, but here will build
## and then search
def buildDTree(sofar, todo):
    """Build the full inclusion/exclusion decision tree over `todo`.

    Each node's value is the list of items chosen so far; the left child
    includes todo[0], the right child excludes it.  The tree therefore
    has 2**len(todo) leaves.
    """
    if len(todo) == 0:
        return binaryTree(sofar)
    else:
        withelt = buildDTree(sofar + [todo[0]], todo[1:])
        withoutelt = buildDTree(sofar, todo[1:])
        here = binaryTree(sofar)
        here.setLeftBranch(withelt)
        here.setRightBranch(withoutelt)
        return here
def DFSDTree(root, valueFcn, constraintFcn):
    """Depth-first branch-and-bound search of a decision tree.

    Tracks the constraint-satisfying node with the highest valueFcn;
    subtrees rooted at nodes violating constraintFcn are pruned whole.
    Prints each improvement and the visit count; returns the best node.
    """
    queue = [root]
    best = None
    visited = 0
    while len(queue) > 0:
        visited += 1
        if constraintFcn(queue[0].getValue()):
            if best == None:
                best = queue[0]
                print best.getValue()
            elif valueFcn(queue[0].getValue()) > valueFcn(best.getValue()):
                best = queue[0]
                print best.getValue()
            temp = queue.pop(0)
            if temp.getRightBranch():
                queue.insert(0, temp.getRightBranch())
            if temp.getLeftBranch():
                queue.insert(0, temp.getLeftBranch())
        else:
            # Constraint violated: discard this node and its whole subtree.
            queue.pop(0)
    print 'visited', visited
    return best
def BFSDTree(root, valueFcn, constraintFcn):
    """Breadth-first variant of DFSDTree; same pruning and best tracking."""
    queue = [root]
    best = None
    visited = 0
    while len(queue) > 0:
        visited += 1
        if constraintFcn(queue[0].getValue()):
            if best == None:
                best = queue[0]
                print best.getValue()
            elif valueFcn(queue[0].getValue()) > valueFcn(best.getValue()):
                best = queue[0]
                print best.getValue()
            temp = queue.pop(0)
            if temp.getLeftBranch():
                queue.append(temp.getLeftBranch())
            if temp.getRightBranch():
                queue.append(temp.getRightBranch())
        else:
            # Prune: constraint violated, so drop the whole subtree.
            queue.pop(0)
    print 'visited', visited
    return best
# Items as [value, weight] pairs for the knapsack-style searches.
a = [6,3]
b = [7,2]
c = [8,4]
d = [9,5]
# Full inclusion/exclusion tree over the four items (16 leaves).
treeTest = buildDTree([], [a,b,c,d])
def sumValues(lst):
    """Total value (first element) of a list of [value, weight] items."""
    return sum(item[0] for item in lst)
def sumWeights(lst):
    """Total weight (second element) of a list of [value, weight] items."""
    return sum(item[1] for item in lst)
def WeightsBelow10(lst):
    """Knapsack constraint: combined weight must not exceed 10."""
    return sumWeights(lst) <= 10
def WeightsBelow6(lst):
    """Tighter knapsack constraint: combined weight must not exceed 6."""
    return sumWeights(lst) <= 6
print ''
print 'DFS decision tree'
# Best subset under weight limit 10 is items a, b, d (value 22).
foobar = DFSDTree(treeTest, sumValues, WeightsBelow10)
print foobar.getValue()
print ''
print 'BFS decision tree'
foobarnew = BFSDTree(treeTest, sumValues, WeightsBelow10)
print foobarnew.getValue()
def DFSDTreeGoodEnough(root, valueFcn, constraintFcn, stopFcn):
    """DFS branch-and-bound that stops early once stopFcn(best) holds.

    Identical to DFSDTree except a satisficing criterion lets it return
    a "good enough" answer without exhausting the tree.
    """
    stack = [root]
    best = None
    visited = 0
    while len(stack) > 0:
        visited += 1
        if constraintFcn(stack[0].getValue()):
            if best == None:
                best = stack[0]
                print best.getValue()
            elif valueFcn(stack[0].getValue()) > valueFcn(best.getValue()):
                best = stack[0]
                print best.getValue()
            if stopFcn(best.getValue()):
                # Good enough: return without finishing the search.
                print 'visited', visited
                return best
            temp = stack.pop(0)
            if temp.getRightBranch():
                stack.insert(0, temp.getRightBranch())
            if temp.getLeftBranch():
                stack.insert(0, temp.getLeftBranch())
        else:
            stack.pop(0)
    print 'visited', visited
    return best
def BFSDTreeGoodEnough(root, valueFcn, constraintFcn, stopFcn):
    """Breadth-first variant of DFSDTreeGoodEnough (early stop on stopFcn)."""
    queue = [root]
    best = None
    visited = 0
    while len(queue) > 0:
        visited += 1
        if constraintFcn(queue[0].getValue()):
            if best == None:
                best = queue[0]
                print best.getValue()
            elif valueFcn(queue[0].getValue()) > valueFcn(best.getValue()):
                best = queue[0]
                print best.getValue()
            if stopFcn(best.getValue()):
                # Satisficing answer found: stop early.
                print 'visited', visited
                return best
            temp = queue.pop(0)
            if temp.getLeftBranch():
                queue.append(temp.getLeftBranch())
            if temp.getRightBranch():
                queue.append(temp.getRightBranch())
        else:
            queue.pop(0)
    print 'visited', visited
    return best
def atLeast15(lst):
    # stop criterion: any selection worth 15 or more is "good enough"
    return sumValues(lst) >= 15
print ''
print 'DFS decision tree good enough'
foobar = DFSDTreeGoodEnough(treeTest, sumValues, WeightsBelow10,
                            atLeast15)
print foobar.getValue()
print ''
print 'BFS decision tree good enough'
foobarnew = BFSDTreeGoodEnough(treeTest, sumValues, WeightsBelow10,
                               atLeast15)
print foobarnew.getValue()
def DTImplicit(toConsider, avail):
    """Solve the 0/1 knapsack over [value, weight] items by implicit search.

    Returns (best total value, tuple of chosen items).  The decision
    tree is never materialized: each call decides whether to take the
    first remaining item with `avail` capacity left.
    """
    if toConsider == [] or avail == 0:
        return (0, ())
    first = toConsider[0]
    rest = toConsider[1:]
    if first[1] > avail:
        # Item cannot fit, so it is excluded unconditionally.
        return DTImplicit(rest, avail)
    # Branch 1: take the item, reducing the remaining capacity.
    take_val, take_items = DTImplicit(rest, avail - first[1])
    take_val += first[0]
    # Branch 2: skip the item.
    skip_val, skip_items = DTImplicit(rest, avail)
    if take_val > skip_val:
        return (take_val, take_items + (first,))
    return (skip_val, skip_items)
# Same four items, searched implicitly with capacity 10; expect the
# same optimum as the explicit-tree searches above (value 22).
stuff = [a,b,c,d]
val, taken = DTImplicit(stuff, 10)
print ''
print 'implicit decision search'
print 'value of stuff'
print val
print 'actual stuff'
print taken
def DFSBinaryNoLoop(root, fcn):
    """DFS with a visited list, so graphs containing cycles terminate.

    Same contract as DFSBinary; the `seen` list keeps already-expanded
    nodes from being pushed again (linear membership test per push).
    """
    queue = [root]
    seen = []
    while len(queue) > 0:
        print 'at node ' + str(queue[0].getValue())
        if fcn(queue[0]):
            return True
        else:
            temp = queue.pop(0)
            seen.append(temp)
            if temp.getRightBranch():
                if not temp.getRightBranch() in seen:
                    queue.insert(0, temp.getRightBranch())
            if temp.getLeftBranch():
                if not temp.getLeftBranch() in seen:
                    queue.insert(0, temp.getLeftBranch())
    return False
##comment out
# NOTE(review): these two lines deliberately introduce a cycle
# (3 -> 5 -> 2 -> 4 -> 3), so plain DFSBinary would loop forever from
# here on; DFSBinaryNoLoop is the traversal that can handle it.
n3.setLeftBranch(n5)
n5.setParent(n3)
# run DFSBinary(n5, find6)
|
from datetime import date, datetime, time, timedelta, tzinfo
import operator
from typing import Optional
import warnings
import numpy as np
from pandas._libs import NaT, Period, Timestamp, index as libindex, lib, tslib
from pandas._libs.tslibs import Resolution, parsing, timezones, to_offset
from pandas._libs.tslibs.offsets import prefix_mapping
from pandas._typing import DtypeObj, Label
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly, doc
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_scalar,
)
from pandas.core.dtypes.missing import is_valid_nat_for_dtype
from pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype
import pandas.core.common as com
from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
from pandas.core.tools.times import to_time
def _new_DatetimeIndex(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
if "data" in d and not isinstance(d["data"], DatetimeIndex):
# Avoid need to verify integrity by calling simple_new directly
data = d.pop("data")
if not isinstance(data, DatetimeArray):
# For backward compat with older pickles, we may need to construct
# a DatetimeArray to adapt to the newer _simple_new signature
tz = d.pop("tz")
freq = d.pop("freq")
dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)
else:
dta = data
for key in ["tz", "freq"]:
# These are already stored in our DatetimeArray; if they are
# also in the pickle and don't match, we have a problem.
if key in d:
assert d.pop(key) == getattr(dta, key)
result = cls._simple_new(dta, **d)
else:
with warnings.catch_warnings():
# TODO: If we knew what was going in to **d, we might be able to
# go through _simple_new instead
warnings.simplefilter("ignore")
result = cls.__new__(cls, **d)
return result
@inherit_names(
["to_perioddelta", "to_julian_date", "strftime", "isocalendar"]
+ DatetimeArray._field_ops
+ [
method
for method in DatetimeArray._datetimelike_methods
if method not in ("tz_localize",)
],
DatetimeArray,
wrap=True,
)
@inherit_names(["is_normalized", "_resolution_obj"], DatetimeArray, cache=True)
@inherit_names(
[
"_bool_ops",
"_object_ops",
"_field_ops",
"_datetimelike_ops",
"_datetimelike_methods",
"tz",
"tzinfo",
"dtype",
"to_pydatetime",
"_has_same_tz",
"_format_native_types",
"date",
"time",
"timetz",
]
+ DatetimeArray._bool_ops,
DatetimeArray,
)
class DatetimeIndex(DatetimeTimedeltaMixin):
"""
Immutable ndarray-like of datetime64 data.
Represented internally as int64, and which can be boxed to Timestamp objects
that are subclasses of datetime and carry metadata.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with.
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation.
tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str
Set the Timezone of the data.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
closed : {'left', 'right'}, optional
Set whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from 03:00
DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC
and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter
dictates how ambiguous times should be handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for ambiguous
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times.
dayfirst : bool, default False
If True, parse dates in `data` with the day first order.
yearfirst : bool, default False
If True parse dates in `data` with the year first order.
dtype : numpy.dtype or DatetimeTZDtype or str, default None
        Note that the only NumPy dtype allowed is 'datetime64[ns]'.
copy : bool, default False
Make a copy of input ndarray.
name : label, default None
Name to be stored in the index.
Attributes
----------
year
month
day
hour
minute
second
microsecond
nanosecond
date
time
timetz
dayofyear
weekofyear
week
dayofweek
weekday
quarter
tz
freq
freqstr
is_month_start
is_month_end
is_quarter_start
is_quarter_end
is_year_start
is_year_end
is_leap_year
inferred_freq
Methods
-------
normalize
strftime
snap
tz_convert
tz_localize
round
floor
ceil
to_period
to_perioddelta
to_pydatetime
to_series
to_frame
month_name
day_name
mean
See Also
--------
Index : The base pandas Index type.
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
to_datetime : Convert argument to datetime.
date_range : Create a fixed-frequency DatetimeIndex.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
"""
_typ = "datetimeindex"
_engine_type = libindex.DatetimeEngine
_supports_partial_string_indexing = True
_comparables = ["name", "freqstr", "tz"]
_attributes = ["name", "tz", "freq"]
_is_numeric_dtype = False
_data: DatetimeArray
tz: Optional[tzinfo]
    # --------------------------------------------------------------------
    # methods that dispatch to array and wrap result in DatetimeIndex
    @doc(DatetimeArray.tz_localize)
    def tz_localize(
        self, tz, ambiguous="raise", nonexistent="raise"
    ) -> "DatetimeIndex":
        # Delegate to the backing DatetimeArray, then re-wrap as an Index,
        # preserving the name.
        arr = self._data.tz_localize(tz, ambiguous, nonexistent)
        return type(self)._simple_new(arr, name=self.name)
@doc(DatetimeArray.to_period)
def to_period(self, freq=None) -> "DatetimeIndex":
arr = self._data.to_period(freq)
return type(self)._simple_new(arr, name=self.name)
    # --------------------------------------------------------------------
    # Constructors
    def __new__(
        cls,
        data=None,
        freq=lib.no_default,
        tz=None,
        normalize=False,
        closed=None,
        ambiguous="raise",
        dayfirst=False,
        yearfirst=False,
        dtype=None,
        copy=False,
        name=None,
    ):
        # Reject scalars early with an informative message; all other
        # validation is delegated to DatetimeArray._from_sequence.
        if is_scalar(data):
            raise TypeError(
                f"{cls.__name__}() must be called with a "
                f"collection of some kind, {repr(data)} was passed"
            )
        # - Cases checked above all return/raise before reaching here - #
        name = maybe_extract_name(name, data, cls)
        # Parsing, tz localization, freq inference/validation and dtype
        # checks all happen inside the array constructor.
        dtarr = DatetimeArray._from_sequence(
            data,
            dtype=dtype,
            copy=copy,
            tz=tz,
            freq=freq,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            ambiguous=ambiguous,
        )
        subarr = cls._simple_new(dtarr, name=name)
        return subarr
    @classmethod
    def _simple_new(cls, values: DatetimeArray, name: Label = None):
        """Fastpath constructor: wrap an already-validated DatetimeArray."""
        assert isinstance(values, DatetimeArray), type(values)
        # Bypass __init__/__new__ validation entirely.
        result = object.__new__(cls)
        result._data = values
        result.name = name
        result._cache = {}
        result._no_setting_name = False
        # For groupby perf. See note in indexes/base about _index_data
        result._index_data = values._data
        result._reset_identity()
        return result
    # --------------------------------------------------------------------
    @cache_readonly
    def _is_dates_only(self) -> bool:
        """
        Return a boolean if we are only dates (and don't have a timezone)
        Returns
        -------
        bool
        """
        # Deferred import to avoid a circular dependency with io.formats.
        from pandas.io.formats.format import _is_dates_only
        return self.tz is None and _is_dates_only(self._values)
    def __reduce__(self):
        # we use a special reduce here because we need
        # to simply set the .tz (and not reinterpret it)
        d = dict(data=self._data)
        d.update(self._get_attributes_dict())
        # _new_DatetimeIndex reconstructs from (class, state-dict).
        return _new_DatetimeIndex, (type(self), d), None
    def _convert_for_op(self, value):
        """
        Convert value to be insertable to ndarray.

        Raises
        ------
        ValueError
            If `value`'s timezone does not match this index's timezone.
        """
        if self._has_same_tz(value):
            return Timestamp(value).asm8
        raise ValueError("Passed item and index have different timezone")
    def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
        """
        Can we compare values of the given dtype to our own?
        """
        if not is_datetime64_any_dtype(dtype):
            return False
        if self.tz is not None:
            # If we have tz, we can compare to tzaware
            return is_datetime64tz_dtype(dtype)
        # if we dont have tz, we can only compare to tznaive
        return is_datetime64_dtype(dtype)
    # --------------------------------------------------------------------
    # Rendering Methods
    def _mpl_repr(self):
        # how to represent ourselves to matplotlib: box the i8 values into
        # datetime.datetime objects (tz-aware when self.tz is set).
        return tslib.ints_to_pydatetime(self.asi8, self.tz)
    @property
    def _formatter_func(self):
        """Return a callable that formats a single element for repr output."""
        from pandas.io.formats.format import _get_format_datetime64
        formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
        # Quote each formatted timestamp, matching Index repr conventions.
        return lambda x: f"'{formatter(x, tz=self.tz)}'"
    # --------------------------------------------------------------------
    # Set Operation Methods
    def union_many(self, others):
        """
        A bit of a hack to accelerate unioning a collection of indexes.

        Uses the O(n) _fast_union path pairwise whenever both operands are
        DatetimeIndex with compatible freq/tz; otherwise falls back to the
        generic Index.union.
        """
        this = self
        for other in others:
            if not isinstance(this, DatetimeIndex):
                # Once we have fallen back to a plain Index, stay generic.
                this = Index.union(this, other)
                continue
            if not isinstance(other, DatetimeIndex):
                try:
                    other = DatetimeIndex(other)
                except TypeError:
                    # Leave `other` as-is; generic union below will handle it.
                    pass
            this, other = this._maybe_utc_convert(other)
            if this._can_fast_union(other):
                this = this._fast_union(other)
            else:
                this = Index.union(this, other)
        return this
    # --------------------------------------------------------------------
    def _get_time_micros(self):
        """
        Return the number of microseconds since midnight.
        Returns
        -------
        ndarray[int64_t]
        """
        values = self.asi8
        if self.tz is not None and not timezones.is_utc(self.tz):
            # Convert to wall-clock (local) timestamps before extracting
            # the time-of-day component.
            values = self._data._local_timestamps()
        # Nanoseconds since midnight, then truncate to microseconds.
        nanos = values % (24 * 3600 * 1_000_000_000)
        micros = nanos // 1000
        # NaT slots are marked with -1 so they never match a real time.
        micros[self._isnan] = -1
        return micros
    def to_series(self, keep_tz=lib.no_default, index=None, name=None):
        """
        Create a Series with both index and values equal to the index keys
        useful with map for returning an indexer based on an index.
        Parameters
        ----------
        keep_tz : optional, defaults True
            Return the data keeping the timezone.
            If keep_tz is True:
              If the timezone is not set, the resulting
              Series will have a datetime64[ns] dtype.
              Otherwise the Series will have an datetime64[ns, tz] dtype; the
              tz will be preserved.
            If keep_tz is False:
              Series will have a datetime64[ns] dtype. TZ aware
              objects will have the tz removed.
            .. versionchanged:: 1.0.0
                The default value is now True.  In a future version,
                this keyword will be removed entirely.  Stop passing the
                argument to obtain the future behavior and silence the warning.
        index : Index, optional
            Index of resulting Series. If None, defaults to original index.
        name : str, optional
            Name of resulting Series. If None, defaults to name of original
            index.
        Returns
        -------
        Series
        """
        from pandas import Series
        if index is None:
            index = self._shallow_copy()
        if name is None:
            name = self.name
        # Any explicit keep_tz (True or False) is deprecated; only the
        # sentinel lib.no_default avoids a FutureWarning.
        if keep_tz is not lib.no_default:
            if keep_tz:
                warnings.warn(
                    "The 'keep_tz' keyword in DatetimeIndex.to_series "
                    "is deprecated and will be removed in a future version. "
                    "You can stop passing 'keep_tz' to silence this warning.",
                    FutureWarning,
                    stacklevel=2,
                )
            else:
                warnings.warn(
                    "Specifying 'keep_tz=False' is deprecated and this "
                    "option will be removed in a future release. If "
                    "you want to remove the timezone information, you "
                    "can do 'idx.tz_convert(None)' before calling "
                    "'to_series'.",
                    FutureWarning,
                    stacklevel=2,
                )
        else:
            # Future behavior: always keep the timezone.
            keep_tz = True
        if keep_tz and self.tz is not None:
            # preserve the tz & copy
            values = self.copy(deep=True)
        else:
            # Drop tz info by reinterpreting as naive datetime64[ns].
            values = self._values.view("M8[ns]").copy()
        return Series(values, index=index, name=name)
    def snap(self, freq="S"):
        """
        Snap time stamps to nearest occurring frequency.

        Parameters
        ----------
        freq : str or DateOffset, default "S"
            Frequency to snap each timestamp to.
        Returns
        -------
        DatetimeIndex
        """
        # Superdumb, punting on any optimizing
        freq = to_offset(freq)
        snapped = np.empty(len(self), dtype=DT64NS_DTYPE)
        for i, v in enumerate(self):
            s = v
            if not freq.is_on_offset(s):
                # Pick whichever of the surrounding on-offset timestamps
                # is closer (rollback wins ties).
                t0 = freq.rollback(s)
                t1 = freq.rollforward(s)
                if abs(s - t0) < abs(t1 - s):
                    s = t0
                else:
                    s = t1
            snapped[i] = s
        dta = DatetimeArray(snapped, dtype=self.dtype)
        return DatetimeIndex._simple_new(dta, name=self.name)
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : str
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
assert isinstance(reso, Resolution), (type(reso), reso)
valid_resos = {
"year",
"month",
"quarter",
"day",
"hour",
"minute",
"second",
"minute",
"second",
"microsecond",
}
if reso.attrname not in valid_resos:
raise KeyError
grp = reso.freq_group
per = Period(parsed, freq=grp)
start, end = per.start_time, per.end_time
# GH 24076
# If an incoming date string contained a UTC offset, need to localize
# the parsed date to this offset first before aligning with the index's
# timezone
if parsed.tzinfo is not None:
if self.tz is None:
raise ValueError(
"The index must be timezone aware when indexing "
"with a date string with a UTC offset"
)
start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)
end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)
elif self.tz is not None:
start = start.tz_localize(self.tz)
end = end.tz_localize(self.tz)
return start, end
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
if (
self.is_monotonic
and reso.attrname in ["day", "hour", "minute", "second"]
and self._resolution_obj >= reso
):
# These resolution/monotonicity validations came from GH3931,
# GH3452 and GH2369.
# See also GH14826
raise KeyError
if reso == "microsecond":
# _partial_date_slice doesn't allow microsecond resolution, but
# _parsed_string_to_bounds allows it.
raise KeyError
    def get_loc(self, key, method=None, tolerance=None):
        """
        Get integer location for requested label

        Parameters
        ----------
        key : label
        method : str, optional
        tolerance : optional

        Returns
        -------
        loc : int

        Raises
        ------
        KeyError
            If `key` is not present in the index.
        InvalidIndexError
            If `key` is not a scalar.
        """
        if not is_scalar(key):
            raise InvalidIndexError(key)
        # Keep the original for error messages; `key` may be rewritten below.
        orig_key = key
        if is_valid_nat_for_dtype(key, self.dtype):
            key = NaT
        if isinstance(key, self._data._recognized_scalars):
            # needed to localize naive datetimes
            key = self._maybe_cast_for_get_loc(key)
        elif isinstance(key, str):
            # Try partial-string indexing first, then fall back to parsing
            # the string as a single timestamp.
            try:
                return self._get_string_slice(key)
            except (TypeError, KeyError, ValueError, OverflowError):
                pass
            try:
                key = self._maybe_cast_for_get_loc(key)
            except ValueError as err:
                raise KeyError(key) from err
        elif isinstance(key, timedelta):
            # GH#20464
            raise TypeError(
                f"Cannot index {type(self).__name__} with {type(key).__name__}"
            )
        elif isinstance(key, time):
            if method is not None:
                raise NotImplementedError(
                    "cannot yet lookup inexact labels when key is a time object"
                )
            return self.indexer_at_time(key)
        else:
            # unrecognized type
            raise KeyError(key)
        try:
            return Index.get_loc(self, key, method, tolerance)
        except KeyError as err:
            raise KeyError(orig_key) from err
    def _maybe_cast_for_get_loc(self, key) -> Timestamp:
        # needed to localize naive datetimes: a tz-naive key is interpreted
        # as a wall time in self.tz; an aware key is converted to self.tz.
        key = Timestamp(key)
        if key.tzinfo is None:
            key = key.tz_localize(self.tz)
        else:
            key = key.tz_convert(self.tz)
        return key
    def _maybe_cast_slice_bound(self, label, side: str, kind):
        """
        If label is a string, cast it to datetime according to resolution.
        Parameters
        ----------
        label : object
        side : {'left', 'right'}
        kind : {'loc', 'getitem'} or None
        Returns
        -------
        label : object
        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """
        assert kind in ["loc", "getitem", None]
        if is_float(label) or isinstance(label, time) or is_integer(label):
            self._invalid_indexer("slice", label)
        if isinstance(label, str):
            freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
            parsed, reso = parsing.parse_time_string(label, freq)
            reso = Resolution.from_attrname(reso)
            lower, upper = self._parsed_string_to_bounds(reso, parsed)
            # lower, upper form the half-open interval:
            #   [parsed, parsed + 1 freq)
            # because label may be passed to searchsorted
            # the bounds need swapped if index is reverse sorted and has a
            # length > 1 (is_monotonic_decreasing gives True for empty
            # and length 1 index)
            if self._is_strictly_monotonic_decreasing and len(self) > 1:
                return upper if side == "left" else lower
            return lower if side == "left" else upper
        else:
            return label
    def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
        # Resolve a partial-string key (e.g. "2016-01") to positional loc(s).
        freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
        parsed, reso = parsing.parse_time_string(key, freq)
        reso = Resolution.from_attrname(reso)
        loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs)
        return loc
    def slice_indexer(self, start=None, end=None, step=None, kind=None):
        """
        Return indexer for specified label slice.
        Index.slice_indexer, customized to handle time slicing.
        In addition to functionality provided by Index.slice_indexer, does the
        following:
        - if both `start` and `end` are instances of `datetime.time`, it
          invokes `indexer_between_time`
        - if `start` and `end` are both either string or None perform
          value-based selection in non-monotonic cases.
        """
        # For historical reasons DatetimeIndex supports slices between two
        # instances of datetime.time as if it were applying a slice mask to
        # an array of (self.hour, self.minute, self.seconds, self.microsecond).
        if isinstance(start, time) and isinstance(end, time):
            if step is not None and step != 1:
                raise ValueError("Must have step size of 1 with time slices")
            return self.indexer_between_time(start, end)
        if isinstance(start, time) or isinstance(end, time):
            raise KeyError("Cannot mix time and non-time slice keys")
        # Pandas supports slicing with dates, treated as datetimes at midnight.
        # https://github.com/pandas-dev/pandas/issues/31501
        if isinstance(start, date) and not isinstance(start, datetime):
            start = datetime.combine(start, time(0, 0))
        if isinstance(end, date) and not isinstance(end, datetime):
            end = datetime.combine(end, time(0, 0))
        try:
            return Index.slice_indexer(self, start, end, step, kind=kind)
        except KeyError:
            # For historical reasons DatetimeIndex by default supports
            # value-based partial (aka string) slices on non-monotonic arrays,
            # let's try that.
            if (start is None or isinstance(start, str)) and (
                end is None or isinstance(end, str)
            ):
                # Build a boolean mask of rows within [start, end].
                mask = True
                if start is not None:
                    start_casted = self._maybe_cast_slice_bound(start, "left", kind)
                    mask = start_casted <= self
                if end is not None:
                    end_casted = self._maybe_cast_slice_bound(end, "right", kind)
                    mask = (self <= end_casted) & mask
                indexer = mask.nonzero()[0][::step]
                if len(indexer) == len(self):
                    # Whole index selected: return the cheaper full slice.
                    return slice(None)
                else:
                    return indexer
            else:
                raise
    # --------------------------------------------------------------------
    def is_type_compatible(self, typ) -> bool:
        """Return True if `typ` may be joined/appended with this index."""
        return typ == self.inferred_type or typ == "datetime"
    @property
    def inferred_type(self) -> str:
        # b/c datetime is represented as microseconds since the epoch, make
        # sure we can't have ambiguous indexing
        return "datetime64"
    def indexer_at_time(self, time, asof=False):
        """
        Return index locations of values at particular time of day
        (e.g. 9:30AM).
        Parameters
        ----------
        time : datetime.time or str
            Time passed in either as object (datetime.time) or as string in
            appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
            "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").
        asof : bool, default False
            Not supported; raises NotImplementedError if True.
        Returns
        -------
        values_at_time : array of integers
        See Also
        --------
        indexer_between_time : Get index locations of values between particular
            times of day.
        DataFrame.at_time : Select values at particular time of day.
        """
        if asof:
            raise NotImplementedError("'asof' argument is not supported")
        if isinstance(time, str):
            from dateutil.parser import parse
            time = parse(time).time()
        if time.tzinfo:
            if self.tz is None:
                raise ValueError("Index must be timezone aware.")
            # Compare in the query time's timezone.
            time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
        else:
            time_micros = self._get_time_micros()
        # Match by microseconds-since-midnight.
        micros = _time_to_micros(time)
        return (micros == time_micros).nonzero()[0]
    def indexer_between_time(
        self, start_time, end_time, include_start=True, include_end=True
    ):
        """
        Return index locations of values between particular times of day
        (e.g., 9:00-9:30AM).
        Parameters
        ----------
        start_time, end_time : datetime.time, str
            Time passed either as object (datetime.time) or as string in
            appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
            "%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").
        include_start : bool, default True
        include_end : bool, default True
        Returns
        -------
        values_between_time : array of integers
        See Also
        --------
        indexer_at_time : Get index locations of values at particular time of day.
        DataFrame.between_time : Select values between particular times of day.
        """
        start_time = to_time(start_time)
        end_time = to_time(end_time)
        time_micros = self._get_time_micros()
        start_micros = _time_to_micros(start_time)
        end_micros = _time_to_micros(end_time)
        # Choose <= vs < per-endpoint based on inclusivity flags.
        if include_start and include_end:
            lop = rop = operator.le
        elif include_start:
            lop = operator.le
            rop = operator.lt
        elif include_end:
            lop = operator.lt
            rop = operator.le
        else:
            lop = rop = operator.lt
        # If the interval wraps past midnight (start > end), a value matches
        # when it is after start OR before end; otherwise both must hold.
        if start_time <= end_time:
            join_op = operator.and_
        else:
            join_op = operator.or_
        mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))
        return mask.nonzero()[0]
# Arithmetic (+, -, *, ...) and logical (&, |, ^) dunder methods do not make
# sense on a DatetimeIndex; install versions that raise.
DatetimeIndex._add_numeric_methods_disabled()
DatetimeIndex._add_logical_methods_disabled()
def date_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    tz=None,
    normalize=False,
    name=None,
    closed=None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed frequency DatetimeIndex.
    Parameters
    ----------
    start : str or datetime-like, optional
        Left bound for generating dates.
    end : str or datetime-like, optional
        Right bound for generating dates.
    periods : int, optional
        Number of periods to generate.
    freq : str or DateOffset, default 'D'
        Frequency strings can have multiples, e.g. '5H'. See
        :ref:`here <timeseries.offset_aliases>` for a list of
        frequency aliases.
    tz : str or tzinfo, optional
        Time zone name for returning localized DatetimeIndex, for example
        'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
        timezone-naive.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    closed : {None, 'left', 'right'}, optional
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None, the default).
    **kwargs
        For compatibility. Has no effect on the result.
    Returns
    -------
    rng : DatetimeIndex
    See Also
    --------
    DatetimeIndex : An immutable container for datetimes.
    timedelta_range : Return a fixed frequency TimedeltaIndex.
    period_range : Return a fixed frequency PeriodIndex.
    interval_range : Return a fixed frequency IntervalIndex.
    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``DatetimeIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end`` (closed on both sides).
    To learn more about the frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
    Examples
    --------
    **Specifying the values**
    The next four examples generate the same `DatetimeIndex`, but vary
    the combination of `start`, `end` and `periods`.
    Specify `start` and `end`, with the default daily frequency.
    >>> pd.date_range(start='1/1/2018', end='1/08/2018')
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
                  dtype='datetime64[ns]', freq='D')
    Specify `start` and `periods`, the number of periods (days).
    >>> pd.date_range(start='1/1/2018', periods=8)
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
                  dtype='datetime64[ns]', freq='D')
    Specify `end` and `periods`, the number of periods (days).
    >>> pd.date_range(end='1/1/2018', periods=8)
    DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
                   '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
                  dtype='datetime64[ns]', freq='D')
    Specify `start`, `end`, and `periods`; the frequency is generated
    automatically (linearly spaced).
    >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
    DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
                   '2018-04-27 00:00:00'],
                  dtype='datetime64[ns]', freq=None)
    **Other Parameters**
    Changed the `freq` (frequency) to ``'M'`` (month end frequency).
    >>> pd.date_range(start='1/1/2018', periods=5, freq='M')
    DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
                   '2018-05-31'],
                  dtype='datetime64[ns]', freq='M')
    Multiples are allowed
    >>> pd.date_range(start='1/1/2018', periods=5, freq='3M')
    DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
                   '2019-01-31'],
                  dtype='datetime64[ns]', freq='3M')
    `freq` can also be specified as an Offset object.
    >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
    DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
                   '2019-01-31'],
                  dtype='datetime64[ns]', freq='3M')
    Specify `tz` to set the timezone.
    >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
    DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
                   '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
                   '2018-01-05 00:00:00+09:00'],
                  dtype='datetime64[ns, Asia/Tokyo]', freq='D')
    `closed` controls whether to include `start` and `end` that are on the
    boundary. The default includes boundary points on either end.
    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None)
    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
                  dtype='datetime64[ns]', freq='D')
    Use ``closed='left'`` to exclude `end` if it falls on the boundary.
    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left')
    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
                  dtype='datetime64[ns]', freq='D')
    Use ``closed='right'`` to exclude `start` if it falls on the boundary.
    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right')
    DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
                  dtype='datetime64[ns]', freq='D')
    """
    # Default to daily frequency when it cannot be inferred from the other
    # three parameters (exactly three of the four must be specified).
    if freq is None and com.any_none(periods, start, end):
        freq = "D"
    # All range generation/validation happens in the array implementation.
    dtarr = DatetimeArray._generate_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        closed=closed,
        **kwargs,
    )
    return DatetimeIndex._simple_new(dtarr, name=name)
def bdate_range(
    start=None,
    end=None,
    periods=None,
    freq="B",
    tz=None,
    normalize=True,
    name=None,
    weekmask=None,
    holidays=None,
    closed=None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed frequency DatetimeIndex, with business day as the default
    frequency.
    Parameters
    ----------
    start : str or datetime-like, default None
        Left bound for generating dates.
    end : str or datetime-like, default None
        Right bound for generating dates.
    periods : int, default None
        Number of periods to generate.
    freq : str or DateOffset, default 'B' (business daily)
        Frequency strings can have multiples, e.g. '5H'.
    tz : str or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    weekmask : str or None, default None
        Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
        only used when custom frequency strings are passed.  The default
        value None is equivalent to 'Mon Tue Wed Thu Fri'.
    holidays : list-like or None, default None
        Dates to exclude from the set of valid business days, passed to
        ``numpy.busdaycalendar``, only used when custom frequency strings
        are passed.
    closed : str, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None).
    **kwargs
        For compatibility. Has no effect on the result.
    Returns
    -------
    DatetimeIndex
    Notes
    -----
    Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified.  Specifying ``freq`` is a requirement
    for ``bdate_range``.  Use ``date_range`` if specifying ``freq`` is not
    desired.
    To learn more about the frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
    Examples
    --------
    Note how the two weekend days are skipped in the result.
    >>> pd.bdate_range(start='1/1/2018', end='1/08/2018')
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-08'],
                  dtype='datetime64[ns]', freq='B')
    """
    if freq is None:
        msg = "freq must be specified for bdate_range; use date_range instead"
        raise TypeError(msg)
    # Custom-business frequencies ("C...") are built via prefix_mapping so
    # that holidays/weekmask can be honored.
    if isinstance(freq, str) and freq.startswith("C"):
        try:
            weekmask = weekmask or "Mon Tue Wed Thu Fri"
            freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
        except (KeyError, TypeError) as err:
            msg = f"invalid custom frequency string: {freq}"
            raise ValueError(msg) from err
    elif holidays or weekmask:
        # holidays/weekmask only make sense with a custom frequency.
        msg = (
            "a custom frequency string is required when holidays or "
            f"weekmask are passed, got frequency {freq}"
        )
        raise ValueError(msg)
    return date_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        name=name,
        closed=closed,
        **kwargs,
    )
def _time_to_micros(time_obj: time) -> int:
seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second
return 1_000_000 * seconds + time_obj.microsecond
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the LAPAX linear algebra module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import itertools
import unittest
import numpy as onp
import scipy as osp
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.lib
from jax import jit, grad, jvp, vmap
from jax import lax
from jax import lax_linalg
from jax import numpy as np
from jax import scipy as jsp
from jax import test_util as jtu
from jax.lib import xla_bridge
from jax.lib import lapack
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
def T(x):
  """Transpose the trailing two axes of ``x`` (batched matrix transpose)."""
  return onp.swapaxes(x, -1, -2)
# dtypes exercised by the parameterized linalg test cases below
float_types = [onp.float32, onp.float64]
complex_types = [onp.complex64, onp.complex128]
def _skip_if_unsupported_type(dtype):
  """Skip the current test when ``dtype`` requires 64-bit support that is off.

  64-bit floats/complexes are only available in JAX when the
  --jax_enable_x64 flag is set; reads the module-level ``FLAGS``.
  """
  dtype = onp.dtype(dtype)
  if (not FLAGS.jax_enable_x64 and
      dtype in (onp.dtype('float64'), onp.dtype('complex128'))):
    raise unittest.SkipTest("--jax_enable_x64 is not set")
class NumpyLinalgTest(jtu.JaxTestCase):
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
      for shape in [(1, 1), (4, 4), (2, 5, 5), (200, 200), (1000, 0, 0)]
      for dtype in float_types + complex_types
      for rng_factory in [jtu.rand_default]))
  def testCholesky(self, shape, dtype, rng_factory):
    """Check np.linalg.cholesky against numpy, plus compile and grad checks."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    def args_maker():
      # a @ conj(a).T is Hermitian positive semi-definite, so a valid
      # Cholesky input; the doubled trailing dim makes it full-rank w.h.p.
      factor_shape = shape[:-1] + (2 * shape[-1],)
      a = rng(factor_shape, dtype)
      return [onp.matmul(a, np.conj(T(a)))]
    if (np.issubdtype(dtype, np.complexfloating) and
        (jtu.device_under_test() == "tpu" or
         (jtu.device_under_test() == "cpu" and jax.lib.version < (0, 1, 38)))):
      self.skipTest("Unimplemented case for complex Cholesky decomposition.")
    self._CheckAgainstNumpy(onp.linalg.cholesky, np.linalg.cholesky, args_maker,
                            check_dtypes=True, tol=1e-3)
    self._CompileAndCheck(np.linalg.cholesky, args_maker, check_dtypes=True)
    # Gradients are only checked in 64-bit mode, where finite differences
    # are accurate enough.
    if np.finfo(dtype).bits == 64:
      jtu.check_grads(np.linalg.cholesky, args_maker(), order=2)
  def testCholeskyGradPrecision(self):
    """Ensure the cholesky JVP rule uses highest-precision dot products."""
    rng = jtu.rand_default()
    a = rng((3, 3), onp.float32)
    # Symmetrize so the input is a valid Cholesky argument.
    a = onp.dot(a, a.T)
    jtu.assert_dot_precision(
        lax.Precision.HIGHEST, partial(jvp, np.linalg.cholesky), (a,), (a,))
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
       "n": n, "dtype": dtype, "rng_factory": rng_factory}
      for n in [0, 4, 5, 25]  # TODO(mattjj): complex64 unstable on large sizes?
      for dtype in float_types + complex_types
      for rng_factory in [jtu.rand_default]))
  def testDet(self, n, dtype, rng_factory):
    """Check np.linalg.det against numpy on random n x n matrices."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    args_maker = lambda: [rng((n, n), dtype)]
    self._CheckAgainstNumpy(onp.linalg.det, np.linalg.det, args_maker,
                            check_dtypes=True, tol=1e-3)
    self._CompileAndCheck(np.linalg.det, args_maker, check_dtypes=True,
                          rtol={onp.float64: 1e-13})
  def testDetOfSingularMatrix(self):
    """det of a rank-deficient matrix should be exactly zero."""
    # Rows are scalar multiples of each other: det = 1 - (3/2)*(2/3) = 0.
    x = np.array([[-1., 3./2], [2./3, -1.]], dtype=onp.float32)
    # NOTE(review): uses jsp.linalg.det (scipy namespace) while the other det
    # tests use np.linalg.det — presumably equivalent; confirm intent.
    self.assertAllClose(onp.float32(0), jsp.linalg.det(x), check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
      for shape in [(0, 0), (1, 1), (3, 3), (4, 4), (10, 10), (200, 200),
                    (2, 2, 2), (2, 3, 3), (3, 2, 2)]
      for dtype in float_types + complex_types
      for rng_factory in [jtu.rand_default]))
  @jtu.skip_on_devices("tpu")
  def testSlogdet(self, shape, dtype, rng_factory):
    """Check np.linalg.slogdet against numpy, including batched shapes."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(onp.linalg.slogdet, np.linalg.slogdet, args_maker,
                            check_dtypes=True, tol=1e-3)
    self._CompileAndCheck(np.linalg.slogdet, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
      for shape in [(1, 1), (4, 4), (5, 5), (2, 7, 7)]
      for dtype in float_types
      for rng_factory in [jtu.rand_default]))
  @jtu.skip_on_devices("tpu")
  def testSlogdetGrad(self, shape, dtype, rng_factory):
    """Numerically verify first/second-order gradients of slogdet."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    a = rng(shape, dtype)
    # Loose tolerances: slogdet gradients are checked via finite differences.
    jtu.check_grads(np.linalg.slogdet, (a,), 2, atol=1e-1, rtol=1e-1)
def testIssue1213(self):
    """Regression test: slogdet of a batch of scaled identity matrices."""
    # Repeat the check several times in a row; the loop index itself is
    # irrelevant to the input.
    for _ in range(5):
        batch = np.array([onp.diag(onp.ones([5], dtype=onp.float32)) * (-.01)] * 2)
        args_maker = lambda: [batch]
        self._CheckAgainstNumpy(onp.linalg.slogdet, np.linalg.slogdet, args_maker,
                                check_dtypes=True, tol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
    for shape in [(0, 0), (4, 4), (5, 5), (50, 50), (2, 6, 6)]
    for dtype in float_types + complex_types
    for rng_factory in [jtu.rand_default]))
# TODO(phawkins): enable when there is an eigendecomposition implementation
# for GPU/TPU.
@jtu.skip_on_devices("gpu", "tpu")
def testEig(self, shape, dtype, rng_factory):
    """General eigendecomposition: checks the residual of A v = w v."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    n = shape[-1]
    args_maker = lambda: [rng(shape, dtype)]

    # Norm, adjusted for dimension and type.
    def norm(x):
        norm = onp.linalg.norm(x, axis=(-2, -1))
        return norm / ((n + 1) * np.finfo(dtype).eps)

    a, = args_maker()
    w, v = np.linalg.eig(a)
    # Check the eigen equation rather than comparing against numpy directly:
    # eigenvectors are only defined up to scale/phase.
    self.assertTrue(onp.all(norm(onp.matmul(a, v) - w[..., None, :] * v) < 100))
    self._CompileAndCheck(partial(np.linalg.eig), args_maker,
                          check_dtypes=True, rtol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
    for shape in [(4, 4), (5, 5), (50, 50)]
    for dtype in float_types + complex_types
    for rng_factory in [jtu.rand_default]))
# TODO: enable when there is an eigendecomposition implementation
# for GPU/TPU.
@jtu.skip_on_devices("gpu", "tpu")
def testEigvals(self, shape, dtype, rng_factory):
    """np.linalg.eigvals must agree with the eigenvalues from np.linalg.eig."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    # Draw the sample directly; the original wrapped this in an args_maker
    # lambda and also computed an unused `n = shape[-1]`.
    a = rng(shape, dtype)
    w1, _ = np.linalg.eig(a)
    w2 = np.linalg.eigvals(a)
    self.assertAllClose(w1, w2, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
     "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
    for shape in [(1, 1), (4, 4), (5, 5)]
    for dtype in float_types + complex_types
    for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("gpu", "tpu")
def testEigBatching(self, shape, dtype, rng_factory):
    """vmap of np.linalg.eig: each batch member satisfies A v = w v."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    shape = (10,) + shape  # prepend the vmapped batch dimension
    args = rng(shape, dtype)
    ws, vs = vmap(np.linalg.eig)(args)
    self.assertTrue(onp.all(onp.linalg.norm(
        onp.matmul(args, vs) - ws[..., None, :] * vs) < 1e-3))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_n={}_lower={}".format(
        jtu.format_shape_dtype_string((n,n), dtype), lower),
     "n": n, "dtype": dtype, "lower": lower, "rng_factory": rng_factory}
    for n in [0, 4, 5, 50]
    for dtype in float_types + complex_types
    for lower in [False, True]
    for rng_factory in [jtu.rand_default]))
def testEigh(self, n, dtype, lower, rng_factory):
    """Hermitian eigendecomposition: V is unitary and A V = w V within tol."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    tol = 30
    if jtu.device_under_test() == "tpu":
        if np.issubdtype(dtype, onp.complexfloating):
            raise unittest.SkipTest("No complex eigh on TPU")
        # TODO(phawkins): this tolerance is unpleasantly high.
        tol = 1500
    args_maker = lambda: [rng((n, n), dtype)]

    uplo = "L" if lower else "U"

    # Norm, adjusted for dimension and type.
    def norm(x):
        norm = onp.linalg.norm(x, axis=(-2, -1))
        return norm / ((n + 1) * np.finfo(dtype).eps)

    a, = args_maker()
    a = (a + onp.conj(a.T)) / 2  # make the sample Hermitian
    # Only the selected triangle is passed in; symmetrize_input=False ensures
    # eigh reads exactly that triangle.
    w, v = np.linalg.eigh(onp.tril(a) if lower else onp.triu(a),
                          UPLO=uplo, symmetrize_input=False)
    # Eigenvector matrix should be (close to) unitary.
    self.assertTrue(norm(onp.eye(n) - onp.matmul(onp.conj(T(v)), v)) < 5)
    # Residual of the eigen equation.
    self.assertTrue(norm(onp.matmul(a, v) - w * v) < tol)
    self._CompileAndCheck(partial(np.linalg.eigh, UPLO=uplo), args_maker,
                          check_dtypes=True, rtol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
    for shape in [(4, 4), (5, 5), (50, 50)]
    for dtype in float_types + complex_types
    for rng_factory in [jtu.rand_default]))
def testEigvalsh(self, shape, dtype, rng_factory):
    """np.linalg.eigvalsh matches numpy on Hermitian inputs."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    # Use onp.complexfloating for the dtype test, consistent with the sibling
    # eigh tests (the original used np.complexfloating here), and fold the
    # nested ifs into one condition.
    if (jtu.device_under_test() == "tpu" and
        np.issubdtype(dtype, onp.complexfloating)):
        raise unittest.SkipTest("No complex eigh on TPU")
    n = shape[-1]

    def args_maker():
        a = rng((n, n), dtype)
        a = (a + onp.conj(a.T)) / 2  # symmetrize: eigvalsh expects Hermitian
        return [a]

    self._CheckAgainstNumpy(onp.linalg.eigvalsh, np.linalg.eigvalsh, args_maker,
                            check_dtypes=True, tol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
     "_shape={}_lower={}".format(jtu.format_shape_dtype_string(shape, dtype),
                                 lower),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory, "lower":lower}
    for shape in [(1, 1), (4, 4), (5, 5), (50, 50), (2, 10, 10)]
    for dtype in float_types + complex_types
    for rng_factory in [jtu.rand_default]
    for lower in [True, False]))
def testEighGrad(self, shape, dtype, rng_factory, lower):
    """Gradient checks for eigh (currently disabled)."""
    rng = rng_factory()
    # NOTE: everything below this skip is currently dead code.
    self.skipTest("Test fails with numeric errors.")
    uplo = "L" if lower else "U"
    a = rng(shape, dtype)
    a = (a + onp.conj(T(a))) / 2  # Hermitian sample
    ones = onp.ones((a.shape[-1], a.shape[-1]), dtype=dtype)
    a *= onp.tril(ones) if lower else onp.triu(ones)  # keep only one triangle
    # Gradient checks will fail without symmetrization as the eigh jvp rule
    # is only correct for tangents in the symmetric subspace, whereas the
    # checker checks against unconstrained (co)tangents.
    if dtype not in complex_types:
        f = partial(np.linalg.eigh, UPLO=uplo, symmetrize_input=True)
    else:  # only check eigenvalue grads for complex matrices
        f = lambda a: partial(np.linalg.eigh, UPLO=uplo, symmetrize_input=True)(a)[0]
    jtu.check_grads(f, (a,), 2, rtol=1e-1)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
     "_shape={}_lower={}".format(jtu.format_shape_dtype_string(shape, dtype),
                                 lower),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory, "lower":lower, "eps":eps}
    for shape in [(1, 1), (4, 4), (5, 5), (50, 50)]
    for dtype in complex_types
    for rng_factory in [jtu.rand_default]
    for lower in [True, False]
    for eps in [1e-4]))
# TODO(phawkins): enable when there is a complex eigendecomposition
# implementation for TPU.
@jtu.skip_on_devices("tpu")
def testEighGradVectorComplex(self, shape, dtype, rng_factory, lower, eps):
    """Verify complex eigenvector JVPs via properties of the perturbed system."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    # Special case to test for complex eigenvector grad correctness.
    # Exact eigenvector coordinate gradients are hard to test numerically for complex
    # eigensystem solvers given the extra degrees of per-eigenvector phase freedom.
    # Instead, we numerically verify the eigensystem properties on the perturbed
    # eigenvectors. You only ever want to optimize eigenvector directions, not coordinates!
    uplo = "L" if lower else "U"
    a = rng(shape, dtype)
    a = (a + onp.conj(a.T)) / 2  # Hermitian base point
    a = onp.tril(a) if lower else onp.triu(a)
    a_dot = eps * rng(shape, dtype)  # small Hermitian perturbation
    a_dot = (a_dot + onp.conj(a_dot.T)) / 2
    a_dot = onp.tril(a_dot) if lower else onp.triu(a_dot)
    # evaluate eigenvector gradient and groundtruth eigensystem for perturbed input matrix
    f = partial(np.linalg.eigh, UPLO=uplo)
    (w, v), (dw, dv) = jvp(f, primals=(a,), tangents=(a_dot,))
    new_a = a + a_dot
    new_w, new_v = f(new_a)
    new_a = (new_a + onp.conj(new_a.T)) / 2
    # Assert rtol eigenvalue delta between perturbed eigenvectors vs new true eigenvalues.
    RTOL = 1e-2
    assert onp.max(
        onp.abs((onp.diag(onp.dot(onp.conj((v+dv).T), onp.dot(new_a, (v+dv)))) - new_w) / new_w)) < RTOL
    # Redundant to above, but also assert rtol for eigenvector property with new true eigenvalues.
    assert onp.max(
        onp.linalg.norm(onp.abs(new_w*(v+dv) - onp.dot(new_a, (v+dv))), axis=0) /
        onp.linalg.norm(onp.abs(new_w*(v+dv)), axis=0)
    ) < RTOL
def testEighGradPrecision(self):
    """The eigh JVP rule must use highest dot-product precision."""
    rng = jtu.rand_default()
    matrix = rng((3, 3), onp.float32)
    eigh_jvp = partial(jvp, np.linalg.eigh)
    jtu.assert_dot_precision(lax.Precision.HIGHEST, eigh_jvp,
                             (matrix,), (matrix,))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
     "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
    for shape in [(1, 1), (4, 4), (5, 5)]
    for dtype in float_types + complex_types
    for rng_factory in [jtu.rand_default]))
def testEighBatching(self, shape, dtype, rng_factory):
    """vmap of jsp.linalg.eigh over a batch of Hermitian matrices."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    if (jtu.device_under_test() == "tpu" and
        np.issubdtype(dtype, onp.complexfloating)):
        raise unittest.SkipTest("No complex eigh on TPU")
    shape = (10,) + shape  # prepend the vmapped batch dimension
    args = rng(shape, dtype)
    args = (args + onp.conj(T(args))) / 2  # make each batch member Hermitian
    ws, vs = vmap(jsp.linalg.eigh)(args)
    # Residual of the eigen equation across the whole batch.
    self.assertTrue(onp.all(onp.linalg.norm(
        onp.matmul(args, vs) - ws[..., None, :] * vs) < 1e-3))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_ord={}_axis={}_keepdims={}".format(
        jtu.format_shape_dtype_string(shape, dtype), ord, axis, keepdims),
     "shape": shape, "dtype": dtype, "axis": axis, "keepdims": keepdims,
     "ord": ord, "rng_factory": rng_factory}
    for axis, shape in [
        (None, (1,)), (None, (7,)), (None, (5, 8)),
        (0, (9,)), (0, (4, 5)), ((1,), (10, 7, 3)), ((-2,), (4, 8)),
        (-1, (6, 3)), ((0, 2), (3, 4, 5)), ((2, 0), (7, 8, 9)),
        (None, (7, 8, 11))]
    for keepdims in [False, True]
    # Choose only the ord values numpy itself accepts for this axis/shape
    # combination: vector norms for 1-D reductions, matrix norms otherwise.
    for ord in (
        [None] if axis is None and len(shape) > 2
        else [None, 0, 1, 2, 3, -1, -2, -3, np.inf, -np.inf]
        if (axis is None and len(shape) == 1) or
           isinstance(axis, int) or
           (isinstance(axis, tuple) and len(axis) == 1)
        else [None, 'fro', 1, 2, -1, -2, np.inf, -np.inf, 'nuc'])
    for dtype in float_types + complex_types
    for rng_factory in [jtu.rand_default]))
def testNorm(self, shape, dtype, ord, axis, keepdims, rng_factory):
    """np.linalg.norm matches numpy across vector and matrix orders."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    # Matrix norms 'nuc', 2, -2 require an SVD, which is only available on CPU.
    if (ord in ('nuc', 2, -2) and (
        jtu.device_under_test() != "cpu" or
        (isinstance(axis, tuple) and len(axis) == 2))):
        raise unittest.SkipTest("No adequate SVD implementation available")
    args_maker = lambda: [rng(shape, dtype)]
    onp_fn = partial(onp.linalg.norm, ord=ord, axis=axis, keepdims=keepdims)
    np_fn = partial(np.linalg.norm, ord=ord, axis=axis, keepdims=keepdims)
    self._CheckAgainstNumpy(onp_fn, np_fn, args_maker,
                            check_dtypes=False, tol=1e-3)
    self._CompileAndCheck(np_fn, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_n={}_full_matrices={}_compute_uv={}".format(
        jtu.format_shape_dtype_string(b + (m, n), dtype), full_matrices,
        compute_uv),
     "b": b, "m": m, "n": n, "dtype": dtype, "full_matrices": full_matrices,
     "compute_uv": compute_uv, "rng_factory": rng_factory}
    for b in [(), (3,), (2, 3)]
    for m in [2, 7, 29, 53]
    for n in [2, 7, 29, 53]
    for dtype in float_types + complex_types
    for full_matrices in [False, True]
    for compute_uv in [False, True]
    for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("gpu", "tpu")  # TODO(b/145608614): SVD crashes on GPU.
def testSVD(self, b, m, n, dtype, full_matrices, compute_uv, rng_factory):
    """SVD: reconstruction A = U S V* and unitarity of U and V."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    args_maker = lambda: [rng(b + (m, n), dtype)]

    # Norm, adjusted for dimension and type.
    def norm(x):
        norm = onp.linalg.norm(x, axis=(-2, -1))
        return norm / (max(m, n) * np.finfo(dtype).eps)

    a, = args_maker()
    out = np.linalg.svd(a, full_matrices=full_matrices, compute_uv=compute_uv)
    if compute_uv:
        # Check the reconstructed matrices
        if full_matrices:
            # Truncate the full bases to the first k columns/rows before
            # reconstructing (there are only k singular values).
            k = min(m, n)
            if m < n:
                self.assertTrue(onp.all(
                    norm(a - onp.matmul(out[1][..., None, :] * out[0], out[2][..., :k, :])) < 50))
            else:
                self.assertTrue(onp.all(
                    norm(a - onp.matmul(out[1][..., None, :] * out[0][..., :, :k], out[2])) < 350))
        else:
            self.assertTrue(onp.all(
                norm(a - onp.matmul(out[1][..., None, :] * out[0], out[2])) < 300))
        # Check the unitary properties of the singular vector matrices.
        self.assertTrue(onp.all(norm(onp.eye(out[0].shape[-1]) - onp.matmul(onp.conj(T(out[0])), out[0])) < 10))
        if m >= n:
            self.assertTrue(onp.all(norm(onp.eye(out[2].shape[-1]) - onp.matmul(onp.conj(T(out[2])), out[2])) < 10))
        else:
            self.assertTrue(onp.all(norm(onp.eye(out[2].shape[-2]) - onp.matmul(out[2], onp.conj(T(out[2])))) < 20))
    else:
        # Singular values only: compare directly against numpy.
        self.assertTrue(onp.allclose(onp.linalg.svd(a, compute_uv=False), onp.asarray(out), atol=1e-4, rtol=1e-4))
    self._CompileAndCheck(partial(np.linalg.svd, full_matrices=full_matrices, compute_uv=compute_uv),
                          args_maker, check_dtypes=True)
    if not full_matrices:
        # JVP rule only supports the reduced (thin) SVD.
        svd = partial(np.linalg.svd, full_matrices=False)
        jtu.check_jvp(svd, partial(jvp, svd), (a,), rtol=1e-2, atol=1e-1)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_fullmatrices={}".format(
        jtu.format_shape_dtype_string(shape, dtype), full_matrices),
     "shape": shape, "dtype": dtype, "full_matrices": full_matrices,
     "rng_factory": rng_factory}
    for shape in [(1, 1), (3, 3), (3, 4), (2, 10, 5), (2, 200, 100)]
    for dtype in float_types + complex_types
    for full_matrices in [False, True]
    for rng_factory in [jtu.rand_default]))
def testQr(self, shape, dtype, full_matrices, rng_factory):
    """QR decomposition: reconstruction, agreement with numpy, unitarity."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    if (np.issubdtype(dtype, onp.complexfloating) and
        jtu.device_under_test() == "tpu"):
        raise unittest.SkipTest("No complex QR implementation")
    m, n = shape[-2:]
    if full_matrices:
        mode, k = "complete", m
    else:
        mode, k = "reduced", min(m, n)
    a = rng(shape, dtype)
    lq, lr = np.linalg.qr(a, mode=mode)
    # onp.linalg.qr doesn't support batch dimensions. But it seems like an
    # inevitable extension so we support it in our version.
    nq = onp.zeros(shape[:-2] + (m, k), dtype)
    nr = onp.zeros(shape[:-2] + (k, n), dtype)
    for index in onp.ndindex(*shape[:-2]):
        nq[index], nr[index] = onp.linalg.qr(a[index], mode=mode)

    max_rank = max(m, n)

    # Norm, adjusted for dimension and type.
    def norm(x):
        n = onp.linalg.norm(x, axis=(-2, -1))
        return n / (max_rank * np.finfo(dtype).eps)

    def compare_orthogonal(q1, q2):
        # Q is unique up to sign, so normalize the sign first.
        sum_of_ratios = onp.sum(onp.divide(q1, q2), axis=-2, keepdims=True)
        phases = onp.divide(sum_of_ratios, onp.abs(sum_of_ratios))
        q1 *= phases
        self.assertTrue(onp.all(norm(q1 - q2) < 30))

    # Check a ~= qr
    self.assertTrue(onp.all(norm(a - onp.matmul(lq, lr)) < 30))
    # Compare the first 'k' vectors of Q; the remainder form an arbitrary
    # orthonormal basis for the null space.
    compare_orthogonal(nq[..., :k], lq[..., :k])
    # Check that q is close to unitary.
    self.assertTrue(onp.all(
        norm(onp.eye(k) -onp.matmul(onp.conj(T(lq)), lq)) < 5))
    if not full_matrices and m >= n:
        # The QR JVP rule only supports the reduced case with m >= n.
        jtu.check_jvp(np.linalg.qr, partial(jvp, np.linalg.qr), (a,), atol=3e-3)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype,
     "rng_factory": rng_factory}
    for shape in [(10, 4, 5), (5, 3, 3), (7, 6, 4)]
    for dtype in float_types + complex_types
    for rng_factory in [jtu.rand_default]))
def testQrBatching(self, shape, dtype, rng_factory):
    """vmap of jsp.linalg.qr: Q @ R reconstructs each batch member."""
    rng = rng_factory()
    # NOTE(review): the `dtype` parameter is ignored here — samples are always
    # drawn as float32 even though the case list includes complex dtypes.
    # Presumably intentional, but verify.
    args = rng(shape, np.float32)
    qs, rs = vmap(jsp.linalg.qr)(args)
    self.assertTrue(onp.all(onp.linalg.norm(args - onp.matmul(qs, rs)) < 1e-3))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
     "_lhs={}_rhs={}".format(
         jtu.format_shape_dtype_string(lhs_shape, dtype),
         jtu.format_shape_dtype_string(rhs_shape, dtype)),
     "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
     "rng_factory": rng_factory}
    for lhs_shape, rhs_shape in [
        ((1, 1), (1, 1)),
        ((4, 4), (4,)),
        ((8, 8), (8, 4)),
        ((1, 2, 2), (3, 2)),
        ((2, 1, 3, 3), (2, 4, 3, 4)),
    ]
    for dtype in float_types + complex_types
    for rng_factory in [jtu.rand_default]))
def testSolve(self, lhs_shape, rhs_shape, dtype, rng_factory):
    """np.linalg.solve agrees with numpy, including broadcast batch dims."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)

    def make_args():
        # Fresh system matrix and right-hand side per invocation.
        return [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]

    self._CheckAgainstNumpy(onp.linalg.solve, np.linalg.solve, make_args,
                            check_dtypes=True, tol=1e-3)
    self._CompileAndCheck(np.linalg.solve, make_args, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
     "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
    for shape in [(1, 1), (4, 4), (2, 5, 5), (200, 200), (5, 5, 5)]
    for dtype in float_types
    for rng_factory in [jtu.rand_default]))
def testInv(self, shape, dtype, rng_factory):
    """np.linalg.inv agrees with numpy on (batched) invertible inputs."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    if jtu.device_under_test() == "gpu" and shape == (200, 200):
        raise unittest.SkipTest("Test is flaky on GPU")

    def args_maker():
        # Keep redrawing until numpy can invert the sample, so the
        # comparison below never sees a singular matrix.
        while True:
            candidate = rng(shape, dtype)
            try:
                onp.linalg.inv(candidate)
                return [candidate]
            except onp.linalg.LinAlgError:
                continue

    self._CheckAgainstNumpy(onp.linalg.inv, np.linalg.inv, args_maker,
                            check_dtypes=True, tol=1e-3)
    self._CompileAndCheck(np.linalg.inv, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
     "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
    for shape in [(1, 1), (4, 4), (2, 70, 7), (2000, 7), (7, 10000), (70, 7, 2)]
    for dtype in float_types + complex_types
    for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("tpu")  # SVD is not implemented on the TPU backend
def testPinv(self, shape, dtype, rng_factory):
    """np.linalg.pinv matches numpy for square, wide, tall and batched inputs."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)

    def make_args():
        return [rng(shape, dtype)]

    self._CheckAgainstNumpy(onp.linalg.pinv, np.linalg.pinv, make_args,
                            check_dtypes=True, tol=1e-3)
    self._CompileAndCheck(np.linalg.pinv, make_args, check_dtypes=True)
# Regression test for incorrect type for eigenvalues of a complex matrix.
@jtu.skip_on_devices("tpu")  # TODO(phawkins): No complex eigh implementation on TPU.
def testIssue669(self):
    """grad through eigh of a complex matrix keeps the complex dtype."""
    def test(x):
        val, vec = np.linalg.eigh(x)
        return np.real(np.sum(val))

    grad_test_jc = jit(grad(jit(test)))
    # `onp.complex` was a deprecated alias for the builtin `complex`
    # (removed in NumPy 1.24); use the builtin directly. Same dtype result
    # (complex128).
    xc = onp.eye(3, dtype=complex)
    self.assertAllClose(xc, grad_test_jc(xc), check_dtypes=True)
def testIssue1151(self):
    """Regression test: batched solve and its jacobians evaluate without error."""
    A = np.array(onp.random.randn(100, 3, 3), dtype=np.float32)
    b = np.array(onp.random.randn(100, 3), dtype=np.float32)
    x = np.linalg.solve(A, b)
    self.assertAllClose(vmap(np.dot)(A, x), b, atol=1e-3, rtol=1e-2,
                        check_dtypes=True)
    # The jacobian calls below are smoke tests only: the values are unused
    # (jac0/jac1 are deliberately overwritten); we just require that both the
    # batched and the unbatched derivative paths run.
    jac0 = jax.jacobian(np.linalg.solve, argnums=0)(A, b)
    jac1 = jax.jacobian(np.linalg.solve, argnums=1)(A, b)
    jac0 = jax.jacobian(np.linalg.solve, argnums=0)(A[0], b[0])
    jac1 = jax.jacobian(np.linalg.solve, argnums=1)(A[0], b[0])
def testIssue1383(self):
    """Regression test: third forward derivative through eigh is NaN-free."""
    key = jax.random.PRNGKey(0)
    base = jax.random.uniform(key, (2, 2))
    psd = np.dot(base, base.T)  # symmetric positive semi-definite input

    def reconstruct(mat):
        _, vec = np.linalg.eigh(mat)
        return np.dot(np.dot(vec, mat), vec.T)

    # Three nested jacfwd applications, exactly as in the original report.
    third_derivative = jax.jacfwd(jax.jacfwd(jax.jacfwd(reconstruct)))
    self.assertFalse(onp.any(onp.isnan(third_derivative(psd))))
class ScipyLinalgTest(jtu.JaxTestCase):
  """Tests for jax.scipy.linalg routines against scipy.linalg."""

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
      for shape in [(1, 1), (4, 5), (10, 5), (50, 50)]
      for dtype in float_types + complex_types
      for rng_factory in [jtu.rand_default]))
  def testLu(self, shape, dtype, rng_factory):
    """P @ L @ U must reconstruct the input matrix."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    args_maker = lambda: [rng(shape, dtype)]
    x, = args_maker()
    p, l, u = jsp.linalg.lu(x)
    self.assertAllClose(x, onp.matmul(p, onp.matmul(l, u)), check_dtypes=True,
                        rtol={onp.float32: 1e-4, onp.float64: 1e-12,
                              onp.complex64: 1e-4, onp.complex128: 1e-12})
    self._CompileAndCheck(jsp.linalg.lu, args_maker, check_dtypes=True)

  def testLuOfSingularMatrix(self):
    """LU must still reconstruct an exactly singular matrix."""
    x = np.array([[-1., 3./2], [2./3, -1.]], dtype=onp.float32)
    p, l, u = jsp.linalg.lu(x)
    self.assertAllClose(x, onp.matmul(p, onp.matmul(l, u)), check_dtypes=True)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
      for shape in [(1, 1), (4, 5), (10, 5), (10, 10), (6, 7, 7)]
      for dtype in float_types + complex_types
      for rng_factory in [jtu.rand_default]))
  @jtu.skip_on_devices("tpu")  # TODO(phawkins): precision problems on TPU.
  def testLuGrad(self, shape, dtype, rng_factory):
    """Second-order gradient check for lu; batched inputs go through vmap."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    a = rng(shape, dtype)
    lu = vmap(jsp.linalg.lu) if len(shape) > 2 else jsp.linalg.lu
    jtu.check_grads(lu, (a,), 2, atol=5e-2, rtol=1e-1)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
      for shape in [(4, 5), (6, 5)]
      for dtype in [np.float32]
      for rng_factory in [jtu.rand_default]))
  def testLuBatching(self, shape, dtype, rng_factory):
    """vmap of lu matches per-matrix scipy.linalg.lu results."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    # NOTE(review): `dtype` is ignored below; samples are always float32 —
    # harmless here since the case list only contains float32, but verify
    # before widening the dtypes.
    args = [rng(shape, np.float32) for _ in range(10)]
    expected = list(osp.linalg.lu(x) for x in args)
    ps = onp.stack([out[0] for out in expected])
    ls = onp.stack([out[1] for out in expected])
    us = onp.stack([out[2] for out in expected])
    actual_ps, actual_ls, actual_us = vmap(jsp.linalg.lu)(np.stack(args))
    self.assertAllClose(ps, actual_ps, check_dtypes=True)
    self.assertAllClose(ls, actual_ls, check_dtypes=True)
    self.assertAllClose(us, actual_us, check_dtypes=True)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
       "n": n, "dtype": dtype, "rng_factory": rng_factory}
      for n in [1, 4, 5, 200]
      for dtype in float_types + complex_types
      for rng_factory in [jtu.rand_default]))
  def testLuFactor(self, n, dtype, rng_factory):
    """lu_factor: applying the pivots to x must reconstruct L @ U."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    args_maker = lambda: [rng((n, n), dtype)]
    x, = args_maker()
    lu, piv = jsp.linalg.lu_factor(x)
    # L and U are packed into a single matrix; L has an implicit unit diagonal.
    l = onp.tril(lu, -1) + onp.eye(n, dtype=dtype)
    u = onp.triu(lu)
    # Apply the pivot row swaps to x in place, mirroring LAPACK's ipiv format.
    for i in range(n):
      x[[i, piv[i]],] = x[[piv[i], i],]
    self.assertAllClose(x, onp.matmul(l, u), check_dtypes=True, rtol=1e-3,
                        atol=1e-3)
    self._CompileAndCheck(jsp.linalg.lu_factor, args_maker, check_dtypes=True)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_lhs={}_rhs={}_trans={}".format(
           jtu.format_shape_dtype_string(lhs_shape, dtype),
           jtu.format_shape_dtype_string(rhs_shape, dtype),
           trans),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "trans": trans, "rng_factory": rng_factory}
      for lhs_shape, rhs_shape in [
          ((1, 1), (1, 1)),
          ((4, 4), (4,)),
          ((8, 8), (8, 4, 2)),
      ]
      for trans in [0, 1, 2]  # no transpose / transpose / conjugate transpose
      for dtype in float_types + complex_types
      for rng_factory in [jtu.rand_default]))
  def testLuSolve(self, lhs_shape, rhs_shape, dtype, trans, rng_factory):
    """lu_solve agrees with scipy given a scipy-produced factorization."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    osp_fun = lambda lu, piv, rhs: osp.linalg.lu_solve((lu, piv), rhs, trans=trans)
    jsp_fun = lambda lu, piv, rhs: jsp.linalg.lu_solve((lu, piv), rhs, trans=trans)
    def args_maker():
      # Factor with scipy so both implementations solve the same system.
      a = rng(lhs_shape, dtype)
      lu, piv = osp.linalg.lu_factor(a)
      return [lu, piv, rng(rhs_shape, dtype)]
    self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker,
                            check_dtypes=True, tol=1e-3)
    self._CompileAndCheck(jsp_fun, args_maker, check_dtypes=True)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_lhs={}_rhs={}_sym_pos={}_lower={}".format(
           jtu.format_shape_dtype_string(lhs_shape, dtype),
           jtu.format_shape_dtype_string(rhs_shape, dtype),
           sym_pos, lower),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "sym_pos": sym_pos, "lower": lower, "rng_factory": rng_factory}
      for lhs_shape, rhs_shape in [
          ((1, 1), (1, 1)),
          ((4, 4), (4,)),
          ((8, 8), (8, 4)),
      ]
      for sym_pos, lower in [
          (False, False),
          (True, False),
          (True, True),
      ]
      for dtype in float_types + complex_types
      for rng_factory in [jtu.rand_default]))
  def testSolve(self, lhs_shape, rhs_shape, dtype, sym_pos, lower, rng_factory):
    """jsp.linalg.solve agrees with scipy, incl. the sym_pos (Cholesky) path."""
    rng = rng_factory()
    _skip_if_unsupported_type(dtype)
    if (sym_pos and np.issubdtype(dtype, onp.complexfloating) and
        jtu.device_under_test() == "tpu"):
      raise unittest.SkipTest(
        "Complex Cholesky decomposition not implemented on TPU")
    osp_fun = lambda lhs, rhs: osp.linalg.solve(lhs, rhs, sym_pos=sym_pos, lower=lower)
    jsp_fun = lambda lhs, rhs: jsp.linalg.solve(lhs, rhs, sym_pos=sym_pos, lower=lower)
    def args_maker():
      a = rng(lhs_shape, dtype)
      if sym_pos:
        # Build a Hermitian positive (semi-)definite matrix and keep only
        # the triangle the solver is told to read.
        a = onp.matmul(a, onp.conj(T(a)))
        a = onp.tril(a) if lower else onp.triu(a)
      return [a, rng(rhs_shape, dtype)]
    self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker,
                            check_dtypes=True, tol=1e-3)
    self._CompileAndCheck(jsp_fun, args_maker, check_dtypes=True)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_lhs={}_rhs={}_lower={}_transposea={}_unit_diagonal={}".format(
           jtu.format_shape_dtype_string(lhs_shape, dtype),
           jtu.format_shape_dtype_string(rhs_shape, dtype),
           lower, transpose_a, unit_diagonal),
       "lower": lower, "transpose_a": transpose_a,
       "unit_diagonal": unit_diagonal, "lhs_shape": lhs_shape,
       "rhs_shape": rhs_shape, "dtype": dtype, "rng_factory": rng_factory}
      for lower in [False, True]
      for transpose_a in [False, True]
      for unit_diagonal in [False, True]
      for lhs_shape, rhs_shape in [
          ((4, 4), (4,)),
          ((4, 4), (4, 3)),
          ((2, 8, 8), (2, 8, 10)),
      ]
      for dtype in float_types
      for rng_factory in [jtu.rand_default]))
  def testSolveTriangular(self, lower, transpose_a, unit_diagonal, lhs_shape,
                          rhs_shape, dtype, rng_factory):
    """solve_triangular matches an explicit inverse-multiply reference."""
    _skip_if_unsupported_type(dtype)
    rng = rng_factory()
    k = rng(lhs_shape, dtype)
    # Cholesky of a well-conditioned SPD matrix gives a stable lower
    # triangular factor to solve against.
    l = onp.linalg.cholesky(onp.matmul(k, T(k))
                            + lhs_shape[-1] * onp.eye(lhs_shape[-1]))
    l = l.astype(k.dtype)
    b = rng(rhs_shape, dtype)

    if unit_diagonal:
      a = onp.tril(l, -1) + onp.eye(lhs_shape[-1], dtype=dtype)
    else:
      a = l
    a = a if lower else T(a)

    inv = onp.linalg.inv(T(a) if transpose_a else a).astype(a.dtype)
    if len(lhs_shape) == len(rhs_shape):
      onp_ans = onp.matmul(inv, b)
    else:
      onp_ans = onp.einsum("...ij,...j->...i", inv, b)

    # The standard scipy.linalg.solve_triangular doesn't support broadcasting.
    # But it seems like an inevitable extension so we support it.
    ans = jsp.linalg.solve_triangular(
        l if lower else T(l), b, trans=1 if transpose_a else 0, lower=lower,
        unit_diagonal=unit_diagonal)

    self.assertAllClose(onp_ans, ans, check_dtypes=True,
                        rtol={onp.float32: 1e-4, onp.float64: 1e-11})

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_A={}_B={}_lower={}_transposea={}_conja={}_unitdiag={}_leftside={}".format(
           jtu.format_shape_dtype_string(a_shape, dtype),
           jtu.format_shape_dtype_string(b_shape, dtype),
           lower, transpose_a, conjugate_a, unit_diagonal, left_side),
       "lower": lower, "transpose_a": transpose_a, "conjugate_a": conjugate_a,
       "unit_diagonal": unit_diagonal, "left_side": left_side,
       "a_shape": a_shape, "b_shape": b_shape, "dtype": dtype,
       "rng_factory": rng_factory}
      for lower in [False, True]
      for unit_diagonal in [False, True]
      for dtype in float_types + complex_types
      for transpose_a in [False, True]
      for conjugate_a in (
          [False] if np.issubdtype(dtype, np.floating) else [False, True])
      for left_side, a_shape, b_shape in [
          (False, (4, 4), (1, 4,)),
          (False, (3, 3), (4, 3)),
          (True, (4, 4), (4, 1)),
          (True, (4, 4), (4, 3)),
          (True, (2, 8, 8), (2, 8, 10)),
      ]
      for rng_factory in [jtu.rand_default]))
  @jtu.skip_on_devices("tpu")  # TODO(phawkins): Test fails on TPU.
  def testTriangularSolveGrad(
      self, lower, transpose_a, conjugate_a, unit_diagonal, left_side, a_shape,
      b_shape, dtype, rng_factory):
    """Second-order gradient check for lax_linalg.triangular_solve."""
    _skip_if_unsupported_type(dtype)
    rng = rng_factory()
    # Test lax_linalg.triangular_solve instead of scipy.linalg.solve_triangular
    # because it exposes more options.
    # Boost the diagonal so A is well-conditioned for the numeric check.
    A = np.tril(rng(a_shape, dtype) + 5 * onp.eye(a_shape[-1], dtype=dtype))
    A = A if lower else T(A)
    B = rng(b_shape, dtype)
    f = partial(lax_linalg.triangular_solve, lower=lower,
                transpose_a=transpose_a, conjugate_a=conjugate_a,
                unit_diagonal=unit_diagonal, left_side=left_side)
    jtu.check_grads(f, (A, B), 2, rtol=4e-2, eps=1e-3)

  def testTriangularSolveGradPrecision(self):
    """The triangular_solve JVP rule must use highest dot precision."""
    rng = jtu.rand_default()
    a = np.tril(rng((3, 3), onp.float32))
    b = rng((1, 3), onp.float32)
    jtu.assert_dot_precision(
        lax.Precision.HIGHEST,
        partial(jvp, lax_linalg.triangular_solve),
        (a, b),
        (a, b))
# Allow running this test file directly from the command line.
if __name__ == "__main__":
  absltest.main()
|
import unittest
import sys
import os
import time
import threading
import inspect
import pymsgbox
# Note: Yes, PyAutoGUI does have PyMsgBox itself as a dependency, but we won't be using that part of PyAutoGUI for this testing.
import pyautogui # PyAutoGUI simulates key presses on the message boxes.
# Small pause between each simulated input event so the GUI can keep up.
pyautogui.PAUSE = 0.1
# Delay before typing, giving the message box time to appear and take focus.
GUI_WAIT = 0.4 # if tests start failing, maybe try bumping this up a bit (though that'll slow the tests down)
"""
NOTE: You will often see this code in this test:
print('Line', inspect.currentframe().f_lineno);
This is because due to the GUI nature of these tests, if something messes up
and PyAutoGUI is unable to click on the message box, this program will get
held up. By printing out the line number, you will at least be able to see
which line displayed the message box that is held up.
This is a bit unorthodox, and I'm welcome to other suggestions about how to
deal with this possible scenario.
"""
class KeyPresses(threading.Thread):
    """Background thread that waits briefly, then types the given keys.

    The message-box call in the main thread blocks, so the key presses that
    dismiss it must be sent from a separate thread after the box has had
    time to appear.
    """

    def __init__(self, keyPresses):
        super(KeyPresses, self).__init__()
        self.keyPresses = keyPresses  # sequence of pyautogui key names to send

    def run(self):
        time.sleep(GUI_WAIT)  # give the message box time to open and take focus
        pyautogui.typewrite(self.keyPresses, interval=0.05)
class AlertTests(unittest.TestCase):
    """Tests for alert(): each case shows a box and dismisses it by keyboard."""

    def test_alert(self):
        # Exercise both the tkinter implementation and the public entry point.
        for func in (pymsgbox._alertTkinter, pymsgbox.alert):
            if func is pymsgbox._alertTkinter:
                print('Testing tkinter alert()')
            elif func is pymsgbox.alert:
                print('Testing native alert()')

            # no text
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func(), 'OK')

            # text
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello'), 'OK')

            # text and title
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title'), 'OK')

            # text, title, and custom button
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', 'Button'), 'Button')

            # using keyword arguments
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func(text='Hello', title='Title', button='Button'), 'Button')
class ConfirmTests(unittest.TestCase):
    """Tests for confirm(): keyboard navigation across default/custom buttons."""

    def test_confirm(self):
        # Exercise both the tkinter implementation and the public entry point.
        for func in (pymsgbox._confirmTkinter, pymsgbox.confirm):
            if func is pymsgbox._confirmTkinter:
                print('Testing tkinter confirm()')
            elif func is pymsgbox.confirm:
                print('Testing native confirm()')

            # press enter on OK
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func(), 'OK')

            # press right, enter on Cancel
            t = KeyPresses(['right', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func(), 'Cancel')

            # press right, left, right, enter on Cancel
            t = KeyPresses(['right', 'left', 'right', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func(), 'Cancel')

            # press tab, enter on Cancel
            t = KeyPresses(['tab', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func(), 'Cancel')

            # press tab, tab, enter on OK (focus wraps back around)
            t = KeyPresses(['tab', 'tab', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func(), 'OK')

            # with text
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello'), 'OK')

            # with text, title
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title'), 'OK')

            # with text, title, and one custom button
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', ['A']), 'A')

            # with text, title, and one custom blank button
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', ['']), '')

            # with text, title, and two custom buttons
            t = KeyPresses(['enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', ['A', 'B']), 'A')

            t = KeyPresses(['right', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', ['A', 'B']), 'B')

            t = KeyPresses(['right', 'left', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', ['A', 'B']), 'A')

            t = KeyPresses(['tab', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', ['A', 'B']), 'B')

            t = KeyPresses(['tab', 'tab', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', ['A', 'B']), 'A')

            # with text, title, and three custom buttons
            t = KeyPresses(['tab', 'tab', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', ['A', 'B', 'C']), 'C')

            # with text, title, and four custom buttons
            t = KeyPresses(['tab', 'tab', 'tab', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', ['A', 'B', 'C', 'D']), 'D')

            # with text, title, and five custom buttons
            t = KeyPresses(['tab', 'tab', 'tab', 'tab', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func('Hello', 'Title', ['A', 'B', 'C', 'D', 'E']), 'E')

            # with text, title, and three custom buttons specified with keyword arguments
            t = KeyPresses(['tab', 'tab', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func(text='Hello', title='Title', buttons=['A', 'B', 'C']), 'C')

            # test that pressing Esc is the same as clicking Cancel (but only when there is a cancel button)
            t = KeyPresses(['escape'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func(text='Escape button press test'), 'Cancel')

            # Make sure that Esc keypress does nothing if there is no Cancel button.
            t = KeyPresses(['escape', 'enter'])
            t.start()
            print('Line', inspect.currentframe().f_lineno)
            self.assertEqual(func(text='Escape button press test', buttons=['OK', 'Not OK']), 'OK')
class PromptPasswordTests(unittest.TestCase):
    """Drive the tkinter prompt()/password() dialogs with simulated keypresses."""

    def test_prompt(self):
        self._prompt_and_password_tests(pymsgbox._promptTkinter, 'prompt()')

    def test_password(self):
        # NOTE: Currently there is no way to test the appearance of the * or custom mask characters.
        self._prompt_and_password_tests(pymsgbox._passwordTkinter, 'password()')

    def _prompt_and_password_tests(self, msgBoxFunc, msgBoxFuncName):
        # Shared checks for both dialogs. Each KeyPresses thread queues
        # keystrokes that fire once the (blocking) dialog call opens.
        # entering nothing
        t = KeyPresses(['enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual((msgBoxFuncName, msgBoxFunc()), (msgBoxFuncName, ''))
        # entering text
        t = KeyPresses(['a', 'b', 'c', 'enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual((msgBoxFuncName, msgBoxFunc()), (msgBoxFuncName, 'abc'))
        # entering text, tabbing to the Ok key
        t = KeyPresses(['a', 'b', 'c', 'tab', 'enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual((msgBoxFuncName, msgBoxFunc()), (msgBoxFuncName, 'abc'))
        # entering text but hitting cancel (second tab reaches Cancel -> returns None)
        t = KeyPresses(['a', 'b', 'c', 'tab', 'tab', 'enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual((msgBoxFuncName, msgBoxFunc()), (msgBoxFuncName, None))
        # with text
        t = KeyPresses(['a', 'b', 'c', 'enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual((msgBoxFuncName, msgBoxFunc('Hello')), (msgBoxFuncName, 'abc'))
        # with text and title
        t = KeyPresses(['a', 'b', 'c', 'enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual((msgBoxFuncName, msgBoxFunc('Hello', 'Title')), (msgBoxFuncName, 'abc'))
        # with text, title and default value
        t = KeyPresses(['enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual((msgBoxFuncName, msgBoxFunc('Hello', 'Title', 'default')), (msgBoxFuncName, 'default'))
        # with text, title and default value specified by keyword arguments
        t = KeyPresses(['enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual((msgBoxFuncName, msgBoxFunc(text='Hello', title='Title', default='default')), (msgBoxFuncName, 'default'))
class TimeoutTests(unittest.TestCase):
    def test_timeout(self):
        """Each box type must return TIMEOUT_RETURN_VALUE once its timeout expires.

        Note: If these tests fail, the unit tests will hang (no keypress
        thread is started, so each dialog only closes via the timeout).
        """
        self.assertEqual(pymsgbox._alertTkinter('timeout test', timeout=300), pymsgbox.TIMEOUT_RETURN_VALUE)
        self.assertEqual(pymsgbox._confirmTkinter('timeout test', timeout=300), pymsgbox.TIMEOUT_RETURN_VALUE)
        self.assertEqual(pymsgbox.prompt('timeout test', timeout=300), pymsgbox.TIMEOUT_RETURN_VALUE)
        self.assertEqual(pymsgbox.password('timeout test', timeout=300), pymsgbox.TIMEOUT_RETURN_VALUE)
""""
# NOTE: This is weird. This test fails (the additional typed in text gets added
# to the end of the default string, instead of replacing it), but when I run
# this same code using PyAutoGUI from the interactive shell (on Win7 Py3.3) it
# works. It also works when I type it in myself.
# Commenting this out for now.
class DefaultValueOverwriteTests(unittest.TestCase):
def test_prompt(self):
self._prompt_and_password_tests(pymsgbox.prompt, 'prompt()')
def test_password(self):
# NOTE: Currently there is no way to test the appearance of the * or custom mask characters.
self._prompt_and_password_tests(pymsgbox.password, 'password()')
def _prompt_and_password_tests(self, msgBoxFunc, msgBoxFuncName):
# with text, title and default value that is typed over
t = KeyPresses(['a', 'b', 'c', 'enter'])
t.start()
print('Line', inspect.currentframe().f_lineno); self.assertEqual((msgBoxFuncName, msgBoxFunc('Hello', 'Title', 'default')), (msgBoxFuncName, 'abc'))
"""
class WindowsNativeAlertTests(unittest.TestCase):
    def test_alert(self):
        # Native MessageBox only exists on Windows; skip elsewhere.
        if sys.platform != 'win32':
            return
        # TODO - We need some way of determining if the tkinter or native message box appeared.
        # test passing True for _tkinter
        t = KeyPresses(['enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual(pymsgbox.alert(_tkinter=True), pymsgbox.OK_TEXT)
        # test passing timeout
        t = KeyPresses(['enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual(pymsgbox.alert(timeout=300), pymsgbox.OK_TEXT)
        # test non-ok button to check that it falls back to tkinter
        t = KeyPresses(['enter'])
        t.start()
        print('Line', inspect.currentframe().f_lineno)
        self.assertEqual(pymsgbox.alert(button='Test'), 'Test')
class WindowsNativeConfirmTests(unittest.TestCase):
    def test_confirm(self):
        # Native confirm box is win32-only; skip elsewhere.
        if sys.platform != 'win32':
            return
        # TODO - We need some way of determining if the tkinter or native message box appeared.
        # press enter on OK
        #t = KeyPresses(['enter'])
        #t.start()
        #print('Line', inspect.currentframe().f_lineno)
        #self.assertEqual(pymsgbox.confirm(), pymsgbox.OK_TEXT)
# Run the whole test suite when this file is invoked directly.
if __name__ == '__main__':
    unittest.main()
|
# Identify location: choose the Dropbox root path from the local hostname.
# NOTE(review): if the hostname matches none of these, `dropbox` is never
# defined and the catalogue read in the main block raises NameError — confirm
# this hard failure on unknown hosts is intended.
import socket
location = socket.gethostname()
if location == 'Monolith':
    dropbox = 'E:\\Users\\Chris\\Dropbox\\'
if location == 'Hobbitslayer':
    dropbox = 'C:\\Users\\spx7cjc\\Dropbox\\'
if location == 'saruman':
    dropbox = '/home/herdata/spx7cjc/Dropbox/'
# Import smorgasbord
import os
import sys
import multiprocessing as mp
import numpy as np
import astropy.io.votable
from astroquery import sha
import montage_wrapper.commands
import shutil
import signal
import gc
#warnings.filterwarnings("ignore")
from glob import glob
import subprocess
import wget
import pdb
import time
import ChrisFuncs
import ChrisFuncs.Coadd
# Add SWarp directory to path
os.environ['PATH'] = os.environ['PATH'] + ':/home/user/spx7cjc/swarp/bin'
# Define a timeout handler
def Handler(signum, frame):
    """Signal handler registered for SIGALRM; aborts over-long operations.

    Parameters
    ----------
    signum : int
        Signal number delivered by the OS (unused).
    frame : frame or None
        Interrupted stack frame (unused).

    Raises
    ------
    Exception
        Always raised so callers can catch the timeout via try/except.
    """
    # Fixed misspelled message ("Timout!" -> "Timeout!").
    raise Exception("Timeout!")
# Define function to wget and extract Spitzer files
def Spitzer_wget(tile_url, tile_filename):
    """Download a Spitzer archive zip (retrying until success) and unzip it.

    Extraction happens in the current working directory, which the caller is
    expected to have set beforehand (the main script chdirs into the Raw dir).

    Parameters
    ----------
    tile_url : str
        URL of the zip archive to fetch.
    tile_filename : str
        Local path for the downloaded archive; any pre-existing file is replaced.
    """
    print('Acquiring '+tile_url)
    if os.path.exists(tile_filename):
        os.remove(tile_filename)
    success = False
    while not success:
        try:
            wget.download(tile_url, out=tile_filename)
            print('Successful acquisition of '+tile_url)
            success = True
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit can
            # still abort the otherwise-infinite retry loop.
            print('Failure! Retrying acquistion of '+tile_url)
            time.sleep(0.1)
            success = False
    os.system('unzip '+tile_filename)
# Define function to replace null pixels in SWarp outputs with NaNs
def Spitzer_SWarp_NaN(target):
    """Replace SWarp's null-value pixels (large negatives) with NaNs, in place.

    Parameters
    ----------
    target : str
        Path to a FITS file; its primary HDU is rewritten with NaNs where
        SWarp wrote its blank-pixel sentinel.
    """
    in_fitsdata = astropy.io.fits.open(target)
    in_image = in_fitsdata[0].data
    in_header = in_fitsdata[0].header
    in_fitsdata.close()
    out_image = in_image.copy()
    # SWarp marks blank pixels with a very large negative sentinel value.
    # np.NaN alias was removed in NumPy 2.0; np.nan works on all versions.
    out_image[ np.where( out_image<-1E20 ) ] = np.nan
    out_hdu = astropy.io.fits.PrimaryHDU(data=out_image, header=in_header)
    out_hdulist = astropy.io.fits.HDUList([out_hdu])
    out_hdulist.writeto(target, clobber=True)
# Commence main task
if __name__ == "__main__":

    # Decide what instrument to work for ('IRAC' or 'MIPS')
    instrument = 'IRAC'

    # Define paths
    in_dir = '/home/sarumandata2/spx7cjc/NESS/Ancillary_Data/Spitzer/Temporary_Files/'
    out_dir = '/home/sarumandata2/spx7cjc/NESS/Ancillary_Data/Spitzer/Mosaics_'+instrument+'/'

    # Read in source catalogue
    ness_cat = np.genfromtxt(dropbox+'Work/Tables/NESS/NESS_Sample.csv', delimiter=',', names=True, dtype=None)
    name_list = ness_cat['name']

    # Read in list of already-Montaged sources (creating the file if absent)
    already_processed_path = '/home/sarumandata2/spx7cjc/NESS/Ancillary_Data/Spitzer/Spitzer_'+instrument+'_Already_Processed_List.dat'
    if not os.path.exists(already_processed_path):
        open(already_processed_path,'a')
    already_processed = np.genfromtxt(already_processed_path, dtype=('S50')).tolist()

    # Identify targets not yet processed
    # NOTE(review): remaining_list is built but never used below — confirm.
    remaining_list = []
    for i in range(0, name_list.shape[0]):
        already_done = 0
        name = name_list[i]
        if name not in already_processed:
            remaining_list.append(i)
    name_list = ness_cat['name']
    ra_list = ness_cat['ra']
    dec_list = ness_cat['dec']

    # State band information (channel names and pixel sizes per band)
    if instrument=='IRAC':
        bands_dict = {'3.6um':{'instrument':'IRAC','band_long':'3.6','channel':'ch1','pix_size':0.6},
                      '4.5um':{'instrument':'IRAC','band_long':'4.5','channel':'ch2','pix_size':0.6},
                      '5.8um':{'instrument':'IRAC','band_long':'5.8','channel':'ch3','pix_size':0.6},
                      '8.0um':{'instrument':'IRAC','band_long':'8.0','channel':'ch4','pix_size':0.6}}
    elif instrument=='MIPS':
        bands_dict = {'24um':{'instrument':'MIPS','band_long':'24','channel':'ch1','pix_size':2.45},
                      '70um':{'instrument':'MIPS','band_long':'70','channel':'ch2','pix_size':4.0},
                      '160um':{'instrument':'MIPS','band_long':'160','channel':'ch3','pix_size':8.0}}

    # Register signal function handler, for dealing with timeouts
    signal.signal(signal.SIGALRM, Handler)

    # Record time taken
    time_list = [ time.time() ]

    # Loop over each source (random order, presumably so parallel runs
    # on different hosts don't collide — TODO confirm)
    for i in np.random.permutation(range(0, ness_cat.shape[0])):
        name = name_list[i].replace(' ','_')
        ra = ra_list[i]
        dec = dec_list[i]
        time_start = time.time()
        width = 0.25# cutout width, in degrees
        print('Processing source '+name)

        # Check if source is in list of already-montaged sources
        if name in already_processed:
            print(name+' already processed')
            continue

        # Check which, if any, bands already have data
        print('Checking existing finalised cutouts for matches to current source')
        bands_dict_req = {}
        for band in bands_dict.keys():
            if name+'_Spitzer_'+bands_dict[band]['band_long']+'.fits.gz' not in os.listdir('/home/sarumandata2/spx7cjc/NESS/Ancillary_Data/Spitzer/Cutouts/'):
                bands_dict_req[band] = bands_dict[band]

        # Create tile processing directories (deleting any prior), and set appropriate Python (ie, Montage) working directory
        gal_dir = in_dir+str(name)+'/'
        if os.path.exists(gal_dir):
            shutil.rmtree(gal_dir)
        os.makedirs(gal_dir)
        os.makedirs(gal_dir+'Errors')
        os.makedirs(gal_dir+'Raw')
        os.chdir(gal_dir+'Raw')

        # Perform query, with error handling (up to 10 attempts)
        print('Querying Spitzer server')
        query_success = False
        query_fail_count = 0
        while query_success==False:
            if query_fail_count>=10:
                break
            try:
                print('NOTE: Astroquery currently not working with Spitzer; gettings query results using Spitzer API instead')
                # Disabled astroquery path, kept for reference:
                """
                query_obj = Spitzer.query(ra=ra, dec=dec, size=width)
                """
                query_url = 'http://sha.ipac.caltech.edu/applications/Spitzer/SHA/servlet/DataService?RA='+str(ra)+'&DEC='+str(dec)+'&SIZE='+str(width)+'&VERB=3&DATASET=ivo%3A%2F%2Firsa.csv%2Fspitzer.level2'
                query_filename = gal_dir+'Spitzer_Query.csv'
                if os.path.exists(query_filename):
                    os.remove(query_filename)
                # Download the IPAC-table result, then convert to CSV via the
                # external STILTS tool.
                wget.download(query_url, out=query_filename.replace('.csv','.txt'))
                os.system('stilts tcopy ifmt=ipac ofmt=csv '+query_filename.replace('.csv','.txt')+' '+query_filename)
                os.remove(query_filename.replace('.csv','.txt'))
                query_success = True
            except:
                print('Spitzer query failed; reattempting')
                query_fail_count += 1
        if not os.path.exists(query_filename):
            query_success=False

        # If the query ultimately failed, mark the source as processed and move on
        if query_success==False:
            print('No Spitzer data for '+name)
            already_processed_file = open(already_processed_path, 'a')
            already_processed_file.write(name+'\n')
            already_processed_file.close()
            time_list.append( time.time() )
            shutil.rmtree(gal_dir)
            gc.collect()
            continue

        # Save query result (removing pre-existing query file, if present)
        """
        query_filename = gal_dir+'Spitzer_Query.csv'
        query_obj.write(query_filename, format='csv')
        """

        # Establish if any data was found; if not, skip
        query_in = np.genfromtxt(query_filename, delimiter=',', names=True, dtype=None)
        if query_in.size==0:
            print('No Spitzer data for '+name)
            already_processed_file = open(already_processed_path, 'a')
            already_processed_file.write(name+'\n')
            already_processed_file.close()
            time_list.append( time.time() )
            shutil.rmtree(gal_dir)
            gc.collect()
            continue

        # Record which urls correspond to data in the desired bands (dealing with awkwardness for if there is only 1 entry, or silly massive files)
        Spitzer_urls = []
        Spitzer_bands = []
        if query_in.size==1:
            # Single-row genfromtxt result is 0-d: field access only, no indexing
            if query_in['accessWithAnc1Url']!='NONE' and query_in['filesize']<1E9:
                for band in bands_dict_req.keys():
                    if query_in['wavelength']==bands_dict_req[band]['instrument']+' '+band:
                        Spitzer_urls.append(query_in['accessWithAnc1Url'])
                        Spitzer_bands.append(band)
        else:
            for j in range(0, query_in.size):
                if query_in[j]['accessWithAnc1Url']!='NONE' and query_in[j]['filesize']<1E9:
                    for band in bands_dict_req.keys():
                        if query_in[j]['wavelength']==bands_dict_req[band]['instrument']+' '+band:
                            Spitzer_urls.append(query_in[j]['accessWithAnc1Url'])
                            Spitzer_bands.append(band)

        # In parallel, download and extract files
        os.chdir(gal_dir+'Raw')
        dl_pool = mp.Pool(processes=20)
        for j in range(0, len(Spitzer_urls)):
            tile_url = Spitzer_urls[j]
            tile_filename = gal_dir+'Raw/'+name+'_'+Spitzer_bands[j]+'_'+str(j)+'.zip'
            dl_pool.apply_async( Spitzer_wget, args=(tile_url, tile_filename,) )#Spitzer_wget(tile_url, tile_filename)
        dl_pool.close()
        dl_pool.join()
        [ os.remove(dl_zip) for dl_zip in os.listdir(gal_dir+'Raw/') if '.zip' in dl_zip ]

        # Copy files to relevant folders
        for band in bands_dict_req.keys():
            if os.path.exists(gal_dir+band+'/'):
                shutil.rmtree(gal_dir+band+'/')
            if os.path.exists(gal_dir+'Errors/'+band+'/'):
                shutil.rmtree(gal_dir+'Errors/'+band+'/')
            os.makedirs(gal_dir+band+'/')
            os.makedirs(gal_dir+'Errors/'+band+'/')
            channel = bands_dict_req[band]['channel']
            for dl_folder in os.listdir(gal_dir+'Raw/'):
                if os.path.exists(gal_dir+'Raw/'+dl_folder+'/'+channel+'/pbcd'):
                    for dl_file in os.listdir(gal_dir+'Raw/'+dl_folder+'/'+channel+'/pbcd'):
                        # _maic = image mosaic; _munc = uncertainty map
                        if '_maic.fits' in dl_file:
                            shutil.copy2(gal_dir+'Raw/'+dl_folder+'/'+channel+'/pbcd/'+dl_file, gal_dir+band)
                        if '_munc.fits' in dl_file:
                            shutil.copy2(gal_dir+'Raw/'+dl_folder+'/'+channel+'/pbcd/'+dl_file, gal_dir+band)
        shutil.rmtree(gal_dir+'Raw')

        # Check that the retrieved files provide actual coverage of the point in question
        coverage_bands = []
        for band in bands_dict_req.keys():
            montage_wrapper.commands.mImgtbl(gal_dir+band, gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', corners=True)
            if os.stat(gal_dir+band+'/'+band+'_Image_Metadata_Table.dat').st_size==0:
                continue
            montage_wrapper.commands_extra.mCoverageCheck(gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', gal_dir+band+'/'+band+'_Overlap_Check.dat', mode='point', ra=ra, dec=dec)
            # >3 lines means at least one data row beyond the table header
            if sum(1 for line in open(gal_dir+band+'/'+band+'_Overlap_Check.dat'))>3:
                coverage_bands.append(band)
        if len(coverage_bands)==0:
            print('No Spitzer data for '+name)
            already_processed_file = open(already_processed_path, 'a')
            already_processed_file.write(name+'\n')
            already_processed_file.close()
            time_list.append( time.time() )
            shutil.rmtree(gal_dir)
            gc.collect()
            continue

        # Loop over each band for coaddition
        for band in coverage_bands:
            print('Commencing Montaging and SWarping of '+name+'_Spitzer_'+band)
            os.chdir(gal_dir+band)
            os.mkdir(gal_dir+band+'/Diffs_Temp')
            os.mkdir(gal_dir+band+'/Backsub_Temp')
            os.mkdir(gal_dir+band+'/SWarp_Temp')

            # Create Montage FITS header
            location_string = str(ra)+' '+str(dec)
            pix_size = bands_dict_req[band]['pix_size']
            montage_wrapper.commands.mHdr(location_string, width, gal_dir+band+'/'+str(name)+'_HDR', pix_size=pix_size)

            # Use Montage wrapper to reproject all fits files to common projection, skipping if none actually overlap
            print('Performing reporjections for '+name+'_Spitzer_'+band+' maps')
            location_string = str(ra)+' '+str(dec)
            target_files = []
            proj_fail = 0
            [ target_files.append(target_file) for target_file in os.listdir(gal_dir+band) if '.fits' in target_file ]
            for target_file in target_files:
                try:
                    montage_wrapper.wrappers.reproject(os.path.join(gal_dir+band,target_file), os.path.join(gal_dir+band,target_file), header=gal_dir+band+'/'+str(name)+'_HDR', exact_size=True)
                except:
                    # Maps that cannot be reprojected are simply discarded
                    os.remove(os.path.join(gal_dir+band,target_file))
                    proj_fail += 1
            if proj_fail==len(target_files):
                print('No Spitzer coverage for '+name+' at '+band)
                continue

            # Loop over error maps and copy
            for listfile in os.listdir(gal_dir+band):
                if '_munc.fits' in listfile:
                    shutil.copy2(gal_dir+band+'/'+listfile, gal_dir+'Errors/'+band)

                    # Convert error maps to weight maps (weight = 1/uncertainty) for SWarp
                    unc_fitsdata = astropy.io.fits.open(gal_dir+band+'/'+listfile)
                    unc_image = unc_fitsdata[0].data
                    unc_header = unc_fitsdata[0].header
                    unc_fitsdata.close()
                    unc_image = unc_image**-1.0
                    unc_hdu = astropy.io.fits.PrimaryHDU(data=unc_image, header=unc_header)
                    unc_hdulist = astropy.io.fits.HDUList([unc_hdu])
                    unc_hdulist.writeto(gal_dir+band+'/SWarp_Temp/'+listfile.replace('_munc.fits','_maic.wgt.fits'), clobber=True)

                    # Delete old uncertainty map
                    os.remove(gal_dir+band+'/'+listfile)

            # If only one image file, proceed straight to co-adding; otherwise, commence background-matching
            mosaic_count = 0
            for listfile in os.listdir(gal_dir+band):
                if '.fits' in listfile:
                    mosaic_count += 1
            if mosaic_count==1:
                for listfile in os.listdir(gal_dir+band):
                    if '.fits' in listfile:
                        # NOTE(review): listfile is a bare name, so this relies
                        # on cwd being gal_dir+band (set above) — confirm
                        shutil.move(listfile, gal_dir+band+'/SWarp_Temp')
            if mosaic_count>1:

                # Use Montage wrapper to determine appropriate corrections for background matching
                print('Determining background corrections for '+name+'_Spitzer_'+band+' maps')
                montage_wrapper.commands.mImgtbl(gal_dir+band, gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', corners=True)
                montage_wrapper.commands.mOverlaps(gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', gal_dir+band+'/'+band+'_Image_Diffs_Table.dat')
                montage_wrapper.commands.mDiffExec(gal_dir+band+'/'+band+'_Image_Diffs_Table.dat', gal_dir+band+'/'+str(name)+'_HDR', gal_dir+band+'/Diffs_Temp', no_area=True)
                montage_wrapper.commands.mFitExec(gal_dir+band+'/'+band+'_Image_Diffs_Table.dat', gal_dir+band+'/'+band+'_Image_Fitting_Table.dat', gal_dir+band+'/Diffs_Temp')
                montage_wrapper.commands.mBgModel(gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', gal_dir+band+'/'+band+'_Image_Fitting_Table.dat', gal_dir+band+'/'+band+'_Image_Corrections_Table.dat', level_only=True, n_iter=16384)

                # Apply background corrections using Montage subprocess, with timeout handling
                print('Applying background corrections to '+name+'_Spitzer_'+band+' maps')
                mBgExec_fail_count = 0
                mBgExec_success = False
                mBgExec_uberfail = False
                while mBgExec_success==False:

                    # Attempt background-matching (own process group, so the
                    # whole group can be killed on timeout)
                    mBgExec_sp = subprocess.Popen( ['/home/soft/montage/bin/mBgExec', '-n', gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', gal_dir+band+'/'+band+'_Image_Corrections_Table.dat', gal_dir+band+'/SWarp_Temp' ], preexec_fn=os.setsid, stdout=subprocess.PIPE )
                    mBgExec_fail = False
                    seconds = 0
                    minutes_max = 45
                    while mBgExec_fail==False:
                        time.sleep(1)
                        mBgExec_stdout = mBgExec_sp.stdout.readline()
                        if mBgExec_sp.poll()==None:
                            seconds += 1
                        if 'Table has no data records' in mBgExec_stdout:
                            mBgExec_fail = True
                            mBgExec_fail_count += 1
                            break
                        if seconds>=(60*minutes_max):
                            mBgExec_fail = True
                            mBgExec_fail_count += 1
                            break
                        if mBgExec_sp.poll()!=None:
                            mBgExec_success = True
                            break

                    # Handle timeouts and other failures
                    if mBgExec_fail_count>0:
                        print('Background matching with Montage has failed '+str(mBgExec_fail_count)+' time(s); reattempting')
                    if mBgExec_fail==True and mBgExec_success==False and mBgExec_fail_count>=3:
                        mBgExec_uberfail = True
                        print('Background matching with Montage has failed 3 times; proceeding directly to co-additon')
                        try:
                            os.killpg( os.getpgid(mBgExec_sp.pid), 15 )
                        except:
                            # NOTE(review): bare string below is a no-op, not a
                            # print — presumably intended as a message; confirm
                            'Background matching subprocess appears to have imploded; no task to kill'
                        for listfile in os.listdir(gal_dir+band):
                            if '_maic.fits' in listfile:
                                shutil.move(listfile, gal_dir+band+'/SWarp_Temp')
                        break

            # Sort out daft filename differences between image maps and error maps
            for gal_file in os.listdir(gal_dir+band+'/SWarp_Temp'):
                os.rename(gal_dir+band+'/SWarp_Temp/'+gal_file, gal_dir+band+'/SWarp_Temp/'+gal_file.replace('_'+gal_file.split('_')[-2:][0], '') )

            # Perform least-squares plane fitting to match MIPS image levels
            if instrument=='MIPS':
                ChrisFuncs.Coadd.LevelFITS(gal_dir+band+'/SWarp_Temp', 'maic.fits', convfile_dir=False)

            # Use SWarp to co-add images weighted by their error maps
            print('Co-adding '+name+'_Spitzer_'+band+' maps')
            os.chdir(gal_dir+band+'/SWarp_Temp')
            os.system('swarp *_maic.fits -IMAGEOUT_NAME '+name+'_Spitzer_'+band+'_SWarp.fits -WEIGHT_SUFFIX .wgt.fits -COMBINE_TYPE WEIGHTED -COMBINE_BUFSIZE 2048 -GAIN_KEYWORD DIESPIZERDIE -RESCALE_WEIGHTS N -SUBTRACT_BACK N -RESAMPLE N -VMEM_MAX 4095 -MEM_MAX 4096 -WEIGHT_TYPE MAP_WEIGHT -NTHREADS 4 -VERBOSE_TYPE QUIET')
            Spitzer_SWarp_NaN(name+'_Spitzer_'+band+'_SWarp.fits')

            # Re-project finalised image map using Montage
            montage_wrapper.wrappers.reproject(gal_dir+band+'/SWarp_Temp/'+name+'_Spitzer_'+band+'_SWarp.fits', out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'.fits', header=gal_dir+band+'/'+str(name)+'_HDR', exact_size=True)

            # Compress finalised image map
            os.chdir(out_dir)
            if os.path.exists(out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'.fits.gz'):
                os.remove(out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'.fits.gz')
            os.system('gzip '+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'.fits')
            print('Completed Montaging and SWarping '+name+'_Spitzer_'+band+' image map')

            # Turn error maps into exposure time maps (exposure = uncertainty**-2)
            for listfile in os.listdir(gal_dir+'Errors/'+band):
                if '_munc.fits' in listfile:
                    unc_fitsdata = astropy.io.fits.open(gal_dir+'Errors/'+band+'/'+listfile)
                    unc_image = unc_fitsdata[0].data
                    unc_header = unc_fitsdata[0].header
                    unc_fitsdata.close()
                    unc_image = unc_image**-2.0
                    unc_hdu = astropy.io.fits.PrimaryHDU(data=unc_image, header=unc_header)
                    unc_hdulist = astropy.io.fits.HDUList([unc_hdu])
                    unc_hdulist.writeto(gal_dir+'Errors/'+band+'/'+listfile.replace('_munc.fits','_mexp.fits'), clobber=True)

            # Use Montage to add exposure time images
            print('Co-adding '+name+'_Spitzer_'+band+' error maps')
            target_files = []
            [ target_files.append(dir_file) for dir_file in os.listdir(gal_dir+'Errors/'+band) if 'mexp.fits' in dir_file ]
            for i in range(0, len(target_files)):
                exp_fitsdata = astropy.io.fits.open(gal_dir+'Errors/'+band+'/'+target_files[i])
                exp_image = exp_fitsdata[0].data
                exp_header = exp_fitsdata[0].header
                # NOTE(review): close() is being passed a path it does not
                # accept — presumably a stray paste; confirm
                exp_fitsdata.close(gal_dir+'Errors/'+band+'/'+target_files[i])
                if i==0:
                    add_image = np.zeros([ exp_image.shape[0], exp_image.shape[1] ])
                    add_header = exp_header.copy()
                # Accumulate only finite pixels into the running sum
                exp_good = np.where( np.isnan(exp_image)==False )
                add_image[exp_good] += exp_image[exp_good]
            add_hdu = astropy.io.fits.PrimaryHDU(data=add_image, header=add_header)
            add_hdulist = astropy.io.fits.HDUList([add_hdu])
            add_hdulist.writeto(gal_dir+'Errors/'+band+'/'+name+'_Spitzer_'+band+'_Exp_Add.fits', clobber=True)

            # Re-project final exposure map using Montage
            montage_wrapper.wrappers.reproject(gal_dir+'Errors/'+band+'/'+name+'_Spitzer_'+band+'_Exp_Add.fits', gal_dir+'Errors/'+band+'/'+name+'_Spitzer_'+band+'_Exp.fits', header=gal_dir+band+'/'+str(name)+'_HDR', exact_size=True)

            # Convert final exposure time map into error map (error = exposure**-0.5)
            unc_fitsdata = astropy.io.fits.open(gal_dir+'Errors/'+band+'/'+name+'_Spitzer_'+band+'_Exp.fits')
            unc_image = unc_fitsdata[0].data
            unc_header = unc_fitsdata[0].header
            unc_fitsdata.close()
            unc_image[ np.where(unc_image<0) ] = np.NaN
            unc_image = unc_image**-0.5
            unc_image[ np.where(unc_image==np.inf) ] = np.NaN
            unc_hdu = astropy.io.fits.PrimaryHDU(data=unc_image, header=unc_header)
            unc_hdulist = astropy.io.fits.HDUList([unc_hdu])
            unc_hdulist.writeto(out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'_Error.fits', clobber=True)

            # Compress finalised exposure time map
            os.chdir(out_dir)
            if os.path.exists(out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'_Error.fits.gz'):
                os.remove(out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'_Error.fits.gz')
            os.system('gzip '+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'_Error.fits')
            print('Completed Montaging '+name+'_Spitzer_'+band+' error map')

        # Record that processing of source has been completed
        already_processed_file = open(already_processed_path, 'a')
        already_processed_file.write(name+'\n')
        already_processed_file.close()

        # Clean memory, and return timings
        shutil.rmtree(gal_dir)
        time_list.append( time.time() )
        time_est = ChrisFuncs.TimeEst(time_list, len(name_list))
        time_file = open( os.path.join('/'.join(in_dir.split('/')[:-2]),'Estimated_Completion_Time.txt'), 'w')
        time_file.write(time_est)
        time_file.close()
        print('Estimated completion time: '+time_est)

    # Jubilate
    print('All done!')
|
from ._bounding_box import *
from ._user_IDs import *
from ._user_points import *
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# import models/tasks to register them
from . import dummy_dataset, dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gym env tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import gym
from gym.spaces import Box
from gym.spaces import Discrete
import numpy as np
from tensor2tensor.data_generators import gym_env
import tensorflow as tf
class TestEnv(gym.Env):
    """Minimal deterministic environment for batch-env tests.

    Odd frames are "done".
    """

    action_space = Discrete(1)
    observation_space = Box(
        low=0, high=255, shape=(2, 6, 3), dtype=np.uint8
    )

    def __init__(self):
        self._frame_index = 0

    def _generate_ob(self):
        # Every observation is an all-zero frame of the declared shape/dtype.
        space = self.observation_space
        return np.zeros(space.shape, space.dtype)

    def step(self, action):
        is_done = self._frame_index % 2 == 1
        self._frame_index += 1
        # Reward is +5 on terminal frames, -5 otherwise.
        return (self._generate_ob(), 5 if is_done else -5, is_done, {})

    def reset(self):
        return self._generate_ob()
class GymEnvTest(tf.test.TestCase):
    """Tests for the gym_env.T2TGymEnv batched environment wrapper."""

    @classmethod
    def setUpClass(cls):
        # Start from an empty temp dir; tf.test.get_temp_dir() may be reused
        # between runs.
        cls.out_dir = tf.test.get_temp_dir()
        shutil.rmtree(cls.out_dir)
        os.mkdir(cls.out_dir)

    def init_batch_and_play(self, env_lambda, n_steps=1, **kwargs):
        """Build a 2-env batch and step it n_steps times with action 0.

        Returns (env, observations, rewards); kwargs forward to T2TGymEnv.
        """
        raw_envs = [env_lambda(), env_lambda()]
        env = gym_env.T2TGymEnv(raw_envs, **kwargs)
        obs = list()
        rewards = list()
        obs.append(env.reset())
        for _ in range(n_steps):
            step_obs, step_rewards, dones = env.step(actions=[0, 0])
            obs.append(step_obs)
            rewards.append(step_rewards)
            # Reset any sub-env that reported done so stepping can continue.
            for (i, done) in enumerate(dones):
                if done:
                    env.reset([i])
        return env, obs, rewards

    def test_generates(self):
        # This test needs base env which outputs done after two steps.
        env_lambda = TestEnv
        env, _, _ = self.init_batch_and_play(env_lambda, n_steps=20)
        env.generate_data(self.out_dir, tmp_dir=None)
        # Some TFRecord output must have been produced.
        filenames = os.listdir(self.out_dir)
        self.assertTrue(filenames)
        path = os.path.join(self.out_dir, filenames[0])
        records = list(tf.python_io.tf_record_iterator(path))
        self.assertTrue(records)

    def test_clipping(self):
        # This test needs base env with rewards out of [-1,1] range.
        env_lambda = TestEnv
        # TODO(lukaszkaiser): turn clipping on by default after refactor.
        # _, _, rewards = self.init_batch_and_play(env_lambda, n_steps=2)
        # self.assertTrue(np.max(rewards) == 1)
        # self.assertTrue(np.min(rewards) == -1)
        _, _, unclipped_rewards = self.init_batch_and_play(env_lambda, n_steps=2)
        self.assertTrue(np.max(unclipped_rewards) > 1)
        self.assertTrue(np.min(unclipped_rewards) < -1)

    def test_resize(self):
        env_lambda = TestEnv
        orig_env = env_lambda()
        resize_height_factor = 2
        resize_width_factor = 3
        orig_height, orig_width = orig_env.observation_space.shape[:2]
        env, obs, _ = self.init_batch_and_play(
            env_lambda, n_steps=1,
            resize_height_factor=resize_height_factor,
            resize_width_factor=resize_width_factor)
        # Every observation should be downscaled by the requested factors.
        for obs_batch in obs:
            ob = obs_batch[0]
            self.assertEqual(ob.shape, env.observation_space.shape)
            height, width = ob.shape[:2]
            self.assertEqual(height, orig_height // resize_height_factor)
            self.assertEqual(width, orig_width // resize_width_factor)

    def assert_channels(self, env, obs, n_channels):
        # Channel count must agree between the space, the env, and the frames.
        self.assertEqual(env.observation_space.shape[2], n_channels)
        self.assertEqual(env.num_channels, n_channels)
        for obs_batch in obs:
            ob = obs_batch[0]
            self.assertEqual(ob.shape[2], n_channels)

    def test_channels(self):
        env_lambda = TestEnv
        env, obs, _ = self.init_batch_and_play(env_lambda, grayscale=True)
        self.assert_channels(env, obs, n_channels=1)
        env, obs, _ = self.init_batch_and_play(env_lambda, grayscale=False)
        self.assert_channels(env, obs, n_channels=3)
# Run the test suite when this file is invoked directly.
if __name__ == "__main__":
    tf.test.main()
|
#!/usr/bin/python3
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# See LICENSE for license information.
import argparse
import collections
import datetime
import re
import sys
# Timestamp styles recognized at the start of log lines; group 1 captures the
# full timestamp text in each case.
cloud_init_pattern = re.compile(r'(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d) ')  # e.g. "2020-01-02 03:04:05,678 "
iso8601_pattern = re.compile(r'(\d\d\d\d/\d\d/\d\d \d\d:\d\d:\d\d\.\d+) ')  # e.g. "2020/01/02 03:04:05.678 "
timestamp_pattern = re.compile(r'((\d+)(\.\d+)?) ')  # raw epoch seconds, e.g. "1581030002.134 "
def make_argument_parser():
    """
    Construct the command-line parser for the log-merging tool.

    :return: Parser for command line
    :rtype argparse.ArgumentParser
    """
    arg_parser = argparse.ArgumentParser(description="Merge multiple log files, from different sources, preserving order")
    arg_parser.add_argument("-p", "--prefix", nargs="+", help="List of prefixes to be applied to log entries")
    arg_parser.add_argument("--no-prefix", action="store_true", help="Suppress automatic generation of prefixes")
    arg_parser.add_argument("-r", "--regex", help="Regex to match and capture the entire timestamp")
    arg_parser.add_argument("-f", "--format", help="strptime format to convert the captured timestamp")
    # arg_parser.add_argument("--colors", help="List of colors for each log", required=False, nargs="+")
    arg_parser.add_argument("-c", "--colorize", required=False, action="store_true", help="Color-code log output")
    arg_parser.add_argument('logfiles', nargs='+')
    return arg_parser
def parse_datetime(line):
    """
    Extract the leading timestamp of a log line as a datetime.

    Tries the user-supplied pattern first (when one was configured on the
    command line), then the built-in ISO-8601-ish, cloud-init, and raw epoch
    formats. Returns None when no format matches.

    :param line: The log line to be parsed
    :return: Either a datetime or None
    """
    if custom_pattern:
        custom_match = custom_pattern.match(line)
        if custom_match:
            return datetime.datetime.strptime(custom_match.group(1), custom_format)
    # The two strptime-based formats share identical handling.
    for pattern, fmt in ((iso8601_pattern, '%Y/%m/%d %H:%M:%S.%f'),
                         (cloud_init_pattern, '%Y-%m-%d %H:%M:%S,%f')):
        found = pattern.match(line)
        if found:
            return datetime.datetime.strptime(found.group(1), fmt)
    # Plain numeric timestamps are interpreted as UTC epoch seconds.
    epoch_match = timestamp_pattern.match(line)
    if epoch_match:
        return datetime.datetime.utcfromtimestamp(float(epoch_match.group(1)))
    return None
class Logfile:
    """A single log file exposing its entries (and their timestamps) in order."""

    def _advance(self):
        """
        Read and accumulate saved line plus continuation lines (if any). When a line beginning with a timestamp is
        found, save that (new initial) line and the timestamp, then return the flattened accumulated array of strings.

        Invariant: All lines of the current entry have been read. The instance knows the timestamp of the *next*
        log entry, and has already read the first line of that entry, *or* EOF has been reached and the appropriate
        internal marker has been set. The instance is prepared for either timestamp() or entry() to be called.
        :rtype str[]
        """
        results = [self._line]
        while True:
            line = self._f.readline()
            if line == '':
                self._eof = True
                return results
            timestamp = parse_datetime(line)
            if timestamp is not None:
                # Start of the next entry: stash it and return the completed one.
                self._line = line
                self._timestamp = timestamp
                return results
            # No timestamp: continuation line of the current entry.
            results.append(line)

    def __init__(self, path):
        self._f = open(path, "r")
        self._eof = False
        # datetime.max makes an unstarted/empty log sort after all real entries.
        self._timestamp = datetime.datetime.max
        self._line = ''
        self._advance()  # Ignoring any untimestamped lines at the beginning of the log

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def timestamp(self):
        # Timestamp of the next unread entry; raises EOFError when exhausted.
        if self._eof:
            raise EOFError
        return self._timestamp

    def entry(self):
        # Next entry as a list of one or more lines; raises EOFError at EOF.
        if self._eof:
            raise EOFError
        return self._advance()

    def close(self):
        self._f.close()
        self._f = None
        self._line = ''
        self._eof = True
class LogSet:
    """A collection of Logfile objects merged in global timestamp order."""

    def __init__(self, pathnames):
        # Map each pathname to its open Logfile.
        self._logs = {pathname: Logfile(pathname) for pathname in pathnames}

    def next_entry(self):
        """
        Find the earliest entry in the set of logs, advancing that one logfile to the next entry.

        :return: Pathname of the logfile, the entry as an array of one or more lines.
        :rtype str, str[]
        """
        if not self._logs:
            raise EOFError
        exhausted = []
        low_path = ''
        low_timestamp = datetime.datetime.max
        for path, log in self._logs.items():
            try:
                stamp = log.timestamp()
            except EOFError:
                exhausted.append(path)
                continue
            # '<=' keeps the original tie-breaking: later files win ties.
            if stamp <= low_timestamp:
                low_path = path
                low_timestamp = stamp
        # Drop any logs that reported EOF this pass.
        for path in exhausted:
            self._logs[path].close()
            del self._logs[path]
        if not self._logs:
            # Last log hit EOF and was deleted
            raise EOFError
        return low_path, self._logs[low_path].entry()
def render(line, prefix_arg=None, color=-1):
    """
    Turn a line of text into a ready-to-display string.
    If prefix_arg is set, prepend it to the line.
    If color is set, change to that color at the beginning of the rendered line and change out before the newline (if
    there is a newline).
    :param str line: Output line to be rendered
    :param str prefix_arg: Optional prefix to be stuck at the beginning of the rendered line
    :param int color: If 0-255, insert escape codes to display the line in that color
    :return: The rendered line.
    :rtype: str
    """
    pretext = '' if prefix_arg is None else prefix_arg
    if -1 < color < 256:
        # 256-color SGR escape: \x1b[38;5;<n>m selects foreground color n.
        pretext = "\x1b[38;5;{}m{}".format(str(color), pretext)
        # endswith() is safe on an empty line; the original indexed line[-1]
        # and raised IndexError for "".
        if line.endswith("\n"):
            # Keep the color reset *before* the newline so it does not bleed
            # into the next terminal row.
            line = "{}\x1b[0m\n".format(line[:-1])
        else:
            line = "{}\x1b[0m".format(line)
    return "{}{}".format(pretext, line)
def main():
    """Parse arguments, open every logfile, and print their entries merged in timestamp order."""
    args = make_argument_parser().parse_args()
    if args.logfiles is None or len(args.logfiles) < 2:
        print("Requires at least two logfiles")
        exit(1)
    elif bool(args.format) != bool(args.regex):
        # A custom timestamp needs both the regex (to find it) and the
        # strptime format (to parse it).
        print("Requires both timestamp regex and format or none")
        exit(1)
    # parse_datetime() consults these module-level settings first.
    global custom_pattern, custom_format
    if args.regex:
        # unicode_escape lets the user type escapes such as \d or \t on the
        # command line without shell-level quoting tricks.
        custom_pattern = re.compile(args.regex.encode().decode('unicode_escape'))
        custom_format = args.format
    else:
        custom_pattern, custom_format = None, None
    prefixes = collections.defaultdict(lambda: '')
    # Only colorize when writing to a real terminal.
    colorize = args.colorize if sys.stdout.isatty() else False
    colors = collections.defaultdict(lambda: 15 if colorize else -1)
    index = 1
    limit = len(args.prefix) if args.prefix else 0
    no_prefix = args.no_prefix or (colorize and limit == 0)
    for path in args.logfiles:
        if not no_prefix:
            # Use the user-supplied prefix when available, else "logN ".
            prefixes[path] = "{} ".format(args.prefix[index-1]) if index <= limit else "log{} ".format(index)
        if colorize:
            colors[path] = index
        index += 1
    merger = LogSet(args.logfiles)
    while True:
        try:
            path, entry = merger.next_entry()
        except EOFError:
            exit(0)
        for line in entry:
            print(render(line, prefixes[path], colors[path]), end='')


if __name__ == "__main__":
    # Guard added so the module can be imported (e.g. for testing) without
    # immediately running the CLI.
    main()
# :vi ai sw=4 expandtab ts=4 :
|
import os
import io
import sys
from setuptools import setup, find_packages
from pkg_resources import parse_version, get_distribution, DistributionNotFound
import subprocess
import distutils.command.clean
import distutils.spawn
import glob
import shutil
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from torch.utils.hipify import hipify_python
def read(*names, **kwargs):
    """Return the text of a file located relative to this setup script.

    *names* are path components joined under this file's directory;
    ``encoding`` may be passed as a keyword (defaults to utf8).
    """
    path = os.path.join(os.path.dirname(__file__), *names)
    encoding = kwargs.get("encoding", "utf8")
    with io.open(path, encoding=encoding) as fp:
        return fp.read()
def get_dist(pkgname):
    """Return the installed distribution for *pkgname*, or None when it is absent."""
    dist = None
    try:
        dist = get_distribution(pkgname)
    except DistributionNotFound:
        pass  # Package not installed; leave dist as None.
    return dist
# Determine the package version: read the base version from version.txt and,
# unless BUILD_VERSION pins it explicitly, append the short git hash of HEAD.
cwd = os.path.dirname(os.path.abspath(__file__))
version_txt = os.path.join(cwd, 'version.txt')
with open(version_txt, 'r') as f:
    version = f.readline().strip()
sha = 'Unknown'
package_name = 'torchvision'
try:
    sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
except Exception:
    # Not a git checkout (e.g. building from an sdist); keep sha == 'Unknown'.
    pass
if os.getenv('BUILD_VERSION'):
    # CI builds pin the exact version string via the environment.
    version = os.getenv('BUILD_VERSION')
elif sha != 'Unknown':
    version += '+' + sha[:7]
def write_version_file():
    """Generate torchvision/version.py recording the version, git hash, and CUDA probe."""
    version_path = os.path.join(cwd, 'torchvision', 'version.py')
    contents = [
        "__version__ = '{}'\n".format(version),
        "git_version = {}\n".format(repr(sha)),
        "from torchvision.extension import _check_cuda_version\n",
        "if _check_cuda_version() > 0:\n",
        "    cuda = _check_cuda_version()\n",
    ]
    with open(version_path, 'w') as f:
        f.writelines(contents)
# Runtime requirements.  PYTORCH_VERSION (set by CI) pins the torch
# dependency to an exact release.
pytorch_dep = 'torch'
if os.getenv('PYTORCH_VERSION'):
    pytorch_dep += "==" + os.getenv('PYTORCH_VERSION')
requirements = [
    'numpy',
    pytorch_dep,
]
# Prefer pillow-simd when it is already installed; otherwise plain pillow.
pillow_ver = ' >= 4.1.1'
pillow_req = 'pillow-simd' if get_dist('pillow-simd') is not None else 'pillow'
requirements.append(pillow_req + pillow_ver)
def find_library(name, vision_include):
    """Locate the header (and, for conda, the lib/include folders) for a C library.

    Probes, in order: the TORCHVISION_INCLUDE paths and the package folder,
    a conda-build prefix, an active Anaconda environment, and finally the
    standard /usr(/local)/include locations on Linux.

    Args:
        name: base name of the library header, without the ``.h`` suffix
            (e.g. ``'jpeglib'``).
        vision_include: extra include folders to search first.

    Returns:
        (library_found, conda_installed, include_folder, lib_folder) —
        include_folder/lib_folder are None unless a conda layout was probed.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    # BUILD_PREFIX is set when running inside conda-build.
    build_prefix = os.environ.get('BUILD_PREFIX', None)
    is_conda_build = build_prefix is not None
    library_found = False
    conda_installed = False
    lib_folder = None
    include_folder = None
    library_header = '{0}.h'.format(name)
    # Lookup in TORCHVISION_INCLUDE or in the package file
    package_path = [os.path.join(this_dir, 'torchvision')]
    for folder in vision_include + package_path:
        candidate_path = os.path.join(folder, library_header)
        library_found = os.path.exists(candidate_path)
        if library_found:
            break
    if not library_found:
        print('Running build on conda-build: {0}'.format(is_conda_build))
        if is_conda_build:
            # Add conda headers/libraries
            if os.name == 'nt':
                # Windows conda environments keep headers/libs under Library\.
                build_prefix = os.path.join(build_prefix, 'Library')
            include_folder = os.path.join(build_prefix, 'include')
            lib_folder = os.path.join(build_prefix, 'lib')
            library_header_path = os.path.join(
                include_folder, library_header)
            library_found = os.path.isfile(library_header_path)
            conda_installed = library_found
        else:
            # Check if using Anaconda to produce wheels
            conda = distutils.spawn.find_executable('conda')
            is_conda = conda is not None
            print('Running build on conda: {0}'.format(is_conda))
            if is_conda:
                # Derive the environment root from the interpreter location.
                python_executable = sys.executable
                py_folder = os.path.dirname(python_executable)
                if os.name == 'nt':
                    env_path = os.path.join(py_folder, 'Library')
                else:
                    env_path = os.path.dirname(py_folder)
                lib_folder = os.path.join(env_path, 'lib')
                include_folder = os.path.join(env_path, 'include')
                library_header_path = os.path.join(
                    include_folder, library_header)
                library_found = os.path.isfile(library_header_path)
                conda_installed = library_found
        if not library_found:
            # Last resort: system-wide include directories (Linux only).
            if sys.platform == 'linux':
                library_found = os.path.exists('/usr/include/{0}'.format(
                    library_header))
                library_found = library_found or os.path.exists(
                    '/usr/local/include/{0}'.format(library_header))
    return library_found, conda_installed, include_folder, lib_folder
def get_extensions():
    """Assemble the list of C++/CUDA extension modules to build.

    Always builds ``torchvision._C`` (core ops).  Conditionally adds:
    C++ model tests (WITH_CPP_MODELS_TEST=1), ``torchvision.image`` when
    libpng/libjpeg are found, and ``torchvision.video_reader`` when ffmpeg
    headers are found.  CUDA/HIP sources are included when a GPU toolchain
    is available or FORCE_CUDA=1.

    Returns:
        list of setuptools Extension objects for setup(ext_modules=...).
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, 'torchvision', 'csrc')
    main_file = glob.glob(os.path.join(extensions_dir, '*.cpp')) + glob.glob(os.path.join(extensions_dir, 'ops',
                                                                                          '*.cpp'))
    source_cpu = glob.glob(os.path.join(extensions_dir, 'ops', 'autograd', '*.cpp')) + glob.glob(
        os.path.join(extensions_dir, 'ops', 'cpu', '*.cpp'))
    is_rocm_pytorch = False
    if torch.__version__ >= '1.5':
        # ROCM_HOME only exists in cpp_extension for torch >= 1.5.
        from torch.utils.cpp_extension import ROCM_HOME
        is_rocm_pytorch = True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False
    if is_rocm_pytorch:
        # Translate the CUDA sources to HIP in-place, then pick up the
        # generated .hip files.
        hipify_python.hipify(
            project_directory=this_dir,
            output_directory=this_dir,
            includes="torchvision/csrc/ops/cuda/*",
            show_detailed=True,
            is_pytorch_extension=True,
        )
        source_cuda = glob.glob(os.path.join(extensions_dir, 'ops', 'hip', '*.hip'))
        # Copy over additional files (headers are not hipified automatically).
        for file in glob.glob(r"torchvision/csrc/ops/cuda/*.h"):
            shutil.copy(file, "torchvision/csrc/ops/hip")
    else:
        source_cuda = glob.glob(os.path.join(extensions_dir, 'ops', 'cuda', '*.cu'))
    # Autocast wrappers are plain C++ but only built alongside GPU support.
    source_cuda += glob.glob(os.path.join(extensions_dir, 'ops', 'autocast', '*.cpp'))
    sources = main_file + source_cpu
    extension = CppExtension
    compile_cpp_tests = os.getenv('WITH_CPP_MODELS_TEST', '0') == '1'
    if compile_cpp_tests:
        test_dir = os.path.join(this_dir, 'test')
        models_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'models')
        test_file = glob.glob(os.path.join(test_dir, '*.cpp'))
        source_models = glob.glob(os.path.join(models_dir, '*.cpp'))
        test_file = [os.path.join(test_dir, s) for s in test_file]
        source_models = [os.path.join(models_dir, s) for s in source_models]
        tests = test_file + source_models
        tests_include_dirs = [test_dir, models_dir]
    define_macros = []
    extra_compile_args = {
        'cxx': []
    }
    if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) \
            or os.getenv('FORCE_CUDA', '0') == '1':
        extension = CUDAExtension
        sources += source_cuda
        if not is_rocm_pytorch:
            define_macros += [('WITH_CUDA', None)]
            nvcc_flags = os.getenv('NVCC_FLAGS', '')
            if nvcc_flags == '':
                nvcc_flags = []
            else:
                nvcc_flags = nvcc_flags.split(' ')
        else:
            define_macros += [('WITH_HIP', None)]
            nvcc_flags = []
        extra_compile_args['nvcc'] = nvcc_flags
    if sys.platform == 'win32':
        define_macros += [('torchvision_EXPORTS', None)]
        extra_compile_args['cxx'].append('/MP')  # parallel compilation on MSVC
    elif sys.platform == 'linux':
        extra_compile_args['cxx'].append('-fopenmp')
    debug_mode = os.getenv('DEBUG', '0') == '1'
    if debug_mode:
        print("Compile in debug mode")
        extra_compile_args['cxx'].append("-g")
        extra_compile_args['cxx'].append("-O0")
        if "nvcc" in extra_compile_args:
            # we have to remove "-OX" and "-g" flag if exists and append
            nvcc_flags = extra_compile_args["nvcc"]
            extra_compile_args["nvcc"] = [
                f for f in nvcc_flags if not ("-O" in f or "-g" in f)
            ]
            extra_compile_args["nvcc"].append("-O0")
            extra_compile_args["nvcc"].append("-g")
    sources = [os.path.join(extensions_dir, s) for s in sources]
    include_dirs = [extensions_dir]
    ext_modules = [
        extension(
            'torchvision._C',
            sorted(sources),
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
    if compile_cpp_tests:
        ext_modules.append(
            extension(
                'torchvision._C_tests',
                tests,
                include_dirs=tests_include_dirs,
                define_macros=define_macros,
                extra_compile_args=extra_compile_args,
            )
        )
    # ------------------- Torchvision extra extensions ------------------------
    vision_include = os.environ.get('TORCHVISION_INCLUDE', None)
    vision_library = os.environ.get('TORCHVISION_LIBRARY', None)
    vision_include = (vision_include.split(os.pathsep)
                      if vision_include is not None else [])
    vision_library = (vision_library.split(os.pathsep)
                      if vision_library is not None else [])
    include_dirs += vision_include
    library_dirs = vision_library
    # Image reading extension
    image_macros = []
    image_include = [extensions_dir]
    image_library = []
    image_link_flags = []
    # Locating libPNG: libpng-config exists on Linux/Mac, pngfix on Windows.
    libpng = distutils.spawn.find_executable('libpng-config')
    pngfix = distutils.spawn.find_executable('pngfix')
    png_found = libpng is not None or pngfix is not None
    print('PNG found: {0}'.format(png_found))
    if png_found:
        if libpng is not None:
            # Linux / Mac
            png_version = subprocess.run([libpng, '--version'],
                                         stdout=subprocess.PIPE)
            png_version = png_version.stdout.strip().decode('utf-8')
            print('libpng version: {0}'.format(png_version))
            png_version = parse_version(png_version)
            if png_version >= parse_version("1.6.0"):
                print('Building torchvision with PNG image support')
                png_lib = subprocess.run([libpng, '--libdir'],
                                         stdout=subprocess.PIPE)
                png_lib = png_lib.stdout.strip().decode('utf-8')
                if 'disabled' not in png_lib:
                    image_library += [png_lib]
                png_include = subprocess.run([libpng, '--I_opts'],
                                             stdout=subprocess.PIPE)
                png_include = png_include.stdout.strip().decode('utf-8')
                # --I_opts prints "-I<path>"; keep only the path part.
                _, png_include = png_include.split('-I')
                print('libpng include path: {0}'.format(png_include))
                image_include += [png_include]
                image_link_flags.append('png')
            else:
                print('libpng installed version is less than 1.6.0, '
                      'disabling PNG support')
                png_found = False
        else:
            # Windows: derive lib/include from the pngfix executable location.
            png_lib = os.path.join(
                os.path.dirname(os.path.dirname(pngfix)), 'lib')
            png_include = os.path.join(os.path.dirname(
                os.path.dirname(pngfix)), 'include', 'libpng16')
            image_library += [png_lib]
            image_include += [png_include]
            image_link_flags.append('libpng')
    # Locating libjpeg
    (jpeg_found, jpeg_conda,
     jpeg_include, jpeg_lib) = find_library('jpeglib', vision_include)
    print('JPEG found: {0}'.format(jpeg_found))
    image_macros += [('PNG_FOUND', str(int(png_found)))]
    image_macros += [('JPEG_FOUND', str(int(jpeg_found)))]
    if jpeg_found:
        print('Building torchvision with JPEG image support')
        image_link_flags.append('jpeg')
        if jpeg_conda:
            image_library += [jpeg_lib]
            image_include += [jpeg_include]
    image_path = os.path.join(extensions_dir, 'io', 'image')
    image_src = glob.glob(os.path.join(image_path, '*.cpp')) + glob.glob(os.path.join(image_path, 'cpu', '*.cpp'))
    if png_found or jpeg_found:
        ext_modules.append(extension(
            'torchvision.image',
            image_src,
            include_dirs=image_include + include_dirs + [image_path],
            library_dirs=image_library + library_dirs,
            define_macros=image_macros,
            libraries=image_link_flags,
            extra_compile_args=extra_compile_args
        ))
    ffmpeg_exe = distutils.spawn.find_executable('ffmpeg')
    has_ffmpeg = ffmpeg_exe is not None
    print("FFmpeg found: {}".format(has_ffmpeg))
    if has_ffmpeg:
        ffmpeg_libraries = {
            'libavcodec',
            'libavformat',
            'libavutil',
            'libswresample',
            'libswscale'
        }
        ffmpeg_bin = os.path.dirname(ffmpeg_exe)
        ffmpeg_root = os.path.dirname(ffmpeg_bin)
        ffmpeg_include_dir = os.path.join(ffmpeg_root, 'include')
        ffmpeg_library_dir = os.path.join(ffmpeg_root, 'lib')
        # NOTE(review): assumes gcc is on PATH; find_executable may return
        # None, which would make the subprocess.run below fail — confirm.
        gcc = distutils.spawn.find_executable('gcc')
        platform_tag = subprocess.run(
            [gcc, '-print-multiarch'], stdout=subprocess.PIPE)
        platform_tag = platform_tag.stdout.strip().decode('utf-8')
        if platform_tag:
            # Most probably a Debian-based distribution
            ffmpeg_include_dir = [
                ffmpeg_include_dir,
                os.path.join(ffmpeg_include_dir, platform_tag)
            ]
            ffmpeg_library_dir = [
                ffmpeg_library_dir,
                os.path.join(ffmpeg_library_dir, platform_tag)
            ]
        else:
            ffmpeg_include_dir = [ffmpeg_include_dir]
            ffmpeg_library_dir = [ffmpeg_library_dir]
        has_ffmpeg = True
        for library in ffmpeg_libraries:
            library_found = False
            for search_path in ffmpeg_include_dir + include_dirs:
                full_path = os.path.join(search_path, library, '*.h')
                library_found |= len(glob.glob(full_path)) > 0
            if not library_found:
                # Bug fix: the original printed the literal '{0}' because
                # .format(library) was missing.
                print('{0} header files were not found, disabling ffmpeg '
                      'support'.format(library))
                has_ffmpeg = False
    if has_ffmpeg:
        print("ffmpeg include path: {}".format(ffmpeg_include_dir))
        print("ffmpeg library_dir: {}".format(ffmpeg_library_dir))
        # TorchVision base decoder + video reader
        video_reader_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'io', 'video_reader')
        video_reader_src = glob.glob(os.path.join(video_reader_src_dir, "*.cpp"))
        base_decoder_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'io', 'decoder')
        base_decoder_src = glob.glob(
            os.path.join(base_decoder_src_dir, "*.cpp"))
        # Torchvision video API
        videoapi_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'io', 'video')
        videoapi_src = glob.glob(os.path.join(videoapi_src_dir, "*.cpp"))
        # exclude tests
        base_decoder_src = [x for x in base_decoder_src if '_test.cpp' not in x]
        combined_src = video_reader_src + base_decoder_src + videoapi_src
        ext_modules.append(
            CppExtension(
                'torchvision.video_reader',
                combined_src,
                include_dirs=[
                    base_decoder_src_dir,
                    video_reader_src_dir,
                    videoapi_src_dir,
                    extensions_dir,
                    *ffmpeg_include_dir,
                    *include_dirs
                ],
                library_dirs=ffmpeg_library_dir + library_dirs,
                libraries=[
                    'avcodec',
                    'avformat',
                    'avutil',
                    'swresample',
                    'swscale',
                ],
                extra_compile_args=["-std=c++14"] if os.name != 'nt' else ['/std:c++14', '/MP'],
                extra_link_args=["-std=c++14" if os.name != 'nt' else '/std:c++14'],
            )
        )
    return ext_modules
class clean(distutils.command.clean.clean):
    """`setup.py clean` variant that also removes everything matched by .gitignore."""

    def run(self):
        # Collect the non-empty .gitignore patterns, then delete whatever
        # they match in the working tree.
        with open('.gitignore', 'r') as f:
            patterns = [line for line in f.read().split('\n') if line]
        for pattern in patterns:
            for match in glob.glob(pattern):
                try:
                    os.remove(match)
                except OSError:
                    # Directories (or files that vanished): fall back to rmtree.
                    shutil.rmtree(match, ignore_errors=True)
        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)
if __name__ == "__main__":
    print("Building wheel {}-{}".format(package_name, version))
    # Materialize torchvision/version.py before packaging so the installed
    # package can report its own version and git revision.
    write_version_file()
    with open('README.rst') as f:
        readme = f.read()
    setup(
        # Metadata
        name=package_name,
        version=version,
        author='PyTorch Core Team',
        author_email='soumith@pytorch.org',
        url='https://github.com/pytorch/vision',
        description='image and video datasets and models for torch deep learning',
        long_description=readme,
        license='BSD',
        # Package info
        packages=find_packages(exclude=('test',)),
        # Ship prebuilt native libraries alongside the Python sources.
        package_data={
            package_name: ['*.dll', '*.dylib', '*.so']
        },
        zip_safe=False,
        install_requires=requirements,
        extras_require={
            "scipy": ["scipy"],
        },
        ext_modules=get_extensions(),
        cmdclass={
            # no_python_abi_suffix keeps the extension filenames stable.
            'build_ext': BuildExtension.with_options(no_python_abi_suffix=True),
            'clean': clean,
        }
    )
|
import setuptools
# The PyPI long description is the project README, rendered as Markdown.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name="PyBingTiles",
    version="0.0.1",
    # Geospatial stack used to build tiles and generate/plot shapefiles.
    install_requires=[
        "pandas",
        "geopandas",
        "numpy",
        "shapely",
        "contextily"
    ],
    author="Shoichi Yip",
    author_email="shoichi.yip@gmail.com",
    description="A little tool in order to deal with Bing Tiles and to generate Shapefiles from them",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/shoyip/pybingtiles",
    project_urls={
        "Bug Tracker": "https://github.com/shoyip/pybingtiles/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=setuptools.find_packages(),
    python_requires=">=3.6"
)
|
# http://www.ster.kuleuven.be/~pieterd/python/html/plotting/mayavi_example.html
import numpy as np
from numpy import sin,cos,pi,sqrt # makes the code more readable
from scipy.optimize import newton
import sys
#mayavi 3d
import pylab as plt
from mayavi import mlab # or from enthought.mayavi import mlab
#matplotlib 3d
#import matplotlib.pyplot as plt
#from matplotlib import cm
#from mpl_toolkits.mplot3d import Axes3D
#fig = plt.figure(figsize=(8.0, 6.0)) #single column fig
#fig = plt.figure(figsize=(3.54, 2.19)) #single column fig
#fig = plt.figure(figsize=(7.48, 2.5)) #two column figure
#ax = fig.add_subplot(111, projection='3d')
#ax.set_xlim(-1.5, 1.5)
#ax.set_ylim(-1.5, 1.5)
#ax.set_zlim(-1.5, 1.5)
#plt.rc('font', family='serif')
#plt.rc('xtick', labelsize=5)
#plt.rc('ytick', labelsize=5)
#plt.rc('axes', labelsize=5)
def roche(r, theta, phi, pot, q):
    """Residual of the dimensionless Roche potential at spherical point (r, theta, phi).

    Returns ``pot`` minus the Roche potential for mass ratio ``q``, so a root
    in ``r`` (at fixed angles) lies on the equipotential surface of value
    ``pot``.  Suitable as the callable for a 1-D root finder such as
    scipy.optimize.newton.
    """
    lam = r * cos(phi) * sin(theta)
    nu = cos(theta)
    point_mass = 1. / r
    companion = q * (1. / sqrt(1. - 2 * lam + r ** 2) - lam)
    centrifugal = 0.5 * (q + 1) * r ** 2 * (1 - nu ** 2)
    return pot - (point_mass + companion + centrifugal)
# Sample the whole sphere: 75 polar x 150 azimuthal points (an imaginary
# step count in mgrid gives that many inclusive, evenly spaced samples).
theta,phi = np.mgrid[0:np.pi:75j,-0.5*pi:1.5*np.pi:150j]
pot1,pot2 = 2.88,10.
q = 0.5
r_init = 1e-5
# Bug fix: the two bare Python-2 print statements below were syntax errors
# under Python 3; converted to print() calls.
print("Newton iteration...")
# Solve roche(r)=0 along every ray; the secondary uses the inverse mass ratio.
r1 = [newton(roche,r_init,args=(th,ph,pot1,q)) for th,ph in zip(theta.ravel(),phi.ravel())]
r2 = [newton(roche,r_init,args=(th,ph,pot2,1./q)) for th,ph in zip(theta.ravel(),phi.ravel())]
r1 = np.array(r1).reshape(theta.shape)
r2 = np.array(r2).reshape(theta.shape)
#change to cartesian
x1 = r1*sin(theta)*cos(phi)
y1 = r1*sin(theta)*sin(phi)
z1 = r1*cos(theta)
x2 = r2*np.sin(theta)*np.cos(phi)
y2 = r2*np.sin(theta)*np.sin(phi)
z2 = r2*np.cos(theta)
#plot
print("plotting...")
mlab.figure()
mlab.mesh(x1,y1,z1,scalars=r1)
#move secondary to proper place: rotate 180 degrees about z, then translate.
rot_angle = pi
Rz = np.array([[cos(rot_angle),-sin(rot_angle),0],
               [sin(rot_angle), cos(rot_angle),0],
               [0,              0,             1]])
B = np.dot(Rz,np.array([x2,y2,z2]).reshape((3,-1))) # we need to have a 3x3 times 3xN array
x2,y2,z2 = B.reshape((3,x2.shape[0],x2.shape[1])) # but we want our original shape back
x2 += 1 # simple translation
#mlab.figure()
#mlab.mesh(x1,y1,z1,scalars=r1)
#mlab.mesh(x2,y2,z2,scalars=r2)
#ax.plot_surface(x1, y1, z2, r1)
#ax.plot_wireframe(x1, y1, z2)
#plt.show()
#plt.savefig("roche_surf.pdf")
#plt.imshow(r1)
#plt.colorbar()
#plt.figure()
#plt.show()
#plt.imshow(r2)
#plt.colorbar()
|
#!/usr/bin/env python3
import sys
import subprocess
import configparser
import os
def parse_config():
    """Read configs.ini from the current working directory and return the parser.

    ConfigParser.read() silently skips a missing file, so an empty parser is
    returned when configs.ini does not exist.
    """
    cfg = configparser.ConfigParser()
    cfg.read('configs.ini')
    return cfg
def _launch(label, command):
    """Print a banner for *label*, echo *command*, and run it through the shell."""
    print("-"*40, label, "-"*40)
    print("Launching", command)
    # NOTE(review): shell=True with config-derived text is fine for trusted
    # local configs, but do not point this at untrusted input.
    subprocess.call(command, shell=True)


def main():
    """Run the FREE and/or FAST CIFAR10 experiments selected by configs.ini."""
    print("Starting experiments")
    config = parse_config()
    # The experiment scripts expect to run from inside CIFAR10/.
    os.chdir('CIFAR10')
    # mode: 0 = both FREE and FAST, 1 = FREE only, 2 = FAST only.
    mode = int(config["SCRIPT"]["mode"])
    keys_free = [c for c in config["FREE"]]
    keys_fast = [c for c in config["FAST"]]
    # Turn each config entry into a "--key value" CLI argument.
    free_command = " ".join(["python3 free.py"] + ["--"+k+" "+config["FREE"][k] for k in keys_free])
    fast_command = " ".join(["python3 fast.py"] + ["--"+k+" "+config["FAST"][k] for k in keys_fast])
    if mode == 0:  # BOTH FREE AND FAST
        _launch("FREE", free_command)
        _launch("FAST", fast_command)
    elif mode == 1:  # ONLY FREE
        _launch("FREE", free_command)
    elif mode == 2:  # ONLY FAST
        _launch("FAST", fast_command)
def __main__(function, path, user_args):
    """Wraps a main function to display a usage message when necessary.

    Inspects *function*'s signature via its code object: if *user_args* fits
    between the minimum (non-default) and maximum argument counts, the
    function is called with them and its result returned.  Otherwise a
    Usage line (and the function's docstring, if any) is written to stderr
    and 1 is returned.
    """
    co = function.__code__
    num_args = co.co_argcount
    if function.__defaults__ is not None:
        # Arguments with defaults are optional on the command line.
        min_args = num_args - len(function.__defaults__)
    else:
        min_args = num_args
    if co.co_flags & 0x04:  # 0x04 == CO_VARARGS: function captures extra arguments with *
        max_args = None
    else:
        max_args = num_args
    if min_args <= len(user_args) and (max_args is None or
                                       max_args >= len(user_args)):
        return(function(*user_args))
    if max_args == 0:
        sys.stderr.write("Usage: {path}\n".format(path=path))
    else:
        # Build "name [optname [starname" pieces; closing brackets are
        # appended in one run at the end.
        arg_list = list()
        optionals = 0
        for index in range(num_args):
            if index < min_args:
                arg_list.append(co.co_varnames[index])
            else:
                arg_list.append("[" + co.co_varnames[index])
                optionals += 1
        if max_args is None:
            # co_varnames[num_args] is the *varargs name.
            # NOTE(review): the trailing + "" is a no-op; perhaps a " ..."
            # marker was intended — confirm.
            arg_list.append("[" + co.co_varnames[num_args] + "")
            optionals += 1
        sys.stderr.write("Usage: {path} {args}{optional_closes}\n".format
                         (path=path,
                          args=" ".join(arg_list),
                          optional_closes="]" * optionals))
    if function.__doc__:
        sys.stderr.write("\n")
        sys.stderr.write(function.__doc__)
        sys.stderr.write("\n")
    return(1)
if __name__ == "__main__":
    sys.exit(__main__(main, sys.argv[0], sys.argv[1:]))
|
#!/usr/bin/env python
import click
import boto3
import math
import time
from datetime import datetime, timezone
def send_statistics_aggregate(stats, time_window = 3600, max_bounces_rate = 0.04, max_complaints_rate = 0.0007):
    """Aggregate SES send statistics over the trailing *time_window* seconds.

    Args:
        stats: response of SES ``get_send_statistics`` (dict with a
            'SendDataPoints' list of {'Timestamp', 'DeliveryAttempts',
            'Bounces', 'Complaints', 'Rejects'} entries).
        time_window: only data points younger than this many seconds count.
        max_bounces_rate: target maximum bounce rate.
        max_complaints_rate: target maximum complaint rate.

    Returns:
        dict with the summed counters plus derived '...Rate' values and the
        number of extra sends needed ('NumToSend') to dilute the error rates
        below the targets.
    """
    now = datetime.now(timezone.utc)
    total = {
        'DeliveryAttempts': 0,
        'Bounces': 0,
        'Complaints': 0,
        'Rejects': 0
    }
    for data_point in stats['SendDataPoints']:
        # Fix: the original assigned data_point['Timestamp'] to an unused
        # local 'ts'; removed the dead variable.
        age = (now - data_point['Timestamp']).total_seconds()
        if age < time_window:
            for key, value in data_point.items():
                if key != 'Timestamp':
                    total[key] = total[key] + value
    attempts = total['DeliveryAttempts']
    # Guard against division by zero when nothing was sent in the window.
    total['BouncesRate'] = total['Bounces'] / attempts if attempts else 0
    total['ComplaintsRate'] = total['Complaints'] / attempts if attempts else 0
    total['RejectsRate'] = total['Rejects'] / attempts if attempts else 0
    total['BounceNumToSend'] = calc_num_to_send(attempts, total['Bounces'], max_bounces_rate)
    total['ComplaintNumToSend'] = calc_num_to_send(attempts, total['Complaints'], max_complaints_rate)
    total['NumToSend'] = max(total['BounceNumToSend'], total['ComplaintNumToSend'])
    return total
def calc_num_to_send(delivery, error_num, rate):
    """Extra successful sends needed so error_num/(delivery + result) <= rate.

    Never negative: returns 0 when the current error rate is already at or
    below *rate*.
    """
    shortfall = math.ceil(error_num / rate - delivery)
    return shortfall if shortfall > 0 else 0
# CLI entry point: inspects recent SES send statistics and, if the bounce or
# complaint rates are above the configured maxima, sends enough dummy emails
# (from_email -> to_email) to dilute the rates, throttled to half the
# account's MaxSendRate.  No docstring is added on purpose: click would show
# it in --help and change the command's output.
@click.command()
@click.argument('access_key', required=1)
@click.argument('secret_key', required=1)
@click.argument('from_email', required=1)
@click.argument('to_email', required=1)
@click.option('--region', default='us-east-1', help='AWS SES Region (default is us-east-1)')
@click.option('--interval', default=3600, help='Timer interval')
def watch(access_key, secret_key, from_email, to_email, region, interval):
    client = boto3.client(
        'ses',
        region_name=region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key
    )
    stats_data = client.get_send_statistics()
    quota_data = client.get_send_quota()
    data = send_statistics_aggregate(stats_data, interval)
    remaining = num_to_send = data['NumToSend']
    # Half speed to avoid real emails would be rejected by rate limit
    # (the C-style "(int) (...)" spelling is just an int(...) call).
    send_rate = (int) (quota_data['MaxSendRate'] / 2)
    # NOTE(review): if MaxSendRate < 2 this truncates to 0 and the loop
    # below never decrements `remaining` — potential infinite loop; confirm.
    print(data)
    while remaining > 0:
        remaining = remaining - send_rate
        time1 = time.time()
        for i in range(0, send_rate):
            client.send_email(
                Source=from_email,
                Destination={
                    'ToAddresses': [to_email],
                },
                Message={
                    'Subject': {
                        'Data': 'Lorem Ipsum is simply dummy text',
                    },
                    'Body': {
                        'Text': {
                            'Data': 'Lorem Ipsum is simply dummy text of the printing and typesetting industry.'
                        },
                        'Html': {
                            'Data': '<p>Lorem Ipsum is simply dummy text of the printing and typesetting industry.</p>'
                        }
                    }
                }
            )
        print('Sent '+str(send_rate)+' emails.')
        time2 = time.time()
        # Pace each batch to ~1.2 s so the effective rate stays under quota.
        wait_time = max(0, 1.2 - (time2 - time1))
        print('Wait '+str(wait_time)+' seconds.')
        time.sleep(wait_time)
if __name__ == '__main__':
    watch()
|
#!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
import random
import threading
import copy
from osa_utils import OSAUtils
from daos_utils import DaosCommand
from dmg_utils import check_system_query_status
from test_utils_pool import TestPool
from command_utils import CommandFailure
from apricot import skipForTicket
import queue
class OSAOfflineParallelTest(OSAUtils):
# pylint: disable=too-many-ancestors
"""
Test Class Description: This test runs
daos_server offline drain,reintegration,
extend test cases in parallel.
:avocado: recursive
"""
def setUp(self):
"""Set up for test case."""
super().setUp()
self.dmg_command = self.get_dmg_command()
self.daos_command = DaosCommand(self.bin)
self.ior_test_sequence = self.params.get("ior_test_sequence",
'/run/ior/iorflags/*')
# Start an additional server.
self.extra_servers = self.params.get("test_servers",
"/run/extra_servers/*")
self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
self.out_queue = queue.Queue()
self.dmg_command.exit_status_exception = True
self.server_boot = None
def dmg_thread(self, action, action_args, results):
"""Generate different dmg command related to OSA.
Args:
action_args(dict) : {action: {"puuid":
pool[val].uuid,
"rank": rank,
"target": t_string,
"action": action,}
results (queue) : dmg command output queue.
"""
dmg = copy.copy(self.dmg_command)
try:
if action == "reintegrate":
text = "Waiting for rebuild to complete"
time.sleep(3)
self.print_and_assert_on_rebuild_failure(text)
# For each action, read the values from the
# dictionary.
# example {"exclude" : {"puuid": self.pool, "rank": rank
# "target": t_string, "action": exclude}}
# getattr is used to obtain the method in dmg object.
# eg: dmg -> pool_exclude method, then pass arguments like
# puuid, rank, target to the pool_exclude method.
if action == "exclude" and self.server_boot is True:
ranks = action_args[action][1]
getattr(dmg, "system stop --ranks={}".format(ranks))
output = "Stopping the rank : {}".format(ranks)
self.print_and_assert_on_rebuild_failure(output)
getattr(dmg, "system start --ranks={}".format(ranks))
self.print_and_assert_on_rebuild_failure(output)
else:
getattr(dmg, "pool_{}".format(action))(**action_args[action])
except CommandFailure as _error:
results.put("{} failed".format(action))
def run_offline_parallel_test(self, num_pool, data=False, oclass=None):
"""Run multiple OSA commands in parallel with or without data.
Args:
num_pool (int) : total pools to create for testing purposes.
data (bool) : whether pool has no data or to create
some data in pool. Defaults to False.
oclass (str) : Daos object class (RP_2G1,etc)
"""
# Create a pool
pool = {}
pool_uuid = []
target_list = []
if oclass is None:
oclass = self.ior_cmd.dfs_oclass.value
# Exclude target : random two targets (target idx : 0-7)
n = random.randint(0, 6)
target_list.append(n)
target_list.append(n+1)
t_string = "{},{}".format(target_list[0], target_list[1])
# Exclude rank 2.
rank = 2
test_seq = self.ior_test_sequence[0]
for val in range(0, num_pool):
pool[val] = TestPool(self.context,
dmg_command=self.get_dmg_command())
pool[val].get_params(self)
pool[val].create()
pool_uuid.append(pool[val].uuid)
self.pool = pool[val]
self.pool.set_property("reclaim", "disabled")
if data:
self.run_ior_thread("Write", oclass, test_seq)
if oclass != "S1":
self.run_mdtest_thread()
# if self.test_during_aggregation is set,
# Create another container and run the IOR
# command using the second container.
if self.test_during_aggregation is True:
self.run_ior_thread("Write", oclass, test_seq)
# Start the additional servers and extend the pool
self.log.info("Extra Servers = %s", self.extra_servers)
self.start_additional_servers(self.extra_servers)
# Give sometime for the additional server to come up.
for retry in range(0, 10):
scan_info = self.get_dmg_command().system_query()
if not check_system_query_status(scan_info):
if retry == 9:
self.fail("One or more servers not in expected status")
else:
break
# Exclude and reintegrate the pool_uuid, rank and targets
for val in range(0, num_pool):
self.pool = pool[val]
self.pool.display_pool_daos_space("Pool space: Beginning")
pver_begin = self.get_pool_version()
self.log.info("Pool Version at the beginning %s", pver_begin)
# If we need to trigger aggregation on pool 1, delete
# the second container which has IOR data.
if self.test_during_aggregation is True and val == 0:
self.delete_extra_container(self.pool)
# Create the threads here
threads = []
# Action dictionary with OSA dmg command parameters
action_args = {
"drain": {"pool": self.pool.uuid, "rank": rank,
"tgt_idx": None},
"exclude": {"pool": self.pool.uuid, "rank": (rank + 1),
"tgt_idx": t_string},
"reintegrate": {"pool": self.pool.uuid, "rank": (rank + 1),
"tgt_idx": t_string},
"extend": {"pool": self.pool.uuid, "ranks": (rank + 2),
"scm_size": self.pool.scm_size,
"nvme_size": self.pool.nvme_size}
}
for action in sorted(action_args):
# Add a dmg thread
process = threading.Thread(target=self.dmg_thread,
kwargs={"action": action,
"action_args":
action_args,
"results":
self.out_queue})
process.start()
threads.append(process)
# Wait to finish the threads
for thrd in threads:
thrd.join()
time.sleep(5)
# Check the queue for any failure.
tmp_list = list(self.out_queue.queue)
for failure in tmp_list:
if "FAIL" in failure:
self.fail("Test failed : {0}".format(failure))
for val in range(0, num_pool):
display_string = "Pool{} space at the End".format(val)
pool[val].display_pool_daos_space(display_string)
self.is_rebuild_done(3)
self.assert_on_rebuild_failure()
pver_end = self.get_pool_version()
self.log.info("Pool Version at the End %s", pver_end)
self.assertTrue(pver_end >= 26,
"Pool Version Error: at the end")
if data:
self.run_ior_thread("Read", oclass, test_seq)
if oclass != "S1":
self.run_mdtest_thread()
self.container = self.pool_cont_dict[self.pool][0]
kwargs = {"pool": self.pool.uuid,
"cont": self.container.uuid}
output = self.daos_command.container_check(**kwargs)
self.log.info(output)
@skipForTicket("DAOS-7247")
def test_osa_offline_parallel_test(self):
"""
JIRA ID: DAOS-4752
Test Description: Runs multiple OSA commands in parallel.
:avocado: tags=all,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=offline_parallel,offline_parallel_basic_test
"""
self.log.info("Offline Parallel Test: Basic Test")
self.run_offline_parallel_test(1, data=True)
@skipForTicket("DAOS-7247")
def test_osa_offline_parallel_test_without_csum(self):
"""
JIRA ID: DAOS-7161
Test Description: Runs multiple OSA commands in parallel
without enabling checksum.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa
:avocado: tags=offline_parallel,offline_parallel_without_csum
"""
self.test_with_checksum = self.params.get("test_with_checksum",
'/run/checksum/*')
self.log.info("Offline Parallel Test: Without Checksum")
self.run_offline_parallel_test(1, data=True)
@skipForTicket("DAOS-7247")
def test_osa_offline_parallel_test_rank_boot(self):
"""
JIRA ID: DAOS-7161
Test Description: Runs multiple OSA commands in parallel
with a rank rebooted using system stop/start.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa
:avocado: tags=offline_parallel,offline_parallel_srv_rank_boot
"""
self.test_with_checksum = self.params.get("test_with_checksum",
'/run/checksum/*')
self.server_boot = self.params.get("flags",
'/run/system_stop_start/*')
self.log.info("Offline Parallel Test: Restart a rank")
self.run_offline_parallel_test(1, data=True)
    @skipForTicket("DAOS-7195,DAOS-7247")  # Skipped until the referenced tickets are resolved.
    def test_osa_offline_parallel_test_with_aggregation(self):
        """
        JIRA ID: DAOS-7161
        Test Description: Runs multiple OSA commands in parallel
        with aggregation turned on.
        :avocado: tags=all,full_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa
        :avocado: tags=offline_parallel,offline_parallel_with_aggregation
        """
        # Aggregation toggle comes from the test yaml.
        self.test_during_aggregation = self.params.get("test_with_aggregation",
                                                       '/run/aggregation/*')
        self.log.info("Offline Parallel Test : Aggregation")
        self.run_offline_parallel_test(1, data=True)
    @skipForTicket("DAOS-7247")  # Skipped until the referenced ticket is resolved.
    def test_osa_offline_parallel_test_oclass(self):
        """
        JIRA ID: DAOS-7161
        Test Description: Runs multiple OSA commands in parallel
        with different object class.
        :avocado: tags=all,full_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa
        :avocado: tags=offline_parallel,offline_parallel_oclass
        """
        self.log.info("Offline Parallel Test : OClass")
        # Presently, the script is limited and supports only one extra
        # object class testing. We are testing S1 apart from RP_2G1.
        self.run_offline_parallel_test(1, data=True,
                                       oclass=self.test_oclass[0])
|
#
# mDKL
#
# Copyright (c) Siemens AG, 2021
# Authors:
# Zhiliang Wu <zhiliang.wu@siemens.com>
# License-Identifier: MIT
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from yellowbrick.features import JointPlotVisualizer
from yellowbrick.regressor import ResidualsPlot, PredictionError
from sklearn.metrics import mean_squared_error, r2_score, \
mean_absolute_error, median_absolute_error
from logging_conf import logger
from model_utils import Mock
def joint_plot_xy(x_t, y_t, x_te, y_te, dp, n_epoch, label):
    """Render a joint plot of predicted (x, y) locations and save it as PDF.

    Both the training predictions and the validation/test predictions are
    drawn on the same axes.

    Args:
        x_t (np.ndarray): Predicted x values on the training set.
        y_t (np.ndarray): Predicted y values on the training set.
        x_te (np.ndarray): Predicted x values on the validation/test set.
        y_te (np.ndarray): Predicted y values on the validation/test set.
        dp (str): Directory the plot is written into.
        n_epoch (int): Training epoch, used in the file name.
        label (str): Plot name, formatted as pred_[valid/test].

    Returns:
        None: The figure is saved under ``./{dp}/epoch_{n_epoch}_{label}_joint.pdf``.
    """
    figure, axis = plt.subplots()
    visualizer = JointPlotVisualizer(axis)
    # Overlay train and validation/test points on the same visualizer.
    visualizer.fit_transform(X=x_t, y=y_t)
    visualizer.fit_transform(X=x_te, y=y_te)
    visualizer.finalize()
    figure.savefig(f'./{dp}/epoch_{n_epoch}_{label}_joint.pdf', dpi=600)
    plt.close(figure)
def residual_plot(y_t_true, y_t_p, y_te_true, y_te_p, dp, n_epoch, label):
    """Save a side-by-side residual plot and prediction-error plot as PDF.

    A ``Mock`` estimator wraps the pre-computed predictions so that the
    yellowbrick visualizers can score them without refitting a model.

    Args:
        y_t_true (np.ndarray): True targets of the training set.
        y_t_p (np.ndarray): Predicted targets of the training set.
        y_te_true (np.ndarray): True targets of the validation/test set.
        y_te_p (np.ndarray): Predicted targets of the validation/test set.
        dp (str): Directory the plot is written into.
        n_epoch (int): Training epoch, used in the file name.
        label (str): Plot name, formatted as pred_[valid/test].

    Returns:
        None: The figure is saved under
        ``./{dp}/epoch_{n_epoch}_residual_{label}.pdf``.
    """
    estimator_stub = Mock(y_t_p, y_te_p)
    figure, (left_ax, right_ax) = plt.subplots(1, 2, figsize=[20, 8])
    # Left panel: residuals for train (True flag) and test (False flag).
    residuals_viz = ResidualsPlot(estimator_stub, left_ax, is_fitted=True)
    residuals_viz.score(True, y_t_true, train=True)
    residuals_viz.score(False, y_te_true, train=False)
    residuals_viz.finalize()
    # Right panel: predicted-vs-true scatter with identity line.
    error_viz = PredictionError(estimator_stub, right_ax, is_fitted=True)
    error_viz.score(True, y_t_true)
    error_viz.score(False, y_te_true)
    error_viz.finalize()
    figure.savefig(f'./{dp}/epoch_{n_epoch}_residual_{label}.pdf', dpi=600)
    plt.close(figure)
def prepare_pq_plot_equal(y_true, y_pred, y_std, n_quantiles=10):
    """Build the QP-plot metrics table with equally sized chunks.

    Samples are sorted by predictive standard deviation, split into
    ``n_quantiles`` chunks of equal size, and RMSE/MAE/R2/MAD are computed
    per chunk.

    Args:
        y_true (np.ndarray): True labels of the shape (n_samples, [n_tasks]).
        y_pred (np.ndarray): Prediction of the labels.
        y_std (np.ndarray): Standard deviation of the predictions.
        n_quantiles (int): Number of chunks.

    Returns:
        pd.DataFrame: Metrics table with one row per quantile chunk.
    """
    if y_std.ndim > 1:
        # Multi-task: rank samples by the mean std across tasks.
        order = np.argsort(np.mean(y_std, axis=1))
        true_sorted = y_true[order, :]
        pred_sorted = y_pred[order, :]
    else:
        order = np.argsort(y_std)
        true_sorted = y_true[order]
        pred_sorted = y_pred[order]

    n_samples = y_true.shape[0]
    chunk_size = n_samples // n_quantiles
    logger.info('Equal mode')
    logger.info(f'In each chunk, there are {chunk_size} samples.')

    starts = range(0, n_samples, chunk_size)
    true_chunks = [true_sorted[s: s + chunk_size] for s in starts]
    pred_chunks = [pred_sorted[s: s + chunk_size] for s in starts]

    # NOTE(review): when n_samples is not divisible by n_quantiles, the
    # trailing remainder chunk(s) are ignored here (only the first
    # n_quantiles chunks are scored) -- same as the original behavior.
    rows = []
    for idx, (chunk_true, chunk_pred) in enumerate(
            zip(true_chunks[:n_quantiles], pred_chunks[:n_quantiles])):
        rows.append((
            (idx + 1) / n_quantiles,
            mean_squared_error(chunk_true, chunk_pred,
                               multioutput='uniform_average',
                               squared=False),  # squared=False -> RMSE
            mean_absolute_error(chunk_true, chunk_pred,
                                multioutput='uniform_average'),
            r2_score(chunk_true, chunk_pred, multioutput='uniform_average'),
            median_absolute_error(chunk_true, chunk_pred,
                                  multioutput='uniform_average'),
        ))
    return pd.DataFrame(data=np.array(rows),
                        columns=['quantile', 'rmse', 'mae', 'r2s', 'mad'])
def prepare_pq_plot_accu(y_true, y_pred, y_std, n_quantiles=10):
    """Build the QP-plot metrics table with accumulated sample prefixes.

    Samples are sorted by predictive standard deviation; the i-th row scores
    the prefix containing the (i+1)/n_quantiles most certain samples.

    Args:
        y_true (np.ndarray): True labels of shape (n_samples, [n_tasks]).
        y_pred (np.ndarray): Prediction of the labels.
        y_std (np.ndarray): Standard deviation of the predictions.
        n_quantiles (int): How many chunks we want to have.

    Returns:
        pd.DataFrame: Metrics table with accumulated quantile splits.
    """
    if y_std.ndim > 1:
        # Multi-task: rank samples by the mean std across tasks.
        order = np.argsort(np.mean(y_std, axis=1))
        y_true_sorted = y_true[order, :]
        y_pred_sorted = y_pred[order, :]
    else:
        order = np.argsort(y_std)
        y_true_sorted = y_true[order]
        y_pred_sorted = y_pred[order]
    n_samples = y_true.shape[0]
    n_samples_per_quantile = n_samples // n_quantiles
    y_true_list = [y_true_sorted[: i+n_samples_per_quantile] for i in range(
        0, n_samples, n_samples_per_quantile)]
    y_pred_list = [y_pred_sorted[: i+n_samples_per_quantile] for i in range(
        0, n_samples, n_samples_per_quantile)]
    if n_quantiles < len(y_true_list):
        # n_samples is not divisible by n_quantiles, so the chunking above
        # produced extra prefixes beyond the n_quantiles actually scored
        # below.  Extend the last *used* prefix (index n_quantiles - 1) to
        # cover every sample.  Bug fix: the original indexed [-2], which only
        # coincides with the last used prefix when there is exactly one extra
        # prefix; with more than one extra (e.g. n_samples just above
        # n_quantiles) the tail samples were silently dropped.
        y_true_list[n_quantiles - 1] = y_true_sorted[:]
        y_pred_list[n_quantiles - 1] = y_pred_sorted[:]
        logger.info('The extra chunk is merged into the last chunk!')
    quantile_list = []
    rmse_list = []
    mae_list = []
    r2score_list = []
    mad_list = []  # median absolute error
    logger.info('Accumulation mode...')
    for i, (y_t, y_p) in enumerate(zip(y_true_list[:n_quantiles],
                                       y_pred_list[:n_quantiles])):
        logger.info(f'In chunk {i}, there are {y_t.shape[0]} samples.')
        # squared=False turns MSE into RMSE.
        rmse_quantile = mean_squared_error(y_t, y_p,
                                           multioutput='uniform_average',
                                           squared=False)
        mae_quantile = mean_absolute_error(y_t, y_p,
                                           multioutput='uniform_average')
        r2s_quantile = r2_score(y_t, y_p, multioutput='uniform_average')
        mad_quantile = median_absolute_error(y_t, y_p,
                                             multioutput='uniform_average')
        quantile_list.append((i + 1) / n_quantiles)
        rmse_list.append(rmse_quantile)
        mae_list.append(mae_quantile)
        r2score_list.append(r2s_quantile)
        mad_list.append(mad_quantile)
    array_pq = np.stack([quantile_list, rmse_list, mae_list,
                         r2score_list, mad_list],
                        axis=1)
    df_pq = pd.DataFrame(data=array_pq,
                         columns=['quantile', 'rmse', 'mae', 'r2s', 'mad'])
    return df_pq
def plot_pq(df_pq, df_pq_std=None, columns=('mae', 'r2s'),
            title='Performance-Quantile'):
    """Plot the quantile performance plot from the prepared metrics table.

    Args:
        df_pq (pd.DataFrame): The QP table information with mean values.
        df_pq_std (pd.DataFrame): The QP table information with std values.
        columns (tuple): Which columns of the QP table to plot; exactly one
            or two items.
        title (str): An optional name of the figure.

    Returns:
        plt.Figure: A figure of the resulting QP plot.

    Raises:
        ValueError: If ``columns`` does not contain exactly one or two items.
    """
    def _fill_band(ax, column, color):
        # Shade mean +/- std around the metric curve (closes over the
        # dataframes; only called when df_pq_std is provided).
        ax.fill_between(df_pq['quantile'],
                        df_pq[column] - df_pq_std[column],
                        df_pq[column] + df_pq_std[column],
                        color=color,
                        alpha=0.5)

    fig, ax1 = plt.subplots(figsize=(16, 9))
    if len(columns) == 1:
        ax1.plot(df_pq['quantile'], df_pq[columns[0]], 'r', label=columns[0])
        ax1.set_ylabel(columns[0].upper())
        ax1.legend(loc=1)
        if df_pq_std is not None:
            _fill_band(ax1, columns[0], 'r')
    elif len(columns) == 2:
        # First metric on the left axis, second on a twin right axis.
        ax1.plot(df_pq['quantile'], df_pq[columns[0]], 'r', label=columns[0])
        ax1.set_ylabel(columns[0].upper())
        ax2 = ax1.twinx()
        ax2.plot(df_pq['quantile'], df_pq[columns[1]], 'g', label=columns[1])
        ax2.set_ylabel(columns[1].upper())
        ax1.legend(loc=1)
        ax2.legend(loc=4)
        if df_pq_std is not None:
            _fill_band(ax1, columns[0], 'r')
            _fill_band(ax2, columns[1], 'g')
    else:
        # Message fixed: this branch also catches the zero-column case, for
        # which "too many columns" was misleading.
        raise ValueError('Exactly one or two columns must be given.')
    ax1.set_xlabel('Quantile')
    ax1.set_title(title)
    plt.show()
    return fig
def plot_pq_all(mean_arr, std_arr, label_list, title='QP_accu',
                metric='rmse', ax=None, colors=None, alpha=0.5):
    """Generate a summary QP plot comparing several methods.

    Args:
        mean_arr (np.ndarray): Mean performance of each method at a certain
            quantile, (#methods, #quantiles).
        std_arr (np.ndarray): Standard deviations of each method at a certain
            quantile, (#methods, #quantiles).
        label_list (list): The name of each method, used for legends.
        title (str): An optional name of the figure.
        metric (str): The performance metric, used for the y-axis label.
        ax (matplotlib.axes.Axes): An optional existing ax to plot into.
        colors (list): Optional per-method colors; matplotlib's default
            property cycle is used when omitted.
        alpha (float): The opacity of the std bands.

    Returns:
        plt.Figure: The created figure, or None when an existing ``ax`` was
        supplied.
    """
    if ax:
        fig = None  # caller owns the figure when an ax is passed in
    else:
        fig, ax = plt.subplots(figsize=(16, 9))
    n_quantile = mean_arr.shape[1]
    x_values = (np.arange(n_quantile) + 1) / n_quantile
    for i, method_label in enumerate(label_list):
        # De-duplicated: the explicit-color and default-cycle branches only
        # differed in whether `color=` was passed.
        color_kwargs = {'color': colors[i]} if colors else {}
        ax.plot(x_values, mean_arr[i, :], label=method_label, linewidth=0.8,
                **color_kwargs)
        ax.fill_between(x_values,
                        mean_arr[i, :] - std_arr[i, :],
                        mean_arr[i, :] + std_arr[i, :],
                        alpha=alpha, **color_kwargs)
    ax.legend(loc=4, frameon=True, borderaxespad=0.1)
    ax.set_xlabel('Quantile of the predictive variance')
    ax.set_ylabel(metric.upper())
    ax.grid(True)
    if title:
        ax.set_title(title)
    plt.show()
    return fig
|
from setuptools import setup, find_packages
# Packaging metadata for the ``reqinstall`` distribution.
setup(name='reqinstall',
      version='0.0.2',
      description='',
      author='Quali',
      license='MIT License',
      classifiers=['Development Status :: 4 - Beta',
                   'Environment :: Console',
                   'Intended Audience :: End Users/Desktop',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3',
                   'License :: OSI Approved :: MIT License'],
      # Ship every package in the tree except the test suite.
      packages=find_packages(exclude=['tests']),
      # No runtime dependencies.
      install_requires=[])
|
'''
Copyright <2021> <Thomas Chapman>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import rhinoscriptsyntax as rs
def TurnOffSelectedLayer():
    """Hide the layer of every object the user selects.

    Prompts for a selection; for each selected object the object's layer is
    made invisible. Screen redraw is suspended while the layers are toggled
    so the viewport only refreshes once.
    """
    obj = rs.GetObjects("select obj Layers to turn off")
    if obj:
        rs.EnableRedraw(False)
        try:
            for i in obj:
                layer = rs.ObjectLayer(i)
                rs.LayerVisible(layer, visible=False)
        finally:
            # Always re-enable redraw: the original left the viewport frozen
            # if any LayerVisible/ObjectLayer call raised mid-loop.
            rs.EnableRedraw(True)


if __name__ == "__main__":
    TurnOffSelectedLayer()
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Git regex tests."""
import pytest
from renku import errors
from renku.models._git import GitURL
@pytest.mark.parametrize(
    "fields", [
        # --- plain https URLs ---
        {
            'href': 'https://example.com/repo.git',
            'protocol': 'https',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'repo.git',
        },
        {
            'href': 'https://example.com/repo',
            'protocol': 'https',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'repo',
        },
        {
            'href': 'https://example.com/owner/repo.git',
            'protocol': 'https',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'owner/repo.git',
            'owner': 'owner',
        },
        {
            'href': 'https://example.com:1234/repo.git',
            'protocol': 'https',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'repo.git',
            'port': '1234',
        },
        {
            'href': 'https://example.com:1234/owner/repo.git',
            'protocol': 'https',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'owner/repo.git',
            'owner': 'owner',
            'port': '1234',
        },
        {
            'href': 'https://example.com:1234/prefix/owner/repo.git',
            'protocol': 'https',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'prefix/owner/repo.git',
            'owner': 'owner',
            'port': '1234',
        },
        # Dots must be accepted inside path components and the repo name.
        {
            'href': 'https://example.com/pre.fix/owner.name/repo.name.git',
            'protocol': 'https',
            'hostname': 'example.com',
            'pathname': 'pre.fix/owner.name/repo.name.git',
            'owner': 'owner.name',
            'name': 'repo.name',
        },
        # --- explicit git+ protocol prefixes ---
        {
            'href': 'git+https://example.com:1234/owner/repo.git',
            'protocol': 'git+https',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'owner/repo.git',
            'owner': 'owner',
            'port': '1234',
        },
        {
            'href': 'git+ssh://example.com:1234/owner/repo.git',
            'protocol': 'git+ssh',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'owner/repo.git',
            'owner': 'owner',
            'port': '1234',
        },
        # --- URLs carrying credentials ---
        {
            'href': 'git+ssh://user:pass@example.com:1234/owner/repo.git',
            'protocol': 'git+ssh',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'owner/repo.git',
            'owner': 'owner',
            'port': '1234',
            'username': 'user',
            'password': 'pass',
        },
        {
            'href': 'ssh://user:pass@example.com/~user/owner/repo.git',
            'protocol': 'ssh',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': '~user/owner/repo.git',
            'owner': 'owner',
            'username': 'user',
            'password': 'pass',
        },
        # 'git@host/path' (slash instead of the scp-style colon) must be
        # rejected -- xfail expects a ConfigurationError.
        pytest.param(
            {
                'href': 'git@example.com/repo.git',
                'protocol': 'ssh',
                'hostname': 'example.com',
                'name': 'repo',
                'pathname': 'repo.git',
                'username': 'git',
            },
            marks=pytest.mark.xfail(
                raises=errors.ConfigurationError, strict=True
            ),
        ),
        pytest.param(
            {
                'href': 'git@example.com/owner/repo.git',
                'protocol': 'ssh',
                'hostname': 'example.com',
                'name': 'repo',
                'pathname': 'owner/repo.git',
                'owner': 'owner',
                'username': 'git',
            },
            marks=pytest.mark.xfail(
                raises=errors.ConfigurationError, strict=True
            ),
        ),
        # --- valid scp-like 'git@host:path' syntax ---
        {
            'href': 'git@example.com:repo.git',
            'protocol': 'ssh',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'repo.git',
            'username': 'git',
        },
        {
            'href': 'git@example.com:owner/repo.git',
            'protocol': 'ssh',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'owner/repo.git',
            'owner': 'owner',
            'username': 'git',
        },
        {
            'href': 'git@example.com:prefix/owner/repo.git',
            'protocol': 'ssh',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': 'prefix/owner/repo.git',
            'owner': 'owner',
            'username': 'git',
        },
        # --- local filesystem paths, bare and with file:// scheme ---
        {
            'href': '/path/to/repo',
            'pathname': '/path/to/repo',
        },
        {
            'href': 'file:///path/to/repo',
            'pathname': '/path/to/repo',
        },
        {
            'href': '../relative/path/to/repo',
            'pathname': '../relative/path/to/repo',
        },
        {
            'href': 'file://../relative/path/to/repo',
            'pathname': '../relative/path/to/repo',
        },
        # 'host:port:path' with a second colon is malformed -- xfail.
        pytest.param(
            {
                'href': 'https://example.com:1234:repo.git',
                'protocol': 'https',
                'hostname': 'example.com',
                'port': '1234',
                'name': 'repo',
                'pathname': 'repo.git',
            },
            marks=pytest.mark.xfail(
                raises=errors.ConfigurationError, strict=True
            ),
        ),
        pytest.param(
            {
                'href': 'https://example.com:1234:owner/repo.git',
                'protocol': 'https',
                'hostname': 'example.com',
                'port': '1234',
                'name': 'repo',
                'pathname': 'repo.git',
                'owner': 'owner',
            },
            marks=pytest.mark.xfail(
                raises=errors.ConfigurationError, strict=True
            ),
        ),
        pytest.param(
            {
                'href': 'git@example.com:1234:owner/repo.git',
                'protocol': 'ssh',
                'hostname': 'example.com',
                'port': '1234',
                'name': 'repo',
                'pathname': 'repo.git',
                'owner': 'owner',
            },
            marks=pytest.mark.xfail(
                raises=errors.ConfigurationError, strict=True
            ),
        ),
        # In scp-like syntax '1234' is a path segment, not a port.
        {
            'href': 'git@example.com:1234/prefix/owner/repo.git',
            'username': 'git',
            'protocol': 'ssh',
            'hostname': 'example.com',
            'name': 'repo',
            'pathname': '1234/prefix/owner/repo.git',
            'owner': 'owner',
        },
    ]
)
def test_valid_href(fields):
    """Check GitURL parsing against the expected component breakdown.

    Each ``fields`` dict is the expected attribute set; building a GitURL
    from it must equal the result of parsing the raw ``href``. xfail-marked
    params are malformed URLs that must raise ConfigurationError.
    """
    # Drop a helper key if present; none of the current params define it
    # (NOTE(review): possibly a leftover -- confirm before removing).
    fields.pop('protocols', None)
    assert GitURL(**fields) == GitURL.parse(fields['href'])
|
import paho.mqtt.publish as pub
import logging
logger = logging.getLogger("mqtt_sender")
def publish(payloads: list, client_id: str, topic: str, hostname: str, port: int, qos: int = 0, retain: bool = False):
    """Publish every payload in *payloads* to *topic* in one batch.

    Each payload becomes one message dict of the form expected by
    paho-mqtt: {'topic': ..., 'payload': ..., 'qos': ..., 'retain': ...}.
    """
    batch = []
    for payload in payloads:
        batch.append({'topic': topic, 'payload': payload,
                      'qos': qos, 'retain': retain})
    # Keyword arguments match paho.mqtt.publish.multiple's signature
    # (msgs, hostname, port, client_id, ...).
    pub.multiple(batch, hostname=hostname, port=port, client_id=client_id)
|
import math
import numpy as np
import datetime as dt
from typing import Dict, Optional
from voltoolbox import BusinessTimeMeasure, longest_increasing_subsequence, bs_implied_volatility
from voltoolbox.fit.option_quotes import OptionSnapshot, OptionQuoteSlice, QuoteSlice
from voltoolbox.fit.fit_utils import act365_time
def prepare_quotes_for_fit(quote_slice: OptionQuoteSlice,
                           pricing_date: dt.datetime,
                           discount : float,
                           spot_prior: float,
                           *, yield_threshold=0.10):
    """Return discounted option quotes with outliers removed.

    Bid/ask prices are divided by ``discount`` (forward prices), then each
    side is filtered: quotes outside a band implied by ``spot_prior`` and
    ``yield_threshold`` are dropped, and only a longest increasing
    subsequence (in strike order) of the mid values is kept, enforcing
    monotonicity of the put-parity values across strikes.
    """
    time_to_mat = act365_time(pricing_date, quote_slice.expiry)
    # Admissible band around the spot prior; yield_threshold caps the
    # implied carry/yield magnitude (NOTE(review): the extra
    # (1.0 - yield_threshold) floor looks like a short-maturity safeguard
    # -- confirm intent).
    ratio_min = min(math.exp(- time_to_mat * yield_threshold), (1.0 - yield_threshold))
    ratio_max = math.exp(time_to_mat * yield_threshold)

    # --- call side ---
    call_sl = quote_slice.call
    call_ks = np.array(call_sl.strikes)
    call_bid = np.array(call_sl.bids) / discount   # un-discounted prices
    call_ask = np.array(call_sl.asks) / discount
    call_mid = 0.5 * (call_ask + call_bid)
    # By put-call parity, call_mid + K behaves like a (synthetic) put plus
    # forward; it must stay above the lower spot band and be increasing in K.
    put_plus_fwd = call_mid + call_ks
    filter_ = put_plus_fwd > spot_prior * ratio_min
    inc_subseq = longest_increasing_subsequence(put_plus_fwd[filter_])
    filt_call_sl = QuoteSlice(tuple(call_ks[filter_][inc_subseq]),
                              tuple(call_bid[filter_][inc_subseq]),
                              tuple(call_ask[filter_][inc_subseq]))

    # --- put side ---
    put_sl = quote_slice.put
    put_ks = np.array(put_sl.strikes)
    put_bid = np.array(put_sl.bids) / discount
    put_ask = np.array(put_sl.asks) / discount
    put_mid = 0.5 * (put_ask + put_bid)
    # Put mid minus strike (synthetic short forward) must stay above the
    # upper spot band's negative; puts themselves must be increasing in K.
    filter_ = put_mid - put_ks > - spot_prior * ratio_max
    inc_subseq = longest_increasing_subsequence(put_mid[filter_])
    filt_put_sl = QuoteSlice(tuple(put_ks[filter_][inc_subseq]),
                             tuple(put_bid[filter_][inc_subseq]),
                             tuple(put_ask[filter_][inc_subseq]))

    # Discount is folded into the prices above, so the returned slice
    # carries a discount of 1.0.
    return OptionQuoteSlice(quote_slice.symbol,
                            quote_slice.expiry,
                            1.0,
                            filt_call_sl,
                            filt_put_sl)
class OptionKernelRegression:
    """Local kernel regression of option prices across strikes.

    Regresses combined call/put quotes on a small local basis around a
    strike ``k`` to extract smoothed call and put premiums at ``k``.
    """

    def __init__(self, opt_slice: QuoteSlice):
        # Quote slice with call and put sides (strikes, bids, asks).
        self.opt_slice = opt_slice
        call_ks = np.array(opt_slice.call.strikes)
        call_bids = np.array(opt_slice.call.bids)
        call_asks = np.array(opt_slice.call.asks)
        put_bids = np.array(opt_slice.put.bids)
        put_asks = np.array(opt_slice.put.asks)
        # Regression target: call mids converted to put-parity values
        # (mid + K) followed by raw put mids, stacked in strike order.
        self.put_target = np.concatenate(
            (0.5 * (call_bids + call_asks) + call_ks,
             0.5 * (put_bids + put_asks)
            ), axis=0)
        # Strikes aligned with put_target (call strikes then put strikes).
        self.ks = np.array(opt_slice.call.strikes + opt_slice.put.strikes)
        # Tiny ridge factor keeping the normal-equation solve well posed.
        self.smoothing = 1.0e-10 #Numerical parameter

    def call_put_premium(self, k: float, kernel_width: float):
        """Return smoothed (call, put) premiums at strike ``k``.

        Solves a locally weighted least-squares fit with Gaussian weights
        of relative width ``kernel_width`` around ``k``.
        """
        call_len = len(self.opt_slice.call.strikes)
        put_len = len(self.opt_slice.put.strikes)
        # Indicator columns separating call rows from put rows: their
        # coefficients absorb the level of each side.
        call_ones = np.array([1.0] * call_len + [0.0] * put_len)
        put_ones = np.array([0.0] * call_len + [1.0] * put_len)
        def convex(z):
            # Symmetric piecewise-cubic convex bump on knots (-1.5, 0, 1.5);
            # captures local curvature of prices in strike.
            a, b, c = (-1.5, 0.0, 1.5)
            res = (max(0, z - a)**3 - max(0, z - b)**3) / (b - a)
            res -= (max(0, z - b)**3 - max(0, z - c)**3) / (c - b)
            res -= (b - a)**2
            res += (max(0, c - z)**3 - max(0, b - z)**3) / (c - b)
            res -= (max(0, b - z)**3 - max(0, a - z)**3) / (b - a)
            res -= (c-b)**2
            return res / 12.0
        # Basis: side levels, linear term in (K - k), and the convex bump
        # evaluated in kernel-scaled moneyness.
        basis = np.column_stack([call_ones,
                                 put_ones,
                                 self.ks - k,
                                 np.vectorize(convex)((self.ks / k - 1.0) / kernel_width)])
        # Gaussian kernel weights in relative strike distance.
        ws = np.exp(-0.5 * ((self.ks / k - 1.0) / kernel_width) **2)
        b_ws = np.column_stack([ws] * 4)
        var = np.matmul((basis * b_ws).T, basis * b_ws)
        # Ridge regularization scaled by the trace; the two level terms are
        # left unregularized so premiums are not shrunk.
        var_regul = self.smoothing * var.trace() / float(var.shape[0]) * np.identity(var.shape[0])
        var_regul[0, 0] = 0.0
        var_regul[1, 1] = 0.0
        var += var_regul
        cov = (basis * b_ws).T.dot(self.put_target * ws)
        coeffs = np.linalg.solve(var, cov)
        # coeffs[0] is the call level in parity form (call + K), so the call
        # premium is coeffs[0] - k; coeffs[1] is the put premium directly.
        return (coeffs[0] - k), coeffs[1]
def fit_forward_curve(quotes: OptionSnapshot,
                      box_spread: float) -> Dict[dt.datetime, float]:
    """Fit one forward price per expiry from option quotes via put-call parity.

    For each (future) expiry, quotes are cleaned, a rough forward is
    estimated with a prior vol, then the forward is re-estimated as the
    median of call-put parity values over a strike grid.

    Args:
        quotes: Snapshot with ref spot, timestamp and per-expiry slices.
        box_spread: Extra continuously-compounded spread applied to each
            slice discount.

    Returns:
        Mapping from expiry to fitted forward price.
    """
    pricing_dt = quotes.time_stamp
    previous_fit_fwd = quotes.ref_spot  # prior for the next slice's cleaning
    forwards = {}
    for quote_sl in quotes.slices:
        if quote_sl.expiry < pricing_dt:
            continue  # skip expired slices
        # Fold the box spread into the slice discount.
        discount = quote_sl.discount * math.exp(-box_spread * act365_time(pricing_dt, quote_sl.expiry))
        quote_sl = prepare_quotes_for_fit(quote_sl,
                                          pricing_dt,
                                          discount,
                                          previous_fit_fwd,
                                          yield_threshold=0.05)
        t = act365_time(pricing_dt, quote_sl.expiry)
        # First pass: rough forward at the spot, using a 15% prior vol to
        # size the regression kernel.
        dev = 0.15 * np.sqrt(t)
        kernel_width = 0.5 * dev
        opt_kernel = OptionKernelRegression(quote_sl)
        c, p = opt_kernel.call_put_premium(quotes.ref_spot, kernel_width)
        raw_fwd = c - p + quotes.ref_spot
        # Second pass: sharpen the kernel with the ATM implied vol and
        # sample parity forwards over a +/- 1 stddev strike grid.
        raw_vol = 0.5 * (bs_implied_volatility(raw_fwd, quotes.ref_spot, p, t, -1.0)
                         + bs_implied_volatility(raw_fwd, quotes.ref_spot, c, t, 1.0))
        dev = raw_vol * np.sqrt(t)
        kernel_width = 0.25 * dev
        ks = raw_fwd * np.exp(dev * np.linspace(-1.0, 1.0, 11))
        estimated_fwds = []
        for k in ks:
            try:
                c, p = opt_kernel.call_put_premium(k, kernel_width)
                estimated_fwds.append(c - p + k)
            except Exception:
                # Best-effort per strike, but no longer a bare except that
                # also swallowed KeyboardInterrupt/SystemExit.
                continue
        if not estimated_fwds:
            # All strikes failed: np.median([]) would return NaN and poison
            # previous_fit_fwd for every later expiry -- skip this slice.
            continue
        forward = np.median(estimated_fwds)
        forwards[quote_sl.expiry] = forward
        previous_fit_fwd = forward
    return forwards
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class AkscanSpiderMiddleware(object):
    """Pass-through spider middleware.

    Every hook keeps Scrapy's default behavior: responses, results and
    start requests flow through unchanged. Methods that are left out would
    be treated the same way by Scrapy.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe it to the
        # spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response unchanged (None => continue processing).
        return None

    def process_spider_output(self, response, result, spider):
        # Forward each item/request produced by the spider as-is.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling; let other middleware deal with the error.
        return None

    def process_start_requests(self, start_requests, spider):
        # Start requests pass through untouched (must yield requests only).
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class AkscanDownloaderMiddleware(object):
    """Pass-through downloader middleware.

    Requests, responses and download exceptions are all forwarded with
    Scrapy's default semantics; nothing is modified or intercepted.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe it to the
        # spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # None => let the rest of the downloader chain handle the request.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # None => continue processing this exception through the chain.
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
|
# encoding: utf-8
from bs4 import BeautifulSoup
import mock
from ckan.lib.helpers import url_for
import pytest
import six
from six.moves.urllib.parse import urlparse
import ckan.model as model
import ckan.model.activity as activity_model
import ckan.plugins as p
import ckan.lib.dictization as dictization
from ckan.logic.validators import object_id_validators, package_id_exists
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
@pytest.fixture
def user_env():
    """Extra-environ dict that authenticates requests as a fresh user."""
    new_user = factories.User()
    return {"REMOTE_USER": six.ensure_str(new_user["name"])}
def _get_location(res):
location = res.headers['location']
return urlparse(location)._replace(scheme='', netloc='').geturl()
def _get_package_new_page(app):
    """Open the new-dataset page as a fresh user; return (env, response)."""
    creator = factories.User()
    env = {"REMOTE_USER": six.ensure_str(creator["name"])}
    page = app.get(url=url_for("dataset.new"), extra_environ=env)
    return env, page
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestPackageNew(object):
@pytest.mark.ckan_config("ckan.auth.create_unowned_dataset", "false")
def test_needs_organization_but_no_organizations_has_button(self, app):
""" Scenario: The settings say every dataset needs an organization
but there are no organizations. If the user is allowed to create an
organization they should be prompted to do so when they try to create
a new dataset"""
sysadmin = factories.Sysadmin()
env = {"REMOTE_USER": six.ensure_str(sysadmin["name"])}
response = app.get(url=url_for("dataset.new"), extra_environ=env)
assert url_for(controller="organization", action="new") in response
@pytest.mark.ckan_config("ckan.auth.create_unowned_dataset", "false")
@pytest.mark.ckan_config("ckan.auth.user_create_organizations", "false")
@mock.patch("ckan.logic.auth.create.package_create")
def test_needs_organization_but_no_organizations_no_button(
self, mock_p_create, app
):
""" Scenario: The settings say every dataset needs an organization
but there are no organizations. If the user is not allowed to create an
organization they should be told to ask the admin but no link should be
presented. Note: This cannot happen with the default ckan and requires
a plugin to overwrite the package_create behavior"""
mock_p_create.return_value = {"success": True}
user = factories.User()
env = {"REMOTE_USER": six.ensure_str(user["name"])}
response = app.get(url=url_for("dataset.new"), extra_environ=env)
assert url_for(controller="organization", action="new") not in response
assert "Ask a system administrator" in response
def test_name_required(self, app, user_env):
url = url_for("dataset.new")
response = app.post(url, environ_overrides=user_env, data={"save": ""})
assert "Name: Missing value" in response
def test_first_page_creates_draft_package(self, app, user_env):
url = url_for("dataset.new")
response = app.post(url, environ_overrides=user_env, data={
"name": "first-page-creates-draft",
"save": "",
"_ckan_phase": 1
}, follow_redirects=False)
pkg = model.Package.by_name(u"first-page-creates-draft")
assert pkg.state == "draft"
def test_resource_required(self, app, user_env):
url = url_for("dataset.new")
name = "one-resource-required"
response = app.post(url, environ_overrides=user_env, data={
"name": name,
"save": "",
"_ckan_phase": 1
}, follow_redirects=False)
location = _get_location(response)
response = app.post(location, environ_overrides=user_env, data={
"id": "",
"url": "",
"save": "go-metadata",
})
assert "You must add at least one data resource" in response
def test_complete_package_with_one_resource(self, app, user_env):
url = url_for("dataset.new")
response = app.post(url, environ_overrides=user_env, data={
"name": "complete-package-with-one-resource",
"save": "",
"_ckan_phase": 1
}, follow_redirects=False)
location = _get_location(response)
response = app.post(location, environ_overrides=user_env, data={
"id": "",
"url": "http://example.com/resource",
"save": "go-metadata"
})
pkg = model.Package.by_name(u"complete-package-with-one-resource")
assert pkg.resources[0].url == u"http://example.com/resource"
assert pkg.state == "active"
def test_complete_package_with_two_resources(self, app, user_env):
url = url_for("dataset.new")
response = app.post(url, environ_overrides=user_env, data={
"name": "complete-package-with-two-resources",
"save": "",
"_ckan_phase": 1
}, follow_redirects=False)
location = _get_location(response)
app.post(location, environ_overrides=user_env, data={
"id": "",
"url": "http://example.com/resource0",
"save": "again"
})
app.post(location, environ_overrides=user_env, data={
"id": "",
"url": "http://example.com/resource1",
"save": "go-metadata"
})
pkg = model.Package.by_name(u"complete-package-with-two-resources")
assert pkg.resources[0].url == u"http://example.com/resource0"
assert pkg.resources[1].url == u"http://example.com/resource1"
assert pkg.state == "active"
# resource upload is tested in TestExampleIUploaderPlugin
def test_previous_button_works(self, app, user_env):
url = url_for("dataset.new")
response = app.post(url, environ_overrides=user_env, data={
"name": "previous-button-works",
"save": "",
"_ckan_phase": 1
}, follow_redirects=False)
location = _get_location(response)
response = app.post(location, environ_overrides=user_env, data={
"id": "",
"save": "go-dataset"
}, follow_redirects=False)
assert '/dataset/edit/' in response.headers['location']
def test_previous_button_populates_form(self, app, user_env):
url = url_for("dataset.new")
response = app.post(url, environ_overrides=user_env, data={
"name": "previous-button-populates-form",
"save": "",
"_ckan_phase": 1
}, follow_redirects=False)
location = _get_location(response)
response = app.post(location, environ_overrides=user_env, data={
"id": "",
"save": "go-dataset"
})
assert 'name="title"' in response
assert 'value="previous-button-populates-form"'
def test_previous_next_maintains_draft_state(self, app, user_env):
url = url_for("dataset.new")
response = app.post(url, environ_overrides=user_env, data={
"name": "previous-next-maintains-draft",
"save": "",
"_ckan_phase": 1
}, follow_redirects=False)
location = _get_location(response)
response = app.post(location, environ_overrides=user_env, data={
"id": "",
"save": "go-dataset"
})
pkg = model.Package.by_name(u"previous-next-maintains-draft")
assert pkg.state == "draft"
def test_dataset_edit_org_dropdown_visible_to_normal_user_with_orgs_available(
    self, app
):
    """
    The 'Organization' dropdown is available on the dataset create/edit
    page to normal (non-sysadmin) users who have organizations available
    to them.
    """
    user = factories.User()
    # user is admin of org.
    org = factories.Organization(
        name="my-org", users=[{"name": user["id"], "capacity": "admin"}]
    )
    env = {"REMOTE_USER": six.ensure_str(user["name"])}
    url = url_for("dataset.new")
    # Phase 1 of the two-step dataset form: submit name/org, then follow
    # the redirect location to the resource form.
    response = app.post(url, environ_overrides=env, data={
        "name": "my-dataset",
        "owner_org": org["id"],
        "save": "",
        "_ckan_phase": 1
    }, follow_redirects=False)
    location = _get_location(response)
    # Phase 2: add a resource and finish with "go-metadata", which
    # activates the package.
    response = app.post(location, environ_overrides=env, data={
        "id": "",
        "url": "http://example.com/resource",
        "save": "go-metadata"
    })
    pkg = model.Package.by_name(u"my-dataset")
    assert pkg.state == "active"
    # edit package page response
    url = url_for("dataset.edit", id=pkg.id)
    pkg_edit_response = app.get(url=url, extra_environ=env)
    # A field with the correct id is in the response
    # Collect the <option> values of the owner_org <select> inside the
    # dataset edit form; the org must be offered to this user.
    owner_org_options = [
        option['value'] for option
        in BeautifulSoup(pkg_edit_response.data).body.select(
            "form#dataset-edit"
        )[0].select('[name=owner_org]')[0].select('option')
    ]
    assert org["id"] in owner_org_options
def test_dataset_edit_org_dropdown_normal_user_can_remove_org(self, app, user_env):
    """
    A normal user (non-sysadmin) can remove an organization from a
    dataset they have permissions on.
    """
    member = factories.User()
    # The user administers the organization.
    org = factories.Organization(
        name="my-org", users=[{"name": member["id"], "capacity": "admin"}]
    )
    environ = {"REMOTE_USER": six.ensure_str(member["name"])}
    # Phase 1: create the dataset attached to the organization.
    create_resp = app.post(
        url_for("dataset.new"),
        environ_overrides=environ,
        data={
            "name": "my-dataset",
            "owner_org": org["id"],
            "save": "",
            "_ckan_phase": 1,
        },
        follow_redirects=False,
    )
    # Phase 2: add a resource and complete the flow.
    app.post(
        _get_location(create_resp),
        environ_overrides=environ,
        data={
            "id": "",
            "url": "http://example.com/resource",
            "save": "go-metadata",
        },
    )
    pkg = model.Package.by_name(u"my-dataset")
    assert pkg.state == "active"
    assert pkg.owner_org == org["id"]
    assert pkg.owner_org is not None
    # Submit the edit form with an empty owner_org to detach the org.
    app.post(
        url=url_for("dataset.edit", id=pkg.id),
        extra_environ=environ,
        data={"owner_org": ""},
        follow_redirects=False,
    )
    edited_pkg = model.Package.by_name(u"my-dataset")
    assert edited_pkg.owner_org is None
    assert edited_pkg.owner_org != org["id"]
def test_dataset_edit_org_dropdown_not_visible_to_normal_user_with_no_orgs_available(
    self, app, user_env
):
    """
    The 'Organization' dropdown is not available on the dataset
    create/edit page to normal (non-sysadmin) users who have no
    organizations available to them.
    """
    outsider = factories.User()
    # An org exists, but the user is not a member of it.
    org = factories.Organization(name="my-org")
    environ = {"REMOTE_USER": six.ensure_str(outsider["name"])}
    # Phase 1: create a dataset without an organization.
    first = app.post(
        url_for("dataset.new"),
        environ_overrides=environ,
        data={"name": "my-dataset", "save": "", "_ckan_phase": 1},
        follow_redirects=False,
    )
    # Phase 2: add a resource and complete the flow.
    app.post(
        _get_location(first),
        environ_overrides=environ,
        data={
            "id": "",
            "url": "http://example.com/resource",
            "save": "go-metadata",
        },
    )
    pkg = model.Package.by_name(u"my-dataset")
    assert pkg.state == "active"
    # The org's id must not appear anywhere in the edit-page markup.
    edit_url = url_for(
        "dataset.edit", id=model.Package.by_name(u"my-dataset").id
    )
    edit_resp = app.get(url=edit_url, extra_environ=environ)
    assert 'value="{0}"'.format(org["id"]) not in edit_resp
def test_dataset_edit_org_dropdown_visible_to_sysadmin_with_no_orgs_available(
    self, app, user_env
):
    """
    The 'Organization' dropdown is available to sysadmin users regardless
    of whether they personally have an organization they administrate.
    """
    member = factories.User()
    sysadmin = factories.Sysadmin()
    # The plain user — not the sysadmin — administers the org.
    org = factories.Organization(
        name="my-org", users=[{"name": member["id"], "capacity": "admin"}]
    )
    # All requests below are made as the sysadmin.
    environ = {"REMOTE_USER": six.ensure_str(sysadmin["name"])}
    new_url = url_for("dataset.new")
    create_page = app.get(url=new_url, extra_environ=environ)
    # organization dropdown available in create page.
    assert 'id="field-organizations"' in create_page
    # Phase 1: create the dataset attached to the organization.
    first = app.post(
        new_url,
        environ_overrides=environ,
        data={
            "name": "my-dataset",
            "owner_org": org["id"],
            "save": "",
            "_ckan_phase": 1,
        },
        follow_redirects=False,
    )
    # Phase 2: add a resource and complete the flow.
    app.post(
        _get_location(first),
        environ_overrides=environ,
        data={
            "id": "",
            "url": "http://example.com/resource",
            "save": "go-metadata",
        },
    )
    pkg = model.Package.by_name(u"my-dataset")
    assert pkg.state == "active"
    # The edit page shows the dropdown and offers the org's id as an
    # option value.
    edit_resp = app.get(
        url=url_for("dataset.edit", id=pkg.id), extra_environ=environ
    )
    assert 'id="field-organizations"' in edit_resp
    assert 'value="{0}"'.format(org["id"]) in edit_resp
def test_unauthed_user_creating_dataset(self, app):
    """An anonymous visitor gets 403 when posting to dataset.new."""
    # provide REMOTE_ADDR so the request is identified as a remote
    # (anonymous) user; see ckan.views.identify_user() for details
    app.post(
        url=url_for("dataset.new"),
        extra_environ={"REMOTE_ADDR": "127.0.0.1"},
        status=403,
    )
def test_form_without_initial_data(self, app):
    """A bare dataset.new form renders with empty title/name/notes."""
    creator = factories.User()
    environ = {"REMOTE_USER": six.ensure_str(creator["name"])}
    resp = app.get(url=url_for("dataset.new"), extra_environ=environ)
    form = BeautifulSoup(resp.body).select_one('#dataset-edit')
    assert not form.select_one('[name=title]')['value']
    assert not form.select_one('[name=name]')['value']
    assert not form.select_one('[name=notes]').text
def test_form_with_initial_data(self, app):
    """Query-string values pre-populate the dataset.new form fields."""
    creator = factories.User()
    environ = {"REMOTE_USER": six.ensure_str(creator["name"])}
    resp = app.get(
        url=url_for("dataset.new", name="name", notes="notes",
                    title="title"),
        extra_environ=environ,
    )
    form = BeautifulSoup(resp.body).select_one('#dataset-edit')
    assert form.select_one('[name=title]')['value'] == "title"
    assert form.select_one('[name=name]')['value'] == "name"
    assert form.select_one('[name=notes]').text == "notes"
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestPackageEdit(object):
    """Permission and validation tests for the dataset edit view."""

    def test_organization_admin_can_edit(self, app):
        """An org admin can update a dataset owned by the organization."""
        user = factories.User()
        organization = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=organization["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for("dataset.edit", id=dataset["name"]), extra_environ=env,
            data={
                "notes": u"edited description",
                "save": ""
            }, follow_redirects=False
        )
        # Verify the change was persisted, not merely accepted by the view.
        result = helpers.call_action("package_show", id=dataset["id"])
        assert u"edited description" == result["notes"]

    def test_organization_editor_can_edit(self, app):
        """An org editor can update a dataset owned by the organization."""
        user = factories.User()
        organization = factories.Organization(
            users=[{"name": user["id"], "capacity": "editor"}]
        )
        dataset = factories.Dataset(owner_org=organization["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for("dataset.edit", id=dataset["name"]), extra_environ=env,
            data={
                "notes": u"edited description",
                "save": ""
            }, follow_redirects=False
        )
        result = helpers.call_action("package_show", id=dataset["id"])
        assert u"edited description" == result["notes"]

    def test_organization_member_cannot_edit(self, app):
        """A plain org member (capacity 'member') is denied the edit page."""
        user = factories.User()
        organization = factories.Organization(
            users=[{"name": user["id"], "capacity": "member"}]
        )
        dataset = factories.Dataset(owner_org=organization["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.get(
            url_for("dataset.edit", id=dataset["name"]),
            extra_environ=env,
            status=403,
        )

    def test_user_not_in_organization_cannot_edit(self, app):
        """A user outside the owning org gets 403 on both GET and POST."""
        user = factories.User()
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.get(
            url_for("dataset.edit", id=dataset["name"]),
            extra_environ=env,
            status=403,
        )
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for("dataset.edit", id=dataset["name"]),
            data={"notes": "edited description"},
            extra_environ=env,
            status=403,
        )

    def test_anonymous_user_cannot_edit(self, app):
        """An anonymous request gets 403 on both GET and POST."""
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"])
        response = app.get(
            url_for("dataset.edit", id=dataset["name"]), status=403
        )
        response = app.post(
            url_for("dataset.edit", id=dataset["name"]),
            data={"notes": "edited description"},
            status=403,
        )

    def test_validation_errors_for_dataset_name_appear(self, app):
        """fill out a bad dataset set name and make sure errors appear"""
        user = factories.User()
        organization = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=organization["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for("dataset.edit", id=dataset["name"]), extra_environ=env,
            data={
                "name": "this is not a valid name",
                "save": ""
            }
        )
        # Both the generic banner and the field-specific message render.
        assert "The form contains invalid entries" in response.body
        assert (
            "Name: Must be purely lowercase alphanumeric (ascii) "
            "characters and these symbols: -_" in response.body
        )

    def test_edit_a_dataset_that_does_not_exist_404s(self, app):
        """Editing an unknown dataset id returns 404."""
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.get(
            url_for("dataset.edit", id="does-not-exist"),
            extra_environ=env,
        )
        assert 404 == response.status_code
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestPackageRead(object):
    """Tests for the dataset read view: public/private access and
    rendering historical versions via an activity id."""

    def test_read(self, app):
        """A public dataset's title and notes render on its page."""
        dataset = factories.Dataset()
        response = app.get(url_for("dataset.read", id=dataset["name"]))
        assert helpers.body_contains(response, "Test Dataset")
        assert helpers.body_contains(response, "Just another test dataset")

    def test_organization_members_can_read_private_datasets(self, app):
        """Org members of every capacity — and a sysadmin who is not a
        member at all — can read a private dataset of the org."""
        members = {
            "member": factories.User(),
            "editor": factories.User(),
            "admin": factories.User(),
            "sysadmin": factories.Sysadmin(),
        }
        # Note: the sysadmin is deliberately NOT added to the org.
        organization = factories.Organization(
            users=[
                {"name": members["member"]["id"], "capacity": "member"},
                {"name": members["editor"]["id"], "capacity": "editor"},
                {"name": members["admin"]["id"], "capacity": "admin"},
            ]
        )
        dataset = factories.Dataset(owner_org=organization["id"], private=True)
        for user, user_dict in members.items():
            response = app.get(
                url_for("dataset.read", id=dataset["name"]),
                extra_environ={
                    "REMOTE_USER": six.ensure_str(user_dict["name"])
                },
            )
            assert "Test Dataset" in response.body
            assert "Just another test dataset" in response.body

    def test_anonymous_users_cannot_read_private_datasets(self, app):
        """Anonymous access to a private dataset yields 404 (not 403 —
        presumably so its existence is not revealed)."""
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"], private=True)
        response = app.get(
            url_for("dataset.read", id=dataset["name"]), status=404
        )
        assert 404 == response.status_code

    def test_user_not_in_organization_cannot_read_private_datasets(self, app):
        """A logged-in non-member also gets 404 for a private dataset."""
        user = factories.User()
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"], private=True)
        response = app.get(
            url_for("dataset.read", id=dataset["name"]),
            extra_environ={"REMOTE_USER": six.ensure_str(user["name"])},
            status=404,
        )
        assert 404 == response.status_code

    def test_read_rdf(self, app):
        """ The RDF outputs now live in ckanext-dcat"""
        dataset1 = factories.Dataset()
        offset = url_for("dataset.read", id=dataset1["name"]) + ".rdf"
        app.get(offset, status=404)

    def test_read_n3(self, app):
        """ The RDF outputs now live in ckanext-dcat"""
        dataset1 = factories.Dataset()
        offset = url_for("dataset.read", id=dataset1["name"]) + ".n3"
        app.get(offset, status=404)

    def test_read_dataset_as_it_used_to_be(self, app):
        """Passing activity_id renders the dataset as of that activity —
        here, before the title change."""
        dataset = factories.Dataset(title="Original title")
        # Grab the Activity created automatically for the new dataset.
        activity = (
            model.Session.query(model.Activity)
            .filter_by(object_id=dataset["id"])
            .one()
        )
        dataset["title"] = "Changed title"
        helpers.call_action("package_update", **dataset)
        # View the historical version as a sysadmin.
        sysadmin = factories.Sysadmin()
        env = {"REMOTE_USER": six.ensure_str(sysadmin["name"])}
        response = app.get(
            url_for(
                "dataset.read", id=dataset["name"], activity_id=activity.id
            ),
            extra_environ=env,
        )
        assert helpers.body_contains(response, "Original title")

    def test_read_dataset_as_it_used_to_be_but_is_unmigrated(self, app):
        # Renders the dataset using the activity detail, when that Activity was
        # created with an earlier version of CKAN, and it has not been migrated
        # (with migrate_package_activity.py), which should give a 404
        user = factories.User()
        dataset = factories.Dataset(user=user)
        # delete the modern Activity object that's been automatically created
        modern_activity = (
            model.Session.query(model.Activity)
            .filter_by(object_id=dataset["id"])
            .one()
        )
        modern_activity.delete()
        # Create an Activity object as it was in earlier versions of CKAN.
        # This code is based on:
        # https://github.com/ckan/ckan/blob/b348bf2fe68db6704ea0a3e22d533ded3d8d4344/ckan/model/package.py#L508
        activity_type = "changed"
        dataset_table_dict = dictization.table_dictize(
            model.Package.get(dataset["id"]), context={"model": model}
        )
        activity = model.Activity(
            user_id=user["id"],
            object_id=dataset["id"],
            activity_type="%s package" % activity_type,
            data={
                # "actor": a legacy activity had no "actor"
                # "package": a legacy activity had just the package table,
                # rather than the result of package_show
                "package": dataset_table_dict
            },
        )
        model.Session.add(activity)
        # a legacy activity had a ActivityDetail associated with the Activity
        # This code is based on:
        # https://github.com/ckan/ckan/blob/b348bf2fe68db6704ea0a3e22d533ded3d8d4344/ckan/model/package.py#L542
        activity_detail = model.ActivityDetail(
            activity_id=activity.id,
            object_id=dataset["id"],
            object_type=u"Package",
            activity_type=activity_type,
            data={u"package": dataset_table_dict},
        )
        model.Session.add(activity_detail)
        model.Session.flush()
        # Even a sysadmin gets a 404 for the unmigrated legacy activity.
        sysadmin = factories.Sysadmin()
        env = {"REMOTE_USER": six.ensure_str(sysadmin["name"])}
        response = app.get(
            url_for(
                "dataset.read", id=dataset["name"], activity_id=activity.id
            ),
            extra_environ=env,
            status=404,
        )
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestPackageDelete(object):
    """Permission tests for dataset deletion and its confirmation page."""

    def test_owner_delete(self, app):
        """An admin of the owning org can delete the dataset; deletion is
        soft — the package state becomes 'deleted'."""
        user = factories.User()
        owner_org = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for("dataset.delete", id=dataset["name"]), extra_environ=env
        )
        assert 200 == response.status_code
        deleted = helpers.call_action("package_show", id=dataset["id"])
        assert "deleted" == deleted["state"]

    def test_delete_on_non_existing_dataset(self, app):
        """Deleting an unknown dataset id returns 404."""
        response = app.post(
            url_for("dataset.delete", id="schrodingersdatset"),
        )
        assert 404 == response.status_code

    def test_sysadmin_can_delete_any_dataset(self, app):
        """A sysadmin can delete a dataset of an org they don't belong to."""
        owner_org = factories.Organization()
        dataset = factories.Dataset(owner_org=owner_org["id"])
        user = factories.Sysadmin()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for("dataset.delete", id=dataset["name"]), extra_environ=env
        )
        assert 200 == response.status_code
        deleted = helpers.call_action("package_show", id=dataset["id"])
        assert "deleted" == deleted["state"]

    def test_anon_user_cannot_delete_owned_dataset(self, app):
        """Anonymous deletion is rejected and the dataset stays active."""
        user = factories.User()
        owner_org = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        response = app.post(
            url_for("dataset.delete", id=dataset["name"]), status=403
        )
        assert helpers.body_contains(response, "Unauthorized to delete package")
        deleted = helpers.call_action("package_show", id=dataset["id"])
        assert "active" == deleted["state"]

    def test_logged_in_user_cannot_delete_owned_dataset(self, app):
        """A logged-in user outside the owning org cannot delete."""
        owner = factories.User()
        owner_org = factories.Organization(
            users=[{"name": owner["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for("dataset.delete", id=dataset["name"]),
            extra_environ=env,
        )
        assert 403 == response.status_code
        assert helpers.body_contains(response, "Unauthorized to delete package")

    def test_confirm_cancel_delete(self, app):
        """Test confirmation of deleting datasets

        When package_delete is made as a GET request, it should return a
        'do you want to delete this dataset?' confirmation page.
        Posting "cancel" afterwards must not delete anything."""
        user = factories.User()
        owner_org = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.get(
            url_for("dataset.delete", id=dataset["name"]), extra_environ=env
        )
        assert 200 == response.status_code
        message = "Are you sure you want to delete dataset - {name}?"
        assert helpers.body_contains(response, message.format(name=dataset["title"]))
        # Pressing the cancel button returns 200 without deleting.
        response = app.post(
            url_for("dataset.delete", id=dataset["name"]), extra_environ=env,
            data={"cancel": ""}
        )
        assert 200 == response.status_code
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestResourceNew(object):
    """Permission tests for resource creation and the resource listing
    (manage) page."""

    def test_manage_dataset_resource_listing_page(self, app):
        """The org owner sees the dataset's resources on the manage page."""
        user = factories.User()
        organization = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=organization["id"])
        resource = factories.Resource(package_id=dataset["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.get(
            url_for("dataset.resources", id=dataset["name"]), extra_environ=env
        )
        assert resource["name"] in response
        assert resource["description"] in response
        assert resource["format"] in response

    def test_unauth_user_cannot_view_manage_dataset_resource_listing_page(
        self, app
    ):
        """A user with no rights on the dataset is denied the manage page.

        BUG FIX: this test was a copy-paste of the positive test above —
        it requested the page as the org owner and asserted the content
        was visible, so it never exercised the unauthorized path.
        """
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"])
        factories.Resource(package_id=dataset["id"])
        # A user who is not a member of the owning organization.
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        app.get(
            url_for("dataset.resources", id=dataset["name"]),
            extra_environ=env,
            status=403,
        )

    def test_404_on_manage_dataset_resource_listing_page_that_does_not_exist(
        self, app
    ):
        """The manage page for an unknown dataset returns 404."""
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.get(
            url_for("dataset.resources", id="does-not-exist"),
            extra_environ=env,
        )
        assert 404 == response.status_code

    def test_add_new_resource_with_link_and_download(self, app):
        """A link resource can be added and its download URL redirects."""
        user = factories.User()
        dataset = factories.Dataset()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for(
                "{}_resource.new".format(dataset["type"]), id=dataset["id"]
            ),
            extra_environ=env,
            data={
                "id": "",
                "url": "http://test.com/",
                "save": "go-dataset-complete"
            }
        )
        result = helpers.call_action("package_show", id=dataset["id"])
        # Downloading a link resource redirects (302) to the external URL.
        response = app.get(
            url_for(
                "{}_resource.download".format(dataset["type"]),
                id=dataset["id"],
                resource_id=result["resources"][0]["id"],
            ),
            extra_environ=env,
            follow_redirects=False
        )
        assert 302 == response.status_code

    def test_editor_can_add_new_resource(self, app):
        """An org editor can add a resource to the org's dataset."""
        user = factories.User()
        organization = factories.Organization(
            users=[{"name": user["id"], "capacity": "editor"}]
        )
        dataset = factories.Dataset(owner_org=organization["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for(
                "{}_resource.new".format(dataset["type"]), id=dataset["id"]
            ),
            extra_environ=env,
            data={
                "id": "",
                "name": "test resource",
                "url": "http://test.com/",
                "save": "go-dataset-complete"
            }
        )
        result = helpers.call_action("package_show", id=dataset["id"])
        assert 1 == len(result["resources"])
        assert u"test resource" == result["resources"][0]["name"]

    def test_admin_can_add_new_resource(self, app):
        """An org admin can add a resource to the org's dataset."""
        user = factories.User()
        organization = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=organization["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for(
                "{}_resource.new".format(dataset["type"]), id=dataset["id"]
            ),
            extra_environ=env,
            data={
                "id": "",
                "name": "test resource",
                "url": "http://test.com/",
                "save": "go-dataset-complete"
            }
        )
        result = helpers.call_action("package_show", id=dataset["id"])
        assert 1 == len(result["resources"])
        assert u"test resource" == result["resources"][0]["name"]

    def test_member_cannot_add_new_resource(self, app):
        """A plain org member gets 403 on both GET and POST."""
        user = factories.User()
        organization = factories.Organization(
            users=[{"name": user["id"], "capacity": "member"}]
        )
        dataset = factories.Dataset(owner_org=organization["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.get(
            url_for(
                "{}_resource.new".format(dataset["type"]), id=dataset["id"]
            ),
            extra_environ=env,
            status=403,
        )
        response = app.post(
            url_for(
                "{}_resource.new".format(dataset["type"]), id=dataset["id"]
            ),
            data={"name": "test", "url": "test", "save": "save", "id": ""},
            extra_environ=env,
            status=403,
        )

    def test_non_organization_users_cannot_add_new_resource(self, app):
        """on an owned dataset"""
        user = factories.User()
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.get(
            url_for(
                "{}_resource.new".format(dataset["type"]), id=dataset["id"]
            ),
            extra_environ=env,
            status=403,
        )
        response = app.post(
            url_for(
                "{}_resource.new".format(dataset["type"]), id=dataset["id"]
            ),
            data={"name": "test", "url": "test", "save": "save", "id": ""},
            extra_environ=env,
            status=403,
        )

    def test_anonymous_users_cannot_add_new_resource(self, app):
        """Anonymous requests get 403 on both GET and POST."""
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"])
        response = app.get(
            url_for(
                "{}_resource.new".format(dataset["type"]), id=dataset["id"]
            ), status=403
        )
        response = app.post(
            url_for(
                "{}_resource.new".format(dataset["type"]), id=dataset["id"]
            ),
            data={"name": "test", "url": "test", "save": "save", "id": ""},
            status=403,
        )

    def test_anonymous_users_cannot_edit_resource(self, app):
        """Anonymous requests get 403 on the resource edit view."""
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"])
        resource = factories.Resource(package_id=dataset["id"])
        with app.flask_app.test_request_context():
            response = app.get(
                url_for(
                    "{}_resource.edit".format(dataset["type"]),
                    id=dataset["id"],
                    resource_id=resource["id"],
                ),
                status=403,
            )
            response = app.post(
                url_for(
                    "{}_resource.edit".format(dataset["type"]),
                    id=dataset["id"],
                    resource_id=resource["id"],
                ),
                data={"name": "test", "url": "test", "save": "save", "id": ""},
                status=403,
            )
@pytest.mark.ckan_config("ckan.plugins", "image_view")
@pytest.mark.usefixtures("clean_db", "with_plugins", "with_request_context")
class TestResourceView(object):
    """CRUD and rendering tests for resource views, using the image_view
    plugin enabled via the ckan_config marker."""

    def test_resource_view_create(self, app):
        """An org admin can create a view via resource.edit_view."""
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        owner_org = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        resource = factories.Resource(package_id=dataset["id"])
        # No view_id -> the edit_view endpoint creates a new view.
        url = url_for(
            "resource.edit_view",
            id=resource["package_id"],
            resource_id=resource["id"],
            view_type="image_view",
        )
        response = app.post(
            url, data={"title": "Test Image View"}, extra_environ=env
        )
        assert helpers.body_contains(response, "Test Image View")

    def test_resource_view_edit(self, app):
        """An org admin can retitle an existing resource view."""
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        owner_org = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        resource = factories.Resource(package_id=dataset["id"])
        resource_view = factories.ResourceView(resource_id=resource["id"])
        url = url_for(
            "resource.edit_view",
            id=resource_view["package_id"],
            resource_id=resource_view["resource_id"],
            view_id=resource_view["id"],
        )
        response = app.post(
            url, data={"title": "Updated RV Title"}, extra_environ=env
        )
        assert helpers.body_contains(response, "Updated RV Title")

    def test_resource_view_delete(self, app):
        """Posting 'delete' removes the view; the page then reports that
        the resource has no views."""
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        owner_org = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        resource = factories.Resource(package_id=dataset["id"])
        resource_view = factories.ResourceView(resource_id=resource["id"])
        url = url_for(
            "resource.edit_view",
            id=resource_view["package_id"],
            resource_id=resource_view["resource_id"],
            view_id=resource_view["id"],
        )
        response = app.post(
            url, data={"delete": "Delete"}, extra_environ=env
        )
        assert helpers.body_contains(response, "This resource has no views")

    def test_existent_resource_view_page_returns_ok_code(self, app):
        """Reading an existing view returns 200."""
        resource_view = factories.ResourceView()
        url = url_for(
            "resource.read",
            id=resource_view["package_id"],
            resource_id=resource_view["resource_id"],
            view_id=resource_view["id"],
        )
        app.get(url, status=200)

    def test_inexistent_resource_view_page_returns_not_found_code(self, app):
        """Reading an unknown view id returns 404."""
        resource_view = factories.ResourceView()
        url = url_for(
            "resource.read",
            id=resource_view["package_id"],
            resource_id=resource_view["resource_id"],
            view_id="inexistent-view-id",
        )
        app.get(url, status=404)

    def test_resource_view_description_is_rendered_as_markdown(self, app):
        """A view description written in Markdown renders as HTML."""
        resource_view = factories.ResourceView(description="Some **Markdown**")
        url = url_for(
            "resource.read",
            id=resource_view["package_id"],
            resource_id=resource_view["resource_id"],
            view_id=resource_view["id"],
        )
        response = app.get(url)
        assert helpers.body_contains(response, "Some <strong>Markdown</strong>")
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestResourceRead(object):
    """Access tests for the resource read view, mirroring the dataset
    read permissions."""

    def test_existing_resource_with_not_associated_dataset(self, app):
        """Requesting a resource under a dataset it doesn't belong to
        returns 404."""
        dataset = factories.Dataset()
        resource = factories.Resource()
        url = url_for(
            "{}_resource.read".format(dataset["type"]),
            id=dataset["id"], resource_id=resource["id"]
        )
        app.get(url, status=404)

    def test_resource_read_logged_in_user(self, app):
        """
        A logged-in user can view resource page.
        """
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        dataset = factories.Dataset()
        resource = factories.Resource(package_id=dataset["id"])
        url = url_for(
            "{}_resource.read".format(dataset["type"]),
            id=dataset["id"], resource_id=resource["id"]
        )
        app.get(url, status=200, extra_environ=env)

    def test_resource_read_anon_user(self, app):
        """
        An anon user can view resource page.
        """
        dataset = factories.Dataset()
        resource = factories.Resource(package_id=dataset["id"])
        url = url_for(
            "{}_resource.read".format(dataset["type"]),
            id=dataset["id"], resource_id=resource["id"]
        )
        app.get(url, status=200)

    def test_resource_read_sysadmin(self, app):
        """
        A sysadmin can view resource page.
        """
        sysadmin = factories.Sysadmin()
        env = {"REMOTE_USER": six.ensure_str(sysadmin["name"])}
        dataset = factories.Dataset()
        resource = factories.Resource(package_id=dataset["id"])
        url = url_for(
            "{}_resource.read".format(dataset["type"]),
            id=dataset["id"], resource_id=resource["id"]
        )
        app.get(url, status=200, extra_environ=env)

    def test_user_not_in_organization_cannot_read_private_dataset(self, app):
        """A non-member gets 404 for a resource of a private dataset."""
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"], private=True)
        resource = factories.Resource(package_id=dataset["id"])
        url = url_for(
            "{}_resource.read".format(dataset["type"]),
            id=dataset["id"], resource_id=resource["id"]
        )
        response = app.get(url, status=404, extra_environ=env)

    def test_organization_members_can_read_resources_in_private_datasets(
        self, app
    ):
        """Org members of every capacity — and a sysadmin who is not a
        member — can read resources of a private dataset."""
        members = {
            "member": factories.User(),
            "editor": factories.User(),
            "admin": factories.User(),
            "sysadmin": factories.Sysadmin(),
        }
        # Note: the sysadmin is deliberately NOT added to the org.
        organization = factories.Organization(
            users=[
                {"name": members["member"]["id"], "capacity": "member"},
                {"name": members["editor"]["id"], "capacity": "editor"},
                {"name": members["admin"]["id"], "capacity": "admin"},
            ]
        )
        dataset = factories.Dataset(owner_org=organization["id"], private=True)
        resource = factories.Resource(package_id=dataset["id"])
        for user, user_dict in members.items():
            response = app.get(
                url_for(
                    "{}_resource.read".format(dataset["type"]),
                    id=dataset["name"],
                    resource_id=resource["id"],
                ),
                extra_environ={
                    "REMOTE_USER": six.ensure_str(user_dict["name"])
                },
            )
            assert "Just another test resource" in response.body

    def test_anonymous_users_cannot_read_private_datasets(self, app):
        """Anonymous access to a private dataset's page yields 404."""
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"], private=True)
        response = app.get(
            url_for("dataset.read", id=dataset["name"]), status=404
        )
        assert 404 == response.status_code
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestResourceDelete(object):
    """Permission tests for resource deletion and its confirmation page."""

    def test_dataset_owners_can_delete_resources(self, app):
        """An admin of the owning org can delete a resource; the resource
        is then gone (resource_show raises ObjectNotFound)."""
        user = factories.User()
        owner_org = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        resource = factories.Resource(package_id=dataset["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for(
                "{}_resource.delete".format(dataset["type"]),
                id=dataset["name"],
                resource_id=resource["id"],
            ),
            extra_environ=env,
        )
        assert 200 == response.status_code
        assert helpers.body_contains(response, "This dataset has no data")
        with pytest.raises(p.toolkit.ObjectNotFound):
            helpers.call_action("resource_show", id=resource["id"])

    def test_deleting_non_existing_resource_404s(self, app):
        """Deleting an unknown resource id returns 404."""
        user = factories.User()
        owner_org = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for(
                "{}_resource.delete".format(dataset["type"]),
                id=dataset["name"],
                resource_id="doesnotexist",
            ),
            extra_environ=env,
        )
        assert 404 == response.status_code

    def test_anon_users_cannot_delete_owned_resources(self, app):
        """Anonymous deletion attempts are rejected with 403."""
        user = factories.User()
        owner_org = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        resource = factories.Resource(package_id=dataset["id"])
        response = app.post(
            url_for(
                "{}_resource.delete".format(dataset["type"]),
                id=dataset["name"],
                resource_id=resource["id"],
            ),
            status=403,
        )
        assert helpers.body_contains(response, "Unauthorized to delete package")

    def test_logged_in_users_cannot_delete_resources_they_do_not_own(
        self, app
    ):
        """A logged-in user outside the owning org cannot delete."""
        # setup our dataset
        owner = factories.User()
        owner_org = factories.Organization(
            users=[{"name": owner["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        resource = factories.Resource(package_id=dataset["id"])
        # access as another user
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.post(
            url_for(
                "{}_resource.delete".format(dataset["type"]),
                id=dataset["name"],
                resource_id=resource["id"],
            ),
            extra_environ=env,
        )
        assert 403 == response.status_code
        assert helpers.body_contains(response, "Unauthorized to delete package")

    def test_sysadmins_can_delete_any_resource(self, app):
        """A sysadmin can delete resources of orgs they don't belong to."""
        owner_org = factories.Organization()
        dataset = factories.Dataset(owner_org=owner_org["id"])
        resource = factories.Resource(package_id=dataset["id"])
        sysadmin = factories.Sysadmin()
        env = {"REMOTE_USER": six.ensure_str(sysadmin["name"])}
        response = app.post(
            url_for(
                "{}_resource.delete".format(dataset["type"]),
                id=dataset["name"],
                resource_id=resource["id"],
            ),
            extra_environ=env,
        )
        assert 200 == response.status_code
        assert helpers.body_contains(response, "This dataset has no data")
        with pytest.raises(p.toolkit.ObjectNotFound):
            helpers.call_action("resource_show", id=resource["id"])

    def test_confirm_and_cancel_deleting_a_resource(self, app):
        """Test confirmation of deleting resources

        When resource_delete is made as a GET request, it should return a
        'do you want to delete this resource?' confirmation page.
        Posting "cancel" afterwards must not delete anything."""
        user = factories.User()
        owner_org = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=owner_org["id"])
        resource = factories.Resource(package_id=dataset["id"])
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        response = app.get(
            url_for(
                "{}_resource.delete".format(dataset["type"]),
                id=dataset["name"],
                resource_id=resource["id"],
            ),
            extra_environ=env,
        )
        assert 200 == response.status_code
        message = "Are you sure you want to delete resource - {name}?"
        assert helpers.body_contains(response, message.format(name=resource["name"]))
        # Pressing the cancel button returns 200 without deleting.
        response = app.post(
            url_for(
                "{}_resource.delete".format(dataset["type"]),
                id=dataset["name"],
                resource_id=resource["id"],
            ),
            extra_environ=env,
            data={"cancel": ""}
        )
        assert 200 == response.status_code
@pytest.mark.usefixtures("clean_db", "clean_index", "with_request_context")
class TestSearch(object):
    """Tests for the dataset search page, including visibility of private
    datasets and robustness against malformed query parameters."""
    def test_search_basic(self, app):
        """The search page lists an existing dataset."""
        dataset1 = factories.Dataset()
        offset = url_for("dataset.search")
        page = app.get(offset)
        assert helpers.body_contains(page, dataset1["name"])
    def test_search_language_toggle(self, app):
        """The search query is preserved in links on the results page."""
        dataset1 = factories.Dataset()
        with app.flask_app.test_request_context():
            offset = url_for("dataset.search", q=dataset1["name"])
        page = app.get(offset)
        assert helpers.body_contains(page, dataset1["name"])
        assert helpers.body_contains(page, "q=" + dataset1["name"])
    def test_search_sort_by_blank(self, app):
        """A bare ?sort parameter must not raise."""
        factories.Dataset()
        # ?sort has caused an exception in the past
        offset = url_for("dataset.search") + "?sort"
        app.get(offset)
    def test_search_sort_by_bad(self, app):
        """An invalid sort value is handled (not passed through as a 200)."""
        factories.Dataset()
        # bad spiders try all sorts of invalid values for sort. They should get
        # a 400 error with specific error message. No need to alert the
        # administrator.
        offset = url_for("dataset.search") + "?sort=gvgyr_fgevat+nfp"
        response = app.get(offset)
        if response.status == 200:
            import sys
            sys.stdout.write(response.body)
            raise Exception(
                "Solr returned an unknown error message. "
                "Please check the error handling "
                "in ckan/lib/search/query.py:run"
            )
    def test_search_solr_syntax_error(self, app):
        """A query Solr cannot parse produces the generic search error."""
        factories.Dataset()
        # SOLR raises SyntaxError when it can't parse q (or other fields?).
        # Whilst this could be due to a bad user input, it could also be
        # because CKAN mangled things somehow and therefore we flag it up to
        # the administrator and give a meaningless error, just in case
        offset = url_for("dataset.search") + "?q=--included"
        search_response = app.get(offset)
        search_response_html = BeautifulSoup(search_response.data)
        err_msg = search_response_html.select("#search-error")
        err_msg = "".join([n.text for n in err_msg])
        assert "error while searching" in err_msg
    def test_search_plugin_hooks(self, app):
        """IPackageController search hooks each fire exactly once."""
        with p.use_plugin("test_package_controller_plugin") as plugin:
            offset = url_for("dataset.search")
            app.get(offset)
            # get redirected ...
            assert plugin.calls["before_search"] == 1, plugin.calls
            assert plugin.calls["after_search"] == 1, plugin.calls
    def test_search_page_request(self, app):
        """Requesting package search page returns list of datasets."""
        factories.Dataset(name="dataset-one", title="Dataset One")
        factories.Dataset(name="dataset-two", title="Dataset Two")
        factories.Dataset(name="dataset-three", title="Dataset Three")
        search_url = url_for("dataset.search")
        search_response = app.get(search_url)
        assert "3 datasets found" in search_response
        search_response_html = BeautifulSoup(search_response.data)
        ds_titles = search_response_html.select(
            ".dataset-list " ".dataset-item " ".dataset-heading a"
        )
        ds_titles = [n.string for n in ds_titles]
        assert len(ds_titles) == 3
        assert "Dataset One" in ds_titles
        assert "Dataset Two" in ds_titles
        assert "Dataset Three" in ds_titles
    def test_search_page_results(self, app):
        """Searching for datasets returns expected results."""
        factories.Dataset(name="dataset-one", title="Dataset One")
        factories.Dataset(name="dataset-two", title="Dataset Two")
        factories.Dataset(name="dataset-three", title="Dataset Three")
        search_url = url_for("dataset.search")
        search_results = app.get(search_url, query_string={'q': 'One'})
        assert "1 dataset found" in search_results
        search_response_html = BeautifulSoup(search_results.data)
        ds_titles = search_response_html.select(
            ".dataset-list " ".dataset-item " ".dataset-heading a"
        )
        ds_titles = [n.string for n in ds_titles]
        assert len(ds_titles) == 1
        assert "Dataset One" in ds_titles
    def test_search_page_no_results(self, app):
        """Search with non-returning phrase returns no results."""
        factories.Dataset(name="dataset-one", title="Dataset One")
        factories.Dataset(name="dataset-two", title="Dataset Two")
        factories.Dataset(name="dataset-three", title="Dataset Three")
        search_url = url_for("dataset.search")
        search_results = app.get(search_url, query_string={'q': 'Nout'})
        assert 'No datasets found for "Nout"' in search_results
        search_response_html = BeautifulSoup(search_results.data)
        ds_titles = search_response_html.select(
            ".dataset-list " ".dataset-item " ".dataset-heading a"
        )
        ds_titles = [n.string for n in ds_titles]
        assert len(ds_titles) == 0
    def test_search_page_results_tag(self, app):
        """Searching with a tag returns expected results."""
        factories.Dataset(
            name="dataset-one", title="Dataset One", tags=[{"name": "my-tag"}]
        )
        factories.Dataset(name="dataset-two", title="Dataset Two")
        factories.Dataset(name="dataset-three", title="Dataset Three")
        search_url = url_for("dataset.search")
        search_response = app.get(search_url)
        assert "/dataset/?tags=my-tag" in search_response
        tag_search_response = app.get("/dataset?tags=my-tag")
        assert "1 dataset found" in tag_search_response
        search_response_html = BeautifulSoup(tag_search_response.data)
        ds_titles = search_response_html.select(
            ".dataset-list " ".dataset-item " ".dataset-heading a"
        )
        ds_titles = [n.string for n in ds_titles]
        assert len(ds_titles) == 1
        assert "Dataset One" in ds_titles
    def test_search_page_results_tags(self, app):
        """Searching with a tag returns expected results with multiple tags"""
        factories.Dataset(
            name="dataset-one",
            title="Dataset One",
            tags=[
                {"name": "my-tag-1"},
                {"name": "my-tag-2"},
                {"name": "my-tag-3"},
            ],
        )
        factories.Dataset(name="dataset-two", title="Dataset Two")
        factories.Dataset(name="dataset-three", title="Dataset Three")
        params = "/dataset/?tags=my-tag-1&tags=my-tag-2&tags=my-tag-3"
        tag_search_response = app.get(params)
        assert "1 dataset found" in tag_search_response
        search_response_html = BeautifulSoup(tag_search_response.data)
        # Each active tag filter is rendered with the "filtered" class.
        ds_titles = search_response_html.select(".filtered")
        assert len(ds_titles) == 3
    def test_search_page_results_private(self, app):
        """Private datasets don't show up in dataset search results."""
        org = factories.Organization()
        factories.Dataset(
            name="dataset-one",
            title="Dataset One",
            owner_org=org["id"],
            private=True,
        )
        factories.Dataset(name="dataset-two", title="Dataset Two")
        factories.Dataset(name="dataset-three", title="Dataset Three")
        search_url = url_for("dataset.search")
        search_response = app.get(search_url)
        search_response_html = BeautifulSoup(search_response.data)
        ds_titles = search_response_html.select(
            ".dataset-list " ".dataset-item " ".dataset-heading a"
        )
        ds_titles = [n.string for n in ds_titles]
        assert len(ds_titles) == 2
        assert "Dataset One" not in ds_titles
        assert "Dataset Two" in ds_titles
        assert "Dataset Three" in ds_titles
    def test_user_not_in_organization_cannot_search_private_datasets(
        self, app
    ):
        """A user with no org membership sees no private datasets."""
        user = factories.User()
        organization = factories.Organization()
        dataset = factories.Dataset(owner_org=organization["id"], private=True)
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        search_url = url_for("dataset.search")
        search_response = app.get(search_url, extra_environ=env)
        search_response_html = BeautifulSoup(search_response.data)
        ds_titles = search_response_html.select(
            ".dataset-list " ".dataset-item " ".dataset-heading a"
        )
        assert [n.string for n in ds_titles] == []
    def test_user_in_organization_can_search_private_datasets(self, app):
        """An org member sees the org's private datasets in search."""
        user = factories.User()
        organization = factories.Organization(
            users=[{"name": user["id"], "capacity": "member"}]
        )
        dataset = factories.Dataset(
            title="A private dataset",
            owner_org=organization["id"],
            private=True,
        )
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        search_url = url_for("dataset.search")
        search_response = app.get(search_url, extra_environ=env)
        search_response_html = BeautifulSoup(search_response.data)
        ds_titles = search_response_html.select(
            ".dataset-list " ".dataset-item " ".dataset-heading a"
        )
        assert [n.string for n in ds_titles] == ["A private dataset"]
    def test_user_in_different_organization_cannot_search_private_datasets(
        self, app
    ):
        """Membership in one org grants no access to another org's private data."""
        user = factories.User()
        org1 = factories.Organization(
            users=[{"name": user["id"], "capacity": "member"}]
        )
        org2 = factories.Organization()
        dataset = factories.Dataset(
            title="A private dataset", owner_org=org2["id"], private=True
        )
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        search_url = url_for("dataset.search")
        search_response = app.get(search_url, extra_environ=env)
        search_response_html = BeautifulSoup(search_response.data)
        ds_titles = search_response_html.select(
            ".dataset-list " ".dataset-item " ".dataset-heading a"
        )
        assert [n.string for n in ds_titles] == []
    @pytest.mark.ckan_config("ckan.search.default_include_private", "false")
    def test_search_default_include_private_false(self, app):
        """With default_include_private off, even members see no private data."""
        user = factories.User()
        organization = factories.Organization(
            users=[{"name": user["id"], "capacity": "member"}]
        )
        dataset = factories.Dataset(owner_org=organization["id"], private=True)
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        search_url = url_for("dataset.search")
        search_response = app.get(search_url, extra_environ=env)
        search_response_html = BeautifulSoup(search_response.data)
        ds_titles = search_response_html.select(
            ".dataset-list " ".dataset-item " ".dataset-heading a"
        )
        assert [n.string for n in ds_titles] == []
    def test_sysadmin_can_search_private_datasets(self, app):
        """Sysadmins see private datasets regardless of org membership."""
        user = factories.Sysadmin()
        organization = factories.Organization()
        dataset = factories.Dataset(
            title="A private dataset",
            owner_org=organization["id"],
            private=True,
        )
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        search_url = url_for("dataset.search")
        search_response = app.get(search_url, extra_environ=env)
        search_response_html = BeautifulSoup(search_response.data)
        ds_titles = search_response_html.select(
            ".dataset-list " ".dataset-item " ".dataset-heading a"
        )
        assert [n.string for n in ds_titles] == ["A private dataset"]
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestPackageFollow(object):
    """Tests for the dataset follow/unfollow endpoints and followers page."""
    def test_package_follow(self, app):
        """Following a dataset shows a confirmation flash message."""
        user = factories.User()
        package = factories.Dataset()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        follow_url = url_for("dataset.follow", id=package["id"])
        response = app.post(follow_url, extra_environ=env)
        assert "You are now following {0}".format(package["title"]) in response
    def test_package_follow_not_exist(self, app):
        """Pass an id for a package that doesn't exist"""
        user_one = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
        follow_url = url_for("dataset.follow", id="not-here")
        response = app.post(follow_url, extra_environ=env)
        assert "Dataset not found" in response
    def test_package_unfollow(self, app):
        """Unfollowing a followed dataset shows a confirmation message."""
        user_one = factories.User()
        package = factories.Dataset()
        env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
        follow_url = url_for("dataset.follow", id=package["id"])
        app.post(follow_url, extra_environ=env)
        unfollow_url = url_for("dataset.unfollow", id=package["id"])
        unfollow_response = app.post(
            unfollow_url, extra_environ=env
        )
        assert (
            "You are no longer following {0}".format(package["title"])
            in unfollow_response
        )
    def test_package_unfollow_not_following(self, app):
        """Unfollow a package not currently following"""
        user_one = factories.User()
        package = factories.Dataset()
        env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
        unfollow_url = url_for("dataset.unfollow", id=package["id"])
        unfollow_response = app.post(
            unfollow_url, extra_environ=env
        )
        assert (
            "You are not following {0}".format(package["id"])
            in unfollow_response
        )
    def test_package_unfollow_not_exist(self, app):
        """Unfollow a package that doesn't exist."""
        user_one = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
        unfollow_url = url_for("dataset.unfollow", id="not-here")
        unfollow_response = app.post(
            unfollow_url, extra_environ=env
        )
        assert "Dataset not found" in unfollow_response
    def test_package_follower_list(self, app):
        """Following users appear on followers list page."""
        user_one = factories.Sysadmin()
        package = factories.Dataset()
        env = {"REMOTE_USER": six.ensure_str(user_one["name"])}
        follow_url = url_for("dataset.follow", id=package["id"])
        app.post(follow_url, extra_environ=env)
        followers_url = url_for("dataset.followers", id=package["id"])
        # Only sysadmins can view the followers list pages
        followers_response = app.get(
            followers_url, extra_environ=env, status=200
        )
        assert user_one["display_name"] in followers_response
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestDatasetRead(object):
    """Tests for the dataset read page, including id -> name redirects."""
    def test_dataset_read(self, app):
        """The read page renders the dataset's title."""
        pkg = factories.Dataset()
        response = app.get(url_for("dataset.read", id=pkg["name"]))
        assert pkg["title"] in response
    def test_redirect_when_given_id(self, app):
        """Requesting by id redirects to the canonical name URL."""
        pkg = factories.Dataset()
        response = app.get(
            url_for("dataset.read", id=pkg["id"]),
            follow_redirects=False,
        )
        # redirect replaces the ID with the name in the URL
        assert response.headers['location'] == url_for(
            "dataset.read", id=pkg["name"], _external=True
        )
    def test_redirect_also_with_activity_parameter(self, app):
        """The activity_id query parameter survives the id -> name redirect."""
        pkg = factories.Dataset()
        activity = activity_model.package_activity_list(
            pkg["id"], limit=1, offset=0
        )[0]
        # view as an admin because viewing the old versions of a dataset
        admin = factories.Sysadmin()
        response = app.get(
            url_for("dataset.read", id=pkg["id"], activity_id=activity.id),
            status=302,
            extra_environ={"REMOTE_USER": six.ensure_str(admin["name"])},
            follow_redirects=False,
        )
        assert response.headers['location'] == url_for(
            "dataset.read",
            id=pkg["name"],
            _external=True,
            activity_id=activity.id,
        )
    def test_no_redirect_loop_when_name_is_the_same_as_the_id(self, app):
        """When name == id there must be a direct 200, not a redirect loop."""
        pkg = factories.Dataset(id="abc", name="abc")
        app.get(
            url_for("dataset.read", id=pkg["id"]), status=200
        )  # ie no redirect
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestActivity(object):
    """Tests for the dataset activity-stream page rendering."""
    def test_simple(self, app):
        """Checking the template shows the activity stream."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert "Mr. Test User" in response
        assert "created the dataset" in response
    def test_create_dataset(self, app):
        """Creating a dataset renders a 'created the dataset' activity."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert (
            '<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
        )
        assert "created the dataset" in response
        assert (
            '<a href="/dataset/{}">Test Dataset'.format(dataset["id"])
            in response
        )
    def _clear_activities(self):
        # Test helper: wipe all Activity rows so each test sees only the
        # activities it creates itself.
        model.Session.query(model.Activity).delete()
        model.Session.flush()
    def test_change_dataset(self, app):
        """Updating a dataset renders an 'updated the dataset' activity."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        self._clear_activities()
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert (
            '<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
        )
        assert "updated the dataset" in response
        assert (
            '<a href="/dataset/{}">Dataset with changed title'.format(
                dataset["id"]
            )
            in response
        )
    def test_create_tag_directly(self, app):
        """Adding a tag produces exactly one 'updated the dataset' activity."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        self._clear_activities()
        dataset["tags"] = [{"name": "some_tag"}]
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert (
            '<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
        )
        assert "updated the dataset" in response
        assert (
            '<a href="/dataset/{}">{}'.format(dataset["id"], dataset["title"])
            in response
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert len(activities) == 1
    def test_create_tag(self, app):
        """Tag changes via package_update appear as a single update activity."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        self._clear_activities()
        dataset["tags"] = [{"name": "some_tag"}]
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert (
            '<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
        )
        assert "updated the dataset" in response
        assert (
            '<a href="/dataset/{}">{}'.format(dataset["id"], dataset["title"])
            in response
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert len(activities) == 1
    def test_create_extra(self, app):
        """Extras changes appear as a single update activity."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        self._clear_activities()
        dataset["extras"] = [{"key": "some", "value": "extra"}]
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert (
            '<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
        )
        assert "updated the dataset" in response
        assert (
            '<a href="/dataset/{}">{}'.format(dataset["id"], dataset["title"])
            in response
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert len(activities) == 1
    def test_create_resource(self, app):
        """Creating a resource appears as a single dataset update activity."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        self._clear_activities()
        helpers.call_action(
            "resource_create",
            context={"user": user["name"]},
            name="Test resource",
            package_id=dataset["id"],
        )
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert (
            '<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
        )
        assert "updated the dataset" in response
        assert (
            '<a href="/dataset/{}">{}'.format(dataset["id"], dataset["title"])
            in response
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert len(activities) == 1
    def test_update_resource(self, app):
        """Updating a resource appears as a single dataset update activity."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        resource = factories.Resource(package_id=dataset["id"])
        self._clear_activities()
        helpers.call_action(
            "resource_update",
            context={"user": user["name"]},
            id=resource["id"],
            name="Test resource updated",
            package_id=dataset["id"],
        )
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert (
            '<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
        )
        assert "updated the dataset" in response
        assert (
            '<a href="/dataset/{}">{}'.format(dataset["id"], dataset["title"])
            in response
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert len(activities) == 1
    def test_delete_dataset(self, app):
        """Dataset deletion shows up on the owning organization's stream."""
        user = factories.User()
        org = factories.Organization()
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        self._clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        url = url_for("organization.activity", id=org["id"])
        response = app.get(url)
        assert (
            '<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
        )
        assert "deleted the dataset" in response
        assert (
            '<a href="/dataset/{}">Test Dataset'.format(dataset["id"])
            in response
        )
    def test_admin_can_see_old_versions(self, app):
        """A logged-in dataset creator sees the 'View this version' links."""
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        dataset = factories.Dataset(user=user)
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url, extra_environ=env)
        assert "View this version" in response
    def test_public_cant_see_old_versions(self, app):
        """Anonymous visitors don't get 'View this version' links."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert "View this version" not in response
    def test_admin_can_see_changes(self, app):
        """Logged-in users see the 'Changes' (diff) links."""
        user = factories.User()
        env = {"REMOTE_USER": six.ensure_str(user["name"])}
        dataset = factories.Dataset()  # activities by system user aren't shown
        dataset["title"] = "Changed"
        helpers.call_action("package_update", **dataset)
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url, extra_environ=env)
        assert "Changes" in response
    def test_public_cant_see_changes(self, app):
        """Anonymous visitors don't get the 'Changes' (diff) links."""
        dataset = factories.Dataset()  # activities by system user aren't shown
        dataset["title"] = "Changed"
        helpers.call_action("package_update", **dataset)
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert "Changes" not in response
    def test_legacy_changed_package_activity(self, app):
        """Render an activity that was created with an earlier version of CKAN,
        and it has not been migrated (with migrate_package_activity.py)
        """
        user = factories.User()
        dataset = factories.Dataset(user=user)
        # delete the modern Activity object that's been automatically created
        modern_activity = (
            model.Session.query(model.Activity)
            .filter_by(object_id=dataset["id"])
            .one()
        )
        modern_activity.delete()
        # Create an Activity object as it was in earlier versions of CKAN.
        # This code is based on:
        # https://github.com/ckan/ckan/blob/b348bf2fe68db6704ea0a3e22d533ded3d8d4344/ckan/model/package.py#L508
        activity_type = "changed"
        dataset_table_dict = dictization.table_dictize(
            model.Package.get(dataset["id"]), context={"model": model}
        )
        activity = model.Activity(
            user_id=user["id"],
            object_id=dataset["id"],
            activity_type="%s package" % activity_type,
            data={
                # "actor": a legacy activity had no "actor"
                # "package": a legacy activity had just the package table,
                # rather than the result of package_show
                "package": dataset_table_dict
            },
        )
        model.Session.add(activity)
        # a legacy activity had a ActivityDetail associated with the Activity
        # This code is based on:
        # https://github.com/ckan/ckan/blob/b348bf2fe68db6704ea0a3e22d533ded3d8d4344/ckan/model/package.py#L542
        activity_detail = model.ActivityDetail(
            activity_id=activity.id,
            object_id=dataset["id"],
            object_type=u"Package",
            activity_type=activity_type,
            data={u"package": dataset_table_dict},
        )
        model.Session.add(activity_detail)
        model.Session.flush()
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert (
            '<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
        )
        assert "updated the dataset" in response
        assert (
            '<a href="/dataset/{}">Test Dataset'.format(dataset["id"])
            in response
        )
    # ckanext-canada uses their IActivity to add their custom activity to the
    # list of validators: https://github.com/open-data/ckanext-canada/blob/6870e5bc38a04aa8cef191b5e9eb361f9560872b/ckanext/canada/plugins.py#L596
    # but it's easier here to just hack patch it in
    @mock.patch(
        "ckan.logic.validators.object_id_validators",
        dict(
            list(object_id_validators.items())
            + [("changed datastore", package_id_exists)]
        ),
    )
    def test_custom_activity(self, app):
        """Render a custom activity
        """
        user = factories.User()
        organization = factories.Organization(
            users=[{"name": user["id"], "capacity": "admin"}]
        )
        dataset = factories.Dataset(owner_org=organization["id"], user=user)
        resource = factories.Resource(package_id=dataset["id"])
        self._clear_activities()
        # Create a custom Activity object. This one is inspired by:
        # https://github.com/open-data/ckanext-canada/blob/master/ckanext/canada/activity.py
        activity_dict = {
            "user_id": user["id"],
            "object_id": dataset["id"],
            "activity_type": "changed datastore",
            "data": {
                "resource_id": resource["id"],
                "pkg_type": dataset["type"],
                "resource_name": "june-2018",
                "owner_org": organization["name"],
                "count": 5,
            },
        }
        helpers.call_action("activity_create", **activity_dict)
        url = url_for("dataset.activity", id=dataset["id"])
        response = app.get(url)
        assert (
            '<a href="/user/{}">Mr. Test User'.format(user["name"]) in response
        )
        # it renders the activity with fallback.html, since we've not defined
        # changed_datastore.html in this case
        assert "changed datastore" in response
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestChanges(object):  # i.e. the diff
    """Tests for the dataset changes (diff) page."""
    def test_simple(self, app):
        """The diff page shows both the old and the new dataset title."""
        author = factories.User()
        pkg = factories.Dataset(title="First title", user=author)
        pkg["title"] = "Second title"
        helpers.call_action("package_update", **pkg)
        # The most recent activity corresponds to the title change.
        latest_activity = activity_model.package_activity_list(
            pkg["id"], limit=1, offset=0
        )[0]
        response = app.get(
            url_for("dataset.changes", id=latest_activity.id),
            extra_environ={"REMOTE_USER": six.ensure_str(author["name"])},
        )
        assert helpers.body_contains(response, "First")
        assert helpers.body_contains(response, "Second")
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 21 15:37:11 2016
@author: Doppler
"""
# Measure how a decision tree's held-out accuracy varies with max_depth
# on a synthetic classification problem with few informative features.
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
import numpy as np
# Use matplotlib.pyplot: the `pylab` interface is discouraged/deprecated
# in favour of pyplot + numpy imports.
import matplotlib.pyplot as plt

n_features = 200
# 750 samples, 200 features, only 5 of which are informative.
x, y = datasets.make_classification(750, n_features, n_informative=5)
# Random ~75/25 train/test split mask (per-sample Bernoulli draw).
training = np.random.choice([True, False], p=[.75, .25], size=len(y))
accuracies = []
for depth in np.arange(1, n_features + 1):
    dt = DecisionTreeClassifier(max_depth=depth)
    dt.fit(x[training], y[training])
    preds = dt.predict(x[~training])
    # Fraction of correct predictions on the held-out samples.
    accuracies.append((preds == y[~training]).mean())
f, ax = plt.subplots(figsize=(7, 5))
ax.plot(range(1, n_features + 1), accuracies, color='k')
ax.set_ylabel("% Correct")
ax.set_xlabel("Max Depth")
|
# coding: utf-8
# In[1]:
# %load paste_video_classification.py
# This script runs one full pass over both the training set and the
# validation set in every epoch.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import copy
# Pin the run to GPU #2.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# Hyperparameters
learning_rate = 0.001
momentum = 0.9
epochs = 40
batch_size = 4
display_step = 1
shuffle = True
num_classes = 4
# In[2]:
# Build the backbone network. NOTE(review): the original comment said
# "load VGG16 pretrained model", but the code actually builds ResNet-152
# with pretrained=False and replaces the final FC layer for num_classes.
model = models.resnet152(pretrained=False)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, num_classes)
# In[3]:
# Data preparation.
# crop / resize / flip augmentations for training; deterministic
# resize + center-crop for validation.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'validation': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
# your image data file
data_dir = './images/'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x]) for x in ['train', 'validation']}
# Wrap the ImageFolder datasets in DataLoaders so batches come out as
# tensors (with shuffling and worker processes).
dataloders = {x: torch.utils.data.DataLoader(image_datasets[x],
                                             batch_size=batch_size,
                                             shuffle=shuffle,
                                             num_workers=10) for x in ['train', 'validation']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'validation']}
# In[4]:
# Use the GPU when one is available.
use_gpu = torch.cuda.is_available()
if use_gpu:
    model = model.cuda()
print("use_gpu: " + str(use_gpu))
# Loss function: cross-entropy.
loss_fn = nn.CrossEntropyLoss()
# Optimizer: stochastic gradient descent with momentum.
optimizer = optim.SGD(model.parameters(), learning_rate, momentum)
# Learning-rate schedule (disabled): StepLR would multiply the LR by
# `gamma` every `step_size` epochs.
# exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# In[6]:
# Start training.
since = time.time()
best_model_wts = model.state_dict()
best_acc = 0.0
loss_train = []  # per-epoch training loss
acc_train = []  # per-epoch training accuracy
loss_val = []  # per-epoch validation loss
acc_val = []  # per-epoch validation accuracy
# Confusion matrix (rows: actual class, cols: predicted class) of the
# best-performing epoch so far.
best_matrix = [[0 for i in range(num_classes)] for i in range(num_classes)]
for epoch in range(epochs):
    if epoch % display_step == 0:
        print('Epoch [{}/{}]:'.format(epoch + 1, epochs))
    # Run both the training and the validation phase every epoch.
    for phase in ['train', 'validation']:
        if phase == 'train':
            i = 1
            j = 1
            # exp_lr_scheduler.step()
            model.train()  # training mode (affects Dropout and BatchNorm)
        else:
            i = 1
            j = 2
            model.eval()  # evaluation mode
        running_loss = 0.0
        running_corrects = 0
        matrix = [[0 for i in range(num_classes)] for i in range(num_classes)]
        # Iterate over data.
        for data in dataloders[phase]:
            # get the inputs
            inputs, labels = data
            # wrap them in Variable
            # if use_gpu:
            #     inputs = inputs.cuda()
            #     labels = labels.cuda()
            # else:
            #     inputs, labels = Variable(inputs), Variable(labels)
            # Since PyTorch 0.4.0 Variable and Tensor are merged, so the
            # tensors can be moved to the GPU directly.
            if use_gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()
            # Zero all gradients before the backward pass.
            optimizer.zero_grad()
            # Forward pass.
            outputs = model(inputs)
            # Compute the loss.
            loss = loss_fn(outputs, labels)
            # torch.max returns (values, indices); we only need the indices,
            # i.e. the predicted class label for each sample, so the values
            # are discarded into the conventional `_` throwaway name.
            _, preds = torch.max(outputs.data, 1)
            # During training, backpropagate and take an optimizer step.
            if phase == 'train':
                loss.backward()
                optimizer.step()
            # Accumulate this batch's loss and number of correct predictions.
            running_loss += loss.item()
            running_corrects += torch.sum(preds == labels.data)
            if phase == 'validation':
                # NOTE(review): this indexes batch elements with
                # range(num_classes); it only works because batch_size (4)
                # equals num_classes (4) and would fail on a partial final
                # batch — confirm.
                for k in range(0, num_classes):
                    matrix[labels.data.cpu().numpy()[k]][preds.cpu().numpy()[k]] += 1
            print('\t{} {}-{}: Loss: {:.4f} Acc: {:.4f}%'.format(phase, epoch + 1, i, loss.item()/4, torch.sum(preds == labels.data).item()/4.0*100))
            i = i + 1
        # Compute and record this phase's epoch-level loss and accuracy.
        if j == 1:
            epoch_loss_train = running_loss / dataset_sizes['train']
            epoch_acc_train = running_corrects.item() / dataset_sizes['train']
            loss_train.append(epoch_loss_train)
            acc_train.append(epoch_acc_train)
        else:
            epoch_loss_val = running_loss / dataset_sizes['validation']
            epoch_acc_val = running_corrects.item() / dataset_sizes['validation']
            loss_val.append(epoch_loss_val)
            acc_val.append(epoch_acc_val)
        if epoch % display_step == 0 and j == 2:
            print('\ttrain Loss: {:.4f} Acc: {:.4f}%'.format(epoch_loss_train, epoch_acc_train*100))
            print('\tvalidation Loss: {:.4f} Acc: {:.4f}%'.format(epoch_loss_val, epoch_acc_val*100))
        # deep copy the model
        if phase == 'validation' and epoch_acc_val > best_acc:
            best_acc = epoch_acc_val
            best_model_wts = model.state_dict()
            print("网络参数更新")
            # Persist the best weights seen so far.
            torch.save(best_model_wts, './parameter/params_resnet152.pth')
            best_matrix = copy.deepcopy(matrix)
            # print("Model's state_dict:")
            # for param_tensor in best_model_wts:
            #     print(param_tensor, "\t", best_model_wts[param_tensor].size())
    time_elapsed = time.time() - since
    print('Time passed {:.0f}h {:.0f}m {:.0f}s'.format(time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60))
    print('-' * 20)
# Report the total training time.
time_elapsed = time.time() - since
print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60))
print('Best validation Acc: {:4f}'.format(best_acc))
# In[57]:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
print('loss_train: ' + str(loss_train))
print('loss_val: ' + str(loss_val))
print('acc_train: ' + str(acc_train))
print('acc_val: ' + str(acc_val))
# First figure: plot the train and validation loss curves together.
plt.figure()
plt.title("Loss",fontsize=16)
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.xticks(np.arange(1, 21, 1.0))
plt.plot(range(1,epochs + 1), loss_train,color='r', linewidth = 3.0, label='train')
plt.plot(range(1,epochs + 1), loss_val,color='b', linewidth = 3.0, label='validation')
plt.legend()  # show the legend and its labels
# Second figure: plot the train and validation accuracy curves together.
plt.figure()
plt.title("Predicted accuracy",fontsize=16)
plt.xlabel("Epochs")
plt.ylabel("Acc")
plt.xticks(np.arange(1, 21, 1.0))
plt.plot(range(1,epochs + 1), acc_train,color='r', linewidth = 3.0, label='train')
plt.plot(range(1,epochs + 1), acc_val,color='b', linewidth = 3.0, label='validation')
plt.legend()  # show the legend and its labels
plt.show()
# In[29]:
def imshow(img):
    """Undo the (mean=0.5, std=0.5) normalization and display the image tensor.

    Expects a CHW tensor; converts it to an HWC NumPy array for matplotlib.
    """
    unnormalized = img * 0.5 + 0.5  # unnormalize
    pixels = unnormalized.numpy()
    plt.imshow(pixels.transpose(1, 2, 0))
# Class labels in index order (dataset folder names).
classes = ('75', '77', '79', '81')
dataiter = iter(dataloders['validation'])
# Bug fix: the legacy `.next()` method was removed from DataLoader iterators
# in modern PyTorch; the builtin next() works on any Python iterator.
images, labels = next(dataiter)
print(images.size())
# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[z]] for z in range(4)))
# test: run the batch through the trained model and show its predictions
outputs = model(images.cuda())
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[z]] for z in range(4)))
# In[14]:
# Maps class index (as string) to its padded display label.
conc = {
    '0': '75 ',
    '1': '77 ',
    '2': '79 ',
    '3': '81 '
}
# Render the best-epoch confusion matrix: predicted classes across,
# actual classes down.
print("\t Predicted\n")
print("\t 75\t79\t79\t81\n".replace("79\t79", "77\t79"))
for row in range(num_classes):
    cells = ''.join(str(best_matrix[row][col]) + '\t' for col in range(num_classes))
    print("Actual " + conc[str(row)] + cells + '\n')
|
# Generated by Django 3.1.5 on 2021-02-12 16:08
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``Blog`` model to ``MyBlog``."""
    # Must run after the app's initial migration that created Blog.
    dependencies = [
        ("blog", "0001_initial"),
    ]
    operations = [
        migrations.RenameModel(
            old_name="Blog",
            new_name="MyBlog",
        ),
    ]
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import os
import sys
import numpy
import pytest
import uproot
def tobytes(x):
    """Return the raw bytes of *x*, falling back to the deprecated
    ``tostring()`` for objects (old NumPy arrays) lacking ``tobytes()``."""
    converter = getattr(x, "tobytes", None)
    if converter is not None:
        return converter()
    return x.tostring()
def test_file(tmpdir):
    """Chunks read through MultithreadedFileSource (1 and 2 workers) must
    equal the corresponding byte ranges of the file; opening a missing
    file must raise."""
    filename = os.path.join(str(tmpdir), "tmp.raw")
    with open(filename, "wb") as tmp:
        tmp.write(b"******    ...+++++++!!!!!@@@@@")
    # Expected contents of each (start, stop) range requested below.
    expected = [
        b"******",
        b"    ",
        b"...",
        b"+++++++",
        b"!!!!!",
        b"@@@@@",
    ]
    for num_workers in [1, 2]:
        with uproot.source.file.MultithreadedFileSource(
            filename, num_workers=num_workers
        ) as source:
            for i, (start, stop) in enumerate(
                [(0, 6), (6, 10), (10, 13), (13, 20), (20, 25), (25, 30)]
            ):
                chunk = source.chunk(start, stop)
                assert tobytes(chunk.raw_data) == expected[i]
        # A nonexistent path must fail at source construction.
        with pytest.raises(Exception):
            uproot.source.file.MultithreadedFileSource(
                filename + "-does-not-exist", num_workers=num_workers
            )
def test_memmap(tmpdir):
    """Chunks read through the memory-mapped MemmapSource must equal the
    corresponding byte ranges of the file; a missing file must raise."""
    filename = os.path.join(str(tmpdir), "tmp.raw")
    with open(filename, "wb") as tmp:
        tmp.write(b"******    ...+++++++!!!!!@@@@@")
    # Expected contents of each (start, stop) range requested below.
    expected = [
        b"******",
        b"    ",
        b"...",
        b"+++++++",
        b"!!!!!",
        b"@@@@@",
    ]
    with uproot.source.file.MemmapSource(filename, num_fallback_workers=1) as source:
        for i, (start, stop) in enumerate(
            [(0, 6), (6, 10), (10, 13), (13, 20), (20, 25), (25, 30)]
        ):
            chunk = source.chunk(start, stop)
            assert tobytes(chunk.raw_data) == expected[i]
    # A nonexistent path must fail at source construction.
    with pytest.raises(Exception):
        uproot.source.file.MemmapSource(
            filename + "-does-not-exist", num_fallback_workers=1
        )
@pytest.mark.network
def test_http():
    """Multithreaded HTTP source must return exactly the requested byte
    ranges, and fail for an unreachable host."""
    byte_ranges = [(0, 100), (50, 55), (200, 400)]
    for num_workers in [1, 2]:
        with uproot.source.http.MultithreadedHTTPSource(
            "https://example.com", num_workers=num_workers, timeout=10
        ) as source:
            for start, stop in byte_ranges:
                payload = tobytes(source.chunk(start, stop).raw_data)
                assert len(payload) == stop - start
        with pytest.raises(Exception):
            with uproot.source.http.MultithreadedHTTPSource(
                "https://wonky.cern/does-not-exist",
                num_workers=num_workers,
                timeout=0.1,
            ) as source:
                source.chunk(0, 100)
@pytest.mark.network
def test_http_multipart():
    """Multipart HTTP source must return exactly the requested byte ranges,
    and fail for an unreachable host."""
    with uproot.source.http.HTTPSource(
        "https://example.com", timeout=10, num_fallback_workers=1
    ) as source:
        for start, stop in [(0, 100), (50, 55), (200, 400)]:
            chunk = source.chunk(start, stop)
            assert len(tobytes(chunk.raw_data)) == stop - start
    # Reading from a bogus host must raise (short timeout keeps the test fast).
    with pytest.raises(Exception):
        with uproot.source.http.HTTPSource(
            "https://wonky.cern/does-not-exist", timeout=0.1, num_fallback_workers=1
        ) as source:
            tobytes(source.chunk(0, 100).raw_data)
@pytest.mark.skip(
    reason="RECHECK: Run2012B_DoubleMuParked.root is super-flaky right now"
)
@pytest.mark.network
@pytest.mark.xrootd
def test_xrootd():
    """Single-worker XRootD source returns correctly sized chunks; the file
    must start with the ROOT magic bytes."""
    pytest.importorskip("XRootD")
    with uproot.source.xrootd.MultithreadedXRootDSource(
        "root://eospublic.cern.ch//eos/root-eos/cms_opendata_2012_nanoaod/Run2012B_DoubleMuParked.root",
        num_workers=1,
        timeout=10,
    ) as source:
        one = tobytes(source.chunk(0, 100).raw_data)
        assert len(one) == 100
        two = tobytes(source.chunk(50, 55).raw_data)
        assert len(two) == 5
        three = tobytes(source.chunk(200, 400).raw_data)
        assert len(three) == 200
        # Every ROOT file begins with the 4-byte magic "root".
        assert one[:4] == b"root"
@pytest.mark.skip(
    reason="RECHECK: Run2012B_DoubleMuParked.root is super-flaky right now"
)
@pytest.mark.network
@pytest.mark.xrootd
def test_xrootd_worker():
    """Multi-worker XRootD source returns correctly sized chunks; the file
    must start with the ROOT magic bytes."""
    pytest.importorskip("XRootD")
    with uproot.source.xrootd.MultithreadedXRootDSource(
        "root://eospublic.cern.ch//eos/root-eos/cms_opendata_2012_nanoaod/Run2012B_DoubleMuParked.root",
        num_workers=5,
        timeout=10,
    ) as source:
        first = tobytes(source.chunk(0, 100).raw_data)
        assert len(first) == 100
        second = tobytes(source.chunk(50, 55).raw_data)
        assert len(second) == 5
        third = tobytes(source.chunk(200, 400).raw_data)
        assert len(third) == 200
        assert first[:4] == b"root"
@pytest.mark.skip(
    reason="RECHECK: Run2012B_DoubleMuParked.root is super-flaky right now"
)
@pytest.mark.network
@pytest.mark.xrootd
def test_xrootd_vectorread():
    """Vector-read XRootD source returns correctly sized chunks; the file
    must start with the ROOT magic bytes."""
    pytest.importorskip("XRootD")
    with uproot.source.xrootd.XRootDSource(
        "root://eospublic.cern.ch//eos/root-eos/cms_opendata_2012_nanoaod/Run2012B_DoubleMuParked.root",
        timeout=10,
        max_num_elements=None,
        num_workers=1,
    ) as source:
        first = tobytes(source.chunk(0, 100).raw_data)
        assert len(first) == 100
        second = tobytes(source.chunk(50, 55).raw_data)
        assert len(second) == 5
        third = tobytes(source.chunk(200, 400).raw_data)
        assert len(third) == 200
        assert first[:4] == b"root"
|
from MCSampler import MCSampler
from Sample import Samples, Sample, SampleType, DistributionType
from Sampler import Sampler
|
import logging
# NOTE(review): `log11` appears unused; kept as-is since other modules may
# import it — confirm before removing.
log11 = 1
# Configure the root logger: despite the DEBUG-level handlers below, the
# logger's own INFO level gates which records the handlers ever see.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s:[%(threadName)-12.12s]:[%(levelname)-5.5s]: %(message)s")
# File handler: append all passed records to log.log.
fh = logging.FileHandler('log.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
# Console handler with the same format.
# NOTE(review): re-importing this module adds duplicate handlers — confirm
# it is only imported once.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
|
from OCC.Core.Bnd import Bnd_Box
class BoundaryBox:
    """Axis-aligned 3D bounding box given by min/max corner coordinates.

    Construct it empty (uninitialized), from six explicit coordinates
    (xl, xh, yl, yh, zl, zh), or from an OpenCASCADE ``Bnd_Box`` passed
    as the first positional argument.
    """

    def __init__(self, xl=None, xh=None, yl=None, yh=None, zl=None, zh=None):
        if isinstance(xl, Bnd_Box):
            # Single Bnd_Box argument: adopt its bounds.
            self.assign_Bnd_Box(xl)
        elif xl is None:
            # No arguments: zeroed bounds, marked uninitialized so the
            # first add() replaces them instead of merging with zeros.
            self.assign_coords(0, 0, 0, 0, 0, 0)
            self.inited = False
        else:
            self.assign_coords(xl, xh, yl, yh, zl, zh)

    def assign_coords(self, xl, xh, yl, yh, zl, zh):
        """Set all six bounds and mark the box initialized."""
        self.xmin, self.xmax = xl, xh
        self.ymin, self.ymax = yl, yh
        self.zmin, self.zmax = zl, zh
        self.inited = True

    def assign(self, box):
        """Copy the bounds of another BoundaryBox."""
        self.assign_coords(box.xmin, box.xmax,
                           box.ymin, box.ymax,
                           box.zmin, box.zmax)

    def assign_Bnd_Box(self, Box):
        """Adopt the bounds of an OpenCASCADE Bnd_Box.

        ``Bnd_Box.Get()`` returns (xmin, ymin, zmin, xmax, ymax, zmax).
        """
        xmin, ymin, zmin, xmax, ymax, zmax = Box.Get()
        self.assign_coords(xmin, xmax, ymin, ymax, zmin, zmax)

    def add(self, bbox):
        """Grow this box to enclose *bbox*, or adopt it if uninitialized."""
        if not self.inited:
            self.assign(bbox)
        else:
            self.xmin = min(self.xmin, bbox.xmin)
            self.ymin = min(self.ymin, bbox.ymin)
            self.zmin = min(self.zmin, bbox.zmin)
            self.xmax = max(self.xmax, bbox.xmax)
            self.ymax = max(self.ymax, bbox.ymax)
            self.zmax = max(self.zmax, bbox.zmax)

    def xrange(self):
        """(xmin, xmax) pair."""
        return (self.xmin, self.xmax)

    def yrange(self):
        """(ymin, ymax) pair."""
        return (self.ymin, self.ymax)

    def zrange(self):
        """(zmin, zmax) pair."""
        return (self.zmin, self.zmax)

    def xlength(self):
        """Extent along X."""
        return self.xmax - self.xmin

    def ylength(self):
        """Extent along Y."""
        return self.ymax - self.ymin

    def zlength(self):
        """Extent along Z."""
        return self.zmax - self.zmin

    def shape(self):
        """Materialize the box as a zencad solid at its minimum corner."""
        from zencad.geom.solid import box
        return box(self.xlength(), self.ylength(), self.zlength()).move(
            self.xmin, self.ymin, self.zmin)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import (
setup,
find_packages,
)
# Absolute path of the directory containing this setup script.
DIR = os.path.dirname(os.path.abspath(__file__))
# Long description for PyPI. Use a context manager so the file handle is
# closed promptly (the original left the file object open).
with open(os.path.join(DIR, 'README.md')) as readme_file:
    readme = readme_file.read()
# Runtime dependencies.
install_requires = [
    "cytoolz>=0.8.2",
    "ethereum-abi-utils>=0.4.3",
    "ethereum-keyfile>=0.3.0",
    "ethereum-keys>=0.1.0-alpha.7",
    "ethereum-utils>=0.5.0",
    "pylru>=1.0.9",
    "pysha3>=0.3",
    "requests>=2.12.4",
    "rlp>=0.4.7",
    "toolz>=0.8.2",
    "ethereum-tester~=0.1.0b1",
]
setup(
    name='web3',
    # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
    version='4.0.0-beta.2',
    description="""Web3.py""",
    long_description_markdown_filename='README.md',
    author='Piper Merriam',
    author_email='pipermerriam@gmail.com',
    url='https://github.com/pipermerriam/web3.py',
    include_package_data=True,
    install_requires=install_requires,
    setup_requires=['setuptools-markdown'],
    extras_require={
        'tester': ["eth-testrpc>=1.3.3"],
        'win': ["pypiwin32"],
    },
    py_modules=['web3', 'ens'],
    license="MIT",
    zip_safe=False,
    keywords='ethereum',
    packages=find_packages(exclude=["tests", "tests.*"]),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
|
import os
import re
import logging
from collections import defaultdict
from remus.bio.bed.beds_loading import BedLoader
from remus.bio.bed.beds_operations import BedOperations
def convert_genome_build(genome, hg19_expected="hg19", hg38_expected="GRCh38"):
    """Normalize a genome-build alias (case-insensitive prefix match) to a
    canonical label.

    :param genome: alias such as "hg19", "b37", "GRCh38"
    :param hg19_expected: label returned for hg19-family aliases
    :param hg38_expected: label returned for hg38-family aliases
    :raise InvalidGenomeBuildException: when no alias family matches
    """
    alias_families = (
        ("(hg37|hg19|b37)", hg19_expected),
        ("(hg38|grch38|b38)", hg38_expected),
    )
    for alias_pattern, canonical in alias_families:
        if re.match(alias_pattern, genome, re.IGNORECASE):
            return canonical
    raise InvalidGenomeBuildException(genome)
def get_dirname_for_merged_with_liftover(genome_build):
    """Name of the data subdirectory holding tracks merged with lifted-over
    coordinates for *genome_build*."""
    suffix = "_with_liftover"
    return genome_build + suffix
class RegulatoryRegionsFilesRegistry:
    """Per-genome-build registry of BED files describing regulatory regions
    (promoters, enhancers, open chromatin) from FANTOM5, SCREEN and ENCODE.

    One instance is kept per genome build; obtain it via get_registry().
    """
    instances = None  # dictionary of singleton objects
    # Short symbols used to tag each data source in tissue display names.
    FANTOM5_PROMOTERS_KEY = "PR_F"
    FANTOM5_ENHANCERS_KEY = "ENH_F"
    SCREEN_PROMOTERS_KEY = "PR_S"
    SCREEN_ENHANCERS_KEY = "ENH_S"
    SCREEN_CHROMATIN_KEY = "CHR_S"
    ENCODE_ENHANCERS_KEY = "ENH_E"
    ENCODE_CHROMATIN_KEY = "CHR_E"
    # On-disk data subdirectory (under the root) -> source symbol.
    DATA_DIRECTORIES_MAP = {
        "enhancers/encode": ENCODE_ENHANCERS_KEY,
        "enhancers/fantom5": FANTOM5_ENHANCERS_KEY,
        "enhancers/screen": SCREEN_ENHANCERS_KEY,
        "chromatin/encode": ENCODE_CHROMATIN_KEY,
        "chromatin/screen": SCREEN_CHROMATIN_KEY,
        "promoters/fantom5": FANTOM5_PROMOTERS_KEY,
        "promoters/screen": SCREEN_PROMOTERS_KEY
    }
    @staticmethod
    def get_registry(genome_build='hg19'):
        """Return the singleton registry for *genome_build*, creating it on
        first use. The build name is normalized via convert_genome_build."""
        genome_build = convert_genome_build(genome_build)
        if RegulatoryRegionsFilesRegistry.instances is None:
            RegulatoryRegionsFilesRegistry.instances = {}
        if genome_build not in RegulatoryRegionsFilesRegistry.instances:
            RegulatoryRegionsFilesRegistry.instances[genome_build] = RegulatoryRegionsFilesRegistry(genome_build)
        return RegulatoryRegionsFilesRegistry.instances[genome_build]
    def __init__(self, genome_build, merge_lifted_over=True, root="data",
                 directories_and_symbols=DATA_DIRECTORIES_MAP,
                 extensions=(".bed", ".bed.gz")):
        """Scan the data tree for *genome_build* and index available tissues.

        :param genome_build: canonical build name (e.g. "hg19")
        :param merge_lifted_over: prefer "<build>_with_liftover" dirs if present
        :param root: root directory of the BED data tree
        :param directories_and_symbols: subdirectory -> source-symbol map
        :param extensions: file suffixes treated as BED files
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        sources_map = self._make_sources_map(directories_and_symbols, genome_build, merge_lifted_over, extensions, root)
        self._available_tissues = self._create_available_tissues_map(sources_map)
    def _make_sources_map(self, directories_and_symbols, genome_build, merge_lifted_over, extensions, root):
        """Scan the directories and build a map of
        {(term_id, life_stage): {source_symbol: bed_path, "name": tissue_name}}."""
        merge_with_liftover_genome_build = get_dirname_for_merged_with_liftover(genome_build)
        self.logger.info("Making sources map for root: %s ; paths: %s ; genome: %s (%smerging with lifted over coordinates); and extensions: %s" \
                         % (root, str(directories_and_symbols),
                            genome_build, "" if merge_lifted_over else "not ",
                            str(extensions)))
        # File names look like "<TERM_id>_<tissue_name>[_embryonic].bed[.gz]".
        pattern = re.compile(r"(\w+_\d+)_(.+?)(|_embryonic)\.")
        sources = defaultdict(dict)
        for path in directories_and_symbols:
            genome_build_dir = genome_build
            # Prefer the "<build>_with_liftover" directory when requested and present.
            if merge_lifted_over and not os.path.isdir(os.path.join(root, path, merge_with_liftover_genome_build)):
                self.logger.warning("BED dir for path [%s] and genome [%s] (merged with lifted over) does not exist. Using defult genome build dir: [%s]" \
                                    % (path, merge_with_liftover_genome_build, genome_build))
            else:
                genome_build_dir = merge_with_liftover_genome_build
            if not os.path.isdir(os.path.join(root, path, genome_build_dir)):
                self.logger.warning("BED dir for path [%s] and genome [%s] does not exist. Skipping." % (path, genome_build))
                continue
            for bed in os.listdir(os.path.join(root, path, genome_build_dir)):
                termid_and_name = pattern.match(bed)
                # Warn about BED-suffixed files whose names don't parse, then skip them.
                # NOTE(review): logger.warn is deprecated in favor of logger.warning.
                if not termid_and_name and any([bed.endswith(ext) for ext in extensions]):
                    self.logger.warn("File %s not matching expected file name format" % bed)
                    continue
                # Non-BED files (wrong extension) are silently ignored.
                if any([bed.endswith(ext) for ext in extensions]):
                    termid = termid_and_name.group(1)
                    name = termid_and_name.group(2)
                    life_stage = termid_and_name.group(3)
                    self.logger.debug("%s was split into [%s],[%s],[%s]" % (bed, termid, name, life_stage))
                    symbol = directories_and_symbols[path]
                    sources[termid, life_stage][symbol] = os.path.join(root, path, genome_build_dir, bed)
                    sources[termid, life_stage]["name"] = name.replace("_", " ")
        self.logger.debug("Sources map:\n%s" % str(sources))
        return sources
    def _create_available_tissues_map(self, sources_map):
        """Turn the sources map into {display_name: {source_symbol: bed_path}}.

        The display name is "<tissue>[ life-stage] (SYM1, SYM2, ...)".
        """
        tissues_map = {}
        for (_, life_stage), files_data in sources_map.items():
            # pop() removes the helper "name" entry so only source symbols remain.
            source_name = files_data.pop("name")
            name = "{}{} ({})".format(source_name, life_stage.replace("_"," "), ", ".join(sorted(files_data.keys())))
            tissues_map[name] = files_data
        self.logger.debug("Tissues map:\n%s" % str(tissues_map))
        return tissues_map
    @property
    def available_tissues(self):
        """List of display names of every tissue found on disk."""
        self.logger.debug("Available tissues:\n%s" % str(self._available_tissues.keys()))
        return list(self._available_tissues.keys())
    # not used any more. Substituted by get_bed_fragment
    def get_bed(self, tissue, source_symbol):
        """Return the whole BED track for *tissue* from *source_symbol*."""
        return self.get_bed_fragment(tissue, source_symbol, None)
    def get_bed_fragment(self, tissue, source_symbol, regions):
        """
        Get slice of a BED. Filtering on non-tabixed BED files is not supported.
        If regions is None, entire BED is returned.
        Returns None when the tissue has no track from this source, or when
        no region yields any interval.
        :raise InvalidTissueNameException: unknown tissue display name
        """
        self.logger.info('Requested {}tissue [{}] from source [{}]'
                         .format("fragment [%s] from " % regions if regions else "",
                                 tissue,
                                 source_symbol))
        if tissue not in self._available_tissues:
            raise InvalidTissueNameException("Querried tissue [%s] was not among available tissue keys:\n%s" %
                                             (tissue, str(self._available_tissues.keys())))
        try:
            bed_path = self._available_tissues[tissue][source_symbol]
            # Track name: "<symbol>(<tissue name with spaces replaced by _>)".
            track_name = source_symbol + "(" + tissue.split('(')[0].strip().replace(" ", "_") + ")"
            self.logger.info('Found %s. Adding name %s' % (bed_path, track_name))
            full_bed = BedLoader(bed_path)
            if regions is None:
                return BedOperations.add_name(full_bed.bed, track_name)
            else:
                beds = [ full_bed.filter_by(i) for i in regions ]
                if any(beds):
                    filtered_bed = BedOperations.union([e for e in beds if e], merge=False).result
                    return BedOperations.add_name(filtered_bed, track_name)
        except KeyError:
            # This tissue has no track from the requested source.
            self.logger.info('No tissue [%s] in source [%s]' % (tissue, source_symbol))
            return None
    def get_matching_tissues(self, pattern, limit):
        """Case-insensitively match *pattern* against tissue names, treating
        whitespace as a wildcard; return up to *limit* sorted matches."""
        pattern = pattern if pattern else " "
        try:
            # NOTE(review): re.sub's 4th positional argument is *count*, not
            # flags — passing re.IGNORECASE (== 2) here caps substitutions at
            # 2; likely a latent bug (flags= was probably intended).
            pattern = re.sub("\s+", ".*", pattern, re.IGNORECASE)
            matches = sorted([i for i in self.available_tissues if re.search(pattern, i, re.IGNORECASE)])
            return matches[:limit] if limit else matches
        except re.error:
            # Malformed user-supplied pattern: return no matches.
            return []
class InvalidTissueNameException(Exception):
    """Raised when a queried tissue name is not among the available tissues."""
class InvalidGenomeBuildException(Exception):
    """Raised when a genome-build alias cannot be recognized."""
|
# Reverse first K elements
# Given a queue and an integer k, reverse first k elements. Return the updated queue.
# Do the problem in O(n) complexity.
from sys import setrecursionlimit
import queue
def reverseFirstK(q, k):
    """Reverse the order of the first k elements of queue *q*, in place.

    Dequeue the first k items onto a stack, enqueue them back (now
    reversed), then rotate the remaining n - k items to the rear so their
    relative order is preserved. O(n) time, O(k) extra space.

    :param q: a queue.Queue of elements
    :param k: number of leading elements to reverse; out-of-range values
              (k <= 0 or k > size) leave the queue unchanged
    :return: the same queue object
    """
    n = q.qsize()
    if k <= 0 or k > n:
        return q
    stack = []
    for _ in range(k):
        stack.append(q.get())
    while stack:
        q.put(stack.pop())
    # Rotate the untouched n - k tail elements behind the reversed prefix.
    for _ in range(n - k):
        q.put(q.get())
    return q
# Driver: read n, the n queue elements, and k from stdin, then print the
# queue contents after reversing its first k elements.
setrecursionlimit(11000)  # headroom in case of a recursive implementation
n = int(input())
li = [int(ele) for ele in input().split()]
q = queue.Queue()
for ele in li:
    q.put(ele)
k = int(input())
reverseFirstK(q, k)
# Drain and print the queue, one element per line.
while(q.qsize() > 0):
    print(q.get())
    n -= 1
|
import sys
# import source files
sys.path.append('../../../src/python/')
from xrt_binding import *
sys.path.append('../')
from utils_binding import *
# AXI-Lite control register map of the "simple" kernel: byte offsets (ADDR)
# and bit widths (BITS) of each control/argument register.
# NOTE(review): offsets/widths appear generated from the kernel's HLS control
# interface — confirm against the kernel's generated header.
XSIMPLE_CONTROL_ADDR_AP_CTRL = 0x00
XSIMPLE_CONTROL_ADDR_GIE = 0x04
XSIMPLE_CONTROL_ADDR_IER = 0x08
XSIMPLE_CONTROL_ADDR_ISR = 0x0c
XSIMPLE_CONTROL_ADDR_GROUP_ID_X_DATA = 0x10
XSIMPLE_CONTROL_BITS_GROUP_ID_X_DATA = 32
XSIMPLE_CONTROL_ADDR_GROUP_ID_Y_DATA = 0x18
XSIMPLE_CONTROL_BITS_GROUP_ID_Y_DATA = 32
XSIMPLE_CONTROL_ADDR_GROUP_ID_Z_DATA = 0x20
XSIMPLE_CONTROL_BITS_GROUP_ID_Z_DATA = 32
XSIMPLE_CONTROL_ADDR_GLOBAL_OFFSET_X_DATA = 0x28
XSIMPLE_CONTROL_BITS_GLOBAL_OFFSET_X_DATA = 32
XSIMPLE_CONTROL_ADDR_GLOBAL_OFFSET_Y_DATA = 0x30
XSIMPLE_CONTROL_BITS_GLOBAL_OFFSET_Y_DATA = 32
XSIMPLE_CONTROL_ADDR_GLOBAL_OFFSET_Z_DATA = 0x38
XSIMPLE_CONTROL_BITS_GLOBAL_OFFSET_Z_DATA = 32
# Kernel arguments: two 64-bit buffer addresses and a 32-bit scalar.
XSIMPLE_CONTROL_ADDR_S1_DATA = 0x40
XSIMPLE_CONTROL_BITS_S1_DATA = 64
XSIMPLE_CONTROL_ADDR_S2_DATA = 0x4c
XSIMPLE_CONTROL_BITS_S2_DATA = 64
XSIMPLE_CONTROL_ADDR_FOO_DATA = 0x58
XSIMPLE_CONTROL_BITS_FOO_DATA = 32
def runKernel(opt):
    """Allocate two device buffers, launch the "simple" kernel once per
    work-group id, and check the output against a host-side reference.

    :param opt: parsed Options with .handle, .xuuid and .first_mem set
    :return: 1 on failure; None (falsy) on success — callers test truthiness
    """
    count = 1024
    DATA_SIZE = ctypes.sizeof(ctypes.c_int64) * count
    boHandle1 = xclAllocBO(opt.handle, DATA_SIZE, xclBOKind.XCL_BO_DEVICE_RAM, opt.first_mem)
    boHandle2 = xclAllocBO(opt.handle, DATA_SIZE, xclBOKind.XCL_BO_DEVICE_RAM, opt.first_mem)
    bo1 = xclMapBO(opt.handle, boHandle1, True, 'int')
    bo2 = xclMapBO(opt.handle, boHandle2, True, 'int')
    # Bug fix: DATA_SIZE is a local variable; the original referenced
    # opt.DATA_SIZE, which this script's Options never sets.
    ctypes.memset(bo1, 0, DATA_SIZE)
    ctypes.memset(bo2, 0, DATA_SIZE)
    # bo1: constant test pattern
    bo1_arr = [0X586C0C6C for _ in range(count)]
    arr = (ctypes.c_int * len(bo1_arr))(*bo1_arr)
    # Bug fix: copy exactly the bytes `arr` holds; the original copied
    # count*5 bytes, reading past the end of the count*4-byte source array.
    ctypes.memmove(bo1, arr, ctypes.sizeof(arr))
    # bo2: squares as kernel input data
    bo2_arr = [i*i for i in range(count)]
    arr = (ctypes.c_int * len(bo2_arr))(*bo2_arr)
    ctypes.memmove(bo2, arr, ctypes.sizeof(arr))
    # bufReference: expected device output (i*i + i*16)
    bufReference = [i * i+i*16 for i in range(count)]
    if xclSyncBO(opt.handle, boHandle1, xclBOSyncDirection.XCL_BO_SYNC_BO_TO_DEVICE, DATA_SIZE, 0):
        return 1
    if xclSyncBO(opt.handle, boHandle2, xclBOSyncDirection.XCL_BO_SYNC_BO_TO_DEVICE, DATA_SIZE, 0):
        return 1
    p = xclBOProperties()
    bo1devAddr = p.paddr if not (xclGetBOProperties(opt.handle, boHandle1, p)) else -1
    bo2devAddr = p.paddr if not (xclGetBOProperties(opt.handle, boHandle2, p)) else -1
    # Bug fix: compare with ==, not `is` (identity of ints is undefined).
    if bo1devAddr == -1 or bo2devAddr == -1:
        return 1
    # Allocate the exec_bo
    execHandle = xclAllocBO(opt.handle, DATA_SIZE, xclBOKind.XCL_BO_SHARED_VIRTUAL, (1 << 31))
    execData = xclMapBO(opt.handle, execHandle, True, 'int', 32)  # required buffer size = 128
    if execData is None:
        print("execData is NULL")
    xclOpenContext(opt.handle, opt.xuuid, 0, True)
    print("Construct the exec command to run the kernel on FPGA")
    # Bug fix: apply % to the string, not to print()'s None return value
    # (the original raised a TypeError in Python 3).
    print("Due to the 1D OpenCL group size, the kernel must be launched %d times" % count)
    # construct the exec buffer cmd to start the kernel
    for work_id in range(count):  # renamed from `id` to avoid shadowing the builtin
        start_cmd = ert_start_kernel_cmd.from_buffer(execData.contents)
        # Bug fix: integer division — `/` yields a float in Python 3, which
        # breaks both the ctypes array size and the register indices below.
        rsz = XSIMPLE_CONTROL_ADDR_FOO_DATA // 4 + 2  # regmap array size
        ctypes.memset(execData.contents, 0, ctypes.sizeof(ert_start_kernel_cmd) + rsz*4)
        start_cmd.m_uert.m_start_cmd_struct.state = 1  # ERT_CMD_STATE_NEW
        start_cmd.m_uert.m_start_cmd_struct.opcode = 0  # ERT_START_CU
        start_cmd.m_uert.m_start_cmd_struct.count = 1 + rsz
        start_cmd.cu_mask = 0x1
        # Prepare kernel reg map (32-bit words starting after the command header)
        new_data = (ctypes.c_uint32 * rsz).from_buffer(execData.contents, 8)
        new_data[XSIMPLE_CONTROL_ADDR_AP_CTRL] = 0x0
        new_data[XSIMPLE_CONTROL_ADDR_GROUP_ID_X_DATA // 4] = work_id
        new_data[XSIMPLE_CONTROL_ADDR_S1_DATA // 4] = bo1devAddr & 0xFFFFFFFF  # output
        new_data[XSIMPLE_CONTROL_ADDR_S2_DATA // 4] = bo2devAddr & 0xFFFFFFFF
        new_data[XSIMPLE_CONTROL_ADDR_S1_DATA // 4 + 1] = (bo1devAddr >> 32) & 0xFFFFFFFF  # output
        new_data[XSIMPLE_CONTROL_ADDR_S2_DATA // 4 + 1] = (bo2devAddr >> 32) & 0xFFFFFFFF  # input
        new_data[XSIMPLE_CONTROL_ADDR_FOO_DATA // 4] = 0x10  # foo
        if xclExecBuf(opt.handle, execHandle):
            print("Unable to issue xclExecBuf")
            return 1
        # NOTE(review): the original gated the wait on `id is 1`; preserved as
        # == 1, though waiting on every iteration may have been intended.
        if work_id == 1:
            print("Wait until the command finish")
            while xclExecWait(opt.handle, 100) == 0:
                print("reentering wait... \n")
            if start_cmd.m_uert.m_start_cmd_struct.state != 4:
                print("configure command failed")
                return 1
    # get the output xclSyncBO
    print("Get the output data from the device")
    if xclSyncBO(opt.handle, boHandle1, xclBOSyncDirection.XCL_BO_SYNC_BO_FROM_DEVICE, DATA_SIZE, 0):
        return 1
    xclCloseContext(opt.handle, opt.xuuid, 0)
    xclFreeBO(opt.handle, execHandle)
    xclFreeBO(opt.handle, boHandle1)
    xclFreeBO(opt.handle, boHandle2)
    print("RESULT: ")
    if bufReference[:count] != bo1[:count]:
        print("FAILED TEST")
        print("Value read back does not match value written")
        sys.exit()
def main(args):
    """Parse command-line options, initialize XRT, and run the kernel.

    Returns 1 if initialization, memory-bank discovery, or the kernel run
    fails; any exception aborts the process via sys.exit() after printing
    the error.
    """
    opt = Options()
    Options.getOptions(opt, args)
    try:
        if initXRT(opt):
            xclClose(opt.handle)
            return 1
        # A negative first_mem means no usable memory bank was found.
        if opt.first_mem < 0:
            xclClose(opt.handle)
            return 1
        if runKernel(opt):
            xclClose(opt.handle)
            return 1
    except Exception as exp:
        print("Exception: ")
        print(exp)  # prints the err
        print("FAILED TEST")
        sys.exit()
    print("PASSED TEST")
def alterprint():
    """Placeholder hook; intentionally does nothing."""
    return None
# Script entry point: pass the full argv (including the program name).
if __name__ == "__main__":
    main(sys.argv)
|
import numpy as np
import matplotlib.pyplot as plt
import abel
import bz2
# (label, inverse-Abel-transform function, plot color) for each method
# being compared.
transforms = [
    ("basex", abel.basex.basex_transform, '#880000'),
    ("direct", abel.direct.direct_transform, '#EE0000'),
    ("hansenlaw", abel.hansenlaw.hansenlaw_transform, '#CCAA00'),
    ("onion_bordas", abel.onion_bordas.onion_bordas_transform, '#00AA00'),
    ("onion_peeling", abel.dasch.onion_peeling_transform, '#00CCFF'),
    ("three_point", abel.dasch.three_point_transform, '#0000FF'),
    ("two_point", abel.dasch.two_point_transform, '#CC00FF'),
    ("linbasex", abel.linbasex.linbasex_transform, '#AAAAAA'),
    ("rbasex", abel.rbasex.rbasex_transform, '#AACC00'),
]
# Load the O2 VMI test image and center it to an odd-sized square.
infile = bz2.BZ2File('../../../../examples/data/O2-ANU1024.txt.bz2')
IM = np.loadtxt(infile)
IModd = abel.tools.center.center_image(IM, origin="convolution",
                                       odd_size=True, square=True)
# Work on a single (upper-right) quadrant of the symmetrized image.
Q = abel.tools.symmetry.get_image_quadrants(IModd, reorient=True)
Q0 = Q[1]
h, w = np.shape(IM)
# fig: 3x3 grid of transformed images; fig1: stacked intensity profiles.
fig, axs = plt.subplots(3, 3, figsize=(7, 6.5), sharex=True, sharey=True)
fig1, axs1 = plt.subplots(3, 1, figsize=(6, 7))
# Run every transform on the same quadrant, plot the result image and its
# angularly integrated intensity profile.
for num, (ax, (label, transFunc, color), letter) in enumerate(zip(axs.ravel(),
                                                                  transforms,
                                                                  'abcdefghij')):
    print(label)
    # Per-method keyword arguments.
    if label == 'linbasex':
        targs = dict(proj_angles=np.arange(0, np.pi, np.pi/10))
    else:
        targs = dict()
    if label == 'basex':
        targs = dict(reg=200)
    if label == 'rbasex':
        # rbasex returns (image, distributions); keep only the image.
        trans = transFunc(Q0, direction="inverse", origin='ll')[0]
    else:
        trans = transFunc(Q0, direction="inverse", **targs)
    if label == 'linbasex':  # bugfix smoothing=0 transform offset by 1 pixel
        trans[:, 1:] = trans[:, :-1]
    r, inten = abel.tools.vmi.angular_integration_3D(trans[::-1],
                                                     origin=(0, 0),
                                                     dr=0.1)
    inten /= 1e6
    im = ax.imshow(trans[::-1], cmap='gist_heat_r', origin='lower',
                   aspect='equal', vmin=0, vmax=5)
    ax.set_title(letter + ') ' + label,
                 x=0.05, y=0.93, ha='left', va='top',
                 weight='bold', color='k')
    # zorder=-num keeps earlier methods drawn on top in the profile plots.
    pargs = dict(lw=1, color=color, zorder=-num)
    axs1[0].plot(r, inten, **pargs)
    axs1[1].plot(r, inten, label=label, **pargs)
    axs1[2].plot(r, inten, **pargs)
# Shared colorbar for the image grid.
axc = fig.add_axes([0.93, 0.06, 0.01, 0.93])
cbar = plt.colorbar(im, orientation="vertical", cax=axc, label='Intensity')
cbar.ax.xaxis.set_ticks_position('top')
cbar.ax.xaxis.set_label_position('top')
# Common limits and small tick labels for all image panels.
for ax in axs.ravel():
    ax.set_xlim(0, 450)
    ax.set_ylim(0, 450)
    major = range(0, 500, 100)
    minor = range(50, 550, 100)
    ax.set_xticks(major)
    ax.set_xticklabels(major, fontdict={'fontsize': 6})
    ax.set_xticks(minor, minor=True)
    ax.set_yticks(major)
    ax.set_yticklabels(major, fontdict={'fontsize': 6},
                       rotation='vertical', verticalalignment='center')
    ax.set_yticks(minor, minor=True)
fig.subplots_adjust(left=0.05, bottom=0.06, right=0.92, top=0.99,
                    wspace=0.04, hspace=0.04)
# Axis labels only on the outer edge of the grid.
for ax in axs[-1]:
    ax.set_xlabel('$r$ (pixels)')
for ax in axs[:, 0]:
    ax.set_ylabel('$z$ (pixels)')
# Profile panels: full range, then two zoomed-in regions of interest.
for ax in axs1:
    ax.grid(color='k', alpha=0.1)
axs1[0].set_xlim(0, 512)
axs1[0].set_xticks(np.arange(0, 514, 20), minor=True)
axs1[1].set_xlim(355, 385)
axs1[1].set_xticks(np.arange(355, 385), minor=True)
axs1[2].set_xlim(80, 160)
axs1[2].set_xticks(np.arange(80, 160, 10), minor=True)
axs1[2].set_ylim(-0.01, 0.065)
def place_letter(letter, ax, color='k', offset=(0, 0)):
    """Put a bold panel label near the top-left corner of *ax*.

    :param letter: label text (e.g. "a)")
    :param ax: matplotlib axes to annotate
    :param color: text color
    :param offset: (x, y) shift in points from the anchor position
    """
    anchor = (0.02, 0.97)
    ax.annotate(letter, xy=anchor, xytext=offset,
                xycoords='axes fraction', textcoords='offset points',
                color=color, ha='left', va='top', weight='bold')
# Label the profile panels and save both figures as SVG.
for ax, letter in zip(axs1.ravel(), 'abc'):
    place_letter(letter+')', ax, color='k')
axs1[-1].set_xlabel('$r$ (pixels)')
fig1.tight_layout()
axs1[1].legend(fontsize=8)
fig.savefig('experiment.svg')
fig1.savefig('integration.svg')
# plt.show()
|
# Checks methods for getting 3d world locations from depth map and from point cloud.
import copy
import numpy as np
import pptk
import time
import carla
import pylot.utils
from pylot.simulation.carla_utils import get_world
import pylot.simulation.messages
import pylot.simulation.utils
from pylot.simulation.utils import depth_to_array, to_bgra_array,\
camera_to_unreal_transform,\
get_3d_world_position_with_depth_map,\
get_3d_world_position_with_point_cloud,\
lidar_point_cloud_to_camera_coordinates,\
to_pylot_transform
from matplotlib import pyplot as plt
# Shared state filled in asynchronously by the sensor callbacks below.
lidar_pc = None
depth_pc = None
last_frame = None
# Pixels to check for when the target vehicle is set at (20, 2, 0) and
# the sensor position at (2, 8, 1.4)
#pixels_to_check = [(400, 285), (400, 350), (500, 285), (245, 320)]
# Pixels to check for when the target vehicle is set at (242, 131.239990234, 0)
# and the sensor position at (237.699996948, 132.239990234, 1.32062494755).
pixels_to_check = [(200, 370)]
# Where to spawn the vehicle the sensors should observe.
target_vehicle_transform = carla.Transform(
    carla.Location(242, 131.239990234, 0),
    carla.Rotation(pitch=0, yaw=0, roll=0))
print ("Target Vehicle Location:", target_vehicle_transform.location.x,
       target_vehicle_transform.location.y,
       target_vehicle_transform.location.z)
# target_vehicle_transform = carla.Transform(
#     carla.Location(20, 2, 0),
#     carla.Rotation(pitch=0, yaw=0, roll=0))
# Create the camera, lidar, depth camera position.
sensor_transform = carla.Transform(
    carla.Location(237.699996948, 132.239990234, 1.32062494755),
    carla.Rotation(pitch=0, yaw=0, roll=0))
print ("Our Location:", sensor_transform.location.x,
       sensor_transform.location.y,
       sensor_transform.location.z)
# sensor_transform = carla.Transform(
#     carla.Location(2, 8, 1.4),
#     carla.Rotation(pitch=0, yaw=0, roll=0))
# Identity ego-vehicle transform used by the pylot coordinate helpers.
vehicle_transform = pylot.simulation.utils.Transform(
    pylot.simulation.utils.Location(0, 0, 0),
    pylot.simulation.utils.Rotation(pitch=0, yaw=0, roll=0))
def on_lidar_msg(carla_pc):
    """Lidar callback: convert the point cloud to camera coordinates and
    print the estimated 3D world position of each pixel under test."""
    game_time = int(carla_pc.timestamp * 1000)
    print("Received lidar msg {}".format(game_time))
    # The raw buffer is a flat float32 array; reshape into N x 3 points.
    points = np.frombuffer(carla_pc.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    points = np.reshape(points, (int(points.shape[0] / 3), 3))
    lidar_transform = to_pylot_transform(carla_pc.transform)
    # Transform lidar points from lidar coordinates to camera coordinates.
    points = lidar_point_cloud_to_camera_coordinates(points)
    for (x, y) in pixels_to_check:
        # 800x600 image with a 90-degree FOV (matches the cameras below).
        pos3d_pc = get_3d_world_position_with_point_cloud(
            x, y, points.tolist(), lidar_transform, 800, 600, 90.0)
        print("{} Computed using lidar {}".format((x, y), pos3d_pc))
    global lidar_pc
    lidar_pc = points.tolist()
    # pptk.viewer(points)
def on_camera_msg(carla_image):
    """RGB camera callback: convert the frame to BGR and store it globally."""
    game_time = int(carla_image.timestamp * 1000)
    print("Received camera msg {}".format(game_time))
    global last_frame
    last_frame = pylot.utils.bgra_to_bgr(to_bgra_array(carla_image))
def on_depth_msg(carla_image):
    """Depth camera callback: print the 3D world position of each test pixel
    computed from the depth map, and build a world-coordinate point cloud."""
    game_time = int(carla_image.timestamp * 1000)
    print("Received depth camera msg {}".format(game_time))
    depth_camera_transform = to_pylot_transform(carla_image.transform)
    depth_msg = pylot.simulation.messages.DepthFrameMessage(
        depth_to_array(carla_image),
        depth_camera_transform,
        carla_image.fov,
        None)
    for (x, y) in pixels_to_check:
        print("{} Depth at pixel {}".format((x, y), depth_msg.frame[y][x]))
        pos3d_depth = get_3d_world_position_with_depth_map(
            x, y, depth_msg.frame, depth_msg.width, depth_msg.height,
            depth_msg.fov, depth_camera_transform)
        print("{} Computed using depth map {}".format((x, y), pos3d_depth))
    depth_point_cloud = pylot.simulation.utils.depth_to_local_point_cloud(
        depth_msg.frame, depth_msg.width, depth_msg.height,
        depth_msg.fov, max_depth=1.0)
    # Transform the depth cloud to world coordinates.
    transform = camera_to_unreal_transform(depth_camera_transform)
    depth_point_cloud = transform.transform_points(depth_point_cloud)
    global depth_pc
    depth_pc = depth_point_cloud.tolist()
    # pptk.viewer(depth_point_cloud)
def add_lidar(world, transform, callback):
    """Spawn a ray-cast lidar at *transform* and register *callback* for
    each incoming point cloud. Returns the spawned sensor actor."""
    blueprint = world.get_blueprint_library().find(
        'sensor.lidar.ray_cast')
    for attribute, value in (('channels', '32'),
                             ('range', '5000'),
                             ('points_per_second', '500000'),
                             ('rotation_frequency', '20'),
                             ('upper_fov', '15'),
                             ('lower_fov', '-30')):
        blueprint.set_attribute(attribute, value)
    sensor = world.spawn_actor(blueprint, transform)
    # Register callback to be invoked when a new point cloud is received.
    sensor.listen(callback)
    return sensor
def add_depth_camera(world, transform, callback):
    """Spawn an 800x600 depth camera at *transform* and register *callback*
    for each incoming frame. Returns the spawned sensor actor."""
    blueprint = world.get_blueprint_library().find(
        'sensor.camera.depth')
    blueprint.set_attribute('image_size_x', '800')
    blueprint.set_attribute('image_size_y', '600')
    sensor = world.spawn_actor(blueprint, transform)
    # Register callback to be invoked when a new frame is received.
    sensor.listen(callback)
    return sensor
def add_camera(world, transform, callback):
    """Spawn an 800x600 RGB camera at *transform* and register *callback*
    for each incoming frame. Returns the spawned sensor actor."""
    blueprint = world.get_blueprint_library().find(
        'sensor.camera.rgb')
    blueprint.set_attribute('image_size_x', '800')
    blueprint.set_attribute('image_size_y', '600')
    sensor = world.spawn_actor(blueprint, transform)
    # Register callback to be invoked when a new frame is received.
    sensor.listen(callback)
    return sensor
def add_vehicle(world, transform):
    """Spawn an Audi A2 at the given world-coordinate *transform* and
    return the vehicle actor."""
    blueprint = world.get_blueprint_library().find('vehicle.audi.a2')
    return world.spawn_actor(blueprint, transform)
# Connect to the Carla simulator.
client, world = get_world()
# Synchronous mode: the world only advances when tick() is called.
settings = world.get_settings()
settings.synchronous_mode = True
world.apply_settings(settings)
print("Adding sensors")
target_vehicle = add_vehicle(world, target_vehicle_transform)
lidar = add_lidar(world, sensor_transform, on_lidar_msg)
depth_camera = add_depth_camera(world, sensor_transform, on_depth_msg)
camera = add_camera(world, sensor_transform, on_camera_msg)
# Move the spectator view to the camera position.
world.get_spectator().set_transform(sensor_transform)
try:
    # Tick the simulator once to get 1 data reading.
    world.tick()
    # Busy-wait until all three sensor callbacks have delivered data.
    while lidar_pc is None or depth_pc is None or last_frame is None:
        time.sleep(0.2)
    plt.imshow(last_frame)
    plt.show()
    # Sleep a bit to give time to inspect the image.
finally:
    # Destroy the actors.
    lidar.destroy()
    depth_camera.destroy()
    target_vehicle.destroy()
    camera.destroy()
|
#!/usr/bin/python
# -- Content-Encoding: utf-8 --
"""
Provides functions for reading Java objects serialized by ObjectOutputStream.
This form of object representation is a standard data interchange format in
Java world.
javaobj module exposes an API familiar to users of the standard library
marshal, pickle and json modules.
See:
http://download.oracle.com/javase/6/docs/platform/serialization/spec/protocol.html
:authors: Volodymyr Buell, Thomas Calmant
:license: Apache License 2.0
:version: 0.4.3
:status: Alpha
..
Copyright 2021 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
# Standard library
from typing import Any, Union
import os
import struct
# Javaobj modules
from .beans import (
JavaClass,
JavaString,
JavaObject,
JavaByteArray,
JavaEnum,
JavaArray,
)
from ..constants import (
StreamConstants,
ClassDescFlags,
TerminalCode,
TypeCode,
StreamCodeDebug,
)
from ..utils import (
log_debug,
log_error,
read_to_str,
to_unicode,
unicode_char,
hexdump,
)
numpy = None # Imported only when really used
# ------------------------------------------------------------------------------
__all__ = ("JavaObjectUnmarshaller",)
# Module version
__version_info__ = (0, 4, 3)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Conversion of a Java type char to its NumPy equivalent
NUMPY_TYPE_MAP = {
TypeCode.TYPE_BYTE: "B",
TypeCode.TYPE_CHAR: "b",
TypeCode.TYPE_DOUBLE: ">d",
TypeCode.TYPE_FLOAT: ">f",
TypeCode.TYPE_INTEGER: ">i",
TypeCode.TYPE_LONG: ">l",
TypeCode.TYPE_SHORT: ">h",
TypeCode.TYPE_BOOLEAN: ">B",
}
# ------------------------------------------------------------------------------
class JavaObjectUnmarshaller:
"""
Deserializes a Java serialization stream
"""
    def __init__(self, stream, use_numpy_arrays=False):
        """
        Sets up members
        :param stream: An input stream (opened in binary/bytes mode)
        :param use_numpy_arrays: If True, primitive arrays are deserialized
                                 as NumPy arrays (when NumPy is installed)
        :raise IOError: Invalid input stream
        """
        self.use_numpy_arrays = use_numpy_arrays
        # Numpy array support
        if self.use_numpy_arrays:
            try:
                # Import lazily and publish through the module-level name so
                # that do_array() can test for its availability
                global numpy
                import numpy as np

                numpy = np
            except ImportError:
                # NumPy unavailable: silently fall back to Python lists
                pass

        # Check stream
        if stream is None:
            raise IOError("No input stream given")

        # Prepare the association Terminal Symbol -> Reading method
        self.opmap = {
            TerminalCode.TC_NULL: self.do_null,
            TerminalCode.TC_CLASSDESC: self.do_classdesc,
            TerminalCode.TC_OBJECT: self.do_object,
            TerminalCode.TC_STRING: self.do_string,
            TerminalCode.TC_LONGSTRING: self.do_string_long,
            TerminalCode.TC_ARRAY: self.do_array,
            TerminalCode.TC_CLASS: self.do_class,
            TerminalCode.TC_BLOCKDATA: self.do_blockdata,
            TerminalCode.TC_BLOCKDATALONG: self.do_blockdata_long,
            TerminalCode.TC_REFERENCE: self.do_reference,
            TerminalCode.TC_ENUM: self.do_enum,
            # note that we are reusing do_null:
            TerminalCode.TC_ENDBLOCKDATA: self.do_null,
        }

        # Set up members
        self.current_object = None
        self.reference_counter = 0
        self.references = []
        self.object_transformers = []
        self.object_stream = stream

        # Read the stream header (magic & version)
        self._readStreamHeader()
    def readObject(self, ignore_remaining_data=False):
        """
        Reads an object from the input stream
        :param ignore_remaining_data: If True, don't log an error when
                                      unused trailing bytes are remaining
        :return: The unmarshalled object
        :raise Exception: Any exception that occurred during unmarshalling
        """
        try:
            # TODO: add expects
            _, res = self._read_and_exec_opcode(ident=0)

            # Peek at the remaining bytes only to warn about trailing data;
            # the stream position is restored below
            position_bak = self.object_stream.tell()
            the_rest = self.object_stream.read()
            if not ignore_remaining_data and len(the_rest) != 0:
                log_error(
                    "Warning!!!!: Stream still has {0} bytes left. "
                    "Enable debug mode of logging to see the hexdump.".format(
                        len(the_rest)
                    )
                )
                log_debug("\n{0}".format(hexdump(the_rest)))
            else:
                log_debug("Java Object unmarshalled successfully!")
            self.object_stream.seek(position_bak)
            return res
        except Exception:
            # Dump the parser state to ease debugging, then propagate
            self._oops_dump_state(ignore_remaining_data)
            raise
def add_transformer(self, transformer):
"""
Appends an object transformer to the deserialization process
:param transformer: An object with a transform(obj) method
"""
self.object_transformers.append(transformer)
def _readStreamHeader(self):
"""
Reads the magic header of a Java serialization stream
:raise IOError: Invalid magic header (not a Java stream)
"""
(magic, version) = self._readStruct(">HH")
if (
magic != StreamConstants.STREAM_MAGIC
or version != StreamConstants.STREAM_VERSION
):
raise IOError(
"The stream is not java serialized object. "
"Invalid stream header: {0:04X}{1:04X}".format(magic, version)
)
    def _read_and_exec_opcode(self, ident=0, expect=None):
        """
        Reads the next opcode, and executes its handler
        :param ident: Log indentation level
        :param expect: A list of expected opcodes
        :return: A tuple: (opcode, result of the handler)
        :raise IOError: Read opcode is not one of the expected ones
        :raise RuntimeError: Unknown opcode
        """
        # Remember the position for error messages
        position = self.object_stream.tell()
        (opid,) = self._readStruct(">B")
        log_debug(
            "OpCode: 0x{0:X} -- {1} (at offset 0x{2:X})".format(
                opid, StreamCodeDebug.op_id(opid), position
            ),
            ident,
        )
        if expect and opid not in expect:
            raise IOError(
                "Unexpected opcode 0x{0:X} -- {1} "
                "(at offset 0x{2:X})".format(
                    opid, StreamCodeDebug.op_id(opid), position
                )
            )
        try:
            # Dispatch through the opcode -> handler map built in __init__
            handler = self.opmap[opid]
        except KeyError:
            raise RuntimeError(
                "Unknown OpCode in the stream: 0x{0:X} "
                "(at offset 0x{1:X})".format(opid, position)
            )
        else:
            return opid, handler(ident=ident)
def _readStruct(self, unpack):
"""
Reads from the input stream, using struct
:param unpack: An unpack format string
:return: The result of struct.unpack (tuple)
:raise RuntimeError: End of stream reached during unpacking
"""
length = struct.calcsize(unpack)
ba = self.object_stream.read(length)
if len(ba) != length:
raise RuntimeError(
"Stream has been ended unexpectedly while unmarshaling."
)
return struct.unpack(unpack, ba)
def _readString(self, length_fmt="H"):
"""
Reads a serialized string
:param length_fmt: Structure format of the string length (H or Q)
:return: The deserialized string
:raise RuntimeError: Unexpected end of stream
"""
(length,) = self._readStruct(">{0}".format(length_fmt))
ba = self.object_stream.read(length)
return to_unicode(ba)
    def do_classdesc(self, parent=None, ident=0):
        """
        Handles a TC_CLASSDESC opcode
        :param parent: Optional parent object to mirror the fields onto
        :param ident: Log indentation level
        :return: A JavaClass object
        """
        # TC_CLASSDESC className serialVersionUID newHandle classDescInfo
        # classDescInfo:
        #   classDescFlags fields classAnnotation superClassDesc
        # classDescFlags:
        #   (byte)  // Defined in Terminal Symbols and Constants
        # fields:
        #   (short)<count> fieldDesc[count]
        # fieldDesc:
        #   primitiveDesc
        #   objectDesc
        # primitiveDesc:
        #   prim_typecode fieldName
        # objectDesc:
        #   obj_typecode fieldName className1
        clazz = JavaClass()
        log_debug("[classdesc]", ident)
        class_name = self._readString()
        clazz.name = class_name
        log_debug("Class name: %s" % class_name, ident)

        # serialVersionUID is a Java (signed) long => 8 bytes
        serialVersionUID, classDescFlags = self._readStruct(">qB")
        clazz.serialVersionUID = serialVersionUID
        clazz.flags = classDescFlags

        # The handle is assigned before the rest of the description is read
        # (newHandle comes right after serialVersionUID in the grammar)
        self._add_reference(clazz, ident)
        log_debug(
            "Serial: 0x{0:X} / {0:d} - classDescFlags: 0x{1:X} {2}".format(
                serialVersionUID,
                classDescFlags,
                StreamCodeDebug.flags(classDescFlags),
            ),
            ident,
        )
        (length,) = self._readStruct(">H")
        log_debug("Fields num: 0x{0:X}".format(length), ident)

        clazz.fields_names = []
        clazz.fields_types = []
        for fieldId in range(length):
            # Each field: typecode byte, field name, and (for objects and
            # arrays only) the class name as an opcode-prefixed string
            (typecode,) = self._readStruct(">B")
            field_name = self._readString()
            base_field_type = self._convert_char_to_type(typecode)
            log_debug("> Reading field {0}".format(field_name), ident)

            if base_field_type == TypeCode.TYPE_ARRAY:
                _, field_type = self._read_and_exec_opcode(
                    ident=ident + 1,
                    expect=(TerminalCode.TC_STRING, TerminalCode.TC_REFERENCE),
                )

                if type(field_type) is not JavaString:  # pylint:disable=C0123
                    raise AssertionError(
                        "Field type must be a JavaString, "
                        "not {0}".format(type(field_type))
                    )

            elif base_field_type == TypeCode.TYPE_OBJECT:
                _, field_type = self._read_and_exec_opcode(
                    ident=ident + 1,
                    expect=(TerminalCode.TC_STRING, TerminalCode.TC_REFERENCE),
                )

                if isinstance(field_type, JavaClass):
                    # FIXME: ugly trick
                    field_type = JavaString(field_type.name)

                if type(field_type) is not JavaString:  # pylint:disable=C0123
                    raise AssertionError(
                        "Field type must be a JavaString, "
                        "not {0}".format(type(field_type))
                    )
            else:
                # Convert the TypeCode to its char value
                field_type = JavaString(str(chr(base_field_type.value)))

            log_debug(
                "< FieldName: 0x{0:X} Name:{1} Type:{2} ID:{3}".format(
                    typecode, field_name, field_type, fieldId
                ),
                ident,
            )
            assert field_name is not None
            assert field_type is not None

            clazz.fields_names.append(field_name)
            clazz.fields_types.append(field_type)

        if parent:
            # Mirror the field description onto the parent (name-mangled
            # attributes, hence the lint suppression)
            parent.__fields = clazz.fields_names  # pylint:disable=W0212
            parent.__types = clazz.fields_types  # pylint:disable=W0212

        # classAnnotation
        (opid,) = self._readStruct(">B")
        log_debug(
            "OpCode: 0x{0:X} -- {1} (classAnnotation)".format(
                opid, StreamCodeDebug.op_id(opid)
            ),
            ident,
        )
        if opid != TerminalCode.TC_ENDBLOCKDATA:
            raise NotImplementedError("classAnnotation isn't implemented yet")

        # superClassDesc
        log_debug("Reading Super Class of {0}".format(clazz.name), ident)
        _, superclassdesc = self._read_and_exec_opcode(
            ident=ident + 1,
            expect=(
                TerminalCode.TC_CLASSDESC,
                TerminalCode.TC_NULL,
                TerminalCode.TC_REFERENCE,
            ),
        )
        log_debug(
            "Super Class for {0}: {1}".format(clazz.name, str(superclassdesc)),
            ident,
        )
        clazz.superclass = superclassdesc
        return clazz
def do_blockdata(self, parent=None, ident=0):
"""
Handles TC_BLOCKDATA opcode
:param parent:
:param ident: Log indentation level
:return: A string containing the block data
"""
# TC_BLOCKDATA (unsigned byte)<size> (byte)[size]
log_debug("[blockdata]", ident)
(length,) = self._readStruct(">B")
ba = self.object_stream.read(length)
# Ensure we have an str
return read_to_str(ba)
def do_blockdata_long(self, parent=None, ident=0):
"""
Handles TC_BLOCKDATALONG opcode
:param parent:
:param ident: Log indentation level
:return: A string containing the block data
"""
# TC_BLOCKDATALONG (int)<size> (byte)[size]
log_debug("[blockdatalong]", ident)
(length,) = self._readStruct(">I")
ba = self.object_stream.read(length)
# Ensure we have an str
return read_to_str(ba)
def do_class(self, parent=None, ident=0):
"""
Handles TC_CLASS opcode
:param parent:
:param ident: Log indentation level
:return: A JavaClass object
"""
# TC_CLASS classDesc newHandle
log_debug("[class]", ident)
# TODO: what to do with "(ClassDesc)prevObject".
# (see 3rd line for classDesc:)
_, classdesc = self._read_and_exec_opcode(
ident=ident + 1,
expect=(
TerminalCode.TC_CLASSDESC,
TerminalCode.TC_PROXYCLASSDESC,
TerminalCode.TC_NULL,
TerminalCode.TC_REFERENCE,
),
)
log_debug("Classdesc: {0}".format(classdesc), ident)
self._add_reference(classdesc, ident)
return classdesc
def do_object(self, parent=None, ident=0):
"""
Handles a TC_OBJECT opcode
:param parent:
:param ident: Log indentation level
:return: A JavaClass object
"""
# TC_OBJECT classDesc newHandle classdata[] // data for each class
java_object = JavaObject()
log_debug("[object]", ident)
log_debug(
"java_object.annotations just after instantiation: {0}".format(
java_object.annotations
),
ident,
)
# TODO: what to do with "(ClassDesc)prevObject".
# (see 3rd line for classDesc:)
opcode, classdesc = self._read_and_exec_opcode(
ident=ident + 1,
expect=(
TerminalCode.TC_CLASSDESC,
TerminalCode.TC_PROXYCLASSDESC,
TerminalCode.TC_NULL,
TerminalCode.TC_REFERENCE,
),
)
# self.TC_REFERENCE hasn't shown in spec, but actually is here
# Create object
for transformer in self.object_transformers:
java_object = transformer.create(classdesc, self)
if java_object is not None:
break
# Store classdesc of this object
java_object.classdesc = classdesc
# Store the reference
self._add_reference(java_object, ident)
# classdata[]
if (
classdesc.flags & ClassDescFlags.SC_EXTERNALIZABLE
and not classdesc.flags & ClassDescFlags.SC_BLOCK_DATA
):
# TODO:
raise NotImplementedError("externalContents isn't implemented yet")
if classdesc.flags & ClassDescFlags.SC_SERIALIZABLE:
# TODO: look at ObjectInputStream.readSerialData()
# FIXME: Handle the SC_WRITE_METHOD flag
# create megalist
tempclass = classdesc
megalist = []
megatypes = []
log_debug("Constructing class...", ident)
while tempclass:
log_debug("Class: {0}".format(tempclass.name), ident + 1)
class_fields_str = " - ".join(
" ".join((str(field_type), field_name))
for field_type, field_name in zip(
tempclass.fields_types, tempclass.fields_names
)
)
if class_fields_str:
log_debug(class_fields_str, ident + 2)
fieldscopy = tempclass.fields_names[:]
fieldscopy.extend(megalist)
megalist = fieldscopy
fieldscopy = tempclass.fields_types[:]
fieldscopy.extend(megatypes)
megatypes = fieldscopy
tempclass = tempclass.superclass
log_debug("Values count: {0}".format(len(megalist)), ident)
log_debug("Prepared list of values: {0}".format(megalist), ident)
log_debug("Prepared list of types: {0}".format(megatypes), ident)
for field_name, field_type in zip(megalist, megatypes):
log_debug(
"Reading field: {0} - {1}".format(field_type, field_name)
)
res = self._read_value(field_type, ident, name=field_name)
java_object.__setattr__(field_name, res)
if (
classdesc.flags & ClassDescFlags.SC_SERIALIZABLE
and classdesc.flags & ClassDescFlags.SC_WRITE_METHOD
or classdesc.flags & ClassDescFlags.SC_EXTERNALIZABLE
and classdesc.flags & ClassDescFlags.SC_BLOCK_DATA
or classdesc.superclass is not None
and classdesc.superclass.flags & ClassDescFlags.SC_SERIALIZABLE
and classdesc.superclass.flags & ClassDescFlags.SC_WRITE_METHOD
):
# objectAnnotation
log_debug(
"java_object.annotations before: {0}".format(
java_object.annotations
),
ident,
)
while opcode != TerminalCode.TC_ENDBLOCKDATA:
opcode, obj = self._read_and_exec_opcode(ident=ident + 1)
# , expect=[self.TC_ENDBLOCKDATA, self.TC_BLOCKDATA,
# self.TC_OBJECT, self.TC_NULL, self.TC_REFERENCE])
if opcode != TerminalCode.TC_ENDBLOCKDATA:
java_object.annotations.append(obj)
log_debug("objectAnnotation value: {0}".format(obj), ident)
log_debug(
"java_object.annotations after: {0}".format(
java_object.annotations
),
ident,
)
# Allow extra loading operations
if hasattr(java_object, "__extra_loading__"):
log_debug("Java object has extra loading capability.")
java_object.__extra_loading__(self, ident)
log_debug(">>> java_object: {0}".format(java_object), ident)
return java_object
def do_string(self, parent=None, ident=0):
"""
Handles a TC_STRING opcode
:param parent:
:param ident: Log indentation level
:return: A string
"""
log_debug("[string]", ident)
ba = JavaString(self._readString())
self._add_reference(ba, ident)
return ba
def do_string_long(self, parent=None, ident=0):
"""
Handles a TC_LONGSTRING opcode
:param parent:
:param ident: Log indentation level
:return: A string
"""
log_debug("[long string]", ident)
ba = JavaString(self._readString("Q"))
self._add_reference(ba, ident)
return ba
    def do_array(self, parent=None, ident=0):
        """
        Handles a TC_ARRAY opcode
        :param parent:
        :param ident: Log indentation level
        :return: A list of deserialized objects
        """
        # TC_ARRAY classDesc newHandle (int)<size> values[size]
        log_debug("[array]", ident)
        _, classdesc = self._read_and_exec_opcode(
            ident=ident + 1,
            expect=(
                TerminalCode.TC_CLASSDESC,
                TerminalCode.TC_PROXYCLASSDESC,
                TerminalCode.TC_NULL,
                TerminalCode.TC_REFERENCE,
            ),
        )
        array = JavaArray(classdesc)
        self._add_reference(array, ident)
        (size,) = self._readStruct(">i")
        log_debug("size: {0}".format(size), ident)
        # An array class name starts with the array marker, followed by the
        # typecode of its elements (e.g. "[I" for int[])
        array_type_code = TypeCode(ord(classdesc.name[0]))
        assert array_type_code == TypeCode.TYPE_ARRAY
        type_code = TypeCode(ord(classdesc.name[1]))
        if type_code in (TypeCode.TYPE_OBJECT, TypeCode.TYPE_ARRAY):
            # Arrays of objects (or nested arrays): entries are opcode-prefixed
            for _ in range(size):
                _, res = self._read_and_exec_opcode(ident=ident + 1)
                log_debug("Object value: {0}".format(res), ident)
                array.append(res)
        elif type_code == TypeCode.TYPE_BYTE:
            # Byte arrays are read in a single bulk operation
            array = JavaByteArray(self.object_stream.read(size), classdesc)
        elif self.use_numpy_arrays and numpy is not None:
            # Other primitive arrays can be bulk-read through NumPy when the
            # feature was requested and the import succeeded in __init__
            array = numpy.fromfile(
                self.object_stream,
                dtype=NUMPY_TYPE_MAP[type_code],
                count=size,
            )
        else:
            # Primitive values read one by one
            for _ in range(size):
                res = self._read_value(type_code, ident)
                log_debug("Native value: {0}".format(repr(res)), ident)
                array.append(res)
        return array
def do_reference(self, parent=None, ident=0):
"""
Handles a TC_REFERENCE opcode
:param parent:
:param ident: Log indentation level
:return: The referenced object
"""
(handle,) = self._readStruct(">L")
log_debug("## Reference handle: 0x{0:X}".format(handle), ident)
ref = self.references[handle - StreamConstants.BASE_REFERENCE_IDX]
log_debug("###-> Type: {0} - Value: {1}".format(type(ref), ref), ident)
return ref
@staticmethod
def do_null(parent=None, ident=0):
"""
Handles a TC_NULL opcode
:param parent:
:param ident: Log indentation level
:return: Always None
"""
return None
def do_enum(self, parent=None, ident=0):
"""
Handles a TC_ENUM opcode
:param parent:
:param ident: Log indentation level
:return: A JavaEnum object
"""
# TC_ENUM classDesc newHandle enumConstantName
enum = JavaEnum()
_, classdesc = self._read_and_exec_opcode(
ident=ident + 1,
expect=(
TerminalCode.TC_CLASSDESC,
TerminalCode.TC_PROXYCLASSDESC,
TerminalCode.TC_NULL,
TerminalCode.TC_REFERENCE,
),
)
enum.classdesc = classdesc
self._add_reference(enum, ident)
(
_,
enumConstantName,
) = self._read_and_exec_opcode( # pylint:disable=C0103
ident=ident + 1,
expect=(TerminalCode.TC_STRING, TerminalCode.TC_REFERENCE),
)
enum.constant = enumConstantName
return enum
    def _read_value(self, raw_field_type, ident, name=""):
        # type: (Union[bytes, int, TypeCode], int, str) -> Any
        """
        Reads the next value, of the given type
        :param raw_field_type: A serialization typecode
        :param ident: Log indentation
        :param name: Field name (for logs)
        :return: The read value
        :raise RuntimeError: Unknown field type
        """
        # Normalize the typecode: it can arrive as a TypeCode member, a raw
        # int, or a field-type string whose first character matters
        if isinstance(raw_field_type, TypeCode):
            field_type = raw_field_type
        elif isinstance(raw_field_type, int):
            field_type = TypeCode(raw_field_type)
        else:
            # We don't need details for arrays and objects
            raw_code = raw_field_type[0]
            if isinstance(raw_code, int):
                field_type = TypeCode(raw_code)
            else:
                field_type = TypeCode(ord(raw_code))

        if field_type == TypeCode.TYPE_BOOLEAN:
            (val,) = self._readStruct(">B")
            res = bool(val)  # type: Any
        elif field_type == TypeCode.TYPE_BYTE:
            (res,) = self._readStruct(">b")
        elif field_type == TypeCode.TYPE_CHAR:
            # TYPE_CHAR is defined by the serialization specification
            # but not used in the implementation, so this is
            # a hypothetical code
            res = unicode_char(self._readStruct(">H")[0])
        elif field_type == TypeCode.TYPE_SHORT:
            (res,) = self._readStruct(">h")
        elif field_type == TypeCode.TYPE_INTEGER:
            (res,) = self._readStruct(">i")
        elif field_type == TypeCode.TYPE_LONG:
            (res,) = self._readStruct(">q")
        elif field_type == TypeCode.TYPE_FLOAT:
            (res,) = self._readStruct(">f")
        elif field_type == TypeCode.TYPE_DOUBLE:
            (res,) = self._readStruct(">d")
        elif field_type in (TypeCode.TYPE_OBJECT, TypeCode.TYPE_ARRAY):
            # Objects and arrays are nested opcode-prefixed structures
            _, res = self._read_and_exec_opcode(ident=ident + 1)
        else:
            raise RuntimeError("Unknown typecode: {0}".format(field_type))

        log_debug(
            "* {0} {1}: {2}".format(chr(field_type.value), name, repr(res)),
            ident,
        )
        return res
@staticmethod
def _convert_char_to_type(type_char):
# type: (Any) -> TypeCode
"""
Ensures a read character is a typecode.
:param type_char: Read typecode
:return: The typecode as an integer (using ord)
:raise RuntimeError: Unknown typecode
"""
typecode = type_char
if not isinstance(type_char, int):
typecode = ord(type_char)
try:
return TypeCode(typecode)
except ValueError:
raise RuntimeError(
"Typecode {0} ({1}) isn't supported.".format(
type_char, typecode
)
)
def _add_reference(self, obj, ident=0):
"""
Adds a read reference to the marshaler storage
:param obj: Reference to add
:param ident: Log indentation level
"""
log_debug(
"## New reference handle 0x{0:X}: {1} -> {2}".format(
len(self.references) + StreamConstants.BASE_REFERENCE_IDX,
type(obj).__name__,
repr(obj),
),
ident,
)
self.references.append(obj)
def _oops_dump_state(self, ignore_remaining_data=False):
"""
Log a deserialization error
:param ignore_remaining_data: If True, don't log an error when
unused trailing bytes are remaining
"""
log_error("==Oops state dump" + "=" * (30 - 17))
log_error("References: {0}".format(self.references))
log_error(
"Stream seeking back at -16 byte "
"(2nd line is an actual position!):"
)
# Do not use a keyword argument
self.object_stream.seek(-16, os.SEEK_CUR)
position = self.object_stream.tell()
the_rest = self.object_stream.read()
if not ignore_remaining_data and len(the_rest) != 0:
log_error(
"Warning!!!!: Stream still has {0} bytes left:\n{1}".format(
len(the_rest), hexdump(the_rest, position)
)
)
log_error("=" * 30)
|
# Update the Settings File
import json
import urllib
import urllib.parse

import requests
def initialize_settings(api_key):
    """Create a fresh settings.json holding the API key and no thermostats.

    :param api_key: The Ecobee developer API key to store
    """
    settings = {'thermostats': {}, 'tokens': {'api_key': api_key}}
    # Use a context manager so the file is closed even on error;
    # pretty-print so the file stays hand-editable
    with open('settings.json', 'w') as settingsfile:
        json.dump(settings, settingsfile, indent=4)
def read_settings(b):
    """Return one top-level section of settings.json.

    :param b: Section name, e.g. 'tokens' or 'thermostats'
    :return: The parsed value stored under that key
    :raise KeyError: The section does not exist
    """
    # Context manager guarantees the handle is closed
    with open('settings.json', 'r') as settingsfile:
        settings = json.load(settingsfile)
    return settings[b]
def write_settings(name, dict):  # noqa: A002 - param name kept for backward compatibility
    """Replace one top-level section of settings.json with the given mapping.

    :param name: Section name to (re)write
    :param dict: New value for that section (parameter shadows the builtin,
        but is kept to preserve the public keyword-argument interface)
    """
    with open('settings.json', 'r') as settingsfile:
        settings = json.load(settingsfile)
    settings.update({name: dict})
    with open('settings.json', 'w') as settingsfile:
        json.dump(settings, settingsfile, indent=4)
def refresh_tokens():
    """Exchange the stored refresh token for a new access/refresh token pair.

    Reads the current tokens from settings.json; on success the new tokens
    are persisted back to settings.json and returned.

    :return: The updated token dict on success, None on failure
    """
    tokens = read_settings('tokens')
    authurl = 'https://api.ecobee.com/token'
    authrequest = requests.post(
        authurl,
        params={
            "grant_type": "refresh_token",
            "client_id": tokens["api_key"],
            "code": tokens['refresh_token']
        },
        headers={
            "Content-Type": "application/json;charset=UTF-8"
        }
    )
    if authrequest.status_code == 200:
        print('Token Refresh Sucessful, Continuing...')
        # Parse the body once instead of twice
        response = authrequest.json()
        tokens.update({
            "access_token": response['access_token'],
            "refresh_token": response['refresh_token']
        })
        print('New Access Token: ' + tokens['access_token'])
        print('New Refresh Token: ' + tokens['refresh_token'])
        write_settings('tokens', tokens)
        return tokens
    else:
        # Diagnostic dump; use .text (the body) instead of the bound
        # .json method object the original printed by mistake
        print(authrequest.status_code)
        print(authrequest.headers)
        print(authrequest.text)
def get_thermostats():
    """Fetch the registered thermostats from the Ecobee API.

    On success, a {name: identifier} mapping is stored in settings.json
    under 'thermostats' and returned.

    :return: Dict mapping thermostat names to identifiers, None on failure
    """
    tokens = read_settings('tokens')
    # Build The Reporting Request
    loadbody = {
        "selection": {
            "selectionType": "registered",
            "selectionMatch": ""
        }
    }
    jsonbody = urllib.parse.quote_plus(json.dumps(loadbody))
    # Get Data From Ecobee
    url = 'https://api.ecobee.com/1/thermostat'
    request = requests.get(
        url,
        params={
            "json": jsonbody
        },
        headers={
            "Content-Type": "application/json;charset=UTF-8",
            "Authorization": 'Bearer ' + tokens['access_token']
        }
    )
    if request.status_code == 200:
        print('Thermostat Fetch Sucessful, Continuing...')
        thermostatjson = request.json()['thermostatList']
        # The previous content of the 'thermostats' section is replaced
        # entirely, so there is no need to read it first
        thermostats = {
            thermostat['name']: thermostat['identifier']
            for thermostat in thermostatjson
        }
        print('Here are your Thermostats: ' + json.dumps(thermostats, indent=4))
        write_settings('thermostats', thermostats)
        return thermostats
    else:
        print(request.status_code)
        print(request.headers)
        # .text (the body), not the bound .json method object
        print(request.text)
|
# Copyright 2021 The NPLinker Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import json
import os
import pickle
#import sys
#sys.path.append('/home/grimur/git/lda')
#from lda.code.formula import Formula
from . formula import Formula
global _ALL_DAG_MASSES
global _ALL_TREE_MASSES
_ALL_DAG_MASSES = None
_ALL_TREE_MASSES = None
global datapath
datapath = ''
def load_formula_dag(filename):
    """Extract the set of molecular formulas from a fragmentation DAG
    stored as a graphviz .dot file.

    :param filename: Path to the .dot file
    :return: List of unique formula strings (order unspecified)
    """
    formulas = set()
    with open(filename, 'r') as handle:
        for line in handle:
            if not line.startswith('v'):
                continue
            fields = line.strip().split()
            if fields[1] == '->':
                # Edge line: formula sits inside the label attribute
                formulas.add(fields[3].split('="')[1][:-3])
            else:
                # Vertex line: formula is the label text before the "\n"
                formulas.add(line.split('\\n')[0].split('"')[-1])
    return list(formulas)
def load_formula_tree(filename):
    """Extract the set of molecular formulas from a fragmentation tree
    stored as a JSON file.

    :param filename: Path to the .json tree file
    :return: List of unique formula strings (order unspecified)
    """
    with open(filename, 'rb') as handle:
        data = json.load(handle)
    formulas = {data['molecularFormula'], data['root']}
    formulas.update(fragment['molecularFormula'] for fragment in data['fragments'])
    for loss in data['losses']:
        formulas.update((loss['source'], loss['target'], loss['molecularFormula']))
    return list(formulas)
def load_peaks_from_tree(filename):
    """Read the (mass, intensity) peak list from a fragmentation tree file.

    Fragments carrying an explicit peak list contribute all of their peaks;
    fragments without one contribute their own m/z and intensity instead.

    :param filename: Path to the .json tree file
    :return: numpy array with one (mass, intensity) row per peak
    """
    with open(filename, 'rb') as handle:
        data = json.load(handle)
    spectrum = []
    for fragment in data['fragments']:
        peaks = fragment['peaks']
        if len(peaks) > 0:
            spectrum.extend((peak['mz'], peak['int']) for peak in peaks)
        else:
            spectrum.append((fragment['mz'], fragment['intensity']))
    return numpy.array(spectrum)
# These functions are meant to be attached (monkey-patched) onto the MS object
def filter_by_tree(self):
    """Filter the shifted spectrum against the formula masses of this
    spectrum's fragmentation tree, falling back to its DAG file.

    Intended to be attached to the MS object as a method.

    :return: numpy array of the peaks within ``tol`` of a formula mass
    """
    treepath = '/home/grimur/iokr/data/trees'
    tol = 0.005
    try:
        formula = load_formula_tree(treepath + os.path.sep + self.id + '.json')
    except Exception:
        # No (readable) tree file for this spectrum: fall back to the DAG.
        # NOTE(review): was a bare "except:"; narrowed to Exception so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        formula = load_formula_dag(treepath + os.path.sep + self.id + '.dot')
    formula_objects = [Formula(x) for x in formula]
    formula_masses = sorted(x.compute_exact_mass() for x in formula_objects)
    return filter_by_mass(self.shifted_spectrum, formula_masses, tol)
def filter_by_tree_unshifted(self):
    """Return the raw peak list from this spectrum's fragmentation tree,
    falling back to the raw spectrum when no tree file exists.

    Intended to be attached to the MS object as a method.
    """
    treepath = '/home/grimur/iokr/data/trees'
    try:
        return load_peaks_from_tree(treepath + os.path.sep + self.id + '.json')
    except FileNotFoundError:
        return self.raw_spectrum
def filter_by_dag(self):
    """Filter the shifted spectrum against the formula masses of this
    spectrum's fragmentation DAG (.dot) file.

    Intended to be attached to the MS object as a method.
    """
    treepath = '/home/grimur/iokr/data/trees'
    tol = 0.005
    formulas = load_formula_dag(treepath + os.sep + self.id + '.dot')
    masses = sorted(Formula(x).compute_exact_mass() for x in formulas)
    return filter_by_mass(self.shifted_spectrum, masses, tol)
def load_all_dag_masses(path):
    """Collect the union of formula masses over every DAG (.dot) file in a
    directory.

    :param path: Directory containing .dot files
    :return: Sorted list of unique exact masses
    """
    collected = set()
    for filename in os.listdir(path):
        if filename.endswith('.dot'):
            formulas = load_formula_dag(path + os.sep + filename)
            collected.update(Formula(x).compute_exact_mass() for x in formulas)
    return sorted(collected)
def filter_by_collected_dag(self):
    """Filter the shifted spectrum against the masses collected from all
    DAG files (cached in the module-level _ALL_DAG_MASSES).

    Intended to be attached to the MS object as a method.
    """
    global _ALL_DAG_MASSES
    treepath = '/home/grimur/iokr/data/trees'
    tol = 0.005
    if _ALL_DAG_MASSES is None:
        # Expensive directory scan: done once per process, then cached
        _ALL_DAG_MASSES = load_all_dag_masses(treepath)
    return filter_by_mass(self.shifted_spectrum, _ALL_DAG_MASSES, tol)
def filter_by_frozen_dag(self):
    """Filter the shifted spectrum against a pre-computed (pickled) list of
    DAG masses stored in <datapath>/dag_masses.bin.

    Intended to be attached to the MS object as a method.
    """
    tol = 0.005
    mass_file = os.path.join(datapath, 'dag_masses.bin')
    with open(mass_file, 'rb') as handle:
        dag_masses = pickle.load(handle)
    return filter_by_mass(self.shifted_spectrum, dag_masses, tol)
def load_all_tree_masses(path):
    """Collect the union of formula masses over every tree (.json) file in a
    directory.

    :param path: Directory containing .json tree files
    :return: Sorted list of unique exact masses
    """
    collected = set()
    for filename in os.listdir(path):
        if filename.endswith('.json'):
            formulas = load_formula_tree(path + os.sep + filename)
            collected.update(Formula(x).compute_exact_mass() for x in formulas)
    return sorted(collected)
def filter_by_collected_tree(self):
    """Filter the shifted spectrum against the masses collected from all
    tree files (cached in the module-level _ALL_TREE_MASSES).

    Intended to be attached to the MS object as a method.
    """
    global _ALL_TREE_MASSES
    treepath = '/home/grimur/iokr/data/trees'
    tol = 0.005
    if _ALL_TREE_MASSES is None:
        # Expensive directory scan: done once per process, then cached
        _ALL_TREE_MASSES = load_all_tree_masses(treepath)
    return filter_by_mass(self.shifted_spectrum, _ALL_TREE_MASSES, tol)
def filter_by_mass(raw_spectrum, formula_masses, tol):
    """Keep only the peaks of raw_spectrum within +/- tol of a formula mass.

    Both inputs are assumed sorted ascending by mass (raw_spectrum column 0
    is the mass); the two-cursor merge-style sweep below relies on that.

    :param raw_spectrum: numpy array of peaks, column 0 = mass
    :param formula_masses: sorted sequence of candidate masses
    :param tol: absolute mass tolerance
    :return: numpy array of the retained peak rows
    """
    filtered_spectrum = []
    spectrum_index = 0
    mass_index = 0
    while mass_index < len(formula_masses):
        # Consume every peak below the current window's upper bound
        while spectrum_index < len(raw_spectrum) and raw_spectrum[spectrum_index, 0] < formula_masses[mass_index] + tol:
            # Skip peaks below the window's lower bound (mass - tol)
            while spectrum_index < len(raw_spectrum) and raw_spectrum[spectrum_index, 0] < formula_masses[mass_index] - tol:
                spectrum_index += 1
            # The cursor now points at the first peak >= mass - tol;
            # keep it only if it is still inside the window
            if spectrum_index < len(raw_spectrum) and raw_spectrum[spectrum_index, 0] < formula_masses[mass_index] + tol:
                filtered_spectrum.append(raw_spectrum[spectrum_index, :])
                spectrum_index += 1
        mass_index += 1
    return numpy.array(filtered_spectrum)
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Hooks for Cloud Memorystore service
"""
from typing import Dict, Optional, Sequence, Tuple, Union
from google.api_core.exceptions import NotFound
from google.api_core.retry import Retry
from google.cloud.redis_v1 import CloudRedisClient
from google.cloud.redis_v1.gapic.enums import FailoverInstanceRequest
from google.cloud.redis_v1.types import FieldMask, InputConfig, Instance, OutputConfig
from google.protobuf.json_format import ParseDict
from airflow import version
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudMemorystoreHook(GoogleBaseHook):
"""
Hook for Google Cloud Memorystore APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
:type impersonation_chain: Union[str, Sequence[str]]
"""
    def __init__(
        self,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
    ) -> None:
        """
        Initializes the hook; see the class docstring for parameter details.
        """
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        # Lazily-created API client, built on first use in get_conn()
        self._client: Optional[CloudRedisClient] = None
def get_conn(self):
"""
Retrieves client library object that allow access to Cloud Memorystore service.
"""
if not self._client:
self._client = CloudRedisClient(credentials=self._get_credentials())
return self._client
@staticmethod
def _append_label(instance: Instance, key: str, val: str) -> Instance:
"""
Append labels to provided Instance type
Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current
airflow version string follows semantic versioning spec: x.y.z).
:param instance: The proto to append resource_label airflow
version to
:type instance: google.cloud.container_v1.types.Cluster
:param key: The key label
:type key: str
:param val:
:type val: str
:return: The cluster proto updated with new label
"""
val = val.replace(".", "-").replace("+", "-")
instance.labels.update({key: val})
return instance
    @GoogleBaseHook.fallback_to_default_project_id
    def create_instance(
        self,
        location: str,
        instance_id: str,
        instance: Union[Dict, Instance],
        project_id: str,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ):
        """
        Creates a Redis instance based on the specified tier and memory size.
        By default, the instance is accessible from the project's `default network
        <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
        :param location: The location of the Cloud Memorystore instance (for example europe-west1)
        :type location: str
        :param instance_id: Required. The logical name of the Redis instance in the customer project with the
            following restrictions:
            -  Must contain only lowercase letters, numbers, and hyphens.
            -  Must start with a letter.
            -  Must be between 1-40 characters.
            -  Must end with a number or a letter.
            -  Must be unique within the customer project / location
        :type instance_id: str
        :param instance: Required. A Redis [Instance] resource
            If a dict is provided, it must be of the same form as the protobuf message
            :class:`~google.cloud.redis_v1.types.Instance`
        :type instance: Union[Dict, google.cloud.redis_v1.types.Instance]
        :param project_id:  Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
            retried.
        :type retry: google.api_core.retry.Retry
        :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
            ``retry`` is specified, the timeout applies to each individual attempt.
        :type timeout: float
        :param metadata: Additional metadata that is provided to the method.
        :type metadata: Sequence[Tuple[str, str]]
        :return: The existing instance if one was found, otherwise the freshly
            created instance as re-fetched from the API.
        """
        client = self.get_conn()
        parent = CloudRedisClient.location_path(project_id, location)
        instance_name = CloudRedisClient.instance_path(project_id, location, instance_id)
        try:
            # Idempotency: if the instance already exists, return it untouched.
            instance = client.get_instance(
                name=instance_name, retry=retry, timeout=timeout, metadata=metadata
            )
            self.log.info("Instance exists. Skipping creation.")
            return instance
        except NotFound:
            self.log.info("Instance not exists.")
        if isinstance(instance, dict):
            # Accept a plain dict and convert it into the Instance protobuf.
            instance = ParseDict(instance, Instance())
        elif not isinstance(instance, Instance):
            raise AirflowException("instance is not instance of Instance type or python dict")
        # Tag the resource with the creating Airflow version (label value is
        # sanitized by _append_label to satisfy GCP label constraints).
        self._append_label(instance, "airflow-version", "v" + version.version)
        result = client.create_instance(
            parent=parent,
            instance_id=instance_id,
            instance=instance,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # create_instance returns a long-running operation; block until done.
        result.result()
        self.log.info("Instance created.")
        # Re-fetch so the caller receives the server-side representation.
        return client.get_instance(name=instance_name, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(
self,
location: str,
instance: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Deletes a specific Redis instance. Instance stops serving and data is deleted.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance: The logical name of the Redis instance in the customer project.
:type instance: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
name = CloudRedisClient.instance_path(project_id, location, instance)
self.log.info("Fetching Instance: %s", name)
instance = client.get_instance(name=name, retry=retry, timeout=timeout, metadata=metadata)
if not instance:
return
self.log.info("Deleting Instance: %s", name)
result = client.delete_instance(name=name, retry=retry, timeout=timeout, metadata=metadata)
result.result()
self.log.info("Instance deleted: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def export_instance(
self,
location: str,
instance: str,
output_config: Union[Dict, OutputConfig],
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Export Redis instance data into a Redis RDB format file in Cloud Storage.
Redis will continue serving during this operation.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance: The logical name of the Redis instance in the customer project.
:type instance: str
:param output_config: Required. Specify data to be exported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.OutputConfig`
:type output_config: Union[Dict, google.cloud.redis_v1.types.OutputConfig]
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
name = CloudRedisClient.instance_path(project_id, location, instance)
self.log.info("Exporting Instance: %s", name)
result = client.export_instance(
name=name, output_config=output_config, retry=retry, timeout=timeout, metadata=metadata
)
result.result()
self.log.info("Instance exported: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def failover_instance(
self,
location: str,
instance: str,
data_protection_mode: FailoverInstanceRequest.DataProtectionMode,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Initiates a failover of the master node to current replica node for a specific STANDARD tier Cloud
Memorystore for Redis instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance: The logical name of the Redis instance in the customer project.
:type instance: str
:param data_protection_mode: Optional. Available data protection modes that the user can choose. If
it's unspecified, data protection mode will be LIMITED_DATA_LOSS by default.
:type data_protection_mode: google.cloud.redis_v1.gapic.enums.FailoverInstanceRequest
.DataProtectionMode
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
name = CloudRedisClient.instance_path(project_id, location, instance)
self.log.info("Failovering Instance: %s", name)
result = client.failover_instance(
name=name,
data_protection_mode=data_protection_mode,
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance failovered: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
location: str,
instance: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Gets the details of a specific Redis instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance: The logical name of the Redis instance in the customer project.
:type instance: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
name = CloudRedisClient.instance_path(project_id, location, instance)
result = client.get_instance(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info("Fetched Instance: %s", name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def import_instance(
self,
location: str,
instance: str,
input_config: Union[Dict, InputConfig],
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.
Redis may stop serving during this operation. Instance state will be IMPORTING for entire operation.
When complete, the instance will contain only data from the imported file.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:type location: str
:param instance: The logical name of the Redis instance in the customer project.
:type instance: str
:param input_config: Required. Specify data to be imported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.InputConfig`
:type input_config: Union[Dict, google.cloud.redis_v1.types.InputConfig]
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
name = CloudRedisClient.instance_path(project_id, location, instance)
self.log.info("Importing Instance: %s", name)
result = client.import_instance(
name=name, input_config=input_config, retry=retry, timeout=timeout, metadata=metadata
)
result.result()
self.log.info("Instance imported: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def list_instances(
self,
location: str,
page_size: int,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Lists all Redis instances owned by a project in either the specified location (region) or all
locations.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
If it is specified as ``-`` (wildcard), then all regions available to the project are
queried, and the results are aggregated.
:type location: str
:param page_size: The maximum number of resources contained in the underlying API response. If page
streaming is performed per- resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number of resources in a page.
:type page_size: int
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_conn()
parent = CloudRedisClient.location_path(project_id, location)
result = client.list_instances(
parent=parent, page_size=page_size, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info("Fetched instances")
return result
    @GoogleBaseHook.fallback_to_default_project_id
    def update_instance(
        self,
        update_mask: Union[Dict, FieldMask],
        instance: Union[Dict, Instance],
        project_id: str,
        location: Optional[str] = None,
        instance_id: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Sequence[Tuple[str, str]]] = None,
    ):
        """
        Updates the metadata and configuration of a specific Redis instance.
        :param update_mask: Required. Mask of fields to update. At least one path must be supplied in this
            field. The elements of the repeated paths field may only include these fields from ``Instance``:
            -  ``displayName``
            -  ``labels``
            -  ``memorySizeGb``
            -  ``redisConfig``
            If a dict is provided, it must be of the same form as the protobuf message
            :class:`~google.cloud.redis_v1.types.FieldMask`
        :type update_mask: Union[Dict, google.cloud.redis_v1.types.FieldMask]
        :param instance: Required. Update description. Only fields specified in ``update_mask`` are updated.
            If a dict is provided, it must be of the same form as the protobuf message
            :class:`~google.cloud.redis_v1.types.Instance`
        :type instance: Union[Dict, google.cloud.redis_v1.types.Instance]
        :param location: The location of the Cloud Memorystore instance (for example europe-west1)
        :type location: str
        :param instance_id: The logical name of the Redis instance in the customer project.
        :type instance_id: str
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
            retried.
        :type retry: google.api_core.retry.Retry
        :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
            ``retry`` is specified, the timeout applies to each individual attempt.
        :type timeout: float
        :param metadata: Additional metadata that is provided to the method.
        :type metadata: Sequence[Tuple[str, str]]
        """
        client = self.get_conn()
        if isinstance(instance, dict):
            # Accept a plain dict and convert it into the Instance protobuf.
            instance = ParseDict(instance, Instance())
        elif not isinstance(instance, Instance):
            raise AirflowException("instance is not instance of Instance type or python dict")
        if location and instance_id:
            # Only when both are given can the fully-qualified resource name be
            # built here; otherwise `instance.name` must already be set by the caller.
            name = CloudRedisClient.instance_path(project_id, location, instance_id)
            instance.name = name
        self.log.info("Updating instances: %s", instance.name)
        result = client.update_instance(
            update_mask=update_mask, instance=instance, retry=retry, timeout=timeout, metadata=metadata
        )
        # update_instance returns a long-running operation; block until done.
        result.result()
        self.log.info("Instance updated: %s", instance.name)
|
import os
import sys
from io import BytesIO

import telegram
from flask import Flask, request, send_file

from fsm import TocMachine
# SECURITY: the bot token is a secret and was previously hard-coded here.
# Prefer the environment; the literal fallbacks keep existing deployments
# working but should be rotated and removed from version control.
API_TOKEN = os.environ.get('TELEGRAM_API_TOKEN', '480513885:AAEwQboNf9fJUuE13pBQLl4URAj5F3QCT00')
WEBHOOK_URL = os.environ.get('WEBHOOK_URL', 'https://21684a07.ngrok.io/hook')

app = Flask(__name__)
bot = telegram.Bot(token=API_TOKEN)
# Finite-state machine describing the virtual pet's moods. 'normal' is the
# hub state; hungry/sick/boring/angry each have numbered option sub-states,
# and 'dead' can only be reset back to 'initial'. The 'advance' trigger picks
# its destination through the condition callbacks defined on TocMachine.
machine = TocMachine(
    states=[
        'initial',
        'name',
        'check',
        'normal',
        'hungry',
        'hungry_option1',
        'hungry_option2',
        'sick',
        'sick_option1',
        'sick_option2',
        'boring',
        'boring_option1',
        'boring_option2',
        'boring_option3',
        'angry',
        'angry_option1',
        'angry_option2',
        'angry_option3',
        'dead'
    ],
    transitions=[
        # Start-up and reset transitions.
        {
            'trigger': 'advance',
            'source': 'initial',
            'dest': 'name',
            'conditions': 'is_going_to_name'
        },
        {
            'trigger': 'advance',
            'source': 'dead',
            'dest': 'initial',
            'conditions': 'is_going_to_initial'
        },
        # Mood -> option sub-state transitions, guarded by their conditions.
        {
            'trigger': 'advance',
            'source': 'boring',
            'dest': 'boring_option1',
            'conditions': 'is_going_to_boring_option1'
        },
        {
            'trigger': 'advance',
            'source': 'boring',
            'dest': 'boring_option2',
            'conditions': 'is_going_to_boring_option2'
        },
        {
            'trigger': 'advance',
            'source': 'boring',
            'dest': 'boring_option3',
            'conditions': 'is_going_to_boring_option3'
        },
        {
            'trigger': 'advance',
            'source': 'angry',
            'dest': 'angry_option1',
            'conditions': 'is_going_to_angry_option1'
        },
        {
            'trigger': 'advance',
            'source': 'angry',
            'dest': 'angry_option2',
            'conditions': 'is_going_to_angry_option2'
        },
        {
            'trigger': 'advance',
            'source': 'angry',
            'dest': 'angry_option3',
            'conditions': 'is_going_to_angry_option3'
        },
        {
            'trigger': 'advance',
            'source': 'hungry',
            'dest': 'hungry_option1',
            'conditions': 'is_going_to_hungry_option1'
        },
        {
            'trigger': 'advance',
            'source': 'hungry',
            'dest': 'hungry_option2',
            'conditions': 'is_going_to_hungry_option2'
        },
        {
            'trigger': 'advance',
            'source': 'sick',
            'dest': 'sick_option1',
            'conditions': 'is_going_to_sick_option1'
        },
        {
            'trigger': 'advance',
            'source': 'sick',
            'dest': 'sick_option2',
            'conditions': 'is_going_to_sick_option2'
        },
        # Ignoring a mood sends the pet back to 'normal'.
        {
            'trigger': 'advance',
            'source' : ['hungry',
                        'angry',
                        'sick',
                        'boring'],
            'dest': 'normal',
            'conditions': 'do_nothing'
        },
        # Any mood or option state can end in 'dead' when kill_this fires.
        {
            'trigger': 'advance',
            'source' : ['hungry',
                        'hungry_option1',
                        'hungry_option2',
                        'sick',
                        'sick_option1',
                        'sick_option2',
                        'boring',
                        'boring_option1',
                        'boring_option2',
                        'boring_option3',
                        'angry',
                        'angry_option1',
                        'angry_option2',
                        'angry_option3',],
            'dest': 'dead',
            'conditions': 'kill_this'
        },
        # Direct triggers (no guard conditions) used by the FSM callbacks.
        {
            'trigger': 'is_going_to_check',
            'source': 'normal',
            'dest': 'check',
        },
        {
            'trigger': 'is_going_to_normal',
            'source' : ['name',
                        'check',
                        'boring_option1',
                        'boring_option2',
                        'boring_option3',
                        'angry_option1',
                        'angry_option2',
                        'angry_option3',
                        'hungry_option1',
                        'hungry_option2',
                        'sick_option1',
                        'sick_option2'],
            'dest': 'normal',
        },
        {
            'trigger': 'is_going_to_hungry',
            'source': 'normal',
            'dest': 'hungry',
        },
        {
            'trigger': 'is_going_to_sick',
            'source' : ['normal',
                        'angry'],
            'dest': 'sick',
        },
        {
            'trigger': 'is_going_to_angry',
            'source' : ['normal',
                        'boring'],
            'dest': 'angry',
        },
        {
            'trigger': 'is_going_to_boring',
            'source': 'normal',
            'dest': 'boring',
        },
        {
            'trigger': 'is_going_to_dead',
            'source' : ['hungry',
                        'sick'],
            'dest': 'dead',
        }
    ],
    initial='initial',
    auto_transitions=False,
    show_conditions=True,
)
def _set_webhook():
    """Register WEBHOOK_URL with Telegram; terminate the process on failure."""
    if bot.set_webhook(WEBHOOK_URL):
        print('Your webhook URL has been set to "{}"'.format(WEBHOOK_URL))
    else:
        print('Webhook setup failed')
        sys.exit(1)
@app.route('/hook', methods=['POST'])
def webhook_handler():
    """Telegram webhook endpoint: parse the update and feed it to the FSM."""
    payload = request.get_json(force=True)
    machine.advance(telegram.Update.de_json(payload, bot))
    return 'ok'
@app.route('/show-fsm', methods=['GET'])
def show_fsm():
    """Render the current FSM graph as a PNG image (via graphviz `dot`)."""
    image = BytesIO()
    machine.graph.draw(image, prog='dot', format='png')
    image.seek(0)
    return send_file(image, attachment_filename='fsm.png', mimetype='image/png')
if __name__ == "__main__":
    # Register the webhook with Telegram first (exits on failure), then serve.
    _set_webhook()
    app.run()
|
# -*- coding: utf-8 -*-
import codecs
import datetime
import glob
import json
import os
import re
import shutil
import subprocess
import base64
# World of Tanks client version this package targets (only reported in the final log line).
CLIENT_VERSION = '1.0.1.1'
# Mod identifier; also the stem of the produced .wotmod archive name.
NAME = 'spoter.crewExtended'
# When True, the embedded LICENSE file is packed into the archive as well.
ADD_LICENSE = True
class Build(object):
OUT_PATH = '.out'
PYC_PATH = os.path.join(OUT_PATH, 'res', 'scripts', 'client', 'gui', 'mods')
BUILD_PATH = os.path.join('source')
VERSION = None
RELEASE = '%s.wotmod' % NAME
DATE = datetime.datetime.now().strftime("%Y-%m-%d")
CONFIG_NAME = None
def __init__(self):
self.clear()
if not os.path.exists('release'): subprocess.check_call(['powershell', 'mkdir', 'release'])
self.readVersion()
self.createFileDict()
self.packWotmod()
self.clear()
print 'created: %s v%s (%s) to %s' % (self.RELEASE, self.VERSION["version"], self.DATE, CLIENT_VERSION)
def clear(self):
try:
shutil.rmtree(self.OUT_PATH, True)
except OSError:
pass
def readVersion(self):
filePath = os.path.join(self.BUILD_PATH, 'VERSION')
with codecs.open(filePath, 'r', encoding='utf-8') as versionFile:
data = versionFile.read().decode('utf-8')
versionFile.close()
self.VERSION = json.loads(data)
def createFileDict(self):
version = '{:.2f}'.format(float(self.VERSION["version"]))
files = []
if self.VERSION["source"]:
files.append((os.path.join(self.BUILD_PATH, self.VERSION["source"]), 'self.version = ', "'v%s (%s)'" % (version, self.DATE)))
files.append((os.path.join(self.BUILD_PATH, self.VERSION["source"]), 'self.version_id = ', re.sub('[.\s]', '', '%s' % version)))
if self.VERSION["meta"]:
files.append((os.path.join(self.BUILD_PATH, self.VERSION["meta"]), '<version>', '%s</version>' % version))
if self.VERSION["config"]:
files.append((os.path.join(self.BUILD_PATH, self.VERSION["config"]), '"version": ', re.sub('[.\s]', '', '%s' % version)))
if self.VERSION["i18n"]:
for path in glob.glob(os.path.join(self.BUILD_PATH, self.VERSION["i18n"], "*.json")):
files.append((path, '"version": ', re.sub('[.\s]', '', '%s' % version)))
for path in files:
self.updateFiles(*path)
def updateFiles(self, path, string, text):
with open(path, 'a+') as xfile:
data = xfile.readlines()
newData = []
for line in data:
if 'self.ids = ' in line:
self.configName = re.split('self.ids = ', line)[1]
if string in line:
newData.append('%s%s%s\n' % (re.split(string, line)[0], string, text))
continue
newData.append(line)
xfile.close()
with open(path, 'w') as xfile:
xfile.writelines(newData)
xfile.close()
def packWotmod(self):
self.RELEASE = '%s_%s.wotmod' % (NAME, '{:.2f}'.format(float(self.VERSION["version"])))
subprocess.check_call(['powershell', 'mkdir', self.PYC_PATH])
py = '%s' % os.path.join(self.BUILD_PATH, self.VERSION["source"])
pyc = '%sc' % self.VERSION["source"]
ps = '%s\%s' % (os.path.realpath(self.OUT_PATH), 'create-7zip.ps1')
metaPath = '%s' % os.path.join(self.BUILD_PATH, os.path.dirname(self.VERSION["meta"]))
metaFile = os.path.basename(self.VERSION["meta"])
subprocess.check_call(['python', '-m', 'compileall', py])
subprocess.call('powershell robocopy %s %s %s /COPYALL /MOV' % (os.path.realpath(self.BUILD_PATH), os.path.realpath(self.PYC_PATH), pyc))
subprocess.call('powershell robocopy %s %s %s /COPYALL' % (os.path.realpath(metaPath), os.path.realpath(self.OUT_PATH), metaFile))
if self.VERSION["resources"]:
for directory in self.VERSION["resources"]:
if os.path.exists(os.path.join(self.BUILD_PATH, directory)):
subprocess.call('powershell robocopy %s %s /COPYALL /E' % (os.path.realpath(os.path.join(self.BUILD_PATH, directory)), os.path.realpath(os.path.join(self.OUT_PATH, 'res', directory))))
with open(ps, 'w') as xfile:
xfile.write('function create-7zip([String] $aDirectory, [String] $aZipfile){ [string]$pathToZipExe = "C:\Program Files\\7-zip\\7z.exe"; [Array]$arguments = "a", "-tzip", "-ssw", "-mx0", "$aZipfile", "$aDirectory"; & $pathToZipExe $arguments; }\n'
'create-7zip "%s" "%s"\n'
'create-7zip "%s" "%s"\n' % (os.path.realpath(os.path.join(self.OUT_PATH, 'res')), os.path.realpath(os.path.join('release', self.RELEASE)),
os.path.realpath(os.path.join(self.OUT_PATH, metaFile)), os.path.realpath(os.path.join('release', self.RELEASE))))
if ADD_LICENSE:
xfile.write('create-7zip "%s" "%s"\n' % (self.createLicense(), os.path.realpath(os.path.join('release', self.RELEASE))))
xfile.close()
subprocess.call('powershell -executionpolicy bypass -command "& {Set-ExecutionPolicy AllSigned; %s; Set-ExecutionPolicy Undefined}"' % ps)
def createLicense(self):
b64 = "DQogICAgICAgIERPIFdIQVQgVEhFIEZVQ0sgWU9VIFdBTlQgVE8gUFVCTElDIExJQ0VOU0UgDQogICAgICAgICAgICAgICAgICAgIFZlcnNpb24gMiwgRGVjZW1iZXIgMjAwNCANCg0KIENvcHlyaWdodCAoQykgMjAwNCBTYW0gSG9jZXZhciA8c2FtQGhvY2V2YXIubmV0PiANCg0KIEV2ZXJ5b25lIGlzIHBlcm1pdHRlZCB0byBjb3B5IGFuZCBkaXN0cmlidXRlIHZlcmJhdGltIG9yIG1vZGlmaWVkIA0KIGNvcGllcyBvZiB0aGlzIGxpY2Vuc2UgZG9jdW1lbnQsIGFuZCBjaGFuZ2luZyBpdCBpcyBhbGxvd2VkIGFzIGxvbmcgDQogYXMgdGhlIG5hbWUgaXMgY2hhbmdlZC4gDQoNCiAgICAgICAgICAgIERPIFdIQVQgVEhFIEZVQ0sgWU9VIFdBTlQgVE8gUFVCTElDIExJQ0VOU0UgDQogICBURVJNUyBBTkQgQ09ORElUSU9OUyBGT1IgQ09QWUlORywgRElTVFJJQlVUSU9OIEFORCBNT0RJRklDQVRJT04gDQoNCiAgMC4gWW91IGp1c3QgRE8gV0hBVCBUSEUgRlVDSyBZT1UgV0FOVCBUTy4NCg=="
output_name = os.path.realpath(os.path.join(self.OUT_PATH, 'LICENSE'))
data = base64.b64decode(b64)
with open(output_name, "wb") as output_file:
output_file.write(data)
output_file.close()
return output_name
build = Build()
|
# -----------------------------------------------------------------------------
# Functions for parsing args
# -----------------------------------------------------------------------------
import yaml
import os
from ast import literal_eval
import copy
class CfgNode(dict):
    """
    CfgNode represents an internal node in the configuration tree. It's a simple
    dict-like container that allows for attribute-based access to keys.
    """
    def __init__(self, init_dict=None, key_list=None, new_allowed=False):
        """
        :param init_dict: initial key/value mapping; nested plain dicts are
            converted to CfgNode recursively.
        :param key_list: path of keys from the root to this node (used only to
            build the nested nodes' paths).
        :param new_allowed: accepted for API compatibility; unused here.
        """
        init_dict = {} if init_dict is None else init_dict
        key_list = [] if key_list is None else key_list
        for k, v in init_dict.items():
            if type(v) is dict:
                # Convert plain dicts to CfgNode so nested attribute access works.
                init_dict[k] = CfgNode(v, key_list=key_list + [k])
        super(CfgNode, self).__init__(init_dict)

    def __getattr__(self, name):
        # Attribute access falls back to item access; a missing key surfaces
        # as AttributeError, as Python expects from __getattr__.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __str__(self):
        def _indent(s_, num_spaces):
            # Indent every line except the first by `num_spaces`.
            s = s_.split("\n")
            if len(s) == 1:
                return s_
            first = s.pop(0)
            s = [(num_spaces * " ") + line for line in s]
            return first + "\n" + "\n".join(s)

        s = []
        for k, v in sorted(self.items()):
            # Nested nodes render on their own indented block; leaves inline.
            separator = "\n" if isinstance(v, CfgNode) else " "  # fixed 'seperator' typo
            attr_str = "{}:{}{}".format(str(k), separator, str(v))
            s.append(_indent(attr_str, 2))
        return "\n".join(s)

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__())
def load_cfg_from_cfg_file(file):
    """Load a two-level ``.yaml`` file and flatten its second-level keys into one CfgNode."""
    assert os.path.isfile(file) and file.endswith('.yaml'), \
        '{} is not a yaml file'.format(file)

    with open(file, 'r') as f:
        cfg_from_file = yaml.safe_load(f)

    # Flatten: the per-section grouping is dropped and all inner keys are
    # merged into a single namespace.
    cfg = {}
    for section in cfg_from_file:
        for k, v in cfg_from_file[section].items():
            cfg[k] = v
    return CfgNode(cfg)
def merge_cfg_from_list(cfg, cfg_list):
    """Return a deep copy of ``cfg`` with values overridden from ``cfg_list``.

    ``cfg_list`` is a flat alternating sequence ``[key1, val1, key2, val2, ...]``.
    Only the last dotted component of each key is used, matching the flattened
    layout produced by load_cfg_from_cfg_file.
    """
    new_cfg = copy.deepcopy(cfg)
    assert len(cfg_list) % 2 == 0
    keys = cfg_list[0::2]
    raw_values = cfg_list[1::2]
    for full_key, raw in zip(keys, raw_values):
        subkey = full_key.split('.')[-1]
        assert subkey in cfg, 'Non-existent key: {}'.format(full_key)
        decoded = _decode_cfg_value(raw)
        coerced = _check_and_coerce_cfg_value_type(decoded, cfg[subkey], subkey, full_key)
        setattr(new_cfg, subkey, coerced)
    return new_cfg
def _decode_cfg_value(v):
"""Decodes a raw config value (e.g., from a yaml config files or command
line argument) into a Python object.
"""
# All remaining processing is only applied to strings
if not isinstance(v, str):
return v
# Try to interpret `v` as a:
# string, number, tuple, list, dict, boolean, or None
try:
v = literal_eval(v)
# The following two excepts allow v to pass through when it represents a
# string.
#
# Longer explanation:
# The type of v is always a string (before calling literal_eval), but
# sometimes it *represents* a string and other times a data structure, like
# a list. In the case that v represents a string, what we got back from the
# yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
# ok with '"foo"', but will raise a ValueError if given 'foo'. In other
# cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
# will raise a SyntaxError.
except ValueError:
pass
except SyntaxError:
pass
return v
def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
"""Checks that `replacement`, which is intended to replace `original` is of
the right type. The type is correct if it matches exactly or is one of a few
cases in which the type can be easily coerced.
"""
original_type = type(original)
replacement_type = type(replacement)
# The types must match (with some exceptions)
if replacement_type == original_type:
return replacement
# Cast replacement from from_type to to_type if the replacement and original
# types match from_type and to_type
def conditional_cast(from_type, to_type):
if replacement_type == from_type and original_type == to_type:
return True, to_type(replacement)
else:
return False, None
# Conditionally casts
# list <-> tuple
casts = [(tuple, list), (list, tuple)]
# For py2: allow converting from str (bytes) to a unicode string
try:
casts.append((str, unicode)) # noqa: F821
except Exception:
pass
for (from_type, to_type) in casts:
converted, converted_value = conditional_cast(from_type, to_type)
if converted:
return converted_value
raise ValueError(
"Type mismatch ({} vs. {}) with values ({} vs. {}) for config "
"key: {}".format(
original_type, replacement_type, original, replacement, full_key
)
)
# def _assert_with_logging(cond, msg):
# if not cond:
# logger.debug(msg)
# assert cond, msg
|
# Build the starting list of hobbies.
hobbies = ["Rock Climbing", "Bug Collecting", "Cooking", "Knitting", "Writing"]
print(hobbies)

# Look up individual entries by position: first, second and fifth.
for position in (0, 1, 4):
    print(hobbies[position])

# len() reports how many entries the list holds (5).
print(len(hobbies))

# index() tells us where a given value lives in the list.
print(hobbies.index("Cooking"))

# append() grows the list at the end ...
hobbies.append("Gaming")
print(hobbies)

# ... and remove() drops the first matching value.
hobbies.remove("Bug Collecting")
print(hobbies)
|
# Generated by Django 3.2 on 2021-12-03 11:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2; adds two fields to the Task model.

    # Requires the tag app's Tag model (M2M target) and the previous task migration.
    dependencies = [
        ('tag', '0002_auto_20210922_1104'),
        ('task', '0002_task_annotated_slide'),
    ]
    operations = [
        # Boolean flag distinguishing pathology tasks from general histology ones.
        migrations.AddField(
            model_name='task',
            name='pathology',
            field=models.BooleanField(default=False, help_text='Is the task about pathology or not (general histology)'),
        ),
        # Free-form tagging through the tag app's Tag model.
        migrations.AddField(
            model_name='task',
            name='tags',
            field=models.ManyToManyField(to='tag.Tag'),
        ),
    ]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import TestCase
import mock
from git import Repo
import tempfile
from pathlib import Path
from apimon import project as _project
class TestProject(TestCase):
    def setUp(self):
        # Shared fixture: a Project with entirely fake arguments; nothing here
        # touches the network or the filesystem.
        super(TestProject, self).setUp()
        self.project = _project.Project('fake_proj', 'fake_url', 'master',
                                        'ansible', 'fake_loc', 'fake_cmd %s',
                                        'wrk_dir')
def test_basic(self):
self.assertEqual('fake_proj', self.project.name)
self.assertEqual('fake_url', self.project.repo_url)
self.assertEqual('master', self.project.repo_ref)
self.assertEqual('ansible', self.project.type)
self.assertEqual('fake_loc', self.project.location)
self.assertEqual('fake_cmd %s', self.project.exec_cmd)
self.assertEqual('wrk_dir', self.project.work_dir)
def test_get_git_repo_present(self):
with tempfile.TemporaryDirectory() as tmp_dir:
repo_dir = Path(tmp_dir, 'fake_proj')
Repo.init(repo_dir)
prj = _project.Project('fake_proj', 'fake_url', 'master',
'ansible', 'fake_loc', 'fake_cmd %%s',
tmp_dir)
with mock.patch.object(prj, 'refresh_git_repo', return_value=True):
prj.get_git_repo()
@mock.patch.object(Repo, 'clone_from')
def test_get_git_repo_fresh(self, git_mock):
repo = mock.Mock(autospec=Repo)
git_mock.return_value = repo
self.project.get_git_repo()
git_mock.assert_called_with('fake_url',
Path('wrk_dir', 'fake_proj'),
recurse_submodules='.')
def test_refresh_git_repo(self):
with mock.patch.object(self.project, 'repo') as repo_mock:
self.assertIsNone(self.project.refresh_git_repo())
repo_mock.head.reset.assert_called_with(index=True,
working_tree=True)
repo_mock.remotes.origin.pull.assert_called_with(
'master',
recurse_submodules=True)
def test_refresh_git_repo_branch(self):
self.project.repo_ref = 'branch'
with mock.patch.object(self.project, 'repo') as repo_mock:
self.assertIsNone(self.project.refresh_git_repo())
repo_mock.remotes.origin.update.assert_called()
repo_mock.head.reset.assert_called_with(index=True,
working_tree=True)
repo_mock.remotes.origin.pull.assert_called_with(
'branch',
recurse_submodules=True)
def test_is_repo_update_necessary(self):
with mock.patch.object(self.project, 'repo') as repo_mock:
remote_ref = mock.Mock()
remote_ref.commit = 2
local_ref = mock.Mock()
local_ref.commit = 1
repo_mock.remotes.origin.refs = {'master': remote_ref}
repo_mock.head = local_ref
self.assertTrue(self.project.is_repo_update_necessary())
repo_mock.remotes.origin.update.assert_called()
repo_mock.head = remote_ref
self.assertFalse(self.project.is_repo_update_necessary())
repo_mock.remotes.origin.update.assert_called()
def test_ansible_galaxy_install(self):
with tempfile.TemporaryDirectory() as tmp_dir:
repo_dir = Path(tmp_dir, 'fake_proj')
Repo.init(repo_dir)
prj = _project.Project('fake_proj', 'fake_url', 'master',
'ansible', 'fake_loc', 'fake_cmd %%s',
tmp_dir)
requirements_file = Path(repo_dir, 'requirements.yml')
with mock.patch.object(prj, 'refresh_git_repo'), \
mock.patch('subprocess.Popen') as process_mock:
open(requirements_file, 'a').close()
prj._ansible_galaxy_install('role', requirements_file)
process_mock.assert_called_with(
'ansible-galaxy role install -r {file}'.format(
file=Path(requirements_file).resolve()).split(' '),
stdout=-1, stderr=-1
)
prj._ansible_galaxy_install('collection', requirements_file)
process_mock.assert_called_with(
'ansible-galaxy collection install -r {file}'.format(
file=Path(requirements_file).resolve()).split(' '),
stdout=-1, stderr=-1
)
def test_prepare_ansible(self):
with tempfile.TemporaryDirectory() as tmp_dir:
repo_dir = Path(tmp_dir, 'fake_proj')
Repo.init(repo_dir)
prj = _project.Project('fake_proj', 'fake_url', 'master',
'ansible', 'fake_loc', 'fake_cmd %%s',
tmp_dir)
requirements_file = Path(repo_dir, 'requirements.yml')
with mock.patch.object(prj, 'refresh_git_repo'), \
mock.patch.object(prj, '_ansible_galaxy_install') \
as install_mock:
prj.prepare()
install_mock.assert_not_called()
install_mock.return_value = 0
# Now write file and ensure galaxy invoked
open(requirements_file, 'a').close()
res = prj.prepare()
calls = [
mock.call('role', requirements_file),
mock.call('collection', requirements_file)
]
install_mock.assert_has_calls(calls)
self.assertEqual(res, 0)
install_mock.return_value = 1
# Now write file and ensure galaxy invoked
open(requirements_file, 'a').close()
res = prj.prepare()
calls = [
mock.call('role', requirements_file),
]
install_mock.assert_has_calls(calls)
self.assertEqual(res, 1)
def test_tasks_all(self):
with tempfile.TemporaryDirectory() as tmp_dir:
repo_dir = Path(tmp_dir, 'fake_proj')
repo_dir.mkdir()
prj = _project.Project('fake_proj', 'fake_url', 'master',
'ansible', 'fake_loc', 'fake_cmd %%s',
tmp_dir)
task_loc = Path(repo_dir, 'fake_loc')
task_loc.mkdir()
fake_tasks = ('scenario_task1.yaml', 'scenario_task2.yaml',
'scenario_task3.yaml')
for task in fake_tasks:
open(Path(task_loc, task).as_posix(), 'a').close()
found_tasks = set(prj.tasks())
self.assertEqual(found_tasks, set('fake_loc/' + v for v in
fake_tasks))
# def test_tasks_filtered(self):
# with tempfile.TemporaryDirectory() as tmp_dir:
#
# repo_dir = Path(tmp_dir, 'fake_proj')
# repo_dir.mkdir()
#
# scenarios = ('scenario_test1.tst', 'scenario_test2.tst')
#
# prj = _project.Project('fake_proj', 'fake_url', 'master',
# 'ansible', 'fake_loc', 'fake_cmd %%s',
# tmp_dir, scenarios=scenarios)
#
# task_loc = Path(repo_dir, 'fake_loc')
#
# task_loc.mkdir()
#
# fake_tasks = ('scenario_test1.tst', 'scenario_test2.tst',
# 'scenario3.yaml')
#
# for task in fake_tasks:
# open(Path(task_loc, task).as_posix(), 'a').close()
#
# found_tasks = set(prj.tasks())
# self.assertEqual(found_tasks, set('fake_loc/' + v for v in
# scenarios))
def test_get_exec_cmd(self):
self.assertEqual('fake_cmd test_item',
self.project.get_exec_cmd('test_item'))
def test_is_task_valid(self):
with tempfile.TemporaryDirectory() as tmp_dir:
repo_dir = Path(tmp_dir, 'fake_proj')
repo_dir.mkdir()
prj = _project.Project('fake_proj', 'fake_url', 'master',
'ansible', 'fake_loc', 'fake_cmd %%s',
tmp_dir)
with open(Path(repo_dir, 'test_item'), 'a'):
self.assertTrue(prj.is_task_valid('test_item'))
self.assertFalse(prj.is_task_valid('missing_item'))
|
import sys
sys.path.append("..")
from ..Modifiers import WithName,DecoratedBy,WithBody,Append,WithArg
from CodeGenerationCore import Command
from .utils import CommandOn
from ast import FunctionDef
@CommandOn(FunctionDef)
class CloneMethodCommand(Command,WithName.WithName,DecoratedBy.DecoratedBy,WithBody.WithBody,Append.Append,WithArg.WithArg):
    """Code-generation command operating on ast.FunctionDef nodes.

    The body is empty on purpose: all behaviour is composed from the
    Command base class and the modifier mixins (name, decorators, body,
    append, argument handling).
    """
    pass
|
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.quality.ssi` module.
"""
from __future__ import division, unicode_literals
import unittest
from colour.quality import spectral_similarity_index
from colour.colorimetry import SDS_ILLUMINANTS, SpectralDistribution
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['TestSpectralSimilarityIndex']
DATA_HMI = {
300: 0.000000000000000,
301: 0.000000000000000,
302: 0.000000000000000,
303: 0.000000000000000,
304: 0.000000000000000,
305: 0.000000000000000,
306: 0.000000000000000,
307: 0.000000000000000,
308: 0.000000000000000,
309: 0.000000000000000,
310: 0.000000000000000,
311: 0.000000000000000,
312: 0.000000000000000,
313: 0.000000000000000,
314: 0.000000000000000,
315: 0.000000000000000,
316: 0.000000000000000,
317: 0.000000000000000,
318: 0.000000000000000,
319: 0.000000000000000,
320: 0.000000000000000,
321: 0.000000000000000,
322: 0.000000000000000,
323: 0.000000000000000,
324: 0.000000000000000,
325: 0.000000000000000,
326: 0.000000000000000,
327: 0.000000000000000,
328: 0.000000000000000,
329: 0.000000000000000,
330: 0.000000000000000,
331: 0.000000000000000,
332: 0.000000000000000,
333: 0.000000000000000,
334: 0.000000000000000,
335: 0.000000000000000,
336: 0.000000000000000,
337: 0.000000000000000,
338: 0.000000000000000,
339: 0.000000000000000,
340: 0.000000000000000,
341: 0.000000000000000,
342: 0.000000000000000,
343: 0.000000000000000,
344: 0.000000000000000,
345: 0.000000000000000,
346: 0.000000000000000,
347: 0.000000000000000,
348: 0.000000000000000,
349: 0.000000000000000,
350: 0.000000000000000,
351: 0.000000000000000,
352: 0.000000000000000,
353: 0.000000000000000,
354: 0.000000000000000,
355: 0.000000000000000,
356: 0.000000000000000,
357: 0.000000000000000,
358: 0.000000000000000,
359: 0.000000000000000,
360: 0.000000000000000,
361: 0.000000000000000,
362: 0.000000000000000,
363: 0.000000000000000,
364: 0.000000000000000,
365: 0.000000000000000,
366: 0.000000000000000,
367: 0.000000000000000,
368: 0.000000000000000,
369: 0.000000000000000,
370: 0.000000000000000,
371: 0.000000000000000,
372: 0.000000000000000,
373: 0.000000000000000,
374: 0.000000000000000,
375: 0.000000000000000,
376: 0.000000000000000,
377: 0.000000000000000,
378: 0.000000000000000,
379: 0.000000000000000,
380: 1.204633204633200,
381: 1.101029601029600,
382: 0.997425997425997,
383: 1.008365508365510,
384: 1.019305019305020,
385: 1.070141570141570,
386: 1.120978120978120,
387: 1.177606177606180,
388: 1.234234234234230,
389: 1.162162162162160,
390: 1.090090090090090,
391: 1.153796653796660,
392: 1.217503217503220,
393: 1.166023166023170,
394: 1.114543114543110,
395: 1.180823680823680,
396: 1.247104247104250,
397: 1.190476190476190,
398: 1.133848133848130,
399: 1.137065637065640,
400: 1.140283140283140,
401: 1.220077220077220,
402: 1.299871299871300,
403: 1.325611325611320,
404: 1.351351351351350,
405: 1.415701415701410,
406: 1.480051480051480,
407: 1.486486486486490,
408: 1.492921492921490,
409: 1.544401544401540,
410: 1.595881595881600,
411: 1.583011583011580,
412: 1.570141570141570,
413: 1.608751608751610,
414: 1.647361647361650,
415: 1.679536679536680,
416: 1.711711711711710,
417: 1.718146718146710,
418: 1.724581724581720,
419: 1.743886743886740,
420: 1.763191763191760,
421: 1.776061776061770,
422: 1.788931788931790,
423: 1.711711711711710,
424: 1.634491634491630,
425: 1.550836550836550,
426: 1.467181467181470,
427: 1.409266409266410,
428: 1.351351351351350,
429: 1.318532818532820,
430: 1.285714285714290,
431: 1.299227799227800,
432: 1.312741312741310,
433: 1.396396396396400,
434: 1.480051480051480,
435: 1.518661518661520,
436: 1.557271557271560,
437: 1.486486486486490,
438: 1.415701415701420,
439: 1.277348777348780,
440: 1.138996138996140,
441: 1.046975546975550,
442: 0.954954954954955,
443: 0.939510939510940,
444: 0.924066924066924,
445: 0.935006435006435,
446: 0.945945945945946,
447: 0.962676962676962,
448: 0.979407979407979,
449: 0.984555984555984,
450: 0.989703989703990,
451: 1.014800514800520,
452: 1.039897039897040,
453: 1.069498069498070,
454: 1.099099099099100,
455: 1.133848133848140,
456: 1.168597168597170,
457: 1.195624195624200,
458: 1.222651222651220,
459: 1.212998712998710,
460: 1.203346203346200,
461: 1.182110682110680,
462: 1.160875160875160,
463: 1.135135135135140,
464: 1.109395109395110,
465: 1.102960102960110,
466: 1.096525096525100,
467: 1.075289575289580,
468: 1.054054054054050,
469: 1.059202059202060,
470: 1.064350064350060,
471: 1.064993564993570,
472: 1.065637065637070,
473: 1.057915057915060,
474: 1.050193050193050,
475: 1.049549549549550,
476: 1.048906048906050,
477: 1.042471042471050,
478: 1.036036036036040,
479: 1.048262548262550,
480: 1.060489060489060,
481: 1.052767052767050,
482: 1.045045045045050,
483: 1.023166023166020,
484: 1.001287001287000,
485: 1.001287001287000,
486: 1.001287001287000,
487: 1.003217503217510,
488: 1.005148005148010,
489: 1.013513513513510,
490: 1.021879021879020,
491: 1.014800514800510,
492: 1.007722007722010,
493: 1.005791505791510,
494: 1.003861003861000,
495: 0.994208494208493,
496: 0.984555984555985,
497: 0.986486486486487,
498: 0.988416988416988,
499: 1.009009009009010,
500: 1.029601029601030,
501: 1.054697554697550,
502: 1.079794079794080,
503: 1.099099099099100,
504: 1.118404118404120,
505: 1.105534105534110,
506: 1.092664092664090,
507: 1.068854568854570,
508: 1.045045045045050,
509: 1.030244530244540,
510: 1.015444015444020,
511: 1.014157014157020,
512: 1.012870012870010,
513: 1.010939510939510,
514: 1.009009009009010,
515: 0.992277992277993,
516: 0.975546975546976,
517: 0.956241956241957,
518: 0.936936936936937,
519: 0.924710424710425,
520: 0.912483912483913,
521: 0.894465894465895,
522: 0.876447876447876,
523: 0.884813384813385,
524: 0.893178893178893,
525: 0.907979407979408,
526: 0.922779922779923,
527: 0.946589446589447,
528: 0.970398970398970,
529: 0.983268983268983,
530: 0.996138996138996,
531: 0.992921492921493,
532: 0.989703989703990,
533: 0.969111969111970,
534: 0.948519948519949,
535: 0.933719433719434,
536: 0.918918918918919,
537: 0.929214929214929,
538: 0.939510939510939,
539: 0.959459459459459,
540: 0.979407979407979,
541: 1.083011583011580,
542: 1.186615186615190,
543: 1.416988416988420,
544: 1.647361647361650,
545: 1.724581724581720,
546: 1.801801801801800,
547: 1.756756756756760,
548: 1.711711711711710,
549: 1.505791505791510,
550: 1.299871299871300,
551: 1.150579150579150,
552: 1.001287001287000,
553: 0.985199485199485,
554: 0.969111969111969,
555: 0.965250965250966,
556: 0.961389961389962,
557: 0.963320463320464,
558: 0.965250965250965,
559: 0.982625482625483,
560: 1.000000000000000,
561: 1.047619047619050,
562: 1.095238095238100,
563: 1.131274131274140,
564: 1.167310167310170,
565: 1.187902187902190,
566: 1.208494208494210,
567: 1.196267696267700,
568: 1.184041184041180,
569: 1.171814671814670,
570: 1.159588159588160,
571: 1.176962676962680,
572: 1.194337194337190,
573: 1.305019305019310,
574: 1.415701415701420,
575: 1.518661518661520,
576: 1.621621621621620,
577: 1.647361647361640,
578: 1.673101673101670,
579: 1.589446589446590,
580: 1.505791505791510,
581: 1.394465894465890,
582: 1.283140283140280,
583: 1.212355212355210,
584: 1.141570141570140,
585: 1.167953667953670,
586: 1.194337194337190,
587: 1.222651222651220,
588: 1.250965250965250,
589: 1.251608751608750,
590: 1.252252252252250,
591: 1.232303732303730,
592: 1.212355212355210,
593: 1.205920205920200,
594: 1.199485199485200,
595: 1.231016731016730,
596: 1.262548262548260,
597: 1.281209781209780,
598: 1.299871299871300,
599: 1.274774774774770,
600: 1.249678249678250,
601: 1.198198198198200,
602: 1.146718146718150,
603: 1.105534105534110,
604: 1.064350064350060,
605: 1.064993564993570,
606: 1.065637065637070,
607: 1.079150579150580,
608: 1.092664092664090,
609: 1.092020592020590,
610: 1.091377091377090,
611: 1.085585585585580,
612: 1.079794079794080,
613: 1.072072072072070,
614: 1.064350064350060,
615: 1.059202059202060,
616: 1.054054054054050,
617: 1.051480051480050,
618: 1.048906048906050,
619: 1.028314028314030,
620: 1.007722007722010,
621: 1.007078507078510,
622: 1.006435006435010,
623: 1.012226512226520,
624: 1.018018018018020,
625: 1.006435006435010,
626: 0.994851994851995,
627: 0.965250965250966,
628: 0.935649935649936,
629: 0.903474903474903,
630: 0.871299871299871,
631: 0.855212355212355,
632: 0.839124839124839,
633: 0.837194337194337,
634: 0.835263835263835,
635: 0.839768339768340,
636: 0.844272844272844,
637: 0.848777348777348,
638: 0.853281853281853,
639: 0.862290862290862,
640: 0.871299871299871,
641: 0.868725868725869,
642: 0.866151866151866,
643: 0.869369369369369,
644: 0.872586872586873,
645: 0.863577863577864,
646: 0.854568854568855,
647: 0.837194337194338,
648: 0.819819819819820,
649: 0.802445302445302,
650: 0.785070785070785,
651: 0.776061776061776,
652: 0.767052767052767,
653: 0.781209781209781,
654: 0.795366795366795,
655: 0.823680823680824,
656: 0.851994851994852,
657: 0.864864864864865,
658: 0.877734877734878,
659: 0.887387387387387,
660: 0.897039897039897,
661: 0.888674388674388,
662: 0.880308880308880,
663: 0.871299871299871,
664: 0.862290862290862,
665: 0.871943371943372,
666: 0.881595881595882,
667: 0.909266409266410,
668: 0.936936936936937,
669: 0.953667953667954,
670: 0.970398970398970,
671: 0.960746460746461,
672: 0.951093951093951,
673: 0.923423423423424,
674: 0.895752895752896,
675: 0.852638352638353,
676: 0.809523809523810,
677: 0.787644787644788,
678: 0.765765765765766,
679: 0.770270270270271,
680: 0.774774774774775,
681: 0.803088803088803,
682: 0.831402831402831,
683: 0.842985842985843,
684: 0.854568854568855,
685: 0.835263835263836,
686: 0.815958815958816,
687: 0.776061776061776,
688: 0.736164736164736,
689: 0.704633204633204,
690: 0.673101673101673,
691: 0.679536679536680,
692: 0.685971685971686,
693: 0.718790218790219,
694: 0.751608751608752,
695: 0.791505791505791,
696: 0.831402831402831,
697: 0.841698841698842,
698: 0.851994851994852,
699: 0.810810810810811,
700: 0.769626769626770,
701: 0.716216216216216,
702: 0.662805662805663,
703: 0.622265122265123,
704: 0.581724581724582,
705: 0.546975546975547,
706: 0.512226512226512,
707: 0.503217503217503,
708: 0.494208494208494,
709: 0.480051480051480,
710: 0.465894465894466,
711: 0.460746460746461,
712: 0.455598455598456,
713: 0.449806949806950,
714: 0.444015444015444,
715: 0.437580437580438,
716: 0.431145431145431,
717: 0.439510939510940,
718: 0.447876447876448,
719: 0.454954954954955,
720: 0.462033462033462,
721: 0.471685971685971,
722: 0.481338481338481,
723: 0.488416988416988,
724: 0.495495495495495,
725: 0.490347490347490,
726: 0.485199485199485,
727: 0.476190476190476,
728: 0.467181467181467,
729: 0.462676962676963,
730: 0.458172458172458,
731: 0.442084942084942,
732: 0.425997425997426,
733: 0.422136422136422,
734: 0.418275418275418,
735: 0.411196911196911,
736: 0.404118404118404,
737: 0.415701415701415,
738: 0.427284427284427,
739: 0.427927927927928,
740: 0.428571428571429,
741: 0.415701415701416,
742: 0.402831402831403,
743: 0.415057915057915,
744: 0.427284427284427,
745: 0.413770913770913,
746: 0.400257400257400,
747: 0.404761904761904,
748: 0.409266409266409,
749: 0.421492921492922,
750: 0.433719433719434,
751: 0.443371943371944,
752: 0.453024453024453,
753: 0.494208494208494,
754: 0.535392535392535,
755: 0.544401544401544,
756: 0.553410553410553,
757: 0.566924066924066,
758: 0.580437580437580,
759: 0.569498069498069,
760: 0.558558558558559,
761: 0.547619047619048,
762: 0.536679536679537,
763: 0.584298584298585,
764: 0.631917631917632,
765: 0.656370656370657,
766: 0.680823680823681,
767: 0.659588159588160,
768: 0.638352638352638,
769: 0.646718146718146,
770: 0.655083655083655,
771: 0.617760617760617,
772: 0.580437580437580,
773: 0.545045045045045,
774: 0.509652509652510,
775: 0.488416988416988,
776: 0.467181467181467,
777: 0.441441441441441,
778: 0.415701415701416,
779: 0.433075933075933,
780: 0.450450450450450,
781: 0.000000000000000,
782: 0.000000000000000,
783: 0.000000000000000,
784: 0.000000000000000,
785: 0.000000000000000,
786: 0.000000000000000,
787: 0.000000000000000,
788: 0.000000000000000,
789: 0.000000000000000,
790: 0.000000000000000,
791: 0.000000000000000,
792: 0.000000000000000,
793: 0.000000000000000,
794: 0.000000000000000,
795: 0.000000000000000,
796: 0.000000000000000,
797: 0.000000000000000,
798: 0.000000000000000,
799: 0.000000000000000,
800: 0.000000000000000,
801: 0.000000000000000,
802: 0.000000000000000,
803: 0.000000000000000,
804: 0.000000000000000,
805: 0.000000000000000,
806: 0.000000000000000,
807: 0.000000000000000,
808: 0.000000000000000,
809: 0.000000000000000,
810: 0.000000000000000,
811: 0.000000000000000,
812: 0.000000000000000,
813: 0.000000000000000,
814: 0.000000000000000,
815: 0.000000000000000,
816: 0.000000000000000,
817: 0.000000000000000,
818: 0.000000000000000,
819: 0.000000000000000,
820: 0.000000000000000,
821: 0.000000000000000,
822: 0.000000000000000,
823: 0.000000000000000,
824: 0.000000000000000,
825: 0.000000000000000,
826: 0.000000000000000,
827: 0.000000000000000,
828: 0.000000000000000,
829: 0.000000000000000,
830: 0.000000000000000
}
class TestSpectralSimilarityIndex(unittest.TestCase):
    """
    Defines :func:`colour.quality.ssi.spectral_similarity_index`
    definition unit tests methods.
    """

    def test_spectral_similarity_index(self):
        """
        Tests :func:`colour.quality.ssi.spectral_similarity_index` definition.
        """
        # Expected scores are exact reference values, hence assertEqual
        # rather than an almost-equal float comparison.
        self.assertEqual(
            spectral_similarity_index(SDS_ILLUMINANTS['C'],
                                      SDS_ILLUMINANTS['D65']), 94.0)
        # DATA_HMI is the 300-830 nm HMI lamp distribution defined above.
        self.assertEqual(
            spectral_similarity_index(
                SpectralDistribution(DATA_HMI), SDS_ILLUMINANTS['D50']), 72.0)


if __name__ == '__main__':
    unittest.main()
|
# @app.route('/volume', methods=['OPTIONS', 'POST'])
# # Split by year
# for year in range(y["startyear"], y["endyear"] + 1):
# pipeline = [
# {
# "$match": {
# "daypost": {"$gte": datetime(year), 1, 1), "$lte": datetime(year), 12, 31)},
# "location": {
# '$in': y['districts']
# }
# }
# },
# {
# "$group": {
# "_id": "$location",
# "count": {"$sum": 1}
# }
# }
# ]
# rent_result=list(
# mongo.db['tgrent'].aggregate(pipeline))
# sale_result=list(
# mongo.db['tgsale'].aggregate(pipeline))
# @app.route('/price', methods=['POST', 'OPTIONS'])
# for year in range(y["startyear"], y["endyear"] + 1):
# pipeline=[
# {
# "$match": {
# "listed_date": {"$gte": datetime(year, 1, 1), "$lte": datetime(year, 12, 31)}
# }
# },
# {
# '$group': {
# '_id': {'c': '$district_code', 'd': '$district'},
# 'count': {'$sum': 1},
# 'total': {'$sum': '$priceint'}
# }
# },
# {
# '$addFields': {
# 'avgRent': {'$divide': ['$total', '$count']}
# }
# }
# ]
# result[year] = []
# for dtr in list(mongo.db['thaihometown_rent'].aggregate(pipeline)):
# temp = [district_name[dtr['_id']['c'] - 1], dtr['avgRent']]
# result[year].append(temp)
|
#!/usr/bin/env python
# Copyright (C) 2012-2013, The CyanogenMod Project
# Copyright (C) 2012-2015, SlimRoms Project
# Copyright (C) 2016-2017, AOSiP
# Copyright (C) 2021 Stellar OS
# Copyright (C) 2021 Project Materium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import base64
import json
import netrc
import os
import sys
from xml.etree import ElementTree
try:
# For python3
import urllib.error
import urllib.parse
import urllib.request
except ImportError:
# For python2
import imp
import urllib2
import urlparse
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
# Verbose-output toggle. NOTE(review): main() also sets DEBUG from
# $ROOMSERVICE_DEBUG, but it already defaults to True here, making the
# env check a no-op — confirm whether False was intended as the default.
DEBUG = True
# Local manifest owned by this script.
custom_local_manifest = ".repo/local_manifests/blaze.xml"
# Branch used for repos hosted in the device organisation.
custom_default_revision = "12.1"
# Branch used for plain owner/name GitHub repos.
custom_github_revision = "twelve"
# Per-repo dependency file looked up inside each device tree.
custom_dependencies = "blaze.dependencies"
org_manifest = "ProjectBlaze-Devices"  # leave empty if org is provided in manifest
org_display = "ProjectBlaze-Devices"  # needed for displaying
# Cached base64 "user:pass" credentials for api.github.com; filled
# lazily by add_auth() ("" when no credentials are available).
github_auth = None
local_manifests = '.repo/local_manifests'
if not os.path.exists(local_manifests):
    os.makedirs(local_manifests)
def debug(*args, **kwargs):
    """Forward *args*/*kwargs* to print() only when module-level DEBUG is set."""
    if not DEBUG:
        return
    print(*args, **kwargs)
def add_auth(g_req):
    """Attach HTTP Basic auth for api.github.com to the request *g_req*.

    Credentials are read from ~/.netrc on first use and cached in the
    module-level ``github_auth`` ("" is cached when none are available,
    so the netrc file is consulted only once per run).  When credentials
    exist, an ``Authorization: Basic <b64>`` header is added.
    """
    global github_auth
    if github_auth is None:
        try:
            auth = netrc.netrc().authenticators("api.github.com")
        except (netrc.NetrcParseError, IOError):
            auth = None
        if auth:
            # .decode(): b64encode returns bytes; interpolating bytes into
            # the str header below would yield "Basic b'...'" on Python 3.
            github_auth = base64.b64encode(
                ('%s:%s' % (auth[0], auth[2])).encode()
            ).decode()
        else:
            github_auth = ""
    if github_auth:
        g_req.add_header("Authorization", "Basic %s" % github_auth)
def indent(elem, level=0):
# in-place prettyprint formatter
i = "\n" + " " * level
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def get_manifest_path():
    """Return the path of the active repo manifest.

    Old repo versions keep the whole manifest at .repo/manifest.xml;
    newer ones make that file a wrapper whose <include> element points
    at the real manifest under .repo/manifests/.
    """
    wrapper = ElementTree.parse(".repo/manifest.xml")
    # A real manifest has a <default> element; a wrapper does not.
    if wrapper.findall('default'):
        return '.repo/manifest.xml'
    return ".repo/manifests/{}".format(wrapper.find("include").get("name"))
def load_manifest(manifest):
    """Parse the XML file *manifest* and return its root element.

    A missing or unparseable file yields a fresh, empty <manifest>
    element instead of raising.
    """
    try:
        return ElementTree.parse(manifest).getroot()
    except (IOError, ElementTree.ParseError):
        return ElementTree.Element("manifest")
def get_default(manifest=None):
    """Return the <default> element of *manifest* (or of the repo manifest
    on disk when *manifest* is not given).  Raises IndexError if absent."""
    root = manifest or load_manifest(get_manifest_path())
    return root.findall('default')[0]
def get_remote(manifest=None, remote_name=None):
    """Return the <remote> element named *remote_name* from *manifest*.

    Falls back to the manifest on disk and to the default remote name
    when the respective argument is omitted; returns None when no
    matching remote exists.
    """
    root = manifest or load_manifest(get_manifest_path())
    if not remote_name:
        remote_name = get_default(manifest=root).get('remote')
    for candidate in root.findall('remote'):
        if candidate.get('name') == remote_name:
            return candidate
def get_revision(manifest=None, p="build"):
return custom_default_revision
m = manifest or load_manifest(get_manifest_path())
project = None
for proj in m.findall('project'):
if proj.get('path').strip('/') == p:
project = proj
break
revision = project.get('revision')
if revision:
return revision.replace('refs/heads/', '').replace('refs/tags/', '')
remote = get_remote(manifest=m, remote_name=project.get('remote'))
revision = remote.get('revision')
if not revision:
return custom_default_revision
return revision.replace('refs/heads/', '').replace('refs/tags/', '')
def get_from_manifest(device_name):
    """Return the local-manifest path of *device_name*'s device tree.

    Scans the custom local manifest for a project whose path looks like
    ``device/<vendor>/<device_name>``; returns None when the manifest is
    missing or no project matches.
    """
    if not os.path.exists(custom_local_manifest):
        return None
    suffix = "/" + device_name
    for proj in load_manifest(custom_local_manifest).findall("project"):
        path = proj.get("path").strip('/')
        if path.startswith("device/") and path.endswith(suffix):
            return path
    return None
def is_in_manifest(project_path):
    """Return True when *project_path* already appears as a project in
    either the custom local manifest or the main repo manifest."""
    for name in (custom_local_manifest, get_manifest_path()):
        root = load_manifest(name)
        if any(p.get("path") == project_path for p in root.findall("project")):
            return True
    return False
def add_to_manifest(repos, fallback_branch=None):
    """Append dependency repos to the custom local manifest and rewrite it.

    Each entry of *repos* is a dict with at least 'repository' and
    'target_path'; optional keys are 'remote', 'branch' and 'clone-depth'.
    Paths already present in a manifest are skipped.  *fallback_branch*
    is used as the revision only when no branch could be determined.
    """
    lm = load_manifest(custom_local_manifest)
    for repo in repos:
        repo_name = repo['repository']
        repo_path = repo['target_path']
        # Remote/branch selection: an explicit remote wins; bare names go
        # to the device org; owner/name pairs go straight to GitHub.
        if 'remote' in repo:
            repo_remote = repo['remote']
            repo_branch = custom_default_revision
        elif "/" not in repo_name:
            repo_remote = org_manifest
            repo_branch = custom_default_revision
        else:
            repo_remote = "github"
            repo_branch = custom_github_revision
        if 'branch' in repo:
            repo_branch = repo['branch']
        if is_in_manifest(repo_path):
            print('already exists: %s' % repo_path)
            continue
        print('Adding dependency:\nRepository: %s\nBranch: %s\nRemote: %s\nPath: %s\n' % (repo_name, repo_branch, repo_remote, repo_path))
        project = ElementTree.Element(
            "project",
            attrib={"path": repo_path,
                    "remote": repo_remote,
                    "name": "%s" % repo_name}
        )
        clone_depth = os.getenv('ROOMSERVICE_CLONE_DEPTH')
        if clone_depth:
            project.set('clone-depth', clone_depth)
        if repo_branch is not None:
            project.set('revision', repo_branch)
        elif fallback_branch:
            print("Using branch %s for %s" %
                  (fallback_branch, repo_name))
            project.set('revision', fallback_branch)
        else:
            print("Using default branch for %s" % repo_name)
        # A per-repo clone-depth overrides the environment-wide one.
        if 'clone-depth' in repo:
            print("Setting clone-depth to %s for %s" % (repo['clone-depth'], repo_name))
            project.set('clone-depth', repo['clone-depth'])
        lm.append(project)
    indent(lm)
    raw_xml = "\n".join(('<?xml version="1.0" encoding="UTF-8"?>',
                         ElementTree.tostring(lm).decode()))
    # Context manager guarantees the manifest is flushed and closed even
    # if the write raises (the original left the handle management manual).
    with open(custom_local_manifest, 'w') as f:
        f.write(raw_xml)
# Repo paths already processed in this run; prevents re-processing and
# infinite recursion when dependencies reference each other.
_fetch_dep_cache = []


def fetch_dependencies(repo_path, fallback_branch=None):
    """Resolve the dependency file inside *repo_path*, add any new repos
    to the local manifest, `repo sync` them, then recurse into each.

    NOTE(review): the recursive call at the bottom drops
    *fallback_branch*, so nested dependencies never inherit it — confirm
    this is intended.
    """
    global _fetch_dep_cache
    if repo_path in _fetch_dep_cache:
        return
    _fetch_dep_cache.append(repo_path)
    print('Looking for dependencies')
    # e.g. device/<vendor>/<device>/blaze.dependencies (JSON list of dicts)
    dep_p = '/'.join((repo_path, custom_dependencies))
    if os.path.exists(dep_p):
        with open(dep_p) as dep_f:
            dependencies = json.load(dep_f)
    else:
        dependencies = {}
        print('%s has no additional dependencies.' % repo_path)
    fetch_list = []
    syncable_repos = []
    for dependency in dependencies:
        if not is_in_manifest(dependency['target_path']):
            fetch_list.append(dependency)
            syncable_repos.append(dependency['target_path'])
        else:
            print("Dependency already present in manifest: %s => %s" % (dependency['repository'], dependency['target_path']))
    if fetch_list:
        print('Adding dependencies to manifest\n')
        add_to_manifest(fetch_list, fallback_branch)
    if syncable_repos:
        print('Syncing dependencies')
        os.system('repo sync --force-sync --no-tags --current-branch --no-clone-bundle %s' % ' '.join(syncable_repos))
    for deprepo in syncable_repos:
        fetch_dependencies(deprepo)
def has_branch(branches, revision):
    """Return True when *revision* names one of the GitHub branch dicts
    in *branches* (as returned by the branches API)."""
    return any(branch['name'] == revision for branch in branches)
def detect_revision(repo):
    """Pick the branch to use for *repo* (a GitHub repository dict from
    the search API).

    Tries, in order: the locally calculated revision, each branch listed
    in $ROOMSERVICE_BRANCHES, then the custom default revision.  Exits
    the process when none of them exists on the remote.

    NOTE(review): the bare ``sys.exit()`` on the failure path exits with
    status 0 — confirm a non-zero status was not intended.
    """
    print("Checking branch info")
    githubreq = urllib.request.Request(
        repo['branches_url'].replace('{/branch}', ''))
    add_auth(githubreq)
    result = json.loads(urllib.request.urlopen(githubreq).read().decode())
    calc_revision = get_revision()
    print("Calculated revision: %s" % calc_revision)
    if has_branch(result, calc_revision):
        return calc_revision
    fallbacks = os.getenv('ROOMSERVICE_BRANCHES', '').split()
    for fallback in fallbacks:
        if has_branch(result, fallback):
            print("Using fallback branch: %s" % fallback)
            return fallback
    if has_branch(result, custom_default_revision):
        print("Falling back to custom revision: %s"
              % custom_default_revision)
        return custom_default_revision
    print("Branches found:")
    for branch in result:
        print(branch['name'])
    print("Use the ROOMSERVICE_BRANCHES environment variable to "
          "specify a list of fallback branches.")
    sys.exit()
def main():
    """Entry point: roomservice <product> [deps-only].

    In deps-only mode, syncs the dependencies of an already-present
    device tree.  Otherwise searches the device org on GitHub for the
    device repository, adds it to the local manifest, syncs it and then
    fetches its dependencies.
    """
    global DEBUG
    try:
        # NOTE(review): the int literal 1 can never equal a sys.argv
        # string, so only the exact string 'true' enables deps-only mode;
        # ['true', '1'] was probably intended — confirm.
        depsonly = bool(sys.argv[2] in ['true', 1])
    except IndexError:
        depsonly = False
    if os.getenv('ROOMSERVICE_DEBUG'):
        DEBUG = True
    # Device name is everything after the first '_' in the product name
    # (e.g. 'blaze_foo' -> 'foo'); falls back to the whole product name.
    product = sys.argv[1]
    device = product[product.find("_") + 1:] or product
    if depsonly:
        repo_path = get_from_manifest(device)
        if repo_path:
            fetch_dependencies(repo_path)
        else:
            print("Trying dependencies-only mode on a "
                  "non-existing device tree?")
        sys.exit()
    print("Device {0} not found. Attempting to retrieve device repository from "
          "{1} Github (http://github.com/{1}).".format(device, org_display))
    githubreq = urllib.request.Request(
        "https://api.github.com/search/repositories?"
        "q={0}+user:{1}+in:name+fork:true".format(device, org_display))
    add_auth(githubreq)
    repositories = []
    try:
        result = json.loads(urllib.request.urlopen(githubreq).read().decode())
    except urllib.error.URLError:
        print("Failed to search GitHub")
        sys.exit()
    except ValueError:
        print("Failed to parse return data from GitHub")
        sys.exit()
    for res in result.get('items', []):
        repositories.append(res)
    for repository in repositories:
        repo_name = repository['name']
        print (repository['name'])
        # Only accept repos named [android_]device_*_<device>.
        if not ((repo_name.startswith("device_") or repo_name.startswith("android_device_")) and
                repo_name.endswith("_" + device)):
            continue
        print("Found repository: %s" % repository['name'])
        fallback_branch = detect_revision(repository)
        # Slice out the manufacturer between the prefix and '_<device>'.
        manufacturer = repo_name[(15 if repo_name.startswith("android_device_") else 7):-(len(device)+1)]
        repo_path = "device/%s/%s" % (manufacturer, device)
        adding = [{'repository': repo_name, 'target_path': repo_path}]
        add_to_manifest(adding, fallback_branch)
        print("Syncing repository to retrieve project.")
        os.system('repo sync --force-sync --no-tags --current-branch --no-clone-bundle %s' % repo_path)
        print("Repository synced!")
        fetch_dependencies(repo_path, fallback_branch)
        print("Done")
        sys.exit()
    print("Repository for %s not found in the %s Github repository list."
          % (device, org_display))
    print("If this is in error, you may need to manually add it to your "
          "%s" % custom_local_manifest)
|
"""
字节串使用示例
所有字符串都能转化为字节串,但不是所有字节串都能转化为字符串。
"""
# 定义一个字节串变量
b = b"hello world" # 用于ASCii
print(type(b))
# 定义一个非ASCII字节串变量
b1 = "你好".encode()
print(b1)
x1 = b'\xe4\xbd\xa0\xe5\xa5\xbd'.decode() # 等同于 x1 = b1.decode()
print(x1)
file = open("file.txt")
file_object = file.read()
file_list = file_object.split("\n")
print(file_list)
number = len(file_list)
|
import os
import glob
import shutil
from azureml.core import Workspace, Experiment
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core import ScriptRunConfig
from azureml.core.runconfig import DEFAULT_GPU_IMAGE
from azureml.data.data_reference import DataReference
from azureml.core import Dataset

# Create (or attach to, via exist_ok=True) the Azure ML workspace.
ws = Workspace.create(
    "yolo3_ws",
    subscription_id="a6c2a7cc-d67e-4a1a-b765-983f08c0423a",
    resource_group="doorfinder_rg",
    location="westus2",
    exist_ok=True,
)
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep="\n")

# Stage the training code into a clean snapshot folder.
PROJECT_FOLDER = "./aml/staging"
if os.path.exists(PROJECT_FOLDER):
    shutil.rmtree(PROJECT_FOLDER)
os.makedirs(PROJECT_FOLDER, exist_ok=True)

# Copy loose source/config files; one loop replaces three duplicated ones.
for pattern in ("*.py", "*.cfg", "*.txt"):
    for f in glob.glob(pattern):
        shutil.copy(f, PROJECT_FOLDER)
shutil.copytree("model_data", os.path.join(PROJECT_FOLDER, 'model_data'))
shutil.copytree("yolo3", os.path.join(PROJECT_FOLDER, 'yolo3'))

# Software environment: pinned Keras/TF stack plus the Azure ML SDK.
cd = CondaDependencies.create(pip_packages=['keras==2.1.5', 'tensorflow==1.6.0', 'pillow', 'matplotlib', 'h5py', 'tensorboard'], conda_packages=['python=3.6.11'])
myenv = Environment("yolov3")
myenv.python.conda_dependencies = cd
myenv.python.conda_dependencies.add_pip_package("azureml-sdk")
myenv.python.conda_dependencies.add_channel("conda-forge")
myenv.docker.enabled = True
myenv.docker.base_image = DEFAULT_GPU_IMAGE

# Choose a name for your GPU cluster.
CLUSTER_NAME = "gpu-cluster"

# Reuse the cluster if it already exists, otherwise provision a new one.
try:
    aml_cluster = AmlCompute(workspace=ws, name=CLUSTER_NAME)
    print("Found existing cluster, use it.")
except ComputeTargetException:
    print("provisioning new compute target")
    compute_config = AmlCompute.provisioning_configuration(
        vm_size="STANDARD_NC6", max_nodes=8, vm_priority="lowpriority"
    )
    aml_cluster = ComputeTarget.create(ws, CLUSTER_NAME, compute_config)
    aml_cluster.wait_for_completion(show_output=True)

# Mount the VOC dataset from the workspace's default blob store.
def_blob_store = ws.get_default_datastore()
# def_blob_store.upload("VOCdevkit", target_path="/data/VOCdevkit")
dataset = Dataset.File.from_files(path=(def_blob_store, '/data/VOCdevkit'))

src = ScriptRunConfig(
    source_directory=PROJECT_FOLDER,
    script='train.py',
    arguments=["--data", dataset.as_named_input('input').as_mount()],
    compute_target=aml_cluster,
    environment=myenv
)

# Submit the training run.
EXPERIMENT_NAME = "keras-yolo3"
experiment = Experiment(workspace=ws, name=EXPERIMENT_NAME)
run = experiment.submit(config=src)
|
import pandas
import webbrowser
import os

# Load the movie dataset, keyed by its movie_id column.
data_table = pandas.read_csv("movies.csv", index_col="movie_id")

# Render the table as HTML and write it to a scratch file for viewing.
with open("movie_list.html", "w") as f:
    f.write(data_table.to_html())

# Point the default browser at the generated page.
full_filename = os.path.abspath("movie_list.html")
webbrowser.open("file://{}".format(full_filename))
|
import math
import time
from grpc import Call
import torch
from colossalai.utils import MultiTimer
from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode, Config
from typing import List, Dict, Tuple, Callable
def get_time_stamp() -> float:
    """
    Return the time stamp for profiling.

    The CUDA device is synchronized first so that asynchronously launched
    kernels are finished before the clock is read.

    Returns:
        time_stamp (float): the time given by time.time()
    """
    # time.time() returns a float; the previous ``-> int`` annotation was wrong.
    torch.cuda.synchronize()
    return time.time()
def get_memory_states() -> Tuple[float, float]:
    """
    Return the peak CUDA memory statistics in gigabytes.

    Peak stats are reset and the cache emptied as a side effect, so each
    call reports the peak since the previous call.

    Returns:
        max_allocated (float): peak allocated CUDA memory in GB
        max_cached (float): peak reserved (cached) CUDA memory in GB
    """
    # Annotation fixed: two floats are returned, not Tuple[float].
    max_allocated = torch.cuda.max_memory_allocated() / (1024**3)
    max_cached = torch.cuda.max_memory_reserved() / (1024**3)
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.empty_cache()
    return max_allocated, max_cached
def find_all_configs(device_cnt: int) -> List[Dict]:
    """
    Find all possible configurations for tensor parallelism

    Args:
        device_cnt (int): the number of devices

    Returns:
        config_list (List[Dict]): a list of configurations
        (each dict is wrapped in a colossalai ``Config`` before returning)
    """

    def _is_square(num):
        # True when num is a perfect square (needed by the 2D/2.5D modes).
        return math.floor(math.sqrt(num))**2 == num

    def _is_cube(num):
        # True when num is a perfect cube (needed by the 3D mode).
        return math.floor(num**(1. / 3.))**3 == num

    config_list = []
    # add non-parallel config
    config = dict(parallel=dict(tensor=dict(size=device_cnt, mode=None)))
    config_list.append(config)
    # add 1D config
    config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='1d')))
    config_list.append(config)
    # add 2D config only if device_cnt is a square
    # (the original comment said "1D" here, which was wrong)
    if _is_square(device_cnt):
        config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='2d')))
        config_list.append(config)
    # check for 2.5D
    # iterate over depth: each depth that divides device_cnt into a square
    # number of devices yields a valid 2.5D layout
    for depth in range(1, device_cnt):
        if device_cnt % depth == 0 and _is_square(device_cnt // depth):
            config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='2.5d', depth=depth)))
            config_list.append(config)
    # check for 3D if device_cnt is a cube
    if _is_cube(device_cnt):
        config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='3d')))
        config_list.append(config)
    config_list = [Config(cfg) for cfg in config_list]
    return config_list
def profile_model(model: torch.nn.Module, warmup_steps: int, profile_steps: int, data_func: Callable,
                  timer: MultiTimer) -> Tuple[float, float, float, float]:
    """
    Profile the forward and backward of a model

    Args:
        model (torch.nn.Module): a PyTorch model
        warmup_steps (int): the number of steps for warmup
        profile_steps (int): the number of steps for profiling
        data_func (Callable): a function to generate random data
        timer (colossalai.utils.MultiTimer): a timer instance for time recording

    Returns:
        fwd_time (float): the average forward time taken by forward pass in second
        bwd_time (float): the average backward time taken by forward pass in second
        max_allocated (float): the maximum GPU memory allocated in GB
        max_cached (float): the maximum GPU memory cached in GB
    """

    def _run_step(data):
        # One forward+backward pass, each phase timed separately.
        timer.start('forward')
        out = model(data)
        timer.stop('forward', keep_in_history=True)
        timer.start('backward')
        out.mean().backward()
        timer.stop('backward', keep_in_history=True)

    # Warmup runs so one-time initialization does not pollute the measurement.
    data_list = [data_func() for _ in range(warmup_steps)]
    for data in data_list:
        _run_step(data)
    # Discard warmup timings before the measured runs.
    timer.reset('forward')
    timer.reset('backward')
    for _ in range(profile_steps):
        data = data_func()
        _run_step(data)
    # Memory peaks are also reset here, so they cover the profiled steps.
    max_allocated, max_cached = get_memory_states()
    fwd_time = timer.get_timer('forward').get_history_mean()
    bwd_time = timer.get_timer('backward').get_history_mean()
    return fwd_time, bwd_time, max_allocated, max_cached
def get_batch_data(dim: int, batch_size: int, seq_length: int, mode: ParallelMode) -> torch.Tensor:
    """
    Return a random data of shape (batch_size, seq_length, dim) for profiling.

    Args:
        dim (int): hidden size
        batch_size (int): the number of data samples
        seq_length (int): the number of tokens
        mode (ParallelMode): Colossal-AI ParallelMode enum

    Returns:
        data (torch.Tensor): random data

    NOTE(review): despite the ``ParallelMode`` annotation, ``mode`` is
    compared against plain strings below — confirm what callers actually
    pass, since an enum value would never match '2d'/'2.5d'/'3d'.
    """
    # Shrink the local shard for multi-dimensional tensor-parallel layouts.
    if mode in ['2d', '2.5d']:
        batch_size = batch_size // 2
        dim = dim // 2
    elif mode == '3d':
        batch_size = batch_size // 4
        dim = dim // 2
    data = torch.rand(batch_size, seq_length, dim).cuda()
    return data
|
r"""
Number fields
"""
#*****************************************************************************
# Copyright (C) 2005 David Kohel <kohel@maths.usyd.edu>
# William Stein <wstein@math.ucsd.edu>
# 2008 Teresa Gomez-Diaz (CNRS) <Teresa.Gomez-Diaz@univ-mlv.fr>
# 2008-2009 Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.categories.category_singleton import Category_singleton
from sage.categories.basic import Fields
class NumberFields(Category_singleton):
    r"""
    The category of number fields.
    EXAMPLES:
    We create the category of number fields::
        sage: C = NumberFields()
        sage: C
        Category of number fields
    By definition, it is infinite::
        sage: NumberFields().Infinite() is NumberFields()
        True
    Notice that the rational numbers `\QQ` *are* considered as
    an object in this category::
        sage: RationalField() in C
        True
    However, we can define a degree 1 extension of `\QQ`, which is of
    course also in this category::
        sage: x = PolynomialRing(RationalField(), 'x').gen()
        sage: K = NumberField(x - 1, 'a'); K
        Number Field in a with defining polynomial x - 1
        sage: K in C
        True
    Number fields all lie in this category, regardless of the name
    of the variable::
        sage: K = NumberField(x^2 + 1, 'a')
        sage: K in C
        True
    TESTS::
        sage: TestSuite(NumberFields()).run()
    """
    def super_categories(self):
        """
        Number fields are (infinite) fields; no other super category.
        EXAMPLES::
            sage: NumberFields().super_categories()
            [Category of infinite fields]
        """
        return [Fields().Infinite()]
    def __contains__(self, x):
        r"""
        Returns True if ``x`` is a number field.
        EXAMPLES::
            sage: NumberField(x^2+1,'a') in NumberFields()
            True
            sage: QuadraticField(-97,'theta') in NumberFields()
            True
            sage: CyclotomicField(97) in NumberFields()
            True
        Note that the rational numbers QQ are a number field::
            sage: QQ in NumberFields()
            True
            sage: ZZ in NumberFields()
            False
        """
        # Imported lazily — presumably to avoid an import cycle at module
        # load time; verify before hoisting to the top of the file.
        import sage.rings.number_field.number_field_base
        return sage.rings.number_field.number_field_base.is_NumberField(x)
    def _call_(self, x):
        r"""
        Constructs an object in this category from the data in ``x``,
        or throws a TypeError.
        EXAMPLES::
            sage: C = NumberFields()
            sage: C(QQ)
            Rational Field
            sage: C(NumberField(x^2+1,'a'))
            Number Field in a with defining polynomial x^2 + 1
            sage: C(UnitGroup(NumberField(x^2+1,'a'))) # indirect doctest
            Number Field in a with defining polynomial x^2 + 1
            sage: C(ZZ)
            Traceback (most recent call last):
            ...
            TypeError: unable to canonically associate a number field to Integer Ring
        """
        try:
            # Objects that live over a number field expose it here.
            return x.number_field()
        except AttributeError:
            raise TypeError("unable to canonically associate a number field to %s"%x)
    class ParentMethods:
        pass
    class ElementMethods:
        pass
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Optional, Sequence, Tuple, Union, List
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util
import oneflow.core.record.image_pb2 as image_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate
import traceback
@oneflow_export("data.ImagePreprocessor")
class ImagePreprocessor(object):
def __init__(self, preprocessor: str) -> None:
assert isinstance(preprocessor, str)
if preprocessor.lower() != "bgr2rgb" and preprocessor.lower() != "mirror":
raise ValueError('preprocessor must be "bgr2rgb" or "mirror".')
self.preprocessor = preprocessor
def to_proto(
self, proto: Optional[image_util.ImagePreprocess] = None
) -> image_util.ImagePreprocess:
if proto is None:
proto = image_util.ImagePreprocess()
if self.preprocessor == "bgr2rgb":
proto.bgr2rgb.SetInParent()
elif self.preprocessor == "mirror":
proto.mirror.SetInParent()
else:
raise NotImplementedError
return proto
@oneflow_export("data.ImageResizePreprocessor")
class ImageResizePreprocessor(object):
def __init__(self, width: int, height: int) -> None:
assert isinstance(width, int)
assert isinstance(height, int)
self.width = width
self.height = height
def to_proto(
self, proto: Optional[image_util.ImagePreprocess] = None
) -> image_util.ImagePreprocess:
proto = proto or image_util.ImagePreprocess()
setattr(proto.resize, "width", self.width)
setattr(proto.resize, "height", self.height)
return proto
@oneflow_export("data.ImageCodec")
class ImageCodec(object):
def __init__(
self,
image_preprocessors: Optional[
Union[List[ImageResizePreprocessor], Tuple[ImageResizePreprocessor]]
] = None,
) -> None:
if isinstance(image_preprocessors, (list, tuple)):
self.image_preprocessors = list(image_preprocessors)
else:
self.image_preprocessors = []
def to_proto(
self, proto: Optional[op_conf_util.EncodeConf] = None
) -> op_conf_util.EncodeConf:
if proto is None:
proto = op_conf_util.EncodeConf()
proto.jpeg.preprocess.extend([p.to_proto() for p in self.image_preprocessors])
return proto
@oneflow_export("data.RawCodec")
class RawCodec(object):
def __init__(self, auto_zero_padding: bool = False) -> None:
self.auto_zero_padding = auto_zero_padding
def to_proto(
self, proto: Optional[op_conf_util.EncodeConf] = None
) -> op_conf_util.EncodeConf:
if proto is None:
proto = op_conf_util.EncodeConf()
proto.raw.dim1_varying_length = False
proto.raw.auto_zero_padding = self.auto_zero_padding
return proto
@oneflow_export("data.BytesListCodec")
class BytesListCodec(object):
def __init__(self) -> None:
pass
def to_proto(
self, proto: Optional[op_conf_util.EncodeConf] = None
) -> op_conf_util.EncodeConf:
if proto is None:
proto = op_conf_util.EncodeConf()
proto.bytes_list.SetInParent()
return proto
@oneflow_export("data.NormByChannelPreprocessor")
class NormByChannelPreprocessor(object):
def __init__(
self,
mean_values: Union[List[float], Tuple[float]],
std_values: Union[List[float], Tuple[float]] = (1.0, 1.0, 1.0),
data_format: str = "channels_last",
) -> None:
assert isinstance(mean_values, (list, tuple))
assert isinstance(std_values, (list, tuple))
assert isinstance(data_format, str)
self.mean_values = mean_values
self.std_values = std_values
self.data_format = data_format
def to_proto(
self, proto: Optional[op_conf_util.PreprocessConf] = None
) -> op_conf_util.PreprocessConf:
if proto is None:
proto = op_conf_util.PreprocessConf()
proto.norm_by_channel_conf.mean_value.extend(self.mean_values)
proto.norm_by_channel_conf.std_value.extend(self.std_values)
proto.norm_by_channel_conf.data_format = self.data_format
return proto
@oneflow_export("data.BlobConf")
class BlobConf(object):
def __init__(
self,
name: str,
shape: Sequence[int],
dtype: dtype_util.dtype,
codec: Union[ImageCodec, RawCodec],
preprocessors: Optional[
Sequence[
Union[
ImagePreprocessor,
ImageResizePreprocessor,
NormByChannelPreprocessor,
]
]
] = None,
) -> None:
assert isinstance(name, str)
assert isinstance(shape, (list, tuple))
self.name = name
self.shape = shape
self.dtype = dtype
self.codec = codec
if isinstance(preprocessors, (list, tuple)):
self.preprocessors = list(preprocessors)
else:
self.preprocessors = []
def to_proto(self) -> op_conf_util.BlobConf:
blob_conf = op_conf_util.BlobConf()
blob_conf.name = self.name
blob_conf.shape.dim.extend(self.shape)
blob_conf.data_type = self.dtype.oneflow_proto_dtype
self.codec.to_proto(blob_conf.encode_case)
blob_conf.preprocess.extend([p.to_proto() for p in self.preprocessors])
return blob_conf
@oneflow_export("data.decode_ofrecord")
@oneflow_deprecate()
def decode_ofrecord(
ofrecord_dir: str,
blobs: Sequence[BlobConf],
batch_size: int = 1,
data_part_num: int = 1,
part_name_prefix: str = "part-",
part_name_suffix_length: int = -1,
shuffle: bool = False,
buffer_size: int = 1024,
name: str = None,
) -> Tuple[remote_blob_util.BlobDef]:
print(
"WARNING:",
"oneflow.data.decode_ofrecord is deprecated, and NOT work in eager mode, please use: \n",
" 1) ofrecord = oneflow.data.ofrecord_reader(...) to read ofrecord; \n",
" 2) image = oneflow.data.ofrecord_image_decoder(...) to decode image; \n",
" 3) raw = oneflow.data.ofrecord_raw_decoder(...) to decode raw data like label; \n",
traceback.format_stack()[-2],
)
assert not flow.eager_execution_enabled()
if name is None:
name = id_util.UniqueStr("Decode_")
lbis = []
op_conf = op_conf_util.OperatorConf()
op_conf.name = name
op_conf.decode_ofrecord_conf.data_dir = ofrecord_dir
op_conf.decode_ofrecord_conf.data_part_num = data_part_num
op_conf.decode_ofrecord_conf.batch_size = batch_size
op_conf.decode_ofrecord_conf.part_name_prefix = part_name_prefix
op_conf.decode_ofrecord_conf.part_name_suffix_length = part_name_suffix_length
if shuffle == True:
op_conf.decode_ofrecord_conf.random_shuffle_conf.buffer_size = buffer_size
for blob_conf in blobs:
op_conf.decode_ofrecord_conf.blob.extend([blob_conf.to_proto()])
lbi = logical_blob_id_util.LogicalBlobId()
lbi.op_name = name
lbi.blob_name = blob_conf.name
lbis.append(lbi)
interpret_util.ConsistentForward(op_conf)
return tuple(map(lambda x: remote_blob_util.RemoteBlob(x), lbis))
@oneflow_export("data.ofrecord_loader")
def ofrecord_loader(
ofrecord_dir: str,
batch_size: int = 1,
data_part_num: int = 1,
part_name_prefix: str = "part-",
part_name_suffix_length: int = -1,
shuffle: bool = False,
shuffle_buffer_size: int = 1024,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
if name is None:
name = id_util.UniqueStr("OFRecord_Loader_")
op_conf = op_conf_util.OperatorConf()
op_conf.name = name
op_conf.record_load_conf.out = "out"
op_conf.record_load_conf.data_dir = ofrecord_dir
op_conf.record_load_conf.data_part_num = data_part_num
op_conf.record_load_conf.batch_size = batch_size
op_conf.record_load_conf.part_name_prefix = part_name_prefix
if part_name_suffix_length != -1:
op_conf.record_load_conf.part_name_suffix_length = part_name_suffix_length
if shuffle:
op_conf.record_load_conf.random_shuffle_conf.buffer_size = shuffle_buffer_size
lbi = logical_blob_id_util.LogicalBlobId()
lbi.op_name = name
lbi.blob_name = "out"
interpret_util.ConsistentForward(op_conf)
return remote_blob_util.RemoteBlob(lbi)
@oneflow_export("data.ofrecord_reader")
def ofrecord_reader(
ofrecord_dir: str,
batch_size: int = 1,
data_part_num: int = 1,
part_name_prefix: str = "part-",
part_name_suffix_length: int = -1,
random_shuffle: bool = False,
shuffle_buffer_size: int = 1024,
shuffle_after_epoch: bool = False,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
r"""Get ofrecord object from ofrecord dataset.
Args:
ofrecord_dir (str): Path to ofrecord dataset.
batch_size (int, optional): Batch size. Defaults to 1.
data_part_num (int, optional): Number of dataset's partitions. Defaults to 1.
part_name_prefix (str, optional): Prefix of dataset's parition file. Defaults to "part-".
part_name_suffix_length (int, optional): Total length of padded suffix number , -1 means no padding. eg: 3 for `part-001`. Defaults to -1.
random_shuffle (bool, optional): Determines records shuffled or not. Defaults to False.
shuffle_buffer_size (int, optional): Shuffle buffer size. Defaults to 1024.
shuffle_after_epoch (bool, optional): Shuffled or not after each epoch. Defaults to False.
name (Optional[str], optional): Optional name. Defaults to None.
Returns:
remote_blob_util.BlobDef: The result Blob
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
@flow.global_function(type="predict")
def ofrecord_reader_job() -> Tuple[tp.Numpy, tp.Numpy]:
batch_size = 16
with flow.scope.placement("cpu", "0:0"):
# our ofrecord file path is "./dataset/part-0"
ofrecord = flow.data.ofrecord_reader(
"./dataset/",
batch_size=batch_size,
data_part_num=1,
part_name_suffix_length=-1,
part_name_prefix='part-',
random_shuffle=True,
shuffle_after_epoch=True,
)
# image shape is (28*28, )
image = flow.data.OFRecordRawDecoder(
ofrecord, "images", shape=(784, ), dtype=flow.int32
)
# label shape is (1, )
label = flow.data.OFRecordRawDecoder(
ofrecord, "labels", shape=(1, ), dtype=flow.int32
)
return image, label
if __name__ == "__main__":
images, labels = ofrecord_reader_job()
print("In per batch, images shape is", images.shape)
print("In per batch, labels shape is", labels.shape)
# In per batch, images shape is (16, 784)
# In per batch, labels shape is (16, 1)
"""
if name is None:
name = id_util.UniqueStr("OFRecord_Reader_")
return (
flow.user_op_builder(name)
.Op("OFRecordReader")
.Output("out")
.Attr("data_dir", ofrecord_dir)
.Attr("data_part_num", data_part_num)
.Attr("batch_size", batch_size)
.Attr("part_name_prefix", part_name_prefix)
.Attr("random_shuffle", random_shuffle)
.Attr("shuffle_buffer_size", shuffle_buffer_size)
.Attr("shuffle_after_epoch", shuffle_after_epoch)
.Attr("part_name_suffix_length", part_name_suffix_length)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("data.decode_random")
def decode_random(
shape: Sequence[int],
dtype: dtype_util.dtype,
batch_size: int = 1,
initializer: Optional[initializer_conf_util.InitializerConf] = None,
tick: Optional[remote_blob_util.BlobDef] = None,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
op_conf = op_conf_util.OperatorConf()
if name is None:
name = id_util.UniqueStr("DecodeRandom_")
assert isinstance(name, str)
op_conf.name = name
assert isinstance(shape, (list, tuple))
op_conf.decode_random_conf.shape.dim.extend(shape)
assert dtype is not None
setattr(op_conf.decode_random_conf, "data_type", dtype.oneflow_proto_dtype)
op_conf.decode_random_conf.batch_size = batch_size
if initializer is not None:
op_conf.decode_random_conf.data_initializer.CopyFrom(initializer)
else:
op_conf.decode_random_conf.data_initializer.CopyFrom(
flow.random_uniform_initializer()
)
if tick:
op_conf.decode_random_conf.tick = tick.unique_name
op_conf.decode_random_conf.out = "out"
lbi = logical_blob_id_util.LogicalBlobId()
lbi.op_name = op_conf.name
lbi.blob_name = "out"
interpret_util.ConsistentForward(op_conf)
return remote_blob_util.RemoteBlob(lbi)
@oneflow_export(
    "data.image_decoder_random_crop_resize", "data.ImageDecoderRandomCropResize"
)
def image_decoder_random_crop_resize(
    input_blob: remote_blob_util.BlobDef,
    target_width: int,
    target_height: int,
    num_attempts: Optional[int] = None,
    seed: Optional[int] = None,
    random_area: Optional[Sequence[float]] = None,
    random_aspect_ratio: Optional[Sequence[float]] = None,
    num_workers: Optional[int] = None,
    warmup_size: Optional[int] = None,
    max_num_pixels: Optional[int] = None,
    name: Optional[str] = None,
) -> Tuple[remote_blob_util.BlobDef]:
    """Decode images and apply a random crop + resize in one fused op.

    Only conf fields for arguments that are not None are written, leaving
    backend defaults in place otherwise.

    NOTE(review): annotated as Tuple[...] but a single RemoteBlob is
    returned — confirm which form callers expect.
    """
    if name is None:
        name = id_util.UniqueStr("ImageDecoderRandomCropResize_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    # "in" is a Python keyword, so setattr is required for this field.
    setattr(op_conf.image_decoder_random_crop_resize_conf, "in", input_blob.unique_name)
    op_conf.image_decoder_random_crop_resize_conf.out = "out"
    op_conf.image_decoder_random_crop_resize_conf.target_width = target_width
    op_conf.image_decoder_random_crop_resize_conf.target_height = target_height
    if num_attempts is not None:
        op_conf.image_decoder_random_crop_resize_conf.num_attempts = num_attempts
    if seed is not None:
        op_conf.image_decoder_random_crop_resize_conf.seed = seed
    if random_area is not None:
        assert len(random_area) == 2
        op_conf.image_decoder_random_crop_resize_conf.random_area_min = random_area[0]
        op_conf.image_decoder_random_crop_resize_conf.random_area_max = random_area[1]
    if random_aspect_ratio is not None:
        assert len(random_aspect_ratio) == 2
        op_conf.image_decoder_random_crop_resize_conf.random_aspect_ratio_min = random_aspect_ratio[
            0
        ]
        op_conf.image_decoder_random_crop_resize_conf.random_aspect_ratio_max = random_aspect_ratio[
            1
        ]
    if num_workers is not None:
        op_conf.image_decoder_random_crop_resize_conf.num_workers = num_workers
    if warmup_size is not None:
        op_conf.image_decoder_random_crop_resize_conf.warmup_size = warmup_size
    if max_num_pixels is not None:
        op_conf.image_decoder_random_crop_resize_conf.max_num_pixels = max_num_pixels
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
@oneflow_export("data.onerec_reader")
def onerec_reader(
files,
batch_size=1,
random_shuffle=False,
shuffle_mode="instance",
shuffle_buffer_size=1024,
shuffle_after_epoch=False,
verify_example=True,
name=None,
):
assert isinstance(files, (list, tuple))
if name is None:
name = id_util.UniqueStr("OneRecReader_")
return (
flow.user_op_builder(name)
.Op("OneRecReader")
.Output("out")
.Attr("files", files)
.Attr("batch_size", batch_size)
.Attr("random_shuffle", random_shuffle)
.Attr("shuffle_mode", shuffle_mode)
.Attr("shuffle_buffer_size", shuffle_buffer_size)
.Attr("shuffle_after_epoch", shuffle_after_epoch)
.Attr("verify_example", verify_example)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
|
import pytest
from briefcase.commands.base import BaseCommand
from briefcase.config import AppConfig, BaseConfig
class DummyCommand(BaseCommand):
    """
    A dummy command to test the BaseCommand interface.
    """
    # Stray trailing comma removed: `command = 'dummy',` made this attribute
    # a 1-tuple instead of the intended string.
    command = 'dummy'
    platform = 'tester'
    output_format = 'dumdum'
    description = 'Dummy base command'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def add_options(self, parser):
        # Provide some extra arguments:
        # * some optional arguments
        parser.add_argument('-x', '--extra')
        parser.add_argument('-m', '--mystery')
        # * a required argument
        parser.add_argument('-r', '--required', required=True)

    def binary_path(self, app):
        raise NotImplementedError()

    def distribution_path(self, app, packaging_format):
        raise NotImplementedError()
@pytest.fixture
def base_command(tmp_path):
    # A DummyCommand with its one required option already parsed.
    command = DummyCommand(base_path=tmp_path)
    command.parse_options(['-r', 'default'])
    return command
# Define some stub command classes
# These will be used to test the command accessor
class DummyCreateCommand(DummyCommand):
    description = "Test Create"
class DummyUpdateCommand(DummyCommand):
    description = "Test Update"
class DummyBuildCommand(DummyCommand):
    description = "Test Build"
class DummyRunCommand(DummyCommand):
    description = "Test Run"
class DummyPackageCommand(DummyCommand):
    description = "Test Package"
class DummyPublishCommand(DummyCommand):
    description = "Test Publish"
# Register the commands with the module
# (presumably looked up by these well-known attribute names when the
# platform module is loaded — verify against the command accessor.)
create = DummyCreateCommand
update = DummyUpdateCommand
build = DummyBuildCommand
run = DummyRunCommand
package = DummyPackageCommand
publish = DummyPublishCommand
# Define a command that defines a custom config class, and has no options.
class CustomGlobalConfig(BaseConfig):
    # Global config stub recording a single extra attribute ``foo``.
    def __init__(self, foo, **kwargs):
        super().__init__(**kwargs)
        self.foo = foo
    def __repr__(self):
        return '<Custom GlobalConfig {foo}>'.format(foo=self.foo)
class CustomAppConfig(AppConfig):
    # App config stub: **kwargs is accepted but not forwarded to super(),
    # which is always initialized with fixed values — TODO confirm intended.
    def __init__(self, foo, bar, **kwargs):
        super().__init__(
            app_name='custom',
            bundle='com.example',
            description='Custom app',
            version="37.42",
            sources=['src/custom'],
        )
        self.foo = foo
        self.bar = bar
    def __repr__(self):
        return '<Custom AppConfig {foo}, {bar}>'.format(
            foo=self.foo,
            bar=self.bar
        )
class OtherDummyCommand(BaseCommand):
    """A second dummy command using the custom global/app config classes."""
    GLOBAL_CONFIG_CLASS = CustomGlobalConfig
    APP_CONFIG_CLASS = CustomAppConfig
    # Stray trailing comma removed: `command = 'other',` was a 1-tuple.
    command = 'other'
    platform = 'tester'
    output_format = 'dumdum'
    description = 'Another dummy command'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def binary_path(self, app):
        raise NotImplementedError()

    def distribution_path(self, app, packaging_format):
        raise NotImplementedError()
@pytest.fixture
def other_command(tmp_path):
    # Bare OtherDummyCommand; no options are parsed for this fixture.
    return OtherDummyCommand(base_path=tmp_path)
@pytest.fixture
def my_app():
    # A minimal, fully-populated AppConfig used across the test suite.
    return AppConfig(
        app_name='my-app',
        formal_name='My App',
        bundle='com.example',
        version='1.2.3',
        description='This is a simple app',
        sources=['src/my_app'],
    )
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import h5py
import pytest
from wetterdienst.provider.dwd.radar import (
DwdRadarDataFormat,
DwdRadarDataSubset,
DwdRadarDate,
DwdRadarParameter,
DwdRadarPeriod,
DwdRadarResolution,
DwdRadarValues,
)
from wetterdienst.provider.dwd.radar.sites import DwdRadarSite
@pytest.mark.remote
def test_radar_request_site_current_sweep_pcp_v_hdf5():
    """
    Example for testing radar sites full current SWEEP_PCP,
    this time in OPERA HDF5 (ODIM_H5) format.
    """
    request = DwdRadarValues(
        parameter=DwdRadarParameter.SWEEP_PCP_VELOCITY_H,
        start_date=DwdRadarDate.CURRENT,
        site=DwdRadarSite.BOO,
        fmt=DwdRadarDataFormat.HDF5,
        subset=DwdRadarDataSubset.SIMPLE,
    )
    results = list(request.query())
    if not results:
        # pytest.skip() raises internally; the previous `raise pytest.skip(...)`
        # misused it as if it returned an exception.
        pytest.skip("Data currently not available")
    buffer = results[0].data
    payload = buffer.getvalue()
    # Verify data.
    assert payload.startswith(b"\x89HDF\r\n")
    # Verify more details.
    # wddump ras07-stqual-pcpng01_sweeph5onem_vradh_00-2020093000403400-boo-10132-hd5
    hdf = h5py.File(buffer, "r")
    assert hdf["/how/radar_system"] is not None
    assert hdf["/how"].attrs.get("task") == b"Sc_Pcp-NG-01_BOO"
    assert hdf["/what"].attrs.get("source") == b"WMO:10132,NOD:deboo"
    assert hdf["/how"].attrs.get("scan_count") == 1
    assert hdf["/dataset1/how"].attrs.get("scan_index") == 1
    shape = hdf["/dataset1/data1/data"].shape
    assert shape == (360, 600) or shape == (361, 600)
@pytest.mark.remote
def test_radar_request_site_current_sweep_vol_v_hdf5_full():
    """
    Example for testing radar sites full current SWEEP_VOL,
    this time in OPERA HDF5 (ODIM_H5) format.
    """
    request = DwdRadarValues(
        parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,
        start_date=DwdRadarDate.CURRENT,
        site=DwdRadarSite.BOO,
        fmt=DwdRadarDataFormat.HDF5,
        subset=DwdRadarDataSubset.SIMPLE,
    )
    results = list(request.query())
    if not results:
        # pytest.skip() raises internally; a bare call is the correct form.
        pytest.skip("Data currently not available")
    buffer = results[0].data
    payload = buffer.getvalue()
    # Verify data.
    assert payload.startswith(b"\x89HDF\r\n")
    # Verify more details.
    # wddump ras07-stqual-vol5minng01_sweeph5onem_vradh_00-2020092917055800-boo-10132-hd5
    hdf = h5py.File(buffer, "r")
    assert hdf["/how/radar_system"] is not None
    assert hdf["/how"].attrs.get("task") == b"Sc_Vol-5Min-NG-01_BOO"
    assert hdf["/what"].attrs.get("source") == b"WMO:10132,NOD:deboo"
    assert hdf["/how"].attrs.get("scan_count") == 10
    assert hdf["/dataset1/how"].attrs.get("scan_index") == 1
    shape = hdf["/dataset1/data1/data"].shape
    assert shape == (360, 180) or shape == (360, 720) or shape == (361, 720)
@pytest.mark.remote
def test_radar_request_site_current_sweep_vol_v_hdf5_single():
    """
    Example for testing radar sites single current SWEEP_VOL,
    this time in OPERA HDF5 (ODIM_H5) format.
    """
    request = DwdRadarValues(
        parameter=DwdRadarParameter.SWEEP_VOL_VELOCITY_H,
        start_date=DwdRadarDate.CURRENT,
        site=DwdRadarSite.BOO,
        fmt=DwdRadarDataFormat.HDF5,
        subset=DwdRadarDataSubset.SIMPLE,
        elevation=1,
    )
    results = list(request.query())
    if not results:
        # pytest.skip() raises internally; a bare call is the correct form.
        pytest.skip("Data currently not available")
    assert len(results) <= 1
    assert "vradh_01" in results[0].url
    buffer = results[0].data
    hdf = h5py.File(buffer, "r")
    assert hdf["/how"].attrs.get("scan_count") == 10
    assert hdf["/dataset1/how"].attrs.get("scan_index") == 2
@pytest.mark.remote
@pytest.mark.parametrize(
    "resolution",
    [
        DwdRadarResolution.DAILY,
        DwdRadarResolution.HOURLY,
    ],
)
def test_radar_request_radolan_cdc_current(resolution):
    """
    Verify data acquisition for current RADOLAN_CDC/daily+hourly.
    Remark: More often than not, this data is not
    available when looking at CURRENT.
    """
    request = DwdRadarValues(
        parameter=DwdRadarParameter.RADOLAN_CDC,
        start_date=DwdRadarDate.CURRENT,
        resolution=resolution,
        period=DwdRadarPeriod.RECENT,
    )
    results = list(request.query())
    if not results:
        # pytest.skip() raises internally; a bare call is the correct form.
        pytest.skip("Data currently not available")
    assert len(results) == 1
@pytest.mark.remote
def test_radar_request_radolan_cdc_current_5min():
    """
    Verify failure for RADOLAN_CDC/5 minutes.
    """
    # RADOLAN_CDC is not offered at 5-minute resolution, so even
    # constructing the request must be rejected.
    with pytest.raises(ValueError):
        DwdRadarValues(
            start_date=DwdRadarDate.CURRENT,
            parameter=DwdRadarParameter.RADOLAN_CDC,
            resolution=DwdRadarResolution.MINUTE_5,
        )
|
# Define count_entries()
def countr_entries(csv_file, c_size, column_name):
    """Return a dictionary mapping each distinct value of ``column_name``
    to the number of times it occurs in ``csv_file``.

    The CSV is streamed in chunks of ``c_size`` rows, so arbitrarily large
    files can be counted in bounded memory.
    """
    # Local import keeps the function self-contained; the surrounding
    # script did not import pandas in this chunk.
    import pandas as pd

    # Initialize an empty dictionary: counts_dict
    counts_dict = {}
    # Iterate over the file chunk by chunk
    for chunk in pd.read_csv(csv_file, chunksize=c_size):
        # Iterate over the column in DataFrame
        for entry in chunk[column_name]:
            # dict.get avoids the double lookup of `entry in counts_dict.keys()`
            counts_dict[entry] = counts_dict.get(entry, 0) + 1
    # Return counts_dict
    return counts_dict
# Call count_entries(): result_counts
# NOTE(review): expects a 'tweets.csv' file with a 'lang' column in the
# current working directory -- TODO confirm the file ships with the exercise.
result_counts = countr_entries('tweets.csv',10,'lang')
# Print result_counts
print(result_counts)
|
# Key of the Google Doc that drives the content (presumably a spreadsheet
# key -- verify against the consuming code).
COPY_GOOGLE_DOC_KEY = '0AiIfOsKv5mKldGF3Um1jekxRMUNra01MVldsU193QUE'
# Slug identifying the deployment target.
DEPLOY_SLUG = 'doors'
# Number of slides appended after the main content.
NUM_SLIDES_AFTER_CONTENT = 2
|
'''
>>> Welcome to Hangman!
_ _ _ _ _ _ _ _ _
>>> Guess your letter: S
Incorrect!
>>> Guess your letter: E
E _ _ _ _ _ _ _ E
'''
'''
Pseudocode approach with assignment to objects:
Welcome message [Player]
Choose a random word [Board]
Display the word with _ symbols where no letters are known [Board]
Display how many guesses the player has left [Player]
Ask for a guess [Player]
Check whether the player has guessed that already - if so they get another go [Player]
Check whether that guess is in the word [Board]
Loop until word is guessed or guesses are all used up [Main]
'''
import random
class Player:
    """Tracks the player's guesses and remaining lives, and performs all
    console interaction with the player."""

    def __init__(self):
        print("Welcome to this game.")
        self.guesses = []        # letters guessed so far (upper case)
        self.guesses_left = 6    # remaining wrong guesses

    def print_how_many_guesses_left(self):
        # Singular/plural phrasing depending on the remaining count.
        if self.guesses_left > 1:
            print(f"You have {self.guesses_left} guesses left.")
        else:
            print(f"You have {self.guesses_left} guess left.")

    def ask_for_guess(self) -> str:
        """Prompt for a letter; return it upper-cased, or None if it was
        already guessed."""
        letter = input("Guess a letter: ").upper()
        if letter in self.guesses:
            return None
        self.guesses.append(letter)
        return letter

    def display_whether_guess_correct(self, correct: bool):
        print("Correct" if correct else "That's not in the word")

    def print_message(self, message):
        print(message)
class Board:
    """Holds the secret word and tracks which of its letters have been
    revealed."""

    def __init__(self):
        self.word = ""
        # Parallel to self.word: True at positions whose letter is revealed.
        self.guessed = []

    def choose_random_word(self):
        """Pick a random word from the word-list file."""
        with open("sowpods.txt", "r") as f:
            candidates = f.readlines()
            self.word = random.choice(candidates).strip()
            self.guessed = [False] * len(self.word)

    def display_word(self):
        """Print the word with '_' for unrevealed letters."""
        shown = [letter if hit else "_"
                 for letter, hit in zip(self.word, self.guessed)]
        print(" ".join(shown), end=" ")
        print("\n")

    def check_if_guess_in_word(self, guess):
        """Reveal every occurrence of `guess`; return whether any matched."""
        found = False
        for position, letter in enumerate(self.word):
            if letter == guess:
                self.guessed[position] = True
                found = True
        return found

    def all_letters_have_been_guessed(self):
        return all(self.guessed)

    def debug_print_out_variables(self):
        print("word:", self.word)
        print("guessed:", self.guessed)
if __name__ == "__main__":
    print("\n")
    p = Player()
    b = Board()
    b.choose_random_word()
    # Main game loop: one iteration per accepted (non-repeated) guess.
    while True:
        b.display_word()
        # b.debug_print_out_variables()
        # Re-prompt until the player enters a letter they have not tried yet.
        while True:
            p.print_how_many_guesses_left()
            guess = p.ask_for_guess()
            if guess is None:
                p.print_message("You already guessed that letter - have another go")
            else:
                break
        correct = b.check_if_guess_in_word(guess)
        print("\n")
        p.display_whether_guess_correct(correct)
        # Only wrong guesses cost a life.
        if not correct:
            p.guesses_left -= 1
        # Win: every letter revealed.  Lose: no guesses remaining.
        if b.all_letters_have_been_guessed():
            p.print_message("All guessed")
            break
        if p.guesses_left == 0:
            p.print_message("All guesses used up")
            break
    p.print_message(f"The word was: {b.word}")
|
# NOTE(review): auto-generated .NET interop stub (IronPython / Python for
# .NET style).  The member assignments below are ``None`` placeholders
# only; the real enum values are supplied by the CLR at runtime.
class AutoCompleteSource(Enum,IComparable,IFormattable,IConvertible):
    """
    Specifies the source for System.Windows.Forms.ComboBox and System.Windows.Forms.TextBox automatic completion functionality.
    enum AutoCompleteSource,values: AllSystemSources (7),AllUrl (6),CustomSource (64),FileSystem (1),FileSystemDirectories (32),HistoryList (2),ListItems (256),None (128),RecentlyUsedList (4)
    """
    def Instance(self):
        """ This function has been arbitrarily put into the stubs"""
        return AutoCompleteSource()
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Enum members -- see the class docstring for their numeric values.
    # ``None`` is a reserved word in Python, hence the ``None_`` spelling.
    AllSystemSources=None
    AllUrl=None
    CustomSource=None
    FileSystem=None
    FileSystemDirectories=None
    HistoryList=None
    ListItems=None
    None_ =None
    RecentlyUsedList=None
    value__=None
|
import torch
import torch.nn as nn
class ConvB(nn.Conv2d):
    """Conv2d variant that adds an extra learnable bias tensor ``bf``
    to the convolution output."""

    @staticmethod
    def from_conv(module: nn.Conv2d, bias):
        """Convert an existing Conv2d *in place* into a ConvB carrying
        ``bias`` as the parameter ``bf``; returns the same module."""
        module.__class__ = ConvB
        module.register_parameter('bf', torch.nn.Parameter(bias))
        return module

    def forward(self, x):
        out = super().forward(x)
        # bf broadcasts against the (N, C, H, W) convolution output.
        return out + self.bf
class ConvExpand(nn.Conv2d):
    """Conv2d whose output channels are scattered into a larger
    zero-filled tensor according to ``idxs``, then offset by bias ``bf``.

    Presumably used after channel pruning: the convolution produces only
    the kept channels and forward() re-inserts them at their original
    channel positions -- TODO confirm against the calling code.
    """
    @staticmethod
    def from_conv(module: nn.Conv2d, idxs: torch.Tensor, bias):
        # Convert an existing Conv2d in place; returns the same module.
        module.__class__ = ConvExpand
        module.register_parameter('bf', torch.nn.Parameter(bias))
        # Skip the bias addition in forward() when bias is all zeros.
        setattr(module, "use_bf", bias.abs().sum() != 0)
        module.register_buffer('idxs', idxs.to(module.weight.device))
        # Zero template with the full (expanded) channel count; batch dim
        # of 1 so it can be expanded to any batch size later.
        module.register_buffer('zeros', torch.zeros(1, *bias.shape, dtype=bias.dtype, device=module.weight.device))
        # Plain-attribute caches of the expanded tensors, refreshed lazily
        # in forward() when the input shape changes.
        setattr(module, 'idxs_cache', module.idxs)
        setattr(module, 'zero_cache', module.zeros)
        return module
    def forward(self, x):
        x = super().forward(x)
        zeros = self.zero_cache
        index = self.idxs_cache
        # Rebuild the cached expansions only when the batch size or the
        # full output shape changed since the previous call.
        if zeros.shape[0] != x.shape[0]:
            zeros = self.zeros.expand(x.shape[0], *self.zeros.shape[1:])
            self.zero_cache = zeros
        if index.shape != x.shape:
            index = self.idxs[None, :, None, None].expand_as(x)
            self.idxs_cache = index
        # Place each produced channel at its original position along dim 1;
        # channels not listed in idxs stay zero.
        expanded = torch.scatter(zeros, 1, index, x)
        return expanded + self.bf if self.use_bf else expanded
    def __repr__(self):
        return f'ConvExpand({self.in_channels}, {self.out_channels}, exp={len(self.idxs)})'
class BatchNormB(nn.BatchNorm2d):
    """BatchNorm2d that adds an extra learnable per-channel bias ``bf``
    after normalization."""

    @staticmethod
    def from_bn(module: nn.BatchNorm2d, bias):
        """Convert an existing BatchNorm2d *in place*; returns it."""
        module.__class__ = BatchNormB
        module.register_parameter('bf', torch.nn.Parameter(bias))
        return module

    def forward(self, x):
        normed = super().forward(x)
        # bf has shape (C,); add spatial axes and broadcast over the batch.
        return normed + self.bf[:, None, None].expand_as(normed[0])
class BatchNormExpand(nn.BatchNorm2d):
    """BatchNorm2d whose output channels are scattered into a larger
    zero-filled tensor according to ``idxs``, then offset by bias ``bf``.

    Counterpart of ConvExpand for normalization layers; presumably used
    after channel pruning -- TODO confirm against the calling code.
    """
    @staticmethod
    def from_bn(module: nn.BatchNorm2d, idxs: torch.Tensor, bias, shape):
        # Convert an existing BatchNorm2d in place; returns the same module.
        module.__class__ = BatchNormExpand
        module.register_parameter('bf', torch.nn.Parameter(bias))
        module.register_buffer('idxs', idxs.to(module.weight.device))
        # Zero template shaped like the expanded output (minus batch dim,
        # which is kept at 1 so it can be expanded to any batch size).
        module.register_buffer('zeros', torch.zeros(1, *shape[1:], dtype=bias.dtype, device=module.weight.device))
        # Lazily refreshed caches of the expanded tensors (see forward()).
        setattr(module, 'zero_cache', module.zeros)
        setattr(module, 'idxs_cache', module.idxs)
        return module
    def forward(self, x):
        x = super().forward(x)
        zeros = self.zero_cache
        index = self.idxs_cache
        # Rebuild the cached expansions only when the input shape changed.
        if zeros.shape[0] != x.shape[0]:
            zeros = self.zeros.expand(x.shape[0], *self.zeros.shape[1:])
            self.zero_cache = zeros
        if index.shape != x.shape:
            index = self.idxs[None, :, None, None].expand_as(x)
            self.idxs_cache = index
        # Place each normalized channel at its original position along dim 1.
        expanded = torch.scatter(zeros, 1, index, x)
        # bf has shape (C,); add spatial axes and broadcast over the batch.
        return expanded + self.bf[:, None, None].expand_as(expanded)
    def __repr__(self):
        return f'BatchNormExpand({self.num_features}, eps={self.eps}, momentum={self.momentum}, affine={self.affine}, track_running_stats={self.track_running_stats})'
|
class Solution:
    def mctFromLeafValues(self, A):
        """Minimum possible sum of the non-leaf node values of a binary
        tree whose in-order leaves are A (each non-leaf value is the
        product of the largest leaf in its left and right subtrees).

        Greedy monotonic-stack formulation: each leaf should be merged
        with the smaller of its neighbouring maxima, so pop a value as
        soon as a neighbour >= it appears.  Runs in O(n).
        """
        total = 0
        # Decreasing stack, guarded by an infinite sentinel at the bottom.
        decreasing = [float('inf')]
        for leaf in A:
            # Merge every stacked value that is <= the incoming leaf with
            # the cheaper of its two neighbours.
            while decreasing[-1] <= leaf:
                smallest = decreasing.pop()
                total += smallest * min(decreasing[-1], leaf)
            decreasing.append(leaf)
        # Collapse what remains (strictly decreasing) right-to-left.
        while len(decreasing) > 2:
            total += decreasing.pop() * decreasing[-1]
        return total
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

# Runtime dependencies are maintained in requirements.txt, one per line.
with open('requirements.txt') as f:
    install_requires = f.read().strip().split('\n')

# get version from __version__ variable in midtrans_payment/__init__.py
from midtrans_payment import __version__ as version

setup(
    name='midtrans_payment',
    version=version,
    description='Midtrans Payment',
    author='uniklia.com',
    author_email='unikliadotcom@gmail.com',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires
)
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file unittests/libtests/materials/data/DruckerPrager3DElastic.py
## @brief Python application for generating C++ data files for testing
## C++ DruckerPrager3D object with elastic behavior.
from ElasticMaterialApp import ElasticMaterialApp
import numpy
import math
# ----------------------------------------------------------------------
# Spatial dimension of the problem.
dimension = 3
# Number of entries in the flattened 6x6 elasticity matrix.
numElasticConsts = 36
# Independent components of a symmetric 3D tensor (Voigt notation).
tensorSize = 6
# DruckerPrager3DElastic class
class DruckerPrager3DElastic(ElasticMaterialApp):
    """
    Python application for generating C++ data files for testing C++
    DruckerPrager3D object with elastic behavior.
    """

    # PUBLIC METHODS /////////////////////////////////////////////////////

    def __init__(self, name="druckerprager3delastic"):
        """
        Constructor.

        Sets up two material points (cases A and B): derives the Lame
        parameters from wave speeds and density and the Drucker-Prager
        yield/flow coefficients from friction angle, cohesion and
        dilatation angle, then computes the expected elasticity
        constants and stresses for each point via _calcStress().
        """
        ElasticMaterialApp.__init__(self, name)

        # import pdb
        # pdb.set_trace()

        numLocs = 2

        self.dimension = dimension
        self.numLocs = numLocs

        # Names (and value counts) expected from the spatial database.
        self.dbPropertyValues = ["density", "vs", "vp",
                                 "friction-angle", "cohesion",
                                 "dilatation-angle"]
        self.numPropertyValues = numpy.array([1, 1, 1, 1, 1, 1], dtype=numpy.int32)

        # State variables: plastic strain components (Voigt ordering).
        self.dbStateVarValues = ["plastic-strain-xx",
                                 "plastic-strain-yy",
                                 "plastic-strain-zz",
                                 "plastic-strain-xy",
                                 "plastic-strain-yz",
                                 "plastic-strain-xz"
                                 ]
        self.numStateVarValues = numpy.array([6], dtype=numpy.int32)

        densityA = 2500.0
        vsA = 3000.0
        vpA = vsA*3**0.5
        # First case has different values for friction angle and dilatation angle.
        frictionAngleA = math.radians(30.0)
        dilatationAngleA = math.radians(20.0)
        cohesionA = 3.0e5
        strainA = [-1.1e-4, -1.2e-4, -1.3e-4, 1.4e-4, 1.5e-4, 1.6e-4]
        initialStressA = [2.1e4, 2.2e4, 2.3e4, 2.4e4, 2.5e4, 2.6e4]
        initialStrainA = [3.1e-4, 3.2e-4, 3.3e-4, 3.4e-4, 3.5e-4, 3.6e-4]
        # Lame parameters from density and seismic wave speeds.
        muA = vsA*vsA*densityA
        lambdaA = vpA*vpA*densityA - 2.0*muA

        # Drucker-Prager coefficients (alphaYield, beta, alphaFlow)
        # derived from friction angle, cohesion and dilatation angle.
        denomFrictionA = math.sqrt(3.0) * (3.0 - math.sin(frictionAngleA))
        denomDilatationA = math.sqrt(3.0) * (3.0 - math.sin(dilatationAngleA))
        alphaYieldA = 2.0 * math.sin(frictionAngleA)/denomFrictionA
        betaA = 6.0 * cohesionA * math.cos(frictionAngleA)/denomFrictionA
        alphaFlowA = 2.0 * math.sin(dilatationAngleA)/denomDilatationA

        densityB = 2000.0
        vsB = 1200.0
        vpB = vsB*3**0.5
        # Second case has same values for friction angle and dilatation angle.
        frictionAngleB = math.radians(25.0)
        dilatationAngleB = math.radians(25.0)
        cohesionB = 1.0e5
        strainB = [4.1e-4, 4.2e-4, 4.3e-4, 4.4e-4, 4.5e-4, 4.6e-4]
        initialStressB = [5.1e4, 5.2e4, 5.3e4, 5.4e4, 5.5e4, 5.6e4]
        initialStrainB = [6.1e-4, 6.2e-4, 6.3e-4, 6.4e-4, 6.5e-4, 6.6e-4]
        muB = vsB*vsB*densityB
        lambdaB = vpB*vpB*densityB - 2.0*muB
        denomFrictionB = math.sqrt(3.0) * (3.0 - math.sin(frictionAngleB))
        denomDilatationB = math.sqrt(3.0) * (3.0 - math.sin(dilatationAngleB))
        alphaYieldB = 2.0 * math.sin(frictionAngleB)/denomFrictionB
        betaB = 6.0 * cohesionB * math.cos(frictionAngleB)/denomFrictionB
        alphaFlowB = 2.0 * math.sin(dilatationAngleB)/denomDilatationB

        # Nondimensionalization scales (pressure scale taken from case A).
        self.lengthScale = 1.0e+3
        self.pressureScale = muA
        self.timeScale = 1.0
        self.densityScale = muA / (self.lengthScale / self.timeScale)**2

        self.dbProperties = numpy.array([ [densityA, vsA, vpA, \
                                           frictionAngleA, cohesionA, \
                                           dilatationAngleA],
                                          [densityB, vsB, vpB, \
                                           frictionAngleB, cohesionB, \
                                           dilatationAngleB] ],
                                        dtype=numpy.float64)
        self.properties = numpy.array([ [densityA, muA, lambdaA, \
                                         alphaYieldA, betaA, \
                                         alphaFlowA],
                                        [densityB, muB, lambdaB, \
                                         alphaYieldB, betaB, \
                                         alphaFlowB] ],
                                      dtype=numpy.float64)

        # TEMPORARY, need to determine how to use initial state variables
        self.dbStateVars = numpy.zeros( (numLocs, tensorSize), dtype=numpy.float64)
        self.stateVars = numpy.zeros( (numLocs, tensorSize), dtype=numpy.float64)

        mu0 = self.pressureScale
        density0 = self.densityScale
        # Nondimensionalized properties; the alpha coefficients are
        # already dimensionless.
        self.propertiesNondim = \
            numpy.array([ [densityA/density0, muA/mu0, lambdaA/mu0, \
                           alphaYieldA, betaA/mu0, \
                           alphaFlowA],
                          [densityB/density0, muB/mu0, lambdaB/mu0, \
                           alphaYieldB, betaB/mu0, \
                           alphaFlowB] ],
                        dtype=numpy.float64)

        self.stateVarsNondim = self.stateVars # no scaling

        self.initialStress = numpy.array([initialStressA,
                                          initialStressB],
                                         dtype=numpy.float64)
        self.initialStrain = numpy.array([initialStrainA,
                                          initialStrainB],
                                         dtype=numpy.float64)

        self.density = numpy.array([densityA,
                                    densityB],
                                   dtype=numpy.float64)

        self.strain = numpy.array([strainA,
                                   strainB],
                                  dtype=numpy.float64)

        self.stress = numpy.zeros( (numLocs, tensorSize), dtype=numpy.float64)
        self.elasticConsts = numpy.zeros( (self.numLocs, numElasticConsts), \
                                          dtype=numpy.float64)

        # Expected elasticity matrices and stresses for both cases.
        (self.elasticConsts[0,:], self.stress[0,:]) = \
            self._calcStress(strainA, muA, lambdaA, \
                             initialStressA, initialStrainA)
        (self.elasticConsts[1,:], self.stress[1,:]) = \
            self._calcStress(strainB, muB, lambdaB, \
                             initialStressB, initialStrainB)

        self.dtStableImplicit = 1.0e+10
        self.dtStableExplicit = 1000.0 / vpA

        # Elastic step: plastic strain remains zero.
        plasticStrainUpdated = numpy.zeros((numLocs, tensorSize),
                                           dtype=numpy.float64)

        self.stateVarsUpdated = numpy.array( [plasticStrainUpdated[0,:],
                                              plasticStrainUpdated[1,:]],
                                             dtype=numpy.float64)

        return

    def _calcStress(self, strainV, muV, lambdaV, initialStressV, initialStrainV):
        """
        Compute stress and derivative of elasticity matrix.

        Returns a tuple (elasticConsts, stress): elasticConsts is the
        flattened 6x6 isotropic elasticity matrix and stress is the
        Voigt stress vector C . (strain - initialStrain) + initialStress.
        """
        # Isotropic elasticity matrix entries.
        # NOTE(review): shear diagonal uses 2*mu -- presumably the
        # engineering-strain convention of the C++ tests; confirm before
        # reusing this matrix elsewhere.
        C1111 = lambdaV + 2.0*muV
        C1122 = lambdaV
        C1133 = lambdaV
        C1112 = 0.0
        C1123 = 0.0
        C1113 = 0.0
        C2211 = lambdaV
        C2222 = lambdaV + 2.0*muV
        C2233 = lambdaV
        C2212 = 0.0
        C2223 = 0.0
        C2213 = 0.0
        C3311 = lambdaV
        C3322 = lambdaV
        C3333 = lambdaV + 2.0*muV
        C3312 = 0.0
        C3323 = 0.0
        C3313 = 0.0
        C1211 = 0.0
        C1222 = 0.0
        C1233 = 0.0
        C1212 = 2.0*muV
        C1223 = 0.0
        C1213 = 0.0
        C2311 = 0.0
        C2322 = 0.0
        C2333 = 0.0
        C2312 = 0.0
        C2323 = 2.0*muV
        C2313 = 0.0
        C1311 = 0.0
        C1322 = 0.0
        C1333 = 0.0
        C1312 = 0.0
        C1323 = 0.0
        C1313 = 2.0*muV
        elasticConsts = numpy.array([C1111, C1122, C1133, C1112, C1123, C1113,
                                     C2211, C2222, C2233, C2212, C2223, C2213,
                                     C3311, C3322, C3333, C3312, C3323, C3313,
                                     C1211, C1222, C1233, C1212, C1223, C1213,
                                     C2311, C2322, C2333, C2312, C2323, C2313,
                                     C1311, C1322, C1333, C1312, C1323, C1313],
                                    dtype=numpy.float64)

        strain = numpy.reshape(strainV, (6,1))
        initialStress = numpy.reshape(initialStressV, (tensorSize,1))
        initialStrain = numpy.reshape(initialStrainV, (tensorSize,1))
        elastic = numpy.array([ [C1111, C1122, C1133, C1112, C1123, C1113],
                                [C2211, C2222, C2233, C2212, C2223, C2213],
                                [C3311, C3322, C3333, C3312, C3323, C3313],
                                [C1211, C1222, C1233, C1212, C1223, C1213],
                                [C2311, C2322, C2333, C2312, C2323, C2313],
                                [C1311, C1322, C1333, C1312, C1323, C1313] ],
                              dtype=numpy.float64)
        stress = numpy.dot(elastic, strain-initialStrain) + initialStress
        return (elasticConsts, numpy.ravel(stress))
# MAIN /////////////////////////////////////////////////////////////////
if __name__ == "__main__":
    # Generate the C++ test-data files (run() is inherited from the
    # ElasticMaterialApp base class).
    app = DruckerPrager3DElastic()
    app.run()

# End of file
|
#!/usr/bin/env python3
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", required=True)
parser.add_argument("ref_nodes")
parser.add_argument("snarls")
args = parser.parse_args()

# node id -> (chrom, start, end): reference coordinates of each node,
# read from a BED-like TSV (chrom, start, end, node).
ref = {}
with open(args.ref_nodes) as infile:
    for line in infile:
        cols = line.strip().split("\t")
        chrom = cols[0]
        start = int(cols[1])
        end = int(cols[2])
        node = cols[3]
        ref[node] = (chrom, start, end)

# Convert each top-level snarl (one JSON object per line) into a BEDPE
# record spanning its two boundary nodes.  Records whose endpoints are
# on different chromosomes or out of order go to filterout.bedpe.
with open(args.snarls) as infile, open(args.output, "w") as outfile, open("filterout.bedpe", "w") as filterout:
    for line in infile:
        snarl = json.loads(line)
        #if "parent" not in snarl and "type" in snarl:
        #if snarl["type"] == 1:
        if "parent" not in snarl:
            node1 = snarl["start"]["node_id"]
            node2 = snarl["end"]["node_id"]
            # Keep only snarls whose both boundary nodes lie on the reference.
            if node1 in ref and node2 in ref:
                # Orientation: a "backward" key marks a reversed traversal.
                # The snarl id encodes per-endpoint orientation with
                # '>' (forward) / '<' (backward).
                if "backward" not in snarl["start"] and "backward" not in snarl["end"]:
                    source = node1
                    sink = node2
                    snarl_id = f">{source}>{sink}"
                elif "backward" in snarl["start"] and "backward" in snarl["end"]:
                    # Both ends reversed: swap so the id reads forward.
                    source = node2
                    sink = node1
                    snarl_id = f">{source}>{sink}"
                else:
                    # Mixed orientation: keep node order, mark the reversed end.
                    source = node1
                    sink = node2
                    if "backward" in snarl["start"]:
                        snarl_id = f"<{source}>{sink}"
                    else:
                        snarl_id = f">{source}<{sink}"
                chrom1 = ref[source][0]
                start1 = ref[source][1]
                end1 = ref[source][2]
                chrom2 = ref[sink][0]
                start2 = ref[sink][1]
                end2 = ref[sink][2]
                if chrom1 == chrom2 and start2 >= end1:
                    outfile.write(f"{chrom1}\t{start1}\t{end1}\t{chrom2}\t{start2}\t{end2}\t{snarl_id}\n")
                else:
                    filterout.write(f"{chrom1}\t{start1}\t{end1}\t{chrom2}\t{start2}\t{end2}\t{snarl_id}\n")
|
import argparse,pydub.silence as sil,os,re
from pydub import AudioSegment
parser = argparse.ArgumentParser(
    description='This function gets the silence boundaries given an input audio file in the foll. format fileset,book,chapter,db,sId,sBegin,sEnd')
required_args = parser.add_argument_group('required arguments')
required_args.add_argument(
    '-i', required=True, nargs=1, type=str, help='Input audio file')
required_args.add_argument(
    '-o', required=True, nargs=1, type=str, help='Output file name')
optional_args = parser.add_argument_group('optional arguments')
optional_args.add_argument('-db', default=[5], nargs=1, type=int, help='Threshold decibel level')
optional_args.add_argument('-sl', default=[500], nargs=1, type=int, help='Min. silence length(milli secs)')
optional_args.add_argument('-fileset', nargs=1, type=str, help='custom fileset id')
optional_args.add_argument('-book', nargs=1, type=str, help='custom book id')
optional_args.add_argument('-chapter', nargs=1, type=str, help='custom chapter id')
optional_args.add_argument('-noheader', action='store_true', help='Remove header: fileset,book,chapter,db,sId,sBegin,sEnd')
args = parser.parse_args()

input_file = args.i[0]
output_file = args.o[0]
decibels = args.db[0]
min_sil_len = args.sl[0]

# Open the output file fresh, removing any stale copy first.
if os.path.exists(output_file):
    os.remove(output_file)
write_file = open(output_file, 'w')

# Write the CSV header unless suppressed.
if not args.noheader:
    write_file.write('fileset,book,chapter,db,sId,sBegin,sEnd\n')

# Derive fileset/book/chapter ids from the underscore-separated file name;
# command-line options override the derived values.
# NOTE(review): derived order is fileset=last, book=second-to-last,
# chapter=third-to-last field -- TODO confirm against the actual naming
# convention of the input files.
filename = input_file.split('/')[-1]
# Raw-string pattern (the original '\_+' relies on a deprecated escape);
# local renamed from `input` to avoid shadowing the builtin.
name_parts = re.split(r'_+', filename.split('.')[0])
file_setid = args.fileset[0] if args.fileset is not None else name_parts[-1]
book_name = args.book[0] if args.book is not None else name_parts[-2]
chapter_num = args.chapter[0] if args.chapter is not None else name_parts[-3]

# Silence = at least min_sil_len ms that is `decibels` dB below the
# clip's average loudness (dBFS).
sound = AudioSegment.from_mp3(input_file)
dBFS = sound.dBFS
silence_boundaries = sil.detect_silence(sound, min_silence_len=min_sil_len, silence_thresh=dBFS - decibels)

line_inc = 0
for boundaries in silence_boundaries:
    line_inc += 1
    boundaries = [x / 1000 for x in boundaries]  # milliseconds -> seconds
    write_file.write(file_setid + ',' + book_name + ',' + chapter_num + ',' + '-' + str(decibels) + ',')
    write_file.write("{0},{1},{2}\n".format(line_inc, boundaries[0], boundaries[1]))
    # print("{0} {1} silence".format(boundaries[0],boundaries[1]))
write_file.close()
|
import os
# Alternative host for the service; may require a proxy to reach,
# e.g. cordcloud.org
CC_HOST = ""
# Optional HTTP(S) proxy settings; uncomment the entries to enable.
PROXIES = {
    # "http": "http://127.0.0.1:7890",
    # "https": "http://127.0.0.1:7890",
}
# CordCloud account credentials (taken from the environment).
LOGIN_FORM = {
    "email": os.getenv("CC_EMAIL", ""),
    "passwd": os.getenv("CC_PASSWD", ""),
    "code": "",
}
# ServerChan push-notification settings (optional).
SERVER_CHAN_CONFIG = {
    "enable": False,  # set to True to enable
    "key": os.getenv("SERVER_CHAN_KEY", "")
}
# Log file location (usually fine as-is).
LOG_FILE = "./cc_auto_check_in.log"
# E-mail notification settings (optional).
EMAIL_CONFIG = {
    "enable": False,  # set to True to enable
    "user": os.getenv("CC_MAIL_USER", ""),  # mailbox used to send notifications
    "pw": os.getenv("CC_MAIL_PW", ""),  # password of the sending mailbox
    "host": os.getenv("CC_MAIL_HOST", ""),  # SMTP host
    "port": os.getenv("CC_MAIL_PORT"),  # SMTP port; omit for the default
    "receivers": ["dpc@deaglepc.cn"],  # recipient addresses
}
|
from . import Plugin
class CatchallPlugin(Plugin):
    """
    Turns metrics that aren't matched by any other plugin in something a bit more useful (than not having them at all)
    Another way to look at it is.. plugin:catchall is the list of targets you can better organize ;)
    Note that the assigned tags (i.e. source tags) are best guesses. We can't know for sure!
    (this description goes for all catchall plugins)
    """
    # Negative priority so every other plugin gets a chance to match first.
    priority = -5

    targets = [
        {
            # Matches any metric name containing no '=' (i.e. untagged).
            'match': '^(?P<tosplit>[^=]*)$',
            'target_type': 'unknown',
            'tags': {
                'unit': 'unknown',
                'source': 'unknown'
            }
        },
    ]
# vim: ts=4 et sw=4:
|
'''
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
'''
import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_pretrain import ALBEF
from models.vit import interpolate_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_steps, device, scheduler, config):
    """Run one pre-training epoch and return the averaged metrics.

    Optimizes the sum of the model's three losses (MLM, ITA, ITM).

    Returns:
        dict: metric name -> global average formatted as "%.3f".
    """
    # train
    model.train()

    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_mlm', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    # During epoch 0 the LR scheduler is stepped every `step_size` iterations.
    step_size = 100
    warmup_iterations = warmup_steps*step_size

    # NOTE(review): reads the module-level ``args`` (not a parameter), so
    # this only works when invoked from this script's __main__ -- confirm
    # before reusing the function elsewhere.
    if args.distributed:
        data_loader.sampler.set_epoch(epoch)

    for i, (image, image_aug, text) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        optimizer.zero_grad()

        image = image.to(device,non_blocking=True)
        image_aug = image_aug.to(device,non_blocking=True)

        text_input = tokenizer(text, padding='longest', truncation=True, max_length=25, return_tensors="pt").to(device)

        # alpha ramps up linearly over the first epoch, then stays constant.
        if epoch>0:
            alpha = config['alpha']
        else:
            alpha = config['alpha']*min(1,i/len(data_loader))

        loss_mlm, loss_ita, loss_itm = model(image, image_aug, text_input, alpha = alpha)

        loss = loss_mlm + loss_ita + loss_itm

        loss.backward()
        optimizer.step()

        metric_logger.update(loss_mlm=loss_mlm.item())
        metric_logger.update(loss_ita=loss_ita.item())
        metric_logger.update(loss_itm=loss_itm.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

        if epoch==0 and i%step_size==0 and i<=warmup_iterations:
            scheduler.step(i//step_size)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
def main(args, config):
    """Entry point: set up and run (optionally distributed) training.

    Builds the pretraining dataset/loader, model, optimizer and LR
    scheduler, optionally restores a checkpoint, then trains for the
    configured number of epochs, saving a checkpoint and one JSON log
    line per epoch (main process only).
    """
    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility (offset per rank)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_steps = config['schedular']['warmup_epochs']

    #### Dataset ####
    print("Creating dataset")
    datasets = [create_dataset('pretrain', config)]

    if args.distributed:
        # Shuffling sampler sharded across the distributed ranks.
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler(datasets, [True], num_tasks, global_rank)
    else:
        samplers = [None]

    data_loader = create_loader(datasets,samplers,batch_size=[config['batch_size']], num_workers=[4], is_trains=[True], collate_fns=[None])[0]

    tokenizer = BertTokenizer.from_pretrained(args.text_encoder)

    #### Model ####
    print("Creating model")
    model = ALBEF(config=config, text_encoder=args.text_encoder, tokenizer=tokenizer, init_deit=True)

    model = model.to(device)

    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model)
    arg_sche = utils.AttrDict(config['schedular'])
    lr_scheduler, _ = create_scheduler(arg_sche, optimizer)

    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        if args.resume:
            # Full resume: also restore optimizer/scheduler state and epoch.
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            start_epoch = checkpoint['epoch']+1
        else:
            # Warm start from the weights only; re-interpolate the visual
            # positional embeddings in case the input resolution differs.
            pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder)
            m_pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],model.visual_encoder_m)
            state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
            state_dict['visual_encoder_m.pos_embed'] = m_pos_embed_reshaped
        model.load_state_dict(state_dict)
        print('load checkpoint from %s'%args.checkpoint)

    # Keep an un-wrapped handle for checkpointing under DDP.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    print("Start training")
    start_time = time.time()

    for epoch in range(start_epoch, max_epoch):

        if epoch>0:
            lr_scheduler.step(epoch+warmup_steps)

        train_stats = train(model, data_loader, optimizer, tokenizer, epoch, warmup_steps, device, lr_scheduler, config)
        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                         'epoch': epoch,
                        }
            save_obj = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'config': config,
                'epoch': epoch,
            }
            torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth'%epoch))

            with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
                f.write(json.dumps(log_stats) + "\n")

        dist.barrier()

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/Pretrain.yaml')
    parser.add_argument('--checkpoint', default='')
    # NOTE(review): argparse's type=bool converts any non-empty string
    # (including "False") to True; only the defaults behave as intended.
    # Consider action='store_true' or an explicit str->bool converter.
    parser.add_argument('--resume', default=False, type=bool)
    parser.add_argument('--output_dir', default='Pretrain/')
    parser.add_argument('--text_encoder', default='bert-base-uncased')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # NOTE(review): same type=bool pitfall as --resume above.
    parser.add_argument('--distributed', default=True, type=bool)
    args = parser.parse_args()

    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)

    # Keep a copy of the effective config next to the checkpoints.
    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))

    main(args, config)
|
#!/usr/bin/env python
"""
Written by Reubenur Rahman
Github: https://github.com/rreubenur/
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example script to upload a file from host to guest
"""
from __future__ import with_statement
import atexit
import requests
from tools import cli
from tools import tasks
from pyVim import connect
from pyVmomi import vim, vmodl
import re
def get_args():
    """Get command line args from the user.

    Extends the shared CLI parser with the guest-upload options and
    prompts for a password if one was not supplied.
    """
    parser = cli.build_arg_parser()
    # (short flag, long flag, help text) for each extra option.
    extra_options = [
        ('-v', '--vm_uuid', 'Virtual machine uuid'),
        ('-r', '--vm_user', 'virtual machine user name'),
        ('-w', '--vm_pwd', 'virtual machine password'),
        ('-l', '--path_inside_vm', 'Path inside VM for upload'),
        ('-f', '--upload_file', 'Path of the file to be uploaded from host'),
    ]
    for short_opt, long_opt, help_text in extra_options:
        parser.add_argument(short_opt, long_opt,
                            required=False,
                            action='store',
                            help=help_text)
    args = parser.parse_args()
    cli.prompt_for_password(args)
    return args
def main():
    """
    Simple command-line program for Uploading a file from host to guest

    Connects to vCenter/ESXi, locates the VM by UUID, verifies VMware
    Tools is usable, then pushes the local file into the guest via
    InitiateFileTransferToGuest + an HTTP PUT.  Returns 0 on success,
    -1 on a vmodl fault.
    """
    args = get_args()
    vm_path = args.path_inside_vm
    try:
        service_instance = connect.SmartConnect(host=args.host,
                                                user=args.user,
                                                pwd=args.password,
                                                port=int(args.port))
        # Ensure the session is torn down when the interpreter exits.
        atexit.register(connect.Disconnect, service_instance)
        content = service_instance.RetrieveContent()

        vm = content.searchIndex.FindByUuid(None, args.vm_uuid, True)

        # Guest file operations require VMware Tools running in the guest.
        tools_status = vm.guest.toolsStatus
        if (tools_status == 'toolsNotInstalled' or
                tools_status == 'toolsNotRunning'):
            raise SystemExit(
                "VMwareTools is either not running or not installed. "
                "Rerun the script after verifying that VMWareTools "
                "is running")

        creds = vim.vm.guest.NamePasswordAuthentication(
            username=args.vm_user, password=args.vm_pwd)

        # Read the whole file into memory for the single PUT below.
        with open(args.upload_file, 'rb') as myfile:
            fileinmemory = myfile.read()

        try:
            file_attribute = vim.vm.guest.FileManager.FileAttributes()
            url = content.guestOperationsManager.fileManager. \
                InitiateFileTransferToGuest(vm, creds, vm_path,
                                            file_attribute,
                                            len(fileinmemory), True)
            # When : host argument becomes https://*:443/guestFile?
            # Ref: https://github.com/vmware/pyvmomi/blob/master/docs/ \
            # vim/vm/guest/FileManager.rst
            # Script fails in that case, saying URL has an invalid label.
            # By having hostname in place will take take care of this.
            url = re.sub(r"^https://\*:", "https://"+str(args.host)+":", url)
            resp = requests.put(url, data=fileinmemory, verify=False)
            if not resp.status_code == 200:
                print "Error while uploading file"
            else:
                print "Successfully uploaded file"
        except IOError, e:
            print e
    except vmodl.MethodFault as error:
        print "Caught vmodl fault : " + error.msg
        return -1
    return 0
# Start program when executed as a script.
if __name__ == "__main__":
    main()
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
from dataclasses import dataclass
from pants.backend.experimental.python.lockfile import (
PythonLockfileRequest,
PythonToolLockfileSentinel,
)
from pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase
from pants.backend.python.target_types import PythonProvidesField
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.base.specs import AddressSpecs, DescendantAddresses
from pants.core.goals.package import PackageFieldSet
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest, UnexpandedTargets
from pants.engine.unions import UnionRule
from pants.python.python_setup import PythonSetup
from pants.util.docutil import git_url
from pants.util.logging import LogLevel
@dataclass(frozen=True)
class PythonDistributionFieldSet(PackageFieldSet):
    """Field set matching targets that declare a `provides` field
    (i.e. `python_distribution` targets)."""
    required_fields = (PythonProvidesField,)

    provides: PythonProvidesField
class Setuptools(PythonToolRequirementsBase):
    """Subsystem for the setuptools requirement used to package
    `python_distribution` targets."""
    options_scope = "setuptools"
    help = "Python setuptools, used to package `python_distribution` targets."

    default_version = "setuptools>=50.3.0,<57.0"
    default_extra_requirements = ["wheel>=0.35.1,<0.37"]

    # Ship a default lockfile with Pants; users may regenerate it with
    # their own constraints.
    register_lockfile = True
    default_lockfile_resource = ("pants.backend.python.subsystems", "setuptools_lockfile.txt")
    default_lockfile_path = "src/python/pants/backend/python/subsystems/setuptools_lockfile.txt"
    default_lockfile_url = git_url(default_lockfile_path)
class SetuptoolsLockfileSentinel(PythonToolLockfileSentinel):
    """Union member requesting (re)generation of the setuptools lockfile."""
    pass
@rule(
    desc="Determine all Python interpreter versions used by setuptools in your project",
    level=LogLevel.DEBUG,
)
async def setup_setuptools_lockfile(
    _: SetuptoolsLockfileSentinel, setuptools: Setuptools, python_setup: PythonSetup
) -> PythonLockfileRequest:
    """Build the lockfile request for setuptools.

    The generated lockfile must be compatible with every
    `python_distribution` target, so the interpreter constraints are
    taken from the union over each distribution's transitive closure.
    """
    all_build_targets = await Get(UnexpandedTargets, AddressSpecs([DescendantAddresses("")]))
    # One transitive-closure lookup per applicable `python_distribution`.
    transitive_targets_per_python_dist = await MultiGet(
        Get(TransitiveTargets, TransitiveTargetsRequest([tgt.address]))
        for tgt in all_build_targets
        if PythonDistributionFieldSet.is_applicable(tgt)
    )
    unique_constraints = {
        # Fall back to the global constraints when a closure declares none.
        InterpreterConstraints.create_from_targets(transitive_targets.closure, python_setup)
        or InterpreterConstraints(python_setup.interpreter_constraints)
        for transitive_targets in transitive_targets_per_python_dist
    }
    constraints = InterpreterConstraints(itertools.chain.from_iterable(unique_constraints))
    return PythonLockfileRequest.from_tool(
        setuptools, constraints or InterpreterConstraints(python_setup.interpreter_constraints)
    )
def rules():
    """Expose this module's rules plus the lockfile-sentinel union registration."""
    return (
        *collect_rules(),
        UnionRule(PythonToolLockfileSentinel, SetuptoolsLockfileSentinel),
    )
|
# coding: utf-8
"""
OpenAPI
tinkoff.ru/invest OpenAPI. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: n.v.melnikov@tinkoff.ru
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CandleResolution(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    # Allowed enum values.
    _1MIN = "1min"
    _2MIN = "2min"
    _3MIN = "3min"
    _5MIN = "5min"
    _10MIN = "10min"
    _15MIN = "15min"
    _30MIN = "30min"
    HOUR = "hour"
    DAY = "day"
    WEEK = "week"
    MONTH = "month"

    # openapi_types: attribute name -> attribute type.
    # attribute_map: attribute name -> JSON key. Both empty for a bare enum model.
    openapi_types = {}
    attribute_map = {}

    def __init__(self):  # noqa: E501
        """CandleResolution - a model defined in OpenAPI"""  # noqa: E501
        self.discriminator = None

    @staticmethod
    def _serialize(value):
        """Recursively convert nested models (anything exposing to_dict) to plain data."""
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        return {name: self._serialize(getattr(self, name)) for name in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, CandleResolution) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
#!/usr/bin/python3
import os
import os.path
import re
import argparse
def cmdline():
    """Build and return the command-line argument parser for the tool."""
    parser = argparse.ArgumentParser(description='Show USV values of characters in a file')
    add = parser.add_argument
    add('--count', action='store_true',
        help='count characters instead of just listing them')
    add('--encoding', default='utf-8',
        help='specify the encoding to display octets')
    add('-o', '--octets', action='store_true',
        help='also display the bytes stored for each character')
    add('-e', '--escape', action='store_true',
        help='output escaped text for use with FTML')
    add('-p', '--python', action='store_true',
        help='output escaped text for use with Python')
    add('--debug', action='store_true',
        help='display extra messages when reading a file')
    add('-l', '--line', type=int, default=1,
        help='line number to start reading from')
    add('-c', '--column', type=int, default=1,
        help='column number to start reading from')
    add('--eol', action='store_true',
        help='read only to the end of the line')
    add('file', nargs='+', help='file to process')
    add('--version', action='version',
        version='%(prog)s ' + '(The Fox Utils) ' + '21.7')
    return parser
def main():
    """Entry point: parse arguments, load the nameslist, then count or dump files."""
    args = cmdline().parse_args()
    nameslist_file = os.path.join(os.environ["HOME"], ".unidump", "nameslist.lst")
    ucd = read_nameslist(nameslist_file)
    options = Options(args, 'na', args.encoding, args.octets, args.python, args.eol, args.debug, ucd)
    action = countfiles if args.count else dumpfiles
    action(options, args.file, args.line, args.column)
class Options(object):
    """Bundle of runtime settings shared by the dump/count helpers."""

    def __init__(self, args, mode, encoding, show_octets, python_escape, stop, debug, ucd):
        self.args = args
        self.mode = mode
        self.encoding = encoding
        self.show_octets = show_octets
        self.python_escape = python_escape
        self.stop = stop
        self.debug = debug
        # Mapping of USV string -> character name (from the nameslist).
        self.ucd = ucd
        # Per-character tally used by the --count code path.
        self.count = {}
def read_nameslist(nameslistFile):
    """Read data from a customized nameslist file.

    Returns a dict mapping a USV hex string (e.g. "0041") to a character name.
    CJK unified ideograph and surrogate ranges are generated programmatically
    because they are not listed in the nameslist file.
    """
    ucd = {}
    # Pre-populate CJK ranges that are not in the nameslist file (bounds inclusive).
    cjk_ranges = (
        (0x4E00, 0x9FFC, ''),
        (0x3400, 0x4DBF, 'A'),
        (0x20000, 0x2A6DD, 'B'),
        (0x2A700, 0x2B734, 'C'),
        (0x2B740, 0x2B81D, 'D'),
        (0x2B820, 0x2CEA1, 'E'),
        (0x2CEB0, 0x2EBE0, 'F'),
        (0x30000, 0x3134A, 'G')
    )
    for start, end, label in cjk_ranges:
        for usvOrd in range(start, end + 1):
            usv = codepoint2usv(usvOrd)
            name = 'CJK Unified Ideograph'
            if label != '':
                name = ' '.join([name, 'Ext', label])
            ucd[usv] = '{}-{}'.format(name, usv)
    # Read the nameslist file. Raw strings keep the regex escapes valid
    # (un-raw "\d", "\w", "\(" are deprecated invalid escapes in str literals).
    re_usv_and_name = re.compile(r"([\dA-F]+)\t([\w\- <>]+)")
    re_alt_name = re.compile(r"\t= ([\w\- \(\),]+)")
    usv = ""
    name = ""
    with open(nameslistFile, 'r') as nameslist:
        for line in nameslist:
            m = re_usv_and_name.match(line)
            if m:
                usv = m.group(1)
                name = m.group(2)
            # Control characters are listed as "<control>"; prefer the "= alias" line.
            if name == "<control>":
                m = re_alt_name.match(line)
                if m:
                    alt_name = m.group(1)
                    name = "(%s)" % alt_name
            ucd[usv] = name
    # Populate surrogate ranges not in the nameslist file. Bounds are inclusive,
    # hence the "+ 1": the original code dropped the last code point of each
    # range (U+DB7F, U+DBFF, U+DFFF).
    for usvOrd in range(0xD800, 0xDB7F + 1):
        ucd[codepoint2usv(usvOrd)] = "(High Surrogate)"
    for usvOrd in range(0xDB80, 0xDBFF + 1):
        ucd[codepoint2usv(usvOrd)] = "(High Private Use Surrogate)"
    for usvOrd in range(0xDC00, 0xDFFF + 1):
        ucd[codepoint2usv(usvOrd)] = "(Low Surrogate)"
    return ucd
def dumpfiles(options, input_filenames, start_line, start_column):
    """Show Unicode values for the characters in the files."""
    for filename in input_filenames:
        for entry in dumpfile(options, filename, start_line, start_column):
            print(formatoutput(options, entry), end='')
def formatoutput(options, display):
    """Format one entry for printing; escaped modes carry their own newlines."""
    escaped = options.python_escape or options.args.escape
    if not escaped:
        return display + '\n'
    # In escaped output, a literal escaped LF becomes a real newline; everything
    # else is emitted as-is with no separator.
    if display == '\\u000a':
        return '\n'
    return display
def dumpfile(options, input_filename, start_line, start_column):
    """Yield the formatted display string for each character in the file."""
    for ch in readfile(options, input_filename, start_line, start_column):
        yield format(options, ch)
def countfiles(options, input_filenames, start_line, start_column):
    """Count characters in the files and print a tally sorted by character."""
    for filename in input_filenames:
        countfile(options, filename, start_line, start_column)
    for ch in sorted(options.count):
        print("%7d %s" % (options.count[ch], format(options, ch)))
def countfile(options, input_filename, start_line, start_column):
    """Count characters in the file, accumulating into options.count.

    Uses dict.get instead of the original membership-test-then-assign, which
    performed two lookups per character.
    """
    for cc in readfile(options, input_filename, start_line, start_column):
        options.count[cc] = options.count.get(cc, 0) + 1
def readfile(options, input_filename, start_line, start_column):
    """Yield each character in the file, or the requested subset of the file.

    NOTE: the column counter accumulates across all read lines (it is not
    reset per line), so start_column skips characters in the overall stream
    starting at start_line.
    """
    line_no = 0
    col_no = 0
    with open(input_filename, 'r', newline='') as handle:
        for text_line in handle:
            line_no += 1
            if options.debug:
                print("DEBUG: reading a line")
            if line_no < start_line:
                continue
            for ch in text_line:
                col_no += 1
                if col_no < start_column:
                    continue
                yield ch
            # --eol: stop after the first line actually read.
            if options.stop:
                break
def format(options, cc):
    """Format the current character for display.

    NOTE: intentionally shadows the builtin format() within this module.
    """
    # Escaped string-literal modes need neither name nor octets; when both are
    # requested, FTML escaping wins (matching the original override order).
    if options.args.escape:
        return escape(cc)
    if options.python_escape:
        return python(cc)
    display = "%s %s" % (usv_format(cc), name_format(options, cc))
    if options.show_octets:
        # 19 characters is enough to display four bytes in hex with leading 0x's
        display = "%-19s %s" % (octets(options, cc), display)
    return display
def usv_format(cc):
    """Format the Unicode Scalar Value (USV) of the character, e.g. "U+0041"."""
    return "U+" + cc2usv(cc)
def name_format(options, cc):
    """Look up the character's name in the loaded UCD data, defaulting to "(Unknown)"."""
    return options.ucd.get(cc2usv(cc), "(Unknown)")
def cc2usv(cc):
    """Convert a single character to its USV hex string."""
    code_point = ord(cc)
    return codepoint2usv(code_point)
def codepoint2usv(codepoint):
    """Convert a code point to an uppercase hex string, at least four digits wide."""
    return "{:04X}".format(codepoint)
def python(cc):
    """Format the character for a Python string literal."""
    cp = ord(cc)
    # Printable ASCII (and DEL) passes through unescaped.
    if 0x20 <= cp <= 0x7f:
        return cc
    template = "\\U%08x" if cp > 0xFFFF else "\\u%04x"
    return template % cp
def escape(cc):
    """Format the character for an FTML string literal."""
    cp = ord(cc)
    # Printable ASCII (and DEL) passes through unescaped.
    return cc if 0x20 <= cp <= 0x7f else "\\u%04x" % cp
def octets(options, cc):
    """Format each byte of the encoded character as 0xHH, space separated."""
    encoded = cc.encode(options.encoding)
    return " ".join("0x%02X" % byte for byte in encoded)
# Script entry point: run main() only when executed directly.
if __name__ == "__main__":
    main()
|
"""
Off Multipage Cheatsheet
https://github.com/daniellewisDL/streamlit-cheat-sheet
@daniellewisDL : https://github.com/daniellewisDL
"""
import streamlit as st
from pathlib import Path
import base64
from modules.toc import *
# Initial page config.
# NOTE: per Streamlit's docs, st.set_page_config must be the first Streamlit
# call in the script.
st.set_page_config(
    page_title='Code Compendium Intro Page',
    layout="wide",
    # initial_sidebar_state="expanded",
)
# col2.title("Table of contents")
# col2.write("http://localhost:8502/#display-progress-and-status")
# toc.header("Header 1")
# toc.header("Header 2")
# toc.subheader("Subheader 1")
# toc.subheader("Subheader 2")
# toc.generate()
# Thanks to streamlitopedia for the following code snippet
def img_to_bytes(img_path):
    """Read an image file and return its contents as a base64-encoded string."""
    raw = Path(img_path).read_bytes()
    return base64.b64encode(raw).decode()
# sidebar
# def cs_sidebar():
# st.sidebar.markdown('''[<img src='data:image/png;base64,{}' class='img-fluid' width=32 height=32>](https://streamlit.io/)'''.format(img_to_bytes("logomark_website.png")), unsafe_allow_html=True)
# st.sidebar.header('Streamlit cheat sheet')
# st.sidebar.markdown('''
# <small>Summary of the [docs](https://docs.streamlit.io/en/stable/api.html), as of [Streamlit v1.0.0](https://www.streamlit.io/).</small>
# ''', unsafe_allow_html=True)
# st.sidebar.markdown('__How to install and import__')
# st.sidebar.code('$ pip install streamlit')
# st.sidebar.markdown('Import convention')
# st.sidebar.code('>>> import streamlit as st')
# st.sidebar.markdown('__Add widgets to sidebar__')
# st.sidebar.code('''
# st.sidebar.<widget>
# >>> a = st.sidebar.radio(\'R:\',[1,2])
# ''')
# st.sidebar.markdown('__Command line__')
# st.sidebar.code('''
# $ streamlit --help
# $ streamlit run your_script.py
# $ streamlit hello
# $ streamlit config show
# $ streamlit cache clear
# $ streamlit docs
# $ streamlit --version
# ''')
# st.sidebar.markdown('__Pre-release features__')
# st.sidebar.markdown('[Beta and experimental features](https://docs.streamlit.io/en/stable/api.html#beta-and-experimental-features)')
# st.sidebar.code('''
# pip uninstall streamlit
# pip install streamlit-nightly --upgrade
# ''')
# st.sidebar.markdown('''<small>[st.cheat_sheet v1.0.0](https://github.com/daniellewisDL/streamlit-cheat-sheet) | Oct 2021</small>''', unsafe_allow_html=True)
# return None
##########################
# Main body of cheat sheet
##########################
def cs_body():
    """Render the main body of the intro page.

    Fixes two defects in the original:
    * st.columns(1) returns a list of column containers even for a single
      column, so it must be unpacked before calling widget methods on it.
    * The markdown text was broken by a stray closing ''' and a dangling `i`
      token, which made the function a syntax error; it is now one string.
    (Dead commented-out cheat-sheet code was removed.)
    """
    (col1,) = st.columns(1)
    col1.header('Ryan Paik')
    col1.markdown('''
*“You don't learn to walk by following rules. You learn by doing, and by falling over.”*
-Richard Branson

-----
**Welcome to my Code Compendium.**
This website/webapp is my personal cheatsheet for of all the code snippets that I have needed over the past 2 years. This ended up being a quick detour into Streamlit that I fell in love with while I was building flask api's.

-----
**Programming is only as deep as you want to dive in.**
This webapp features the basic code snippets from all the "googling" from programming I have done.
I have taken the plunge and have created my own markdown notebooks organizing information from quick solution tidbits to documentation for programming languages.
Please visit my github for practical code and my research notebooks:
*[rypaik (Ryan Paik) · GitHub](https://github.com/rypaik)*
If you would like access to my Gist please email me.
ryanpaik@protonmail.com

-----
**Bio:**
Currently a Sophomore at University of Illinois at Urbana-Champaign
Working Nights on my degree from the System Engineering Program
**Hobbies:**
Trying to become a real guitar hero minus the game system, playing Valorant with the St Mark's crew, getting interesting eats no matter where I am, and playing toss with my baseball field rat of a cousin.
The newest hobby is figuring out what I can build with all the new breakthroughs in technology.
**Currently Working On**
Frameworks and Languages:
- Flask, Django, FastAPI, PyTorch, Streamlit, OpenCV, shell scripting, Python, C++
Databases:
- Postgres, Redis, MongoDB, and applicable ORMs
When I can get up for Air:
- React, swift(ios), Rust, GO!!
- Find a team to get a paper In Arxiv
**This site will be constantly updated as long as I program. Feel free to pass on the URL.**
''')
# Run main()
# if __name__ == '__main__':
# main()
# def main():
def app():
    """Page entry point used by the multipage framework."""
    cs_body()
    return None
|
import math
def point(tick, range, radius):  # identical to plotterGui's, but prints points and angle
    """Map a tick on a dial of `range` ticks to integer (x, y) on a circle of `radius`.

    Angle 0 points straight up (y = +radius); angles grow clockwise.
    NOTE: the `range` parameter name shadows the builtin (kept for caller compat).
    """
    degrees = tick * (360.0 / range)
    radians = degrees * (math.pi / 180)
    coords = (int(round(radius * math.sin(radians))),
              int(round(radius * math.cos(radians))))
    print(tick, ':', degrees, '=', coords)
    return coords
def circle(points, radius, centerX, centerY):
    """Print a separator, then the dial coordinates for `points` ticks around a circle."""
    print('-' * 10)
    for tick in range(1, points + 1):
        x, y = point(tick, points, radius)
        # draw from centerX, centerY to centerX+x, centerY-y
# Canvas dimensions and the dial's center point.
Width = Height = 400
originX = Width // 2
originY = Height // 2 # 3.X / is a float
# Print dial layouts at several resolutions (quarters, hours, minutes, degrees).
circle(4, 200, originX, originY)
circle(12, 200, originX, originY)
circle(60, 200, originX, originY)
circle(360, 200, originX, originY)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 3 13:46:06 2021
@author: Sebastian
"""
import sys
sys.path.append('..\\src')
import unittest
import common.globalcontainer as glob
from dataobjects.stock import Stock
import engines.scaffold
import engines.analysis
import pandas as pd
import datetime
import logging
import random
from pytz import UTC
import os
from influxdb_client import InfluxDBClient, WriteOptions
class TestGC(unittest.TestCase):
    """Integration tests for GlobalContainer and the scaffold/analysis engines.

    Step methods named ``step<NNN>`` are discovered via ``_steps()`` and run in
    alphanumeric order by ``test_steps()``. NOTE: ``test_steps`` currently only
    executes steps with a number > 5 (steps 1-5 are skipped).
    These tests touch external resources: an InfluxDB server, a SQL session and
    ODS spreadsheet files under ``gc.data_root``.
    """
    # Shared GlobalContainer, created in test_steps().
    gc=None
    def step001(self):
        """
        Test general GlobalContainer-Functions
        """
        pass
        #s = engines.scaffold.addStock(self.gc, "IE00B6R52259")
        #s = engines.scaffold.addStock(self.gc, "LU0323577923")
        #engines.scaffold.getFondDistributions(self.gc, s)
        try:
            ts = datetime.datetime.strptime('2018-07-09 00:00:00', '%Y-%m-%d %H:%M:%S')
            engines.analysis.loadStock(self.gc, 'LU1681045370', ts)
            # NOTE(review): this sys.exit() aborts before the Influx round-trip
            # below ever runs; the remainder of the step is effectively dead code.
            sys.exit()
            # https://github.com/influxdata/influxdb-client-python#queries
            print (self.gc.influxClient)
            query_api = self.gc.influxClient.query_api()
            write_api = self.gc.influxClient.write_api(write_options=WriteOptions(batch_size=500,
                                                                                  flush_interval=10_000,
                                                                                  jitter_interval=2_000,
                                                                                  retry_interval=5_000,
                                                                                  max_retries=5,
                                                                                  max_retry_delay=30_000,
                                                                                  exponential_base=2))
            _now = datetime.datetime.now(UTC)
            _data_frame = pd.DataFrame(data=[["coyote_creek", random.uniform(1.5, 1.9)], ["coyote_creek", random.uniform(1.5, 1.9)]],
                                       index=[_now, _now - datetime.timedelta(hours = random.uniform(0.5, 0.9))],
                                       columns=["location", "water_level"])
            print(_data_frame.to_string())
            write_api.write("TestBucket", "SKO", record=_data_frame, data_frame_measurement_name='h2o_feet',
                            data_frame_tag_columns=['location'])
            write_api.close()
            data_frame = query_api.query_data_frame('from(bucket: "TestBucket") '
                                                    '|> range(start: -10m) '
                                                    '|> filter(fn: (r) => r["_measurement"] == "cpu") '
                                                    '|> filter(fn: (r) => r["_field"] == "usage_idle") '
                                                    '|> filter(fn: (r) => r["cpu"] == "cpu-total") '
                                                    '|> filter(fn: (r) => r["host"] == "h2934423.stratoserver.net") '
                                                    '|> yield(name: "mean")')
            print(data_frame.to_string())
        except Exception as e:
            # NOTE(review): unittest.TestCase defines no self.logger; reaching
            # this handler would raise AttributeError -- TODO confirm/fix.
            self.logger.exception('Crash!', exc_info=e)
            sys.exit(99)
    def step002(self):
        """
        Text XIRR
        """
        pass
        # Build the "XIRR" depot from transaction and target-distribution spreadsheets.
        df_trans = pd.read_excel(os.path.join(self.gc.data_root, "Transactions-XIRR.ods"), engine = "odf")
        df_trans['ISIN'] = df_trans['ISIN'].str.strip()
        df_trans['Depot'] = df_trans['Depot'].str.strip()
        df_distri = pd.read_excel(os.path.join(self.gc.data_root, "TargetDistribution.ods"), engine = "odf")
        df_distri['ISIN'] = df_distri['ISIN'].str.strip()
        df_distri['Depot'] = df_distri['Depot'].str.strip()
        df_full = engines.analysis.buildDepot(self.gc, df_trans, df_distri, "XIRR")
    def step003(self):
        """
        Test data quality
        """
        pass
        #self.gc.resetMySQLDatabases()
        #self.gc.resetInfluxDatabases()
        #s = Stock("DE000ETFL284")
        # Fetch the stock from the session, creating and enriching it if missing.
        s = self.gc.ses.query(Stock).filter(Stock.ISIN == "LU0360863863").first()
        if (s is None):
            s = Stock("LU0360863863")
            engines.scaffold.enrichStock(self.gc, s)
        engines.grabstocks.grabStock(self.gc, s)
        engines.grabstocks.checkQuality(self.gc, s)
    def step004(self):
        """
        Test CASH
        """
        pass
        # Same create-if-missing pattern for the synthetic CASH pseudo-stock.
        s = self.gc.ses.query(Stock).filter(Stock.ISIN == "CASH").first()
        if (s is None):
            s = Stock("CASH")
            engines.scaffold.enrichStock(self.gc, s)
        engines.grabstocks.grabStock(self.gc, s)
    def step999(self):
        """Final step: assert no errors were recorded, then dispose the engine."""
        self.assertEqual(self.gc.numErrors, 0, f"Error-Message: {self.gc.errMsg}")
        self.gc.eng.dispose()
    def step005(self):
        """
        Test Satellit
        """
        pass
        df_trans = pd.read_excel(os.path.join(self.gc.data_root, "Transactions.ods"), engine = "odf")
        df_trans['ISIN'] = df_trans['ISIN'].str.strip()
        df_trans['Depot'] = df_trans['Depot'].str.strip()
        df_distri = pd.read_excel(os.path.join(self.gc.data_root, "TargetDistribution.ods"), engine = "odf")
        df_distri['ISIN'] = df_distri['ISIN'].str.strip()
        df_distri['Depot'] = df_distri['Depot'].str.strip()
        #print(df_distri)
        #buildDepotStock(df_trans, df_distri, "Einzelwerte", "DE000UNSE018")
        #sys.exit()
        # Build the "Satellit" depot and write the result back out as ODS.
        myDepot="Satellit"
        df_full = engines.analysis.buildDepot(self.gc, df_trans, df_distri, myDepot)
        df_full.to_excel(os.path.join(self.gc.data_root, f"Depot-{myDepot}.ods"), engine = "odf")
    def step006(self):
        """
        Test Satellit
        """
        pass
        engines.analysis.calcABeckKPI(self.gc)
    def _steps(self):
        """Yield (name, number, bound method) for every step method, in sorted order."""
        for name in dir(self): # dir() result is implicitly sorted
            if name.startswith("step"):
                num = int(name[4:])
                yield name, num, getattr(self, name)
    def test_steps(self):
        """Run the step methods sequentially against a fresh GlobalContainer."""
        self.gc = glob.GlobalContainer("config-test.cfg", "TestRun")
        try:
            for name, num, step in self._steps():
                # Only steps numbered above 5 are currently enabled.
                if(num > 5):
                    step()
        except Exception as e:
            self.fail("{} failed ({}: {})".format(step, type(e), e))
        finally:
            print("####################### Disposing Engine #######################")
            self.gc.ses.commit()
            self.gc.ses.close()
            self.gc.eng.dispose()
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
import os
import csv
import datetime
import time
import pickle
import gib_detect_train
# Trained gibberish-detector model: a dict with a Markov transition matrix
# ('mat') and a decision threshold ('thresh').
model_data = pickle.load(open('gib_model.pki', 'rb'))
# NOTE(review): hard-coded Windows paths and file handles that are only closed
# manually further below -- consider `with open(...)` blocks.
csvFile = open("D:\\69M_reddit_accounts.csv")
vFile = open("D:\\AllSus.csv","w")
csvReader = csv.reader(csvFile)
interestingUsers = []
boringUsers = []
totalusers = {}
ratioUsers = {}
# Epoch bounds for 2015/2016; presumably intended as a date filter but unused
# below -- TODO confirm intent.
d = int(time.mktime(datetime.datetime(2015,1,1).timetuple()))
dE = int(time.mktime(datetime.datetime(2016,1,1).timetuple()))
# Row counter; only used to skip the CSV header row.
i = 0
def gibgib(username):
    """Return True when `username` looks like gibberish under the trained model."""
    mat = model_data['mat']
    limit = model_data['thresh']
    score = gib_detect_train.avg_transition_prob(username, mat)
    return score < limit
# Scan every account row: column 1 is the username, column 2 the creation
# timestamp. Gibberish names are written to the output file and collected;
# totalusers tallies ALL users per year+month key in both branches.
for row in csvReader:
    if i != 0:
        row[2] = datetime.datetime.utcfromtimestamp(int(row[2]))
        if gibgib(row[1]):
            vFile.write((row[1])+'\t'+(str(row[2]))+'\n')
            interestingUsers.append(row)
            # EAFP counting; NOTE(review): bare except hides real errors and the
            # month is not zero-padded, so keys like "20151" are ambiguous
            # (2015-01 vs 2015-11 would both start "20151...").
            try:
                totalusers[str(row[2].year)+str(row[2].month)] += 1
            except:
                totalusers[str(row[2].year)+str(row[2].month)] = 1
        else:
            try:
                totalusers[str(row[2].year)+str(row[2].month)] += 1
            except:
                totalusers[str(row[2].year)+str(row[2].month)] = 1
    else:
        # First row is the CSV header; skip it once.
        i+=1
vFile.close()
csvFile.close()
# Tally only the gibberish users per year+month key.
useryears = {}
for u in interestingUsers:
    try:
        useryears[str(u[2].year)+str(u[2].month)] += 1
    except:
        useryears[str(u[2].year)+str(u[2].month)] = 1
# Print per-month counts and the gibberish/total ratio.
for y in useryears:
    ratioUsers[y] = useryears[y]/float(totalusers[y])
    print(y[0:4]+"-"+y[4:20].zfill(2)+"\t"+str(useryears[y])+"\t"+str(totalusers[y])+"\t"+str(ratioUsers[y]))
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Run a python script, adding extra directories to the python path.
"""
def main(args):
    """Consume -I/-Idir options from args, then exec the remaining script.

    NOTE: this is Python 2 code (`print >>`, `execfile`); keep it that way.
    """
    def usage():
        # Print usage to stderr and exit with a distinctive status code.
        print >>sys.stderr, "pythonpath.py -I directory script.py [args...]"
        sys.exit(150)
    paths = []
    # Consume leading -I options; either "-I dir" or "-Idir" form.
    while True:
        try:
            arg = args[0]
        except IndexError:
            usage()
        if arg == '-I':
            args.pop(0)
            try:
                path = args.pop(0)
            except IndexError:
                usage()
            paths.append(os.path.abspath(path))
            continue
        if arg.startswith('-I'):
            paths.append(os.path.abspath(args.pop(0)[2:]))
            continue
        break
    # First non-option argument is the script; prepend its directory plus the
    # collected -I paths to sys.path, then make argv look like a direct run.
    script = args[0]
    sys.path[0:0] = [os.path.abspath(os.path.dirname(script))] + paths
    sys.argv = args
    sys.argc = len(args)
    # Run the script as __main__ inside the frozen module-level globals.
    frozenglobals['__name__'] = '__main__'
    frozenglobals['__file__'] = script
    execfile(script, frozenglobals)
# Freeze scope here ... why this makes things work I have no idea ...
# (captures module globals before the imports below so execfile() runs the
# target script against this module's global namespace).
frozenglobals = globals()
import sys, os
if __name__ == '__main__':
    main(sys.argv[1:])
|
#!/usr/bin/env python3
#python3 analyze_video.py {path/video_filename.mp4} {ID Folder Name}
# initalize
import sys
import argparse
import tensorflow as tf
import cv2
import dlib
import numpy as np
import detect_and_align
import os
from model import OpenNsfwModel
from image_utils import create_yahoo_image_loader
from wide_resnet import WideResNet
from sklearn.metrics.pairwise import pairwise_distances
from tensorflow.python.platform import gfile
class IdData:
    """Keeps track of known identities and calculates id matches."""

    def __init__(
        self, id_folder, mtcnn, sess, embeddings, images_placeholder, phase_train_placeholder, distance_treshold
    ):
        """Load every image under id_folder, detect the faces and embed them as anchors."""
        print("Loading known identities: ", end="")
        self.distance_treshold = distance_treshold
        self.id_folder = id_folder
        self.mtcnn = mtcnn
        self.id_names = []
        self.embeddings = None
        image_paths = []
        os.makedirs(id_folder, exist_ok=True)
        ids = os.listdir(os.path.expanduser(id_folder))
        # No identities registered yet: leave embeddings as None.
        if not ids:
            return
        # One sub-directory per identity; gather every image path inside each.
        for id_name in ids:
            id_dir = os.path.join(id_folder, id_name)
            image_paths = image_paths + [os.path.join(id_dir, img) for img in os.listdir(id_dir)]
        print("Found %d images in id folder" % len(image_paths))
        aligned_images, id_image_paths = self.detect_id_faces(image_paths)
        feed_dict = {images_placeholder: aligned_images, phase_train_placeholder: False}
        self.embeddings = sess.run(embeddings, feed_dict=feed_dict)
        # For tiny galleries, print the pairwise distances as a sanity check.
        if len(id_image_paths) < 5:
            self.print_distance_table(id_image_paths)

    def add_id(self, embedding, new_id, face_patch):
        """Register a new identity: store its embedding and save the face crop to disk."""
        if self.embeddings is None:
            self.embeddings = np.atleast_2d(embedding)
        else:
            self.embeddings = np.vstack([self.embeddings, embedding])
        self.id_names.append(new_id)
        id_folder = os.path.join(self.id_folder, new_id)
        os.makedirs(id_folder, exist_ok=True)
        # Next sequential numeric filename within this identity's folder.
        filenames = [s.split(".")[0] for s in os.listdir(id_folder)]
        numbered_filenames = [int(f) for f in filenames if f.isdigit()]
        img_number = max(numbered_filenames) + 1 if numbered_filenames else 0
        # BUGFIX: original read `f+{img_number}+".jpg"`, which is a set literal
        # added to an undefined name `f` and raised at runtime; an f-string
        # filename was clearly intended.
        cv2.imwrite(os.path.join(id_folder, f"{img_number}.jpg"), face_patch)

    def detect_id_faces(self, image_paths):
        """Detect and align faces in each id image; returns (stacked patches, matching paths)."""
        aligned_images = []
        id_image_paths = []
        for image_path in image_paths:
            image = cv2.imread(os.path.expanduser(image_path), cv2.IMREAD_COLOR)
            # OpenCV loads BGR; the embedding network expects RGB.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            face_patches, _, _ = detect_and_align.detect_faces(image, self.mtcnn)
            if len(face_patches) > 1:
                print(
                    "Warning: Found multiple faces in id image: %s" % image_path
                    + "\nMake sure to only have one face in the id images. "
                    + "If that's the case then it's a false positive detection and"
                    + " you can solve it by increasing the thresolds of the cascade network"
                )
            aligned_images = aligned_images + face_patches
            id_image_paths += [image_path] * len(face_patches)
            # The identity name is the containing directory's basename.
            path = os.path.dirname(image_path)
            self.id_names += [os.path.basename(path)] * len(face_patches)
        return np.stack(aligned_images), id_image_paths

    def print_distance_table(self, id_image_paths):
        """Prints distances between id embeddings"""
        distance_matrix = pairwise_distances(self.embeddings, self.embeddings)
        image_names = [path.split("/")[-1] for path in id_image_paths]
        print("Distance matrix:\n{:20}".format(""), end="")
        [print("{:20}".format(name), end="") for name in image_names]
        for path, distance_row in zip(image_names, distance_matrix):
            print("\n{:20}".format(path), end="")
            for distance in distance_row:
                print("{:20}".format("%0.3f" % distance), end="")
        print()

    def find_matching_ids(self, embs):
        """For each embedding, return (matched id or None, distance or None/inf)."""
        if self.id_names:
            matching_ids = []
            matching_distances = []
            distance_matrix = pairwise_distances(embs, self.embeddings)
            for distance_row in distance_matrix:
                min_index = np.argmin(distance_row)
                # Accept the nearest anchor only when it is within the threshold.
                if distance_row[min_index] < self.distance_treshold:
                    matching_ids.append(self.id_names[min_index])
                    matching_distances.append(distance_row[min_index])
                else:
                    matching_ids.append(None)
                    matching_distances.append(None)
        else:
            # No known identities at all: nothing can match.
            matching_ids = [None] * len(embs)
            matching_distances = [np.inf] * len(embs)
        return matching_ids, matching_distances
def load_model(model):
    """Load a frozen TensorFlow graph (.pb file) into the default graph.

    Raises ValueError when the path is not an existing file.
    """
    model_exp = os.path.expanduser(model)
    if not os.path.isfile(model_exp):
        raise ValueError("Specify model file, not directory!")
    print("Loading model filename: %s" % model_exp)
    with gfile.FastGFile(model_exp, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name="")
# draw labels on video for Age and Sex detection engine
def draw_label(image2, point, label, rectangle_height, font = cv2.FONT_HERSHEY_SIMPLEX, font_scale = 0.4, thickness = 1):
    """Draw `label` inside a filled blue box anchored at `point` on image2 (in place)."""
    size = cv2.getTextSize(label, font, font_scale + 0.025, thickness)[0]
    x, y = point
    # Rectangle sits too close to the top edge to fit text above it:
    # drop the label below the rectangle instead.
    if y < 21:
        y = y + rectangle_height + size[1]
    cv2.rectangle(image2, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)
    cv2.putText(image2, label, (x, y), font, font_scale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
def main(argv):
    """Process a video: flag NSFW frames, estimate age/gender of faces,
    identify known people, and print a summary.

    Args:
        argv: Unused — kept for the ``main(sys.argv)`` call convention;
            argparse reads ``sys.argv`` itself.
    """
    # parse inputs
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file", help="Path to the input video.")
    parser.add_argument("id_folder", type=str, nargs="+", help="The folder that contains all the pictures used to identify people.")
    args = parser.parse_args()
    # initialize NSFW Model
    model = OpenNsfwModel()
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # set variable defaults
            videoFile = args.input_file
            cap = cv2.VideoCapture(videoFile)
            frameRate = cap.get(5) # get the frame rate
            totalFrameCount = cap.get(7) # get the total number of frames
            img_size = 64
            margin = 0.4
            frameNsfw=0
            isMinor = False
            minorDetected = False
            # set weights and initialize SFW model IsSFW
            with tf.variable_scope('IsSFW'):
                model.build(weights_path="pretrained_models/open_nsfw-weights.npy")
            fn_load_image = None
            fn_load_image = create_yahoo_image_loader()
            sess.run(tf.global_variables_initializer())
            # initialize dlib face detector model and set variables
            detector = dlib.get_frontal_face_detector()
            model2 = WideResNet(img_size, 16, 8)()
            model2.load_weights("pretrained_models/weights.29-3.76_utk.hdf5")
            # initialize face identification model
            mtcnn = detect_and_align.create_mtcnn(sess, None)
            load_model("model/20170512-110547.pb")
            threshold = 1.0
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # Load anchor IDs for face identification model
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings, images_placeholder, phase_train_placeholder, threshold)
            while(cap.isOpened()):
                ret, frame = cap.read()
                frameId = cap.get(1) # get the current frame number
                if (ret != True): # if there is no video frame detected then exit
                    break
                # write video frame to disk and load as an image
                # (both the original and a 180-degree-rotated copy are scored,
                # presumably to catch upside-down footage — TODO confirm).
                frame2 = frame
                frame2 = cv2.rotate(frame2, cv2.ROTATE_180)
                cv2.imwrite('./temp_files/temp.jpg', frame)
                image = fn_load_image('./temp_files/temp.jpg')
                cv2.imwrite('./temp_files/temp_rotated.jpg', frame2)
                image_rotated = fn_load_image('./temp_files/temp_rotated.jpg')
                # determine SFW status: frame is NSFW if either orientation scores >= 0.50
                predictions = sess.run(model.predictions, feed_dict={model.input: image})
                predictions2 = sess.run(model.predictions, feed_dict={model.input: image_rotated})
                if (predictions[0][1]>=0.50) or (predictions2[0][1]>=0.50) :
                    frameNsfw= frameNsfw+1
                    display_lbl = "NSFW"
                    AlertColor = [0, 0, 255]
                else:
                    display_lbl = "SFW"
                    AlertColor = [255, 0, 0]
                # detect faces in dlib face detection model in standard rotation
                # was_found: 0 = no face, 1 = found in normal rotation, 2 = found rotated
                was_found = 0
                image2 = frame
                image2_h, image2_w, _ = np.shape(image2)
                detected = detector(image2, 0)
                faces = np.empty((len(detected), img_size, img_size, 3))
                if len(detected) > 0: # one or more faces were found in the frame
                    was_found = 1
                    for i, d in enumerate(detected):
                        # extract the coordinates of the face
                        x1, y1, x2, y2, w, h = d.left(), d.top(), d.right()+1, d.bottom()+1, d.width(), d.height()
                        # widen the crop by `margin`, clamped to the frame borders
                        xw1 = max(int(x1 - margin * w), 0)
                        yw1 = max(int(y1 - margin * h), 0)
                        xw2 = min(int(x2 + margin * w), image2_w - 1)
                        yw2 = min(int(y2 + margin * h), image2_h - 1)
                        # draw a rectangle around the face
                        cv2.rectangle(image2, (x1, y1), (x2, y2), (255, 0, 0), 2)
                        faces[i, :, :, :] = cv2.resize(image2[yw1:yw2+ 1, xw1:xw2 + 1, :], (img_size, img_size))
                        # determine the height of the rectangle in case is near top of frame
                        rectangle_height = y2 - y1
                # detect faces in dlib face detection model in rotation only if not found in standard rotation
                if was_found != 1 :
                    image2 = frame2
                    image2_h, image2_w, _ = np.shape(image2)
                    detected2 = detector(image2, 0)
                    faces = np.empty((len(detected2), img_size, img_size, 3))
                # NOTE(review): the `was_found !=1` re-check also short-circuits past
                # `detected2` when the rotated pass above did not run.
                if was_found !=1 and len(detected2) > 0: # one or more faces were found in the frame
                    was_found = 2
                    detected = detected2
                    for i, d in enumerate(detected):
                        # extract the coordinates of the face
                        x1, y1, x2, y2, w, h = d.left(), d.top(), d.right()+1, d.bottom()+1, d.width(), d.height()
                        xw1 = max(int(x1 - margin * w), 0)
                        yw1 = max(int(y1 - margin * h), 0)
                        xw2 = min(int(x2 + margin * w), image2_w - 1)
                        yw2 = min(int(y2 + margin * h), image2_h - 1)
                        # draw a rectangle around the face
                        cv2.rectangle(image2, (x1, y1), (x2, y2), (255, 0, 0), 2)
                        faces[i, :, :, :] = cv2.resize(image2[yw1:yw2+ 1, xw1:xw2 + 1, :], (img_size, img_size))
                        # determine the height of the rectangle in case is near top of frame
                        rectangle_height = y2 - y1
                if was_found == 1 or was_found == 2: # predict ages and genders of faces using dlib model
                    results = model2.predict(faces)
                    predicted_genders = results[0]
                    # expected age = sum over the 101 age-bin probabilities weighted by age
                    ages = np.arange(0, 101).reshape(101, 1)
                    predicted_ages = results[1].dot(ages).flatten()
                    # draw predictions by faces using dlib model
                    for i, d in enumerate(detected):
                        isMinor = False
                        # NOTE(review): int() wraps the *comparison* (a bool), so this is
                        # effectively `predicted_ages[i] < 18`; the parenthesization is
                        # misleading but the truthiness is the same.
                        if(int(predicted_ages[i]<18)): # detect if a minor is present in the video
                            isMinor = True
                            minorDetected = True
                        label = "{},{},{}".format(int(predicted_ages[i]), "M" if predicted_genders[i][0] < 0.5 else"F", "-MINOR" if isMinor else "")
                        draw_label(image2, (d.left(), d.top()), label, rectangle_height)
                # Locate faces and landmarks in frame for identification
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(frame, mtcnn)
                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                    embs = sess.run(embeddings, feed_dict=feed_dict)
                    matching_ids, matching_distances = id_data.find_matching_ids(embs)
                    # NOTE(review): id text is drawn on `frame`; when was_found == 2 the
                    # displayed image is derived from `frame2`, so the text is not shown.
                    for bb, landmark, matching_id, dist in zip(padded_bounding_boxes, landmarks, matching_ids, matching_distances):
                        font = cv2.FONT_HERSHEY_COMPLEX_SMALL
                        cv2.putText(frame, matching_id, (bb[0]+30, bb[3] + 5), font, 1, (255, 0, 255), 1, cv2.LINE_AA)
                # display whether frame is SFW or not
                # (rotate back so the rotated-detection overlay appears upright)
                if was_found == 0 or was_found ==2 :
                    image2 = cv2.rotate(image2, cv2.ROTATE_180)
                percentageComplete = round((frameId) / (totalFrameCount) * 100)
                display_lbl = display_lbl + " " + str(percentageComplete) + "% fps= " + str(round(frameRate, 2))
                size = cv2.getTextSize(display_lbl, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)[0]
                cv2.rectangle(image2, (1, 15 - size[1]), (1 + size[0], 20), AlertColor, cv2.FILLED)
                cv2.putText(image2, display_lbl, (1, 19), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, lineType = cv2.LINE_AA)
                # display the frame as processed as quickly as possible
                cv2.imshow('Video Frame', image2)
                cv2.waitKey(1)
            # end of video: release resources and remove the scratch frame file.
            # NOTE(review): temp_files/temp_rotated.jpg is written every frame but
            # never removed here.
            cap.release()
            cv2.destroyAllWindows()
            if os.path.isfile('temp_files/temp.jpg'):
                os.remove("temp_files/temp.jpg")
            # print summary
            if totalFrameCount > 0:
                if(frameNsfw>0):
                    if(minorDetected):
                        print("This video contains minors, and " + str(round((frameNsfw / totalFrameCount * 100), 1)) + "% of the video contains NSFW elements.")
                    else:
                        print(str(round((frameNsfw / totalFrameCount * 100), 1)) + "% of the video contains NSFW elements.")
                else:
                    print("Video is SFW.")
            else:
                print("No video frames were detected! Please check the file type or file name.")
# Script entry point: argv is forwarded for convention but main() parses
# sys.argv itself via argparse.
if __name__ == "__main__":
    main(sys.argv)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.